2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011 Chelsio Communications, Inc.
6 * Written by: Navdeep Parhar <np@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include "opt_inet6.h"
36 #include "opt_ratelimit.h"
39 #include <sys/param.h>
42 #include <sys/kernel.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
47 #include <sys/taskqueue.h>
48 #include <sys/pciio.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pci_private.h>
52 #include <sys/firmware.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <net/ethernet.h>
60 #include <net/if_types.h>
61 #include <net/if_dl.h>
62 #include <net/if_vlan_var.h>
64 #include <net/rss_config.h>
66 #if defined(__i386__) || defined(__amd64__)
67 #include <machine/md_var.h>
68 #include <machine/cputypes.h>
72 #include <crypto/rijndael/rijndael.h>
75 #include <ddb/db_lex.h>
78 #include "common/common.h"
79 #include "common/t4_msg.h"
80 #include "common/t4_regs.h"
81 #include "common/t4_regs_values.h"
82 #include "cudbg/cudbg.h"
85 #include "t4_mp_ring.h"
89 /* T4 bus driver interface */
90 static int t4_probe(device_t);
91 static int t4_attach(device_t);
92 static int t4_detach(device_t);
93 static int t4_ready(device_t);
94 static int t4_read_port_device(device_t, int, device_t *);
95 static device_method_t t4_methods[] = {
96 DEVMETHOD(device_probe, t4_probe),
97 DEVMETHOD(device_attach, t4_attach),
98 DEVMETHOD(device_detach, t4_detach),
100 DEVMETHOD(t4_is_main_ready, t4_ready),
101 DEVMETHOD(t4_read_port_device, t4_read_port_device),
105 static driver_t t4_driver = {
108 sizeof(struct adapter)
112 /* T4 port (cxgbe) interface */
113 static int cxgbe_probe(device_t);
114 static int cxgbe_attach(device_t);
115 static int cxgbe_detach(device_t);
116 device_method_t cxgbe_methods[] = {
117 DEVMETHOD(device_probe, cxgbe_probe),
118 DEVMETHOD(device_attach, cxgbe_attach),
119 DEVMETHOD(device_detach, cxgbe_detach),
122 static driver_t cxgbe_driver = {
125 sizeof(struct port_info)
128 /* T4 VI (vcxgbe) interface */
129 static int vcxgbe_probe(device_t);
130 static int vcxgbe_attach(device_t);
131 static int vcxgbe_detach(device_t);
132 static device_method_t vcxgbe_methods[] = {
133 DEVMETHOD(device_probe, vcxgbe_probe),
134 DEVMETHOD(device_attach, vcxgbe_attach),
135 DEVMETHOD(device_detach, vcxgbe_detach),
138 static driver_t vcxgbe_driver = {
141 sizeof(struct vi_info)
144 static d_ioctl_t t4_ioctl;
146 static struct cdevsw t4_cdevsw = {
147 .d_version = D_VERSION,
152 /* T5 bus driver interface */
153 static int t5_probe(device_t);
154 static device_method_t t5_methods[] = {
155 DEVMETHOD(device_probe, t5_probe),
156 DEVMETHOD(device_attach, t4_attach),
157 DEVMETHOD(device_detach, t4_detach),
159 DEVMETHOD(t4_is_main_ready, t4_ready),
160 DEVMETHOD(t4_read_port_device, t4_read_port_device),
164 static driver_t t5_driver = {
167 sizeof(struct adapter)
171 /* T5 port (cxl) interface */
172 static driver_t cxl_driver = {
175 sizeof(struct port_info)
178 /* T5 VI (vcxl) interface */
179 static driver_t vcxl_driver = {
182 sizeof(struct vi_info)
185 /* T6 bus driver interface */
186 static int t6_probe(device_t);
187 static device_method_t t6_methods[] = {
188 DEVMETHOD(device_probe, t6_probe),
189 DEVMETHOD(device_attach, t4_attach),
190 DEVMETHOD(device_detach, t4_detach),
192 DEVMETHOD(t4_is_main_ready, t4_ready),
193 DEVMETHOD(t4_read_port_device, t4_read_port_device),
197 static driver_t t6_driver = {
200 sizeof(struct adapter)
204 /* T6 port (cc) interface */
205 static driver_t cc_driver = {
208 sizeof(struct port_info)
211 /* T6 VI (vcc) interface */
212 static driver_t vcc_driver = {
215 sizeof(struct vi_info)
218 /* ifnet + media interface */
219 static void cxgbe_init(void *);
220 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
221 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
222 static void cxgbe_qflush(struct ifnet *);
223 static int cxgbe_media_change(struct ifnet *);
224 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
226 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
229 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
230 * then ADAPTER_LOCK, then t4_uld_list_lock.
232 static struct sx t4_list_lock;
233 SLIST_HEAD(, adapter) t4_list;
235 static struct sx t4_uld_list_lock;
236 SLIST_HEAD(, uld_info) t4_uld_list;
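/*
 * Illustrative nesting when all three locks are needed at once (most code
 * paths take only one of them); this is a sketch of the ordering described
 * above, not a snippet from an actual code path:
 *
 *	sx_xlock(&t4_list_lock);
 *	ADAPTER_LOCK(sc);
 *	sx_xlock(&t4_uld_list_lock);
 *	...
 *	sx_xunlock(&t4_uld_list_lock);
 *	ADAPTER_UNLOCK(sc);
 *	sx_xunlock(&t4_list_lock);
 */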
240 * Tunables. See tweak_tunables() too.
242 * Each tunable is set to a default value here if it's known at compile-time.
243 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
244 * provide a reasonable default (up to n) when the driver is loaded.
246 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
247 * T5 are under hw.cxl.
251 * Number of queues for tx and rx, NIC and offload.
255 TUNABLE_INT("hw.cxgbe.ntxq", &t4_ntxq);
256 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */
260 TUNABLE_INT("hw.cxgbe.nrxq", &t4_nrxq);
261 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */
264 static int t4_ntxq_vi = -NTXQ_VI;
265 TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
268 static int t4_nrxq_vi = -NRXQ_VI;
269 TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
271 static int t4_rsrv_noflowq = 0;
272 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
274 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
276 static int t4_nofldtxq = -NOFLDTXQ;
277 TUNABLE_INT("hw.cxgbe.nofldtxq", &t4_nofldtxq);
280 static int t4_nofldrxq = -NOFLDRXQ;
281 TUNABLE_INT("hw.cxgbe.nofldrxq", &t4_nofldrxq);
283 #define NOFLDTXQ_VI 1
284 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
285 TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
287 #define NOFLDRXQ_VI 1
288 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
289 TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
291 #define TMR_IDX_OFLD 1
292 int t4_tmr_idx_ofld = TMR_IDX_OFLD;
293 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_ofld", &t4_tmr_idx_ofld);
295 #define PKTC_IDX_OFLD (-1)
296 int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
297 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_ofld", &t4_pktc_idx_ofld);
299 /* 0 means chip/fw default, non-zero number is value in microseconds */
300 static u_long t4_toe_keepalive_idle = 0;
301 TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);
303 /* 0 means chip/fw default, non-zero number is value in microseconds */
304 static u_long t4_toe_keepalive_interval = 0;
305 TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);
307 /* 0 means chip/fw default, non-zero number is # of keepalives before abort */
308 static int t4_toe_keepalive_count = 0;
309 TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);
311 /* 0 means chip/fw default, non-zero number is value in microseconds */
312 static u_long t4_toe_rexmt_min = 0;
313 TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);
315 /* 0 means chip/fw default, non-zero number is value in microseconds */
316 static u_long t4_toe_rexmt_max = 0;
317 TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);
319 /* 0 means chip/fw default, non-zero number is # of rexmt before abort */
320 static int t4_toe_rexmt_count = 0;
321 TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);
323 /* -1 means chip/fw default, other values are raw backoff values to use */
324 static int t4_toe_rexmt_backoff[16] = {
325 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
327 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
328 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
329 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
330 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
331 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
332 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
333 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
334 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
335 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
336 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
337 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
338 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
339 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
340 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
341 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
342 TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
347 static int t4_nnmtxq_vi = -NNMTXQ_VI;
348 TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
351 static int t4_nnmrxq_vi = -NNMRXQ_VI;
352 TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
356 * Holdoff parameters for ports.
359 int t4_tmr_idx = TMR_IDX;
360 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx", &t4_tmr_idx);
361 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */
363 #define PKTC_IDX (-1)
364 int t4_pktc_idx = PKTC_IDX;
365 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx", &t4_pktc_idx);
366 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */
369 * Size (# of entries) of each tx and rx queue.
371 unsigned int t4_qsize_txq = TX_EQ_QSIZE;
372 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
374 unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
375 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
378 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
380 int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
381 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
384 * Configuration file. All the _CF names here are special.
386 #define DEFAULT_CF "default"
387 #define BUILTIN_CF "built-in"
388 #define FLASH_CF "flash"
389 #define UWIRE_CF "uwire"
390 #define FPGA_CF "fpga"
391 static char t4_cfg_file[32] = DEFAULT_CF;
392 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
395 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
396 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
397 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
398 * mark or when signalled to do so, 0 to never emit PAUSE.
400 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
401 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
404 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
405 * FEC_RESERVED respectively).
406 * -1 to run with the firmware default.
409 static int t4_fec = -1;
410 TUNABLE_INT("hw.cxgbe.fec", &t4_fec);
413 * Link autonegotiation.
414 * -1 to run with the firmware default.
418 static int t4_autoneg = -1;
419 TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);
422 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
423 * encouraged respectively).
425 static unsigned int t4_fw_install = 1;
426 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
429 * ASIC features that will be used. Disable the ones you don't want so that the
430 * chip resources aren't wasted on features that will not be used.
432 static int t4_nbmcaps_allowed = 0;
433 TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);
435 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
436 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
438 static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
439 FW_CAPS_CONFIG_SWITCH_EGRESS;
440 TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);
442 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
443 FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
444 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
446 static int t4_toecaps_allowed = -1;
447 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
449 static int t4_rdmacaps_allowed = -1;
450 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
452 static int t4_cryptocaps_allowed = -1;
453 TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);
455 static int t4_iscsicaps_allowed = -1;
456 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
458 static int t4_fcoecaps_allowed = 0;
459 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
461 static int t5_write_combine = 0;
462 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
464 static int t4_num_vis = 1;
465 TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
467 * PCIe Relaxed Ordering.
468 * -1: driver should figure out a good value.
473 static int pcie_relaxed_ordering = -1;
474 TUNABLE_INT("hw.cxgbe.pcie_relaxed_ordering", &pcie_relaxed_ordering);
476 static int t4_panic_on_fatal_err = 0;
477 TUNABLE_INT("hw.cxgbe.panic_on_fatal_err", &t4_panic_on_fatal_err);
483 static int t4_cop_managed_offloading = 0;
484 TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading);
487 /* Functions used by VIs to obtain unique MAC addresses for each VI. */
488 static int vi_mac_funcs[] = {
492 FW_VI_FUNC_OPENISCSI,
498 struct intrs_and_queues {
499 uint16_t intr_type; /* INTx, MSI, or MSI-X */
500 uint16_t num_vis; /* number of VIs for each port */
501 uint16_t nirq; /* Total # of vectors */
502 uint16_t ntxq; /* # of NIC txq's for each port */
503 uint16_t nrxq; /* # of NIC rxq's for each port */
504 uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */
505 uint16_t nofldrxq; /* # of TOE rxq's for each port */
507 /* The vcxgbe/vcxl interfaces use these and not the ones above. */
508 uint16_t ntxq_vi; /* # of NIC txq's */
509 uint16_t nrxq_vi; /* # of NIC rxq's */
510 uint16_t nofldtxq_vi; /* # of TOE txq's */
511 uint16_t nofldrxq_vi; /* # of TOE rxq's */
512 uint16_t nnmtxq_vi; /* # of netmap txq's */
513 uint16_t nnmrxq_vi; /* # of netmap rxq's */
516 static void setup_memwin(struct adapter *);
517 static void position_memwin(struct adapter *, int, uint32_t);
518 static int validate_mem_range(struct adapter *, uint32_t, int);
519 static int fwmtype_to_hwmtype(int);
520 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
522 static int fixup_devlog_params(struct adapter *);
523 static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
524 static int prep_firmware(struct adapter *);
525 static int partition_resources(struct adapter *, const struct firmware *,
527 static int get_params__pre_init(struct adapter *);
528 static int get_params__post_init(struct adapter *);
529 static int set_params__post_init(struct adapter *);
530 static void t4_set_desc(struct adapter *);
531 static void build_medialist(struct port_info *, struct ifmedia *);
532 static void init_l1cfg(struct port_info *);
533 static int apply_l1cfg(struct port_info *);
534 static int cxgbe_init_synchronized(struct vi_info *);
535 static int cxgbe_uninit_synchronized(struct vi_info *);
536 static void quiesce_txq(struct adapter *, struct sge_txq *);
537 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
538 static void quiesce_iq(struct adapter *, struct sge_iq *);
539 static void quiesce_fl(struct adapter *, struct sge_fl *);
540 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
541 driver_intr_t *, void *, char *);
542 static int t4_free_irq(struct adapter *, struct irq *);
543 static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
544 static void vi_refresh_stats(struct adapter *, struct vi_info *);
545 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
546 static void cxgbe_tick(void *);
547 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
548 static void cxgbe_sysctls(struct port_info *);
549 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
550 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
551 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
552 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
553 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
554 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
555 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
556 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
557 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
558 static int sysctl_fec(SYSCTL_HANDLER_ARGS);
559 static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
560 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
561 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
562 static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
563 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
564 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
565 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
566 static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
567 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
568 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
569 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
570 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
571 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
572 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
573 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
574 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
575 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
576 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
577 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
578 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
579 static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
580 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
581 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
582 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
583 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
584 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
585 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
586 static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
587 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
588 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
589 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
590 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
591 static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
592 static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
594 static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
595 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
596 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
597 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
598 static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
599 static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
600 static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
601 static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
603 static int get_sge_context(struct adapter *, struct t4_sge_context *);
604 static int load_fw(struct adapter *, struct t4_data *);
605 static int load_cfg(struct adapter *, struct t4_data *);
606 static int load_boot(struct adapter *, struct t4_bootrom *);
607 static int load_bootcfg(struct adapter *, struct t4_data *);
608 static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
609 static void free_offload_policy(struct t4_offload_policy *);
610 static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
611 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
612 static int read_i2c(struct adapter *, struct t4_i2c_data *);
614 static int toe_capability(struct vi_info *, int);
616 static int mod_event(module_t, int, void *);
617 static int notify_siblings(device_t, int);
623 {0xa000, "Chelsio Terminator 4 FPGA"},
624 {0x4400, "Chelsio T440-dbg"},
625 {0x4401, "Chelsio T420-CR"},
626 {0x4402, "Chelsio T422-CR"},
627 {0x4403, "Chelsio T440-CR"},
628 {0x4404, "Chelsio T420-BCH"},
629 {0x4405, "Chelsio T440-BCH"},
630 {0x4406, "Chelsio T440-CH"},
631 {0x4407, "Chelsio T420-SO"},
632 {0x4408, "Chelsio T420-CX"},
633 {0x4409, "Chelsio T420-BT"},
634 {0x440a, "Chelsio T404-BT"},
635 {0x440e, "Chelsio T440-LP-CR"},
637 {0xb000, "Chelsio Terminator 5 FPGA"},
638 {0x5400, "Chelsio T580-dbg"},
639 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
640 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
641 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
642 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
643 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
644 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
645 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
646 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
647 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
648 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
649 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
650 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
651 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
652 {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */
653 {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */
654 {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */
655 {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */
657 {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
658 {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */
659 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
660 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
661 {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
662 {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
663 {0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */
664 {0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */
665 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
666 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
667 {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
668 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
669 {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
670 {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
671 {0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */
672 {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
675 {0x6480, "Custom T6225-CR"},
676 {0x6481, "Custom T62100-CR"},
677 {0x6482, "Custom T6225-CR"},
678 {0x6483, "Custom T62100-CR"},
679 {0x6484, "Custom T64100-CR"},
680 {0x6485, "Custom T6240-SO"},
681 {0x6486, "Custom T6225-SO-CR"},
682 {0x6487, "Custom T6225-CR"},
687 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be
688 * exactly the same for both rxq and ofld_rxq.
690 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
691 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
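/*
 * Illustration of why the layouts must match (hypothetical snippet, not code
 * from service_iq() itself): given only an iq pointer, the freelist can be
 * recovered with the same offset math for both queue types, e.g.
 *
 *	fl = (struct sge_fl *)((char *)iq - offsetof(struct sge_rxq, iq) +
 *	    offsetof(struct sge_rxq, fl));
 *
 * which is valid for an ofld_rxq's iq only because of the CTASSERTs above.
 */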
693 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
696 t4_probe(device_t dev)
699 uint16_t v = pci_get_vendor(dev);
700 uint16_t d = pci_get_device(dev);
701 uint8_t f = pci_get_function(dev);
703 if (v != PCI_VENDOR_ID_CHELSIO)
706 /* Attach only to PF0 of the FPGA */
707 if (d == 0xa000 && f != 0)
710 for (i = 0; i < nitems(t4_pciids); i++) {
711 if (d == t4_pciids[i].device) {
712 device_set_desc(dev, t4_pciids[i].desc);
713 return (BUS_PROBE_DEFAULT);
721 t5_probe(device_t dev)
724 uint16_t v = pci_get_vendor(dev);
725 uint16_t d = pci_get_device(dev);
726 uint8_t f = pci_get_function(dev);
728 if (v != PCI_VENDOR_ID_CHELSIO)
731 /* Attach only to PF0 of the FPGA */
732 if (d == 0xb000 && f != 0)
735 for (i = 0; i < nitems(t5_pciids); i++) {
736 if (d == t5_pciids[i].device) {
737 device_set_desc(dev, t5_pciids[i].desc);
738 return (BUS_PROBE_DEFAULT);
746 t6_probe(device_t dev)
749 uint16_t v = pci_get_vendor(dev);
750 uint16_t d = pci_get_device(dev);
752 if (v != PCI_VENDOR_ID_CHELSIO)
755 for (i = 0; i < nitems(t6_pciids); i++) {
756 if (d == t6_pciids[i].device) {
757 device_set_desc(dev, t6_pciids[i].desc);
758 return (BUS_PROBE_DEFAULT);
766 t5_attribute_workaround(device_t dev)
772 * The T5 chips do not properly echo the No Snoop and Relaxed
773 * Ordering attributes when replying to a TLP from a Root
774 * Port. As a workaround, find the parent Root Port and
775 * disable No Snoop and Relaxed Ordering. Note that this
776 * affects all devices under this root port.
778 root_port = pci_find_pcie_root_port(dev);
779 if (root_port == NULL) {
780 device_printf(dev, "Unable to find parent root port\n");
784 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
785 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
786 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
788 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
789 device_get_nameunit(root_port));
792 static const struct devnames devnames[] = {
794 .nexus_name = "t4nex",
795 .ifnet_name = "cxgbe",
796 .vi_ifnet_name = "vcxgbe",
797 .pf03_drv_name = "t4iov",
798 .vf_nexus_name = "t4vf",
799 .vf_ifnet_name = "cxgbev"
801 .nexus_name = "t5nex",
803 .vi_ifnet_name = "vcxl",
804 .pf03_drv_name = "t5iov",
805 .vf_nexus_name = "t5vf",
806 .vf_ifnet_name = "cxlv"
808 .nexus_name = "t6nex",
810 .vi_ifnet_name = "vcc",
811 .pf03_drv_name = "t6iov",
812 .vf_nexus_name = "t6vf",
813 .vf_ifnet_name = "ccv"
818 t4_init_devnames(struct adapter *sc)
823 if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
824 sc->names = &devnames[id - CHELSIO_T4];
826 device_printf(sc->dev, "chip id %d is not supported.\n", id);
832 t4_attach(device_t dev)
835 int rc = 0, i, j, rqidx, tqidx, nports;
836 struct make_dev_args mda;
837 struct intrs_and_queues iaq;
840 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
847 int nm_rqidx, nm_tqidx;
851 sc = device_get_softc(dev);
853 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
855 if ((pci_get_device(dev) & 0xff00) == 0x5400)
856 t5_attribute_workaround(dev);
857 pci_enable_busmaster(dev);
858 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
861 pci_set_max_read_req(dev, 4096);
862 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
863 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
864 if (pcie_relaxed_ordering == 0 &&
865 (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
866 v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
867 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
868 } else if (pcie_relaxed_ordering == 1 &&
869 (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
870 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
871 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
875 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
876 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
878 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
879 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
880 device_get_nameunit(dev));
882 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
883 device_get_nameunit(dev));
884 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
887 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
888 TAILQ_INIT(&sc->sfl);
889 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
891 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
894 rw_init(&sc->policy_lock, "connection offload policy");
896 rc = t4_map_bars_0_and_4(sc);
898 goto done; /* error message displayed already */
900 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
902 /* Prepare the adapter for operation. */
903 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
904 rc = -t4_prep_adapter(sc, buf);
907 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
912 * This is the real PF# to which we're attaching. Works from within PCI
913 * passthrough environments too, where pci_get_function() could return a
914 * different PF# depending on the passthrough configuration. We need to
915 * use the real PF# in all our communication with the firmware.
917 j = t4_read_reg(sc, A_PL_WHOAMI);
918 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
921 t4_init_devnames(sc);
922 if (sc->names == NULL) {
924 goto done; /* error message displayed already */
928 * Do this really early, with the memory windows set up even before the
929 * character device. The userland tool's register i/o and mem read
930 * will work even in "recovery mode".
933 if (t4_init_devlog_params(sc, 0) == 0)
934 fixup_devlog_params(sc);
935 make_dev_args_init(&mda);
936 mda.mda_devsw = &t4_cdevsw;
937 mda.mda_uid = UID_ROOT;
938 mda.mda_gid = GID_WHEEL;
940 mda.mda_si_drv1 = sc;
941 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
943 device_printf(dev, "failed to create nexus char device: %d.\n",
946 /* Go no further if recovery mode has been requested. */
947 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
948 device_printf(dev, "recovery mode.\n");
952 #if defined(__i386__)
953 if ((cpu_feature & CPUID_CX8) == 0) {
954 device_printf(dev, "64 bit atomics not available.\n");
960 /* Prepare the firmware for operation */
961 rc = prep_firmware(sc);
963 goto done; /* error message displayed already */
965 rc = get_params__post_init(sc);
967 goto done; /* error message displayed already */
969 rc = set_params__post_init(sc);
971 goto done; /* error message displayed already */
973 rc = t4_map_bar_2(sc);
975 goto done; /* error message displayed already */
977 rc = t4_create_dma_tag(sc);
979 goto done; /* error message displayed already */
982 * First pass over all the ports - allocate VIs and initialize some
983 * basic parameters like mac address, port type, etc.
985 for_each_port(sc, i) {
986 struct port_info *pi;
988 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
991 /* These must be set before t4_port_init */
995 * XXX: vi[0] is special so we can't delay this allocation until
996 * pi->nvi's final value is known.
998 pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
1002 * Allocate the "main" VI and initialize parameters
1005 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
1007 device_printf(dev, "unable to initialize port %d: %d\n",
1009 free(pi->vi, M_CXGBE);
1015 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
1016 device_get_nameunit(dev), i);
1017 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
1018 sc->chan_map[pi->tx_chan] = i;
1020 /* All VIs on this port share this media. */
1021 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
1022 cxgbe_media_status);
1024 pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
1025 if (pi->dev == NULL) {
1027 "failed to add device for port %d.\n", i);
1031 pi->vi[0].dev = pi->dev;
1032 device_set_softc(pi->dev, pi);
1036 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
1038 nports = sc->params.nports;
1039 rc = cfg_itype_and_nqueues(sc, &iaq);
1041 goto done; /* error message displayed already */
1043 num_vis = iaq.num_vis;
1044 sc->intr_type = iaq.intr_type;
1045 sc->intr_count = iaq.nirq;
1048 s->nrxq = nports * iaq.nrxq;
1049 s->ntxq = nports * iaq.ntxq;
1051 s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
1052 s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
1054 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
1055 s->neq += nports + 1; /* ctrl queues: 1 per port + 1 mgmt */
1056 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
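/*
 * Worked example of the accounting so far (hypothetical 2-port adapter with
 * iaq.ntxq = iaq.nrxq = 8 and num_vis = 1): nrxq = ntxq = 16, so
 * neq = 16 + 16 + 2 + 1 = 35 and niq = 16 + 1 = 17, before the offload and
 * netmap queues below are added.
 */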
1057 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1058 if (is_offload(sc) || is_ethoffload(sc)) {
1059 s->nofldtxq = nports * iaq.nofldtxq;
1061 s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
1062 s->neq += s->nofldtxq;
1064 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
1065 M_CXGBE, M_ZERO | M_WAITOK);
1069 if (is_offload(sc)) {
1070 s->nofldrxq = nports * iaq.nofldrxq;
1072 s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
1073 s->neq += s->nofldrxq; /* free list */
1074 s->niq += s->nofldrxq;
1076 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1077 M_CXGBE, M_ZERO | M_WAITOK);
1082 s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi;
1083 s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi;
1085 s->neq += s->nnmtxq + s->nnmrxq;
1086 s->niq += s->nnmrxq;
1088 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1089 M_CXGBE, M_ZERO | M_WAITOK);
1090 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1091 M_CXGBE, M_ZERO | M_WAITOK);
1094 s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
1096 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1098 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1100 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
1102 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
1105 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1108 t4_init_l2t(sc, M_WAITOK);
1109 t4_init_smt(sc, M_WAITOK);
1110 t4_init_tx_sched(sc);
1112 t4_init_etid_table(sc);
1116 * Second pass over the ports. This time we know the number of rx and
1117 * tx queues that each port should get.
1120 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1127 nm_rqidx = nm_tqidx = 0;
1129 for_each_port(sc, i) {
1130 struct port_info *pi = sc->port[i];
1137 for_each_vi(pi, j, vi) {
1139 vi->qsize_rxq = t4_qsize_rxq;
1140 vi->qsize_txq = t4_qsize_txq;
1142 vi->first_rxq = rqidx;
1143 vi->first_txq = tqidx;
1144 vi->tmr_idx = t4_tmr_idx;
1145 vi->pktc_idx = t4_pktc_idx;
1146 vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
1147 vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
1152 if (j == 0 && vi->ntxq > 1)
1153 vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
1155 vi->rsrv_noflowq = 0;
1157 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1158 vi->first_ofld_txq = ofld_tqidx;
1159 vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
1160 ofld_tqidx += vi->nofldtxq;
1163 vi->ofld_tmr_idx = t4_tmr_idx_ofld;
1164 vi->ofld_pktc_idx = t4_pktc_idx_ofld;
1165 vi->first_ofld_rxq = ofld_rqidx;
1166 vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
1168 ofld_rqidx += vi->nofldrxq;
1172 vi->first_nm_rxq = nm_rqidx;
1173 vi->first_nm_txq = nm_tqidx;
1174 vi->nnmrxq = iaq.nnmrxq_vi;
1175 vi->nnmtxq = iaq.nnmtxq_vi;
1176 nm_rqidx += vi->nnmrxq;
1177 nm_tqidx += vi->nnmtxq;
1183 rc = t4_setup_intr_handlers(sc);
1186 "failed to setup interrupt handlers: %d\n", rc);
1190 rc = bus_generic_probe(dev);
1192 device_printf(dev, "failed to probe child drivers: %d\n", rc);
1197 * Ensure thread-safe mailbox access (in debug builds).
1199 * So far this was the only thread accessing the mailbox but various
1200 * ifnets and sysctls are about to be created and their handlers/ioctls
1201 * will access the mailbox from different threads.
1203 sc->flags |= CHK_MBOX_ACCESS;
1205 rc = bus_generic_attach(dev);
1208 "failed to attach all child ports: %d\n", rc);
1213 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1214 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1215 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1216 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1217 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1221 notify_siblings(dev, 0);
1224 if (rc != 0 && sc->cdev) {
1225 /* cdev was created and so cxgbetool works; recover that way. */
1227 "error during attach, adapter is now in recovery mode.\n");
1232 t4_detach_common(dev);
1240 t4_ready(device_t dev)
1244 sc = device_get_softc(dev);
1245 if (sc->flags & FW_OK)
1251 t4_read_port_device(device_t dev, int port, device_t *child)
1254 struct port_info *pi;
1256 sc = device_get_softc(dev);
1257 if (port < 0 || port >= MAX_NPORTS)
1259 pi = sc->port[port];
1260 if (pi == NULL || pi->dev == NULL)
1267 notify_siblings(device_t dev, int detaching)
1273 for (i = 0; i < PCI_FUNCMAX; i++) {
1274 if (i == pci_get_function(dev))
1276 sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
1277 pci_get_slot(dev), i);
1278 if (sibling == NULL || !device_is_attached(sibling))
1281 error = T4_DETACH_CHILD(sibling);
1283 (void)T4_ATTACH_CHILD(sibling);
1294 t4_detach(device_t dev)
1299 sc = device_get_softc(dev);
1301 rc = notify_siblings(dev, 1);
1304 "failed to detach sibling devices: %d\n", rc);
1308 return (t4_detach_common(dev));
1312 t4_detach_common(device_t dev)
1315 struct port_info *pi;
1318 sc = device_get_softc(dev);
1321 destroy_dev(sc->cdev);
1325 sc->flags &= ~CHK_MBOX_ACCESS;
1326 if (sc->flags & FULL_INIT_DONE) {
1327 if (!(sc->flags & IS_VF))
1328 t4_intr_disable(sc);
1331 if (device_is_attached(dev)) {
1332 rc = bus_generic_detach(dev);
1335 "failed to detach child devices: %d\n", rc);
1340 for (i = 0; i < sc->intr_count; i++)
1341 t4_free_irq(sc, &sc->irq[i]);
1343 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1344 t4_free_tx_sched(sc);
1346 for (i = 0; i < MAX_NPORTS; i++) {
1349 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1351 device_delete_child(dev, pi->dev);
1353 mtx_destroy(&pi->pi_lock);
1354 free(pi->vi, M_CXGBE);
1359 device_delete_children(dev);
1361 if (sc->flags & FULL_INIT_DONE)
1362 adapter_full_uninit(sc);
1364 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1365 t4_fw_bye(sc, sc->mbox);
1367 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1368 pci_release_msi(dev);
1371 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1375 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1379 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1383 t4_free_l2t(sc->l2t);
1385 t4_free_smt(sc->smt);
1387 t4_free_etid_table(sc);
1390 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1391 free(sc->sge.ofld_txq, M_CXGBE);
1394 free(sc->sge.ofld_rxq, M_CXGBE);
1397 free(sc->sge.nm_rxq, M_CXGBE);
1398 free(sc->sge.nm_txq, M_CXGBE);
1400 free(sc->irq, M_CXGBE);
1401 free(sc->sge.rxq, M_CXGBE);
1402 free(sc->sge.txq, M_CXGBE);
1403 free(sc->sge.ctrlq, M_CXGBE);
1404 free(sc->sge.iqmap, M_CXGBE);
1405 free(sc->sge.eqmap, M_CXGBE);
1406 free(sc->tids.ftid_tab, M_CXGBE);
1407 if (sc->tids.hftid_tab)
1408 free_hftid_tab(&sc->tids);
1409 free(sc->tids.atid_tab, M_CXGBE);
1410 free(sc->tids.tid_tab, M_CXGBE);
1411 free(sc->tt.tls_rx_ports, M_CXGBE);
1412 t4_destroy_dma_tag(sc);
1413 if (mtx_initialized(&sc->sc_lock)) {
1414 sx_xlock(&t4_list_lock);
1415 SLIST_REMOVE(&t4_list, sc, adapter, link);
1416 sx_xunlock(&t4_list_lock);
1417 mtx_destroy(&sc->sc_lock);
1420 callout_drain(&sc->sfl_callout);
1421 if (mtx_initialized(&sc->tids.ftid_lock)) {
1422 mtx_destroy(&sc->tids.ftid_lock);
1423 cv_destroy(&sc->tids.ftid_cv);
1425 if (mtx_initialized(&sc->tids.atid_lock))
1426 mtx_destroy(&sc->tids.atid_lock);
1427 if (mtx_initialized(&sc->sfl_lock))
1428 mtx_destroy(&sc->sfl_lock);
1429 if (mtx_initialized(&sc->ifp_lock))
1430 mtx_destroy(&sc->ifp_lock);
1431 if (mtx_initialized(&sc->reg_lock))
1432 mtx_destroy(&sc->reg_lock);
1434 if (rw_initialized(&sc->policy_lock)) {
1435 rw_destroy(&sc->policy_lock);
1437 if (sc->policy != NULL)
1438 free_offload_policy(sc->policy);
1442 for (i = 0; i < NUM_MEMWIN; i++) {
1443 struct memwin *mw = &sc->memwin[i];
1445 if (rw_initialized(&mw->mw_lock))
1446 rw_destroy(&mw->mw_lock);
1449 bzero(sc, sizeof(*sc));
1455 cxgbe_probe(device_t dev)
1458 struct port_info *pi = device_get_softc(dev);
1460 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1461 device_set_desc_copy(dev, buf);
1463 return (BUS_PROBE_DEFAULT);
1466 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1467 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1468 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1469 #define T4_CAP_ENABLE (T4_CAP)
1472 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1477 vi->xact_addr_filt = -1;
1478 callout_init(&vi->tick, 1);
1480 /* Allocate an ifnet and set it up */
1481 ifp = if_alloc(IFT_ETHER);
1483 device_printf(dev, "Cannot allocate ifnet\n");
1489 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1490 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1492 ifp->if_init = cxgbe_init;
1493 ifp->if_ioctl = cxgbe_ioctl;
1494 ifp->if_transmit = cxgbe_transmit;
1495 ifp->if_qflush = cxgbe_qflush;
1496 ifp->if_get_counter = cxgbe_get_counter;
1498 ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
1499 ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
1500 ifp->if_snd_tag_query = cxgbe_snd_tag_query;
1501 ifp->if_snd_tag_free = cxgbe_snd_tag_free;
1504 ifp->if_capabilities = T4_CAP;
1506 if (vi->nofldrxq != 0)
1507 ifp->if_capabilities |= IFCAP_TOE;
1510 if (vi->nnmrxq != 0)
1511 ifp->if_capabilities |= IFCAP_NETMAP;
1514 if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0)
1515 ifp->if_capabilities |= IFCAP_TXRTLMT;
1517 ifp->if_capenable = T4_CAP_ENABLE;
1518 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1519 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1521 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1522 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1523 ifp->if_hw_tsomaxsegsize = 65536;
1525 vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1526 EVENTHANDLER_PRI_ANY);
1528 ether_ifattach(ifp, vi->hw_addr);
1530 if (ifp->if_capabilities & IFCAP_NETMAP)
1531 cxgbe_nm_attach(vi);
1533 sb = sbuf_new_auto();
1534 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1535 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1536 switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
1538 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
1540 case IFCAP_TOE | IFCAP_TXRTLMT:
1541 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
1544 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
1549 if (ifp->if_capabilities & IFCAP_TOE)
1550 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
1553 if (ifp->if_capabilities & IFCAP_NETMAP)
1554 sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
1555 vi->nnmtxq, vi->nnmrxq);
1558 device_printf(dev, "%s\n", sbuf_data(sb));
1567 cxgbe_attach(device_t dev)
1569 struct port_info *pi = device_get_softc(dev);
1570 struct adapter *sc = pi->adapter;
1574 callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1576 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1580 for_each_vi(pi, i, vi) {
1583 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
1584 if (vi->dev == NULL) {
1585 device_printf(dev, "failed to add VI %d\n", i);
1588 device_set_softc(vi->dev, vi);
1593 bus_generic_attach(dev);
1599 cxgbe_vi_detach(struct vi_info *vi)
1601 struct ifnet *ifp = vi->ifp;
1603 ether_ifdetach(ifp);
1606 EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
1608 /* Let detach proceed even if these fail. */
1610 if (ifp->if_capabilities & IFCAP_NETMAP)
1611 cxgbe_nm_detach(vi);
1613 cxgbe_uninit_synchronized(vi);
1614 callout_drain(&vi->tick);
1622 cxgbe_detach(device_t dev)
1624 struct port_info *pi = device_get_softc(dev);
1625 struct adapter *sc = pi->adapter;
1628 /* Detach the extra VIs first. */
1629 rc = bus_generic_detach(dev);
1632 device_delete_children(dev);
1634 doom_vi(sc, &pi->vi[0]);
1636 if (pi->flags & HAS_TRACEQ) {
1637 sc->traceq = -1; /* cloner should not create ifnet */
1638 t4_tracer_port_detach(sc);
1641 cxgbe_vi_detach(&pi->vi[0]);
1642 callout_drain(&pi->tick);
1643 ifmedia_removeall(&pi->media);
1645 end_synchronized_op(sc, 0);
1651 cxgbe_init(void *arg)
1653 struct vi_info *vi = arg;
1654 struct adapter *sc = vi->pi->adapter;
1656 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1658 cxgbe_init_synchronized(vi);
1659 end_synchronized_op(sc, 0);
1663 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1665 int rc = 0, mtu, flags;
1666 struct vi_info *vi = ifp->if_softc;
1667 struct port_info *pi = vi->pi;
1668 struct adapter *sc = pi->adapter;
1669 struct ifreq *ifr = (struct ifreq *)data;
1675 if (mtu < ETHERMIN || mtu > MAX_MTU)
1678 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1682 if (vi->flags & VI_INIT_DONE) {
1683 t4_update_fl_bufsize(ifp);
1684 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1685 rc = update_mac_settings(ifp, XGMAC_MTU);
1687 end_synchronized_op(sc, 0);
1691 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
1695 if (ifp->if_flags & IFF_UP) {
1696 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1697 flags = vi->if_flags;
1698 if ((ifp->if_flags ^ flags) &
1699 (IFF_PROMISC | IFF_ALLMULTI)) {
1700 rc = update_mac_settings(ifp,
1701 XGMAC_PROMISC | XGMAC_ALLMULTI);
1704 rc = cxgbe_init_synchronized(vi);
1706 vi->if_flags = ifp->if_flags;
1707 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1708 rc = cxgbe_uninit_synchronized(vi);
1710 end_synchronized_op(sc, 0);
1715 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
1718 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1719 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1720 end_synchronized_op(sc, 0);
1724 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1728 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1729 if (mask & IFCAP_TXCSUM) {
1730 ifp->if_capenable ^= IFCAP_TXCSUM;
1731 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1733 if (IFCAP_TSO4 & ifp->if_capenable &&
1734 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1735 ifp->if_capenable &= ~IFCAP_TSO4;
1737 "tso4 disabled due to -txcsum.\n");
1740 if (mask & IFCAP_TXCSUM_IPV6) {
1741 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1742 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1744 if (IFCAP_TSO6 & ifp->if_capenable &&
1745 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1746 ifp->if_capenable &= ~IFCAP_TSO6;
1748 "tso6 disabled due to -txcsum6.\n");
1751 if (mask & IFCAP_RXCSUM)
1752 ifp->if_capenable ^= IFCAP_RXCSUM;
1753 if (mask & IFCAP_RXCSUM_IPV6)
1754 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1757 * Note that we leave CSUM_TSO alone (it is always set). The
1758 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1759 * sending a TSO request our way, so it's sufficient to toggle
1762 if (mask & IFCAP_TSO4) {
1763 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1764 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1765 if_printf(ifp, "enable txcsum first.\n");
1769 ifp->if_capenable ^= IFCAP_TSO4;
1771 if (mask & IFCAP_TSO6) {
1772 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1773 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1774 if_printf(ifp, "enable txcsum6 first.\n");
1778 ifp->if_capenable ^= IFCAP_TSO6;
1780 if (mask & IFCAP_LRO) {
1781 #if defined(INET) || defined(INET6)
1783 struct sge_rxq *rxq;
1785 ifp->if_capenable ^= IFCAP_LRO;
1786 for_each_rxq(vi, i, rxq) {
1787 if (ifp->if_capenable & IFCAP_LRO)
1788 rxq->iq.flags |= IQ_LRO_ENABLED;
1790 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1795 if (mask & IFCAP_TOE) {
1796 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1798 rc = toe_capability(vi, enable);
1802 ifp->if_capenable ^= mask;
1805 if (mask & IFCAP_VLAN_HWTAGGING) {
1806 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1807 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1808 rc = update_mac_settings(ifp, XGMAC_VLANEX);
1810 if (mask & IFCAP_VLAN_MTU) {
1811 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1813 /* Need to find out how to disable auto-mtu-inflation */
1815 if (mask & IFCAP_VLAN_HWTSO)
1816 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1817 if (mask & IFCAP_VLAN_HWCSUM)
1818 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1820 if (mask & IFCAP_TXRTLMT)
1821 ifp->if_capenable ^= IFCAP_TXRTLMT;
1824 #ifdef VLAN_CAPABILITIES
1825 VLAN_CAPABILITIES(ifp);
1828 end_synchronized_op(sc, 0);
1834 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1838 struct ifi2creq i2c;
1840 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
1843 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1847 if (i2c.len > sizeof(i2c.data)) {
1851 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1854 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
1855 i2c.offset, i2c.len, &i2c.data[0]);
1856 end_synchronized_op(sc, 0);
1858 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
1863 rc = ether_ioctl(ifp, cmd, data);
1870 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1872 struct vi_info *vi = ifp->if_softc;
1873 struct port_info *pi = vi->pi;
1874 struct adapter *sc = pi->adapter;
1875 struct sge_txq *txq;
1880 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
1882 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1887 rc = parse_pkt(sc, &m);
1888 if (__predict_false(rc != 0)) {
1889 MPASS(m == NULL); /* was freed already */
1890 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
1894 if (m->m_pkthdr.snd_tag != NULL) {
1895 /* EAGAIN tells the stack we are not the correct interface. */
1896 if (__predict_false(ifp != m->m_pkthdr.snd_tag->ifp)) {
1901 return (ethofld_transmit(ifp, m));
1906 txq = &sc->sge.txq[vi->first_txq];
1907 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1908 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1912 rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1913 if (__predict_false(rc != 0))
1920 cxgbe_qflush(struct ifnet *ifp)
1922 struct vi_info *vi = ifp->if_softc;
1923 struct sge_txq *txq;
1926 /* queues do not exist if !VI_INIT_DONE. */
1927 if (vi->flags & VI_INIT_DONE) {
1928 for_each_txq(vi, i, txq) {
1930 txq->eq.flags |= EQ_QFLUSH;
1932 while (!mp_ring_is_idle(txq->r)) {
1933 mp_ring_check_drainage(txq->r, 0);
1937 txq->eq.flags &= ~EQ_QFLUSH;
1945 vi_get_counter(struct ifnet *ifp, ift_counter c)
1947 struct vi_info *vi = ifp->if_softc;
1948 struct fw_vi_stats_vf *s = &vi->stats;
1950 vi_refresh_stats(vi->pi->adapter, vi);
1953 case IFCOUNTER_IPACKETS:
1954 return (s->rx_bcast_frames + s->rx_mcast_frames +
1955 s->rx_ucast_frames);
1956 case IFCOUNTER_IERRORS:
1957 return (s->rx_err_frames);
1958 case IFCOUNTER_OPACKETS:
1959 return (s->tx_bcast_frames + s->tx_mcast_frames +
1960 s->tx_ucast_frames + s->tx_offload_frames);
1961 case IFCOUNTER_OERRORS:
1962 return (s->tx_drop_frames);
1963 case IFCOUNTER_IBYTES:
1964 return (s->rx_bcast_bytes + s->rx_mcast_bytes +
1966 case IFCOUNTER_OBYTES:
1967 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
1968 s->tx_ucast_bytes + s->tx_offload_bytes);
1969 case IFCOUNTER_IMCASTS:
1970 return (s->rx_mcast_frames);
1971 case IFCOUNTER_OMCASTS:
1972 return (s->tx_mcast_frames);
1973 case IFCOUNTER_OQDROPS: {
1977 if (vi->flags & VI_INIT_DONE) {
1979 struct sge_txq *txq;
1981 for_each_txq(vi, i, txq)
1982 drops += counter_u64_fetch(txq->r->drops);
1990 return (if_get_counter_default(ifp, c));
1995 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
1997 struct vi_info *vi = ifp->if_softc;
1998 struct port_info *pi = vi->pi;
1999 struct adapter *sc = pi->adapter;
2000 struct port_stats *s = &pi->stats;
2002 if (pi->nvi > 1 || sc->flags & IS_VF)
2003 return (vi_get_counter(ifp, c));
2005 cxgbe_refresh_stats(sc, pi);
2008 case IFCOUNTER_IPACKETS:
2009 return (s->rx_frames);
2011 case IFCOUNTER_IERRORS:
2012 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
2013 s->rx_fcs_err + s->rx_len_err);
2015 case IFCOUNTER_OPACKETS:
2016 return (s->tx_frames);
2018 case IFCOUNTER_OERRORS:
2019 return (s->tx_error_frames);
2021 case IFCOUNTER_IBYTES:
2022 return (s->rx_octets);
2024 case IFCOUNTER_OBYTES:
2025 return (s->tx_octets);
2027 case IFCOUNTER_IMCASTS:
2028 return (s->rx_mcast_frames);
2030 case IFCOUNTER_OMCASTS:
2031 return (s->tx_mcast_frames);
2033 case IFCOUNTER_IQDROPS:
2034 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2035 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
2036 s->rx_trunc3 + pi->tnl_cong_drops);
2038 case IFCOUNTER_OQDROPS: {
2042 if (vi->flags & VI_INIT_DONE) {
2044 struct sge_txq *txq;
2046 for_each_txq(vi, i, txq)
2047 drops += counter_u64_fetch(txq->r->drops);
2055 return (if_get_counter_default(ifp, c));
2060 * The kernel picks a media from the list we had provided so we do not have to
2061 * validate the request.
2064 cxgbe_media_change(struct ifnet *ifp)
2066 struct vi_info *vi = ifp->if_softc;
2067 struct port_info *pi = vi->pi;
2068 struct ifmedia *ifm = &pi->media;
2069 struct link_config *lc = &pi->link_cfg;
2070 struct adapter *sc = pi->adapter;
2073 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
2077 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2078 MPASS(lc->supported & FW_PORT_CAP_ANEG);
2079 lc->requested_aneg = AUTONEG_ENABLE;
2081 lc->requested_aneg = AUTONEG_DISABLE;
2082 lc->requested_speed =
2083 ifmedia_baudrate(ifm->ifm_media) / 1000000;
2084 lc->requested_fc = 0;
2085 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
2086 lc->requested_fc |= PAUSE_RX;
2087 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
2088 lc->requested_fc |= PAUSE_TX;
2091 rc = apply_l1cfg(pi);
2093 end_synchronized_op(sc, 0);
2098 * Mbps to FW_PORT_CAP_SPEED_* bit.
2101 speed_to_fwspeed(int speed)
2106 return (FW_PORT_CAP_SPEED_100G);
2108 return (FW_PORT_CAP_SPEED_40G);
2110 return (FW_PORT_CAP_SPEED_25G);
2112 return (FW_PORT_CAP_SPEED_10G);
2114 return (FW_PORT_CAP_SPEED_1G);
2116 return (FW_PORT_CAP_SPEED_100M);
2123 * Base media word (without ETHER, pause, link active, etc.) for the port at the
2127 port_mword(struct port_info *pi, uint16_t speed)
2130 MPASS(speed & M_FW_PORT_CAP_SPEED);
2131 MPASS(powerof2(speed));
2133 switch(pi->port_type) {
2134 case FW_PORT_TYPE_BT_SGMII:
2135 case FW_PORT_TYPE_BT_XFI:
2136 case FW_PORT_TYPE_BT_XAUI:
2139 case FW_PORT_CAP_SPEED_100M:
2141 case FW_PORT_CAP_SPEED_1G:
2142 return (IFM_1000_T);
2143 case FW_PORT_CAP_SPEED_10G:
2147 case FW_PORT_TYPE_KX4:
2148 if (speed == FW_PORT_CAP_SPEED_10G)
2149 return (IFM_10G_KX4);
2151 case FW_PORT_TYPE_CX4:
2152 if (speed == FW_PORT_CAP_SPEED_10G)
2153 return (IFM_10G_CX4);
2155 case FW_PORT_TYPE_KX:
2156 if (speed == FW_PORT_CAP_SPEED_1G)
2157 return (IFM_1000_KX);
2159 case FW_PORT_TYPE_KR:
2160 case FW_PORT_TYPE_BP_AP:
2161 case FW_PORT_TYPE_BP4_AP:
2162 case FW_PORT_TYPE_BP40_BA:
2163 case FW_PORT_TYPE_KR4_100G:
2164 case FW_PORT_TYPE_KR_SFP28:
2165 case FW_PORT_TYPE_KR_XLAUI:
2167 case FW_PORT_CAP_SPEED_1G:
2168 return (IFM_1000_KX);
2169 case FW_PORT_CAP_SPEED_10G:
2170 return (IFM_10G_KR);
2171 case FW_PORT_CAP_SPEED_25G:
2172 return (IFM_25G_KR);
2173 case FW_PORT_CAP_SPEED_40G:
2174 return (IFM_40G_KR4);
2175 case FW_PORT_CAP_SPEED_100G:
2176 return (IFM_100G_KR4);
2179 case FW_PORT_TYPE_FIBER_XFI:
2180 case FW_PORT_TYPE_FIBER_XAUI:
2181 case FW_PORT_TYPE_SFP:
2182 case FW_PORT_TYPE_QSFP_10G:
2183 case FW_PORT_TYPE_QSA:
2184 case FW_PORT_TYPE_QSFP:
2185 case FW_PORT_TYPE_CR4_QSFP:
2186 case FW_PORT_TYPE_CR_QSFP:
2187 case FW_PORT_TYPE_CR2_QSFP:
2188 case FW_PORT_TYPE_SFP28:
2189 /* Pluggable transceiver */
2190 switch (pi->mod_type) {
2191 case FW_PORT_MOD_TYPE_LR:
2193 case FW_PORT_CAP_SPEED_1G:
2194 return (IFM_1000_LX);
2195 case FW_PORT_CAP_SPEED_10G:
2196 return (IFM_10G_LR);
2197 case FW_PORT_CAP_SPEED_25G:
2198 return (IFM_25G_LR);
2199 case FW_PORT_CAP_SPEED_40G:
2200 return (IFM_40G_LR4);
2201 case FW_PORT_CAP_SPEED_100G:
2202 return (IFM_100G_LR4);
2205 case FW_PORT_MOD_TYPE_SR:
2207 case FW_PORT_CAP_SPEED_1G:
2208 return (IFM_1000_SX);
2209 case FW_PORT_CAP_SPEED_10G:
2210 return (IFM_10G_SR);
2211 case FW_PORT_CAP_SPEED_25G:
2212 return (IFM_25G_SR);
2213 case FW_PORT_CAP_SPEED_40G:
2214 return (IFM_40G_SR4);
2215 case FW_PORT_CAP_SPEED_100G:
2216 return (IFM_100G_SR4);
2219 case FW_PORT_MOD_TYPE_ER:
2220 if (speed == FW_PORT_CAP_SPEED_10G)
2221 return (IFM_10G_ER);
2223 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2224 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2226 case FW_PORT_CAP_SPEED_1G:
2227 return (IFM_1000_CX);
2228 case FW_PORT_CAP_SPEED_10G:
2229 return (IFM_10G_TWINAX);
2230 case FW_PORT_CAP_SPEED_25G:
2231 return (IFM_25G_CR);
2232 case FW_PORT_CAP_SPEED_40G:
2233 return (IFM_40G_CR4);
2234 case FW_PORT_CAP_SPEED_100G:
2235 return (IFM_100G_CR4);
2238 case FW_PORT_MOD_TYPE_LRM:
2239 if (speed == FW_PORT_CAP_SPEED_10G)
2240 return (IFM_10G_LRM);
2242 case FW_PORT_MOD_TYPE_NA:
2243 MPASS(0); /* Not pluggable? */
2245 case FW_PORT_MOD_TYPE_ERROR:
2246 case FW_PORT_MOD_TYPE_UNKNOWN:
2247 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
2249 case FW_PORT_MOD_TYPE_NONE:
2253 case FW_PORT_TYPE_NONE:
2257 return (IFM_UNKNOWN);
2261 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2263 struct vi_info *vi = ifp->if_softc;
2264 struct port_info *pi = vi->pi;
2265 struct adapter *sc = pi->adapter;
2266 struct link_config *lc = &pi->link_cfg;
2268 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2272 if (pi->up_vis == 0) {
2274 * If all the interfaces are administratively down, the firmware

2275 * does not report transceiver changes. Refresh port info here
2276 * so that ifconfig displays accurate ifmedia at all times.
2277 * This is the only reason we have a synchronized op in this
2278 * function. Just PORT_LOCK would have been enough otherwise.
2280 t4_update_port_info(pi);
2281 build_medialist(pi, &pi->media);
2285 ifmr->ifm_status = IFM_AVALID;
2286 if (lc->link_ok == 0)
2288 ifmr->ifm_status |= IFM_ACTIVE;
2291 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2292 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2293 if (lc->fc & PAUSE_RX)
2294 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2295 if (lc->fc & PAUSE_TX)
2296 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2297 ifmr->ifm_active |= port_mword(pi, speed_to_fwspeed(lc->speed));
2300 end_synchronized_op(sc, 0);
2304 vcxgbe_probe(device_t dev)
2307 struct vi_info *vi = device_get_softc(dev);
2309 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2311 device_set_desc_copy(dev, buf);
2313 return (BUS_PROBE_DEFAULT);
2317 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2319 int func, index, rc;
2320 uint32_t param, val;
2322 ASSERT_SYNCHRONIZED_OP(sc);
2324 index = vi - pi->vi;
2325 MPASS(index > 0); /* This function deals with _extra_ VIs only */
2326 KASSERT(index < nitems(vi_mac_funcs),
2327 ("%s: VI %s doesn't have a MAC func", __func__,
2328 device_get_nameunit(vi->dev)));
2329 func = vi_mac_funcs[index];
2330 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2331 vi->hw_addr, &vi->rss_size, func, 0);
2333 device_printf(vi->dev, "failed to allocate virtual interface %d"
2334 " for port %d: %d\n", index, pi->port_id, -rc);
2338 if (chip_id(sc) <= CHELSIO_T5)
2339 vi->smt_idx = (rc & 0x7f) << 1;
2341 vi->smt_idx = (rc & 0x7f);
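/*
 * The viid returned in rc carries the SMT index in its low 7 bits; it is
 * doubled on pre-T6 chips (their SMT entries appear to come in pairs).
 */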
2343 if (vi->rss_size == 1) {
2345 * This VI didn't get a slice of the RSS table. Reduce the
2346 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2347 * configuration file (nvi, rssnvi for this PF) if this is a
2350 device_printf(vi->dev, "RSS table not available.\n");
2351 vi->rss_base = 0xffff;
2356 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2357 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2358 V_FW_PARAMS_PARAM_YZ(vi->viid);
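/*
 * Ask the firmware for this VI's slice of the RSS table; the reply packs
 * the slice size in the upper 16 bits and its base in the lower 16.
 */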
2359 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2361 vi->rss_base = 0xffff;
2363 MPASS((val >> 16) == vi->rss_size);
2364 vi->rss_base = val & 0xffff;
2371 vcxgbe_attach(device_t dev)
2374 struct port_info *pi;
2378 vi = device_get_softc(dev);
2382 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2385 rc = alloc_extra_vi(sc, pi, vi);
2386 end_synchronized_op(sc, 0);
2390 rc = cxgbe_vi_attach(dev, vi);
2392 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2399 vcxgbe_detach(device_t dev)
2404 vi = device_get_softc(dev);
2405 sc = vi->pi->adapter;
2409 cxgbe_vi_detach(vi);
2410 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2412 end_synchronized_op(sc, 0);
2418 t4_fatal_err(struct adapter *sc)
2420 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2421 t4_intr_disable(sc);
2422 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2423 device_get_nameunit(sc->dev));
2424 if (t4_panic_on_fatal_err)
2425 panic("panic requested on fatal error");
2429 t4_add_adapter(struct adapter *sc)
2431 sx_xlock(&t4_list_lock);
2432 SLIST_INSERT_HEAD(&t4_list, sc, link);
2433 sx_xunlock(&t4_list_lock);
2437 t4_map_bars_0_and_4(struct adapter *sc)
2439 sc->regs_rid = PCIR_BAR(0);
2440 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2441 &sc->regs_rid, RF_ACTIVE);
2442 if (sc->regs_res == NULL) {
2443 device_printf(sc->dev, "cannot map registers.\n");
2446 sc->bt = rman_get_bustag(sc->regs_res);
2447 sc->bh = rman_get_bushandle(sc->regs_res);
2448 sc->mmio_len = rman_get_size(sc->regs_res);
2449 setbit(&sc->doorbells, DOORBELL_KDB);
2451 sc->msix_rid = PCIR_BAR(4);
2452 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2453 &sc->msix_rid, RF_ACTIVE);
2454 if (sc->msix_res == NULL) {
2455 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2463 t4_map_bar_2(struct adapter *sc)
2467 * T4: only the iWARP driver uses the userspace doorbells. There is no
2468 * need to map this BAR if RDMA is disabled.
2470 if (is_t4(sc) && sc->rdmacaps == 0)
2473 sc->udbs_rid = PCIR_BAR(2);
2474 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2475 &sc->udbs_rid, RF_ACTIVE);
2476 if (sc->udbs_res == NULL) {
2477 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2480 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2482 if (chip_id(sc) >= CHELSIO_T5) {
2483 setbit(&sc->doorbells, DOORBELL_UDB);
2484 #if defined(__i386__) || defined(__amd64__)
2485 if (t5_write_combine) {
2489 * Enable write combining on BAR2. This is the
2490 * userspace doorbell BAR and is split into 128B
2491 * (UDBS_SEG_SIZE) doorbell regions, each associated
2492 * with an egress queue. The first 64B has the doorbell
2493 * and the second 64B can be used to submit a tx work
2494 * request with an implicit doorbell.
2497 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2498 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2500 clrbit(&sc->doorbells, DOORBELL_UDB);
2501 setbit(&sc->doorbells, DOORBELL_WCWR);
2502 setbit(&sc->doorbells, DOORBELL_UDBWC);
2504 device_printf(sc->dev,
2505 "couldn't enable write combining: %d\n",
2509 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2510 t4_write_reg(sc, A_SGE_STAT_CFG,
2511 V_STATSOURCE_T5(7) | mode);
2515 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
2520 struct memwin_init {
2525 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2526 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2527 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2528 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2531 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2532 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2533 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2534 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2538 setup_memwin(struct adapter *sc)
2540 const struct memwin_init *mw_init;
2547 * Read low 32b of bar0 indirectly via the hardware backdoor
2548 * mechanism. Works from within PCI passthrough environments
2549 * too, where rman_get_start() can return a different value. We
2550 * need to program the T4 memory window decoders with the actual
2551 * addresses that will be coming across the PCIe link.
2553 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2554 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2556 mw_init = &t4_memwin[0];
2558 /* T5+ use the relative offset inside the PCIe BAR */
2561 mw_init = &t5_memwin[0];
2564 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2565 rw_init(&mw->mw_lock, "memory window access");
2566 mw->mw_base = mw_init->base;
2567 mw->mw_aperture = mw_init->aperture;
2570 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2571 (mw->mw_base + bar0) | V_BIR(0) |
2572 V_WINDOW(ilog2(mw->mw_aperture) - 10));
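/*
 * The decoder takes the window's address (bar0 is 0 on T5+, which use
 * BAR-relative offsets) and its aperture encoded as ilog2(aperture) - 10,
 * i.e. log2 of the window size in KB.
 */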
2573 rw_wlock(&mw->mw_lock);
2574 position_memwin(sc, i, 0);
2575 rw_wunlock(&mw->mw_lock);
2579 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2583 * Positions the memory window at the given address in the card's address space.
2584 * There are some alignment requirements and the actual position may be at an
2585 * address prior to the requested address. mw->mw_curpos always has the actual
2586 * position of the window.
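 * For example, on a PF a request for address 0x12345 puts the window at
 * 0x12300 (128B aligned); the caller then reads its data at offset 0x45
 * within the window.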
2589 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2595 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2596 mw = &sc->memwin[idx];
2597 rw_assert(&mw->mw_lock, RA_WLOCKED);
2601 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
2603 pf = V_PFNUM(sc->pf);
2604 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
2606 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2607 t4_write_reg(sc, reg, mw->mw_curpos | pf);
2608 t4_read_reg(sc, reg); /* flush */
2612 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2618 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2620 /* Memory can only be accessed in naturally aligned 4 byte units */
2621 if (addr & 3 || len & 3 || len <= 0)
2624 mw = &sc->memwin[idx];
2626 rw_rlock(&mw->mw_lock);
2627 mw_end = mw->mw_curpos + mw->mw_aperture;
2628 if (addr >= mw_end || addr < mw->mw_curpos) {
2629 /* Will need to reposition the window */
2630 if (!rw_try_upgrade(&mw->mw_lock)) {
2631 rw_runlock(&mw->mw_lock);
2632 rw_wlock(&mw->mw_lock);
2634 rw_assert(&mw->mw_lock, RA_WLOCKED);
2635 position_memwin(sc, idx, addr);
2636 rw_downgrade(&mw->mw_lock);
2637 mw_end = mw->mw_curpos + mw->mw_aperture;
2639 rw_assert(&mw->mw_lock, RA_RLOCKED);
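/*
 * Copy 32 bits at a time through the window; the loop stops at the end of
 * the aperture or when len is exhausted, whichever comes first.
 */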
2640 while (addr < mw_end && len > 0) {
2642 v = t4_read_reg(sc, mw->mw_base + addr -
2644 *val++ = le32toh(v);
2647 t4_write_reg(sc, mw->mw_base + addr -
2648 mw->mw_curpos, htole32(v));
2653 rw_runlock(&mw->mw_lock);
2660 alloc_atid_tab(struct tid_info *t, int flags)
2664 MPASS(t->natids > 0);
2665 MPASS(t->atid_tab == NULL);
2667 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
2669 if (t->atid_tab == NULL)
2671 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
2672 t->afree = t->atid_tab;
2673 t->atids_in_use = 0;
2674 for (i = 1; i < t->natids; i++)
2675 t->atid_tab[i - 1].next = &t->atid_tab[i];
2676 t->atid_tab[t->natids - 1].next = NULL;
2682 free_atid_tab(struct tid_info *t)
2685 KASSERT(t->atids_in_use == 0,
2686 ("%s: %d atids still in use.", __func__, t->atids_in_use));
2688 if (mtx_initialized(&t->atid_lock))
2689 mtx_destroy(&t->atid_lock);
2690 free(t->atid_tab, M_CXGBE);
2695 alloc_atid(struct adapter *sc, void *ctx)
2697 struct tid_info *t = &sc->tids;
2700 mtx_lock(&t->atid_lock);
2702 union aopen_entry *p = t->afree;
2704 atid = p - t->atid_tab;
2705 MPASS(atid <= M_TID_TID);
2710 mtx_unlock(&t->atid_lock);
2715 lookup_atid(struct adapter *sc, int atid)
2717 struct tid_info *t = &sc->tids;
2719 return (t->atid_tab[atid].data);
2723 free_atid(struct adapter *sc, int atid)
2725 struct tid_info *t = &sc->tids;
2726 union aopen_entry *p = &t->atid_tab[atid];
2728 mtx_lock(&t->atid_lock);
2732 mtx_unlock(&t->atid_lock);
2736 queue_tid_release(struct adapter *sc, int tid)
2739 CXGBE_UNIMPLEMENTED("deferred tid release");
2743 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
2746 struct cpl_tid_release *req;
2748 wr = alloc_wrqe(sizeof(*req), ctrlq);
2750 queue_tid_release(sc, tid); /* defer */
2755 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
2761 t4_range_cmp(const void *a, const void *b)
2763 return ((const struct t4_range *)a)->start -
2764 ((const struct t4_range *)b)->start;
2768 * Verify that the memory range specified by the addr/len pair is valid within
2769 * the card's address space.
2772 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2774 struct t4_range mem_ranges[4], *r, *next;
2775 uint32_t em, addr_len;
2776 int i, n, remaining;
2778 /* Memory can only be accessed in naturally aligned 4 byte units */
2779 if (addr & 3 || len & 3 || len <= 0)
2782 /* Enabled memories */
2783 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2787 bzero(r, sizeof(mem_ranges));
2788 if (em & F_EDRAM0_ENABLE) {
2789 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2790 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2792 r->start = G_EDRAM0_BASE(addr_len) << 20;
2793 if (addr >= r->start &&
2794 addr + len <= r->start + r->size)
2800 if (em & F_EDRAM1_ENABLE) {
2801 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2802 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2804 r->start = G_EDRAM1_BASE(addr_len) << 20;
2805 if (addr >= r->start &&
2806 addr + len <= r->start + r->size)
2812 if (em & F_EXT_MEM_ENABLE) {
2813 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2814 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2816 r->start = G_EXT_MEM_BASE(addr_len) << 20;
2817 if (addr >= r->start &&
2818 addr + len <= r->start + r->size)
2824 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2825 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2826 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2828 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2829 if (addr >= r->start &&
2830 addr + len <= r->start + r->size)
2836 MPASS(n <= nitems(mem_ranges));
2839 /* Sort and merge the ranges. */
2840 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
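/*
 * e.g. adjacent or overlapping ranges such as [0, 1GB) and [1GB, 2GB) end
 * up merged into a single [0, 2GB) entry below.
 */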
2842 /* Start from index 0 and examine the next n - 1 entries. */
2844 for (remaining = n - 1; remaining > 0; remaining--, r++) {
2846 MPASS(r->size > 0); /* r is a valid entry. */
2848 MPASS(next->size > 0); /* and so is the next one. */
2850 while (r->start + r->size >= next->start) {
2851 /* Merge the next one into the current entry. */
2852 r->size = max(r->start + r->size,
2853 next->start + next->size) - r->start;
2854 n--; /* One fewer entry in total. */
2855 if (--remaining == 0)
2856 goto done; /* short circuit */
2859 if (next != r + 1) {
2861 * Some entries were merged into r and next
2862 * points to the first valid entry that couldn't
2865 MPASS(next->size > 0); /* must be valid */
2866 memcpy(r + 1, next, remaining * sizeof(*r));
2869 * This is so that the size assertions in the
2870 * next iteration of the loop do the right
2871 * thing for entries that were pulled up and are
2874 MPASS(n < nitems(mem_ranges));
2875 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2876 sizeof(struct t4_range));
2881 /* Done merging the ranges. */
2884 for (i = 0; i < n; i++, r++) {
2885 if (addr >= r->start &&
2886 addr + len <= r->start + r->size)
2895 fwmtype_to_hwmtype(int mtype)
2899 case FW_MEMTYPE_EDC0:
2901 case FW_MEMTYPE_EDC1:
2903 case FW_MEMTYPE_EXTMEM:
2905 case FW_MEMTYPE_EXTMEM1:
2908 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2913 * Verify that the memory range specified by the memtype/offset/len pair is
2914 * valid and lies entirely within the memtype specified. The global address of
2915 * the start of the range is returned in addr.
2918 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2921 uint32_t em, addr_len, maddr;
2923 /* Memory can only be accessed in naturally aligned 4 byte units */
2924 if (off & 3 || len & 3 || len == 0)
2927 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2928 switch (fwmtype_to_hwmtype(mtype)) {
2930 if (!(em & F_EDRAM0_ENABLE))
2932 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2933 maddr = G_EDRAM0_BASE(addr_len) << 20;
2936 if (!(em & F_EDRAM1_ENABLE))
2938 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2939 maddr = G_EDRAM1_BASE(addr_len) << 20;
2942 if (!(em & F_EXT_MEM_ENABLE))
2944 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2945 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2948 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2950 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2951 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2957 *addr = maddr + off; /* global address */
2958 return (validate_mem_range(sc, *addr, len));
2962 fixup_devlog_params(struct adapter *sc)
2964 struct devlog_params *dparams = &sc->params.devlog;
2967 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2968 dparams->size, &dparams->addr);
2974 update_nirq(struct intrs_and_queues *iaq, int nports)
2976 int extra = T4_EXTRA_INTR;
2979 iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
2980 iaq->nirq += nports * (iaq->num_vis - 1) *
2981 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
2982 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
2986 * Adjust requirements to fit the number of interrupts available.
2989 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
2993 const int nports = sc->params.nports;
2998 bzero(iaq, sizeof(*iaq));
2999 iaq->intr_type = itype;
3000 iaq->num_vis = t4_num_vis;
3001 iaq->ntxq = t4_ntxq;
3002 iaq->ntxq_vi = t4_ntxq_vi;
3003 iaq->nrxq = t4_nrxq;
3004 iaq->nrxq_vi = t4_nrxq_vi;
3005 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3006 if (is_offload(sc) || is_ethoffload(sc)) {
3007 iaq->nofldtxq = t4_nofldtxq;
3008 iaq->nofldtxq_vi = t4_nofldtxq_vi;
3012 if (is_offload(sc)) {
3013 iaq->nofldrxq = t4_nofldrxq;
3014 iaq->nofldrxq_vi = t4_nofldrxq_vi;
3018 iaq->nnmtxq_vi = t4_nnmtxq_vi;
3019 iaq->nnmrxq_vi = t4_nnmrxq_vi;
3022 update_nirq(iaq, nports);
3023 if (iaq->nirq <= navail &&
3024 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3026 * This is the normal case -- there are enough interrupts for
3033 * If extra VIs have been configured, try reducing their count and see if
3036 while (iaq->num_vis > 1) {
3038 update_nirq(iaq, nports);
3039 if (iaq->nirq <= navail &&
3040 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3041 device_printf(sc->dev, "virtual interfaces per port "
3042 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
3043 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
3044 "itype %d, navail %u, nirq %d.\n",
3045 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3046 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3047 itype, navail, iaq->nirq);
3053 * Extra VIs will not be created. Log a message if they were requested.
3055 MPASS(iaq->num_vis == 1);
3056 iaq->ntxq_vi = iaq->nrxq_vi = 0;
3057 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3058 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3059 if (iaq->num_vis != t4_num_vis) {
3060 device_printf(sc->dev, "extra virtual interfaces disabled. "
3061 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3062 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
3063 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3064 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3068 * Keep reducing the number of NIC rx queues to the next lower power of
3069 * 2 (for even RSS distribution) and halving the TOE rx queues, and see
3073 if (iaq->nrxq > 1) {
3076 } while (!powerof2(iaq->nrxq));
3078 if (iaq->nofldrxq > 1)
3079 iaq->nofldrxq >>= 1;
3081 old_nirq = iaq->nirq;
3082 update_nirq(iaq, nports);
3083 if (iaq->nirq <= navail &&
3084 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3085 device_printf(sc->dev, "running with reduced number of "
3086 "rx queues because of shortage of interrupts. "
3087 "nrxq=%u, nofldrxq=%u. "
3088 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3089 iaq->nofldrxq, itype, navail, iaq->nirq);
3092 } while (old_nirq != iaq->nirq);
3094 /* One interrupt for everything. Ugh. */
3095 device_printf(sc->dev, "running with minimal number of queues. "
3096 "itype %d, navail %u.\n", itype, navail);
3098 MPASS(iaq->nrxq == 1);
3100 if (iaq->nofldrxq > 1)
3103 MPASS(iaq->num_vis > 0);
3104 if (iaq->num_vis > 1) {
3105 MPASS(iaq->nrxq_vi > 0);
3106 MPASS(iaq->ntxq_vi > 0);
3108 MPASS(iaq->nirq > 0);
3109 MPASS(iaq->nrxq > 0);
3110 MPASS(iaq->ntxq > 0);
3111 if (itype == INTR_MSI) {
3112 MPASS(powerof2(iaq->nirq));
3117 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3119 int rc, itype, navail, nalloc;
3121 for (itype = INTR_MSIX; itype; itype >>= 1) {
3123 if ((itype & t4_intr_types) == 0)
3124 continue; /* not allowed */
3126 if (itype == INTR_MSIX)
3127 navail = pci_msix_count(sc->dev);
3128 else if (itype == INTR_MSI)
3129 navail = pci_msi_count(sc->dev);
3136 calculate_iaq(sc, iaq, itype, navail);
3139 if (itype == INTR_MSIX)
3140 rc = pci_alloc_msix(sc->dev, &nalloc);
3141 else if (itype == INTR_MSI)
3142 rc = pci_alloc_msi(sc->dev, &nalloc);
3144 if (rc == 0 && nalloc > 0) {
3145 if (nalloc == iaq->nirq)
3149 * Didn't get the number requested. Use whatever number
3150 * the kernel is willing to allocate.
3152 device_printf(sc->dev, "fewer vectors than requested, "
3153 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3154 itype, iaq->nirq, nalloc);
3155 pci_release_msi(sc->dev);
3160 device_printf(sc->dev,
3161 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
3162 itype, rc, iaq->nirq, nalloc);
3165 device_printf(sc->dev,
3166 "failed to find a usable interrupt type. "
3167 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
3168 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3173 #define FW_VERSION(chip) ( \
3174 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3175 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3176 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3177 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3178 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
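/*
 * e.g. FW_VERSION(T5) packs T5FW_VERSION_{MAJOR,MINOR,MICRO,BUILD} into a
 * single 32-bit word, and FW_INTFVER(T5, NIC) expands to
 * T5FW_HDR_INTFVER_NIC.
 */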
3184 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
3188 .kld_name = "t4fw_cfg",
3189 .fw_mod_name = "t4fw",
3191 .chip = FW_HDR_CHIP_T4,
3192 .fw_ver = htobe32(FW_VERSION(T4)),
3193 .intfver_nic = FW_INTFVER(T4, NIC),
3194 .intfver_vnic = FW_INTFVER(T4, VNIC),
3195 .intfver_ofld = FW_INTFVER(T4, OFLD),
3196 .intfver_ri = FW_INTFVER(T4, RI),
3197 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3198 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3199 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3200 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3204 .kld_name = "t5fw_cfg",
3205 .fw_mod_name = "t5fw",
3207 .chip = FW_HDR_CHIP_T5,
3208 .fw_ver = htobe32(FW_VERSION(T5)),
3209 .intfver_nic = FW_INTFVER(T5, NIC),
3210 .intfver_vnic = FW_INTFVER(T5, VNIC),
3211 .intfver_ofld = FW_INTFVER(T5, OFLD),
3212 .intfver_ri = FW_INTFVER(T5, RI),
3213 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3214 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3215 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3216 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3220 .kld_name = "t6fw_cfg",
3221 .fw_mod_name = "t6fw",
3223 .chip = FW_HDR_CHIP_T6,
3224 .fw_ver = htobe32(FW_VERSION(T6)),
3225 .intfver_nic = FW_INTFVER(T6, NIC),
3226 .intfver_vnic = FW_INTFVER(T6, VNIC),
3227 .intfver_ofld = FW_INTFVER(T6, OFLD),
3228 .intfver_ri = FW_INTFVER(T6, RI),
3229 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3230 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3231 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3232 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3237 static struct fw_info *
3238 find_fw_info(int chip)
3242 for (i = 0; i < nitems(fw_info); i++) {
3243 if (fw_info[i].chip == chip)
3244 return (&fw_info[i]);
3250 * Is the given firmware API compatible with the one the driver was compiled
3254 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3257 /* short circuit if it's the exact same firmware version */
3258 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3262 * XXX: Is this too conservative? Perhaps I should limit this to the
3263 * features that are supported in the driver.
3265 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
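/*
 * Otherwise the two are considered compatible if every per-interface API
 * version (NIC, VNIC, OFLD, RI, iSCSI, FCoE, ...) matches.
 */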
3266 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3267 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3268 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3276 * The firmware in the KLD is usable, but should it be installed? This routine
3277 * explains itself in detail if it indicates the KLD firmware should be
3281 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
3285 if (!card_fw_usable) {
3286 reason = "incompatible or unusable";
3291 reason = "older than the version bundled with this driver";
3295 if (t4_fw_install == 2 && k != c) {
3296 reason = "different than the version bundled with this driver";
3303 if (t4_fw_install == 0) {
3304 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3305 "but the driver is prohibited from installing a different "
3306 "firmware on the card.\n",
3307 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3308 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3313 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3314 "installing firmware %u.%u.%u.%u on card.\n",
3315 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3316 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3317 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3318 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3324 * Establish contact with the firmware and determine if we are the master driver
3325 * or not, and whether we are responsible for chip initialization.
3328 prep_firmware(struct adapter *sc)
3330 const struct firmware *fw = NULL, *default_cfg;
3331 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
3332 enum dev_state state;
3333 struct fw_info *fw_info;
3334 struct fw_hdr *card_fw; /* fw on the card */
3335 const struct fw_hdr *kld_fw; /* fw in the KLD */
3336 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
3339 /* This is the firmware whose headers the driver was compiled against */
3340 fw_info = find_fw_info(chip_id(sc));
3341 if (fw_info == NULL) {
3342 device_printf(sc->dev,
3343 "unable to look up firmware information for chip %d.\n",
3347 drv_fw = &fw_info->fw_hdr;
3350 * The firmware KLD contains many modules. The KLD name is also the
3351 * name of the module that contains the default config file.
3353 default_cfg = firmware_get(fw_info->kld_name);
3355 /* This is the firmware in the KLD */
3356 fw = firmware_get(fw_info->fw_mod_name);
3358 kld_fw = (const void *)fw->data;
3359 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3365 /* Read the header of the firmware on the card */
3366 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3367 rc = -t4_read_flash(sc, FLASH_FW_START,
3368 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
3370 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
3371 if (card_fw->fw_ver == be32toh(0xffffffff)) {
3372 uint32_t d = be32toh(kld_fw->fw_ver);
3374 if (!kld_fw_usable) {
3375 device_printf(sc->dev,
3376 "no firmware on the card and no usable "
3377 "firmware bundled with the driver.\n");
3380 } else if (t4_fw_install == 0) {
3381 device_printf(sc->dev,
3382 "no firmware on the card and the driver "
3383 "is prohibited from installing new "
3389 device_printf(sc->dev, "no firmware on the card, "
3390 "installing firmware %d.%d.%d.%d\n",
3391 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3392 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3393 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize);
3396 device_printf(sc->dev,
3397 "firmware install failed: %d.\n", rc);
3400 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3405 device_printf(sc->dev,
3406 "Unable to read card's firmware header: %d\n", rc);
3410 /* Contact firmware. */
3411 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3412 if (rc < 0 || state == DEV_STATE_ERR) {
3414 device_printf(sc->dev,
3415 "failed to connect to the firmware: %d, %d.\n", rc, state);
3420 sc->flags |= MASTER_PF;
3421 else if (state == DEV_STATE_UNINIT) {
3423 * We didn't get to be the master so we definitely won't be
3424 * configuring the chip. It's a bug if someone else hasn't
3425 * configured it already.
3427 device_printf(sc->dev, "couldn't be master(%d), "
3428 "device not already initialized either(%d).\n", rc, state);
3433 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3434 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3436 * Common case: the firmware on the card is an exact match and
3437 * the KLD is an exact match too, or the KLD is
3438 * absent/incompatible. Note that t4_fw_install = 2 is ignored
3439 * here -- use cxgbetool loadfw if you want to reinstall the
3440 * same firmware as the one on the card.
3442 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3443 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3444 be32toh(card_fw->fw_ver))) {
3446 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3448 device_printf(sc->dev,
3449 "failed to install firmware: %d\n", rc);
3453 /* Installed successfully, update the cached header too. */
3454 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3456 need_fw_reset = 0; /* already reset as part of load_fw */
3459 if (!card_fw_usable) {
3462 d = ntohl(drv_fw->fw_ver);
3463 c = ntohl(card_fw->fw_ver);
3464 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3466 device_printf(sc->dev, "Cannot find a usable firmware: "
3467 "fw_install %d, chip state %d, "
3468 "driver compiled with %d.%d.%d.%d, "
3469 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3470 t4_fw_install, state,
3471 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3472 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3473 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3474 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3475 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3476 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3482 if (need_fw_reset &&
3483 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3484 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3485 if (rc != ETIMEDOUT && rc != EIO)
3486 t4_fw_bye(sc, sc->mbox);
3491 rc = get_params__pre_init(sc);
3493 goto done; /* error message displayed already */
3495 /* Partition adapter resources as specified in the config file. */
3496 if (state == DEV_STATE_UNINIT) {
3498 KASSERT(sc->flags & MASTER_PF,
3499 ("%s: trying to change chip settings when not master.",
3502 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3504 goto done; /* error message displayed already */
3506 t4_tweak_chip_settings(sc);
3508 /* get basic stuff going */
3509 rc = -t4_fw_initialize(sc, sc->mbox);
3511 device_printf(sc->dev, "fw init failed: %d.\n", rc);
3515 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3520 free(card_fw, M_CXGBE);
3522 firmware_put(fw, FIRMWARE_UNLOAD);
3523 if (default_cfg != NULL)
3524 firmware_put(default_cfg, FIRMWARE_UNLOAD);
3529 #define FW_PARAM_DEV(param) \
3530 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3531 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3532 #define FW_PARAM_PFVF(param) \
3533 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3534 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
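/*
 * Typical usage (see get_params__pre_init/get_params__post_init below):
 *
 *	param[0] = FW_PARAM_DEV(PORTVEC);
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
 */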
3537 * Partition chip resources for use between various PFs, VFs, etc.
3540 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3541 const char *name_prefix)
3543 const struct firmware *cfg = NULL;
3545 struct fw_caps_config_cmd caps;
3546 uint32_t mtype, moff, finicsum, cfcsum;
3549 * Figure out what configuration file to use. Pick the default config
3550 * file for the card if the user hasn't specified one explicitly.
3552 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3553 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3554 /* Card specific overrides go here. */
3555 if (pci_get_device(sc->dev) == 0x440a)
3556 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3558 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3559 } else if (strncmp(t4_cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0)
3560 goto use_built_in_config; /* go straight to config. */
3563 * We need to load another module if the profile is anything except
3564 * "default" or "flash".
3566 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3567 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3570 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3571 cfg = firmware_get(s);
3573 if (default_cfg != NULL) {
3574 device_printf(sc->dev,
3575 "unable to load module \"%s\" for "
3576 "configuration profile \"%s\", will use "
3577 "the default config file instead.\n",
3579 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3582 device_printf(sc->dev,
3583 "unable to load module \"%s\" for "
3584 "configuration profile \"%s\", will use "
3585 "the config file on the card's flash "
3586 "instead.\n", s, sc->cfg_file);
3587 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3593 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3594 default_cfg == NULL) {
3595 device_printf(sc->dev,
3596 "default config file not available, will use the config "
3597 "file on the card's flash instead.\n");
3598 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3601 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3603 const uint32_t *cfdata;
3604 uint32_t param, val, addr;
3606 KASSERT(cfg != NULL || default_cfg != NULL,
3607 ("%s: no config to upload", __func__));
3610 * Ask the firmware where it wants us to upload the config file.
3612 param = FW_PARAM_DEV(CF);
3613 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3615 /* No support for config file? Shouldn't happen. */
3616 device_printf(sc->dev,
3617 "failed to query config file location: %d.\n", rc);
3620 mtype = G_FW_PARAMS_PARAM_Y(val);
3621 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3624 * XXX: sheer laziness. We deliberately added 4 bytes of
3625 * useless stuffing/comments at the end of the config file so
3626 * it's ok to simply throw away the last remaining bytes when
3627 * the config file is not an exact multiple of 4. This also
3628 * helps with the validate_mt_off_len check.
3631 cflen = cfg->datasize & ~3;
3634 cflen = default_cfg->datasize & ~3;
3635 cfdata = default_cfg->data;
3638 if (cflen > FLASH_CFG_MAX_SIZE) {
3639 device_printf(sc->dev,
3640 "config file too long (%d, max allowed is %d). "
3641 "Will try to use the config on the card, if any.\n",
3642 cflen, FLASH_CFG_MAX_SIZE);
3643 goto use_config_on_flash;
3646 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3648 device_printf(sc->dev,
3649 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
3650 "Will try to use the config on the card, if any.\n",
3651 __func__, mtype, moff, cflen, rc);
3652 goto use_config_on_flash;
3654 write_via_memwin(sc, 2, addr, cfdata, cflen);
3656 use_config_on_flash:
3657 mtype = FW_MEMTYPE_FLASH;
3658 moff = t4_flash_cfg_addr(sc);
3661 bzero(&caps, sizeof(caps));
3662 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3663 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3664 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3665 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3666 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3667 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3669 device_printf(sc->dev,
3670 "failed to pre-process config file: %d "
3671 "(mtype %d, moff 0x%x). Will reset the firmware and retry "
3672 "with the built-in configuration.\n", rc, mtype, moff);
3674 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
3676 device_printf(sc->dev,
3677 "firmware reset failed: %d.\n", rc);
3678 if (rc != ETIMEDOUT && rc != EIO) {
3679 t4_fw_bye(sc, sc->mbox);
3680 sc->flags &= ~FW_OK;
3684 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "built-in");
3685 use_built_in_config:
3686 bzero(&caps, sizeof(caps));
3687 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3688 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3689 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3690 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3692 device_printf(sc->dev,
3693 "built-in configuration failed: %d.\n", rc);
3698 finicsum = be32toh(caps.finicsum);
3699 cfcsum = be32toh(caps.cfcsum);
3700 if (finicsum != cfcsum) {
3701 device_printf(sc->dev,
3702 "WARNING: config file checksum mismatch: %08x %08x\n",
3705 sc->cfcsum = cfcsum;
3707 #define LIMIT_CAPS(x) do { \
3708 caps.x &= htobe16(t4_##x##_allowed); \
3712 * Let the firmware know what features will (not) be used so it can tune
3713 * things accordingly.
3715 LIMIT_CAPS(nbmcaps);
3716 LIMIT_CAPS(linkcaps);
3717 LIMIT_CAPS(switchcaps);
3718 LIMIT_CAPS(niccaps);
3719 LIMIT_CAPS(toecaps);
3720 LIMIT_CAPS(rdmacaps);
3721 LIMIT_CAPS(cryptocaps);
3722 LIMIT_CAPS(iscsicaps);
3723 LIMIT_CAPS(fcoecaps);
3726 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
3728 * TOE and hashfilters are mutually exclusive. It is a config
3729 * file or firmware bug if both are reported as available. Try
3730 * to cope with the situation in non-debug builds by disabling
3733 MPASS(caps.toecaps == 0);
3740 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3741 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3742 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3743 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3745 device_printf(sc->dev,
3746 "failed to process config file: %d.\n", rc);
3750 firmware_put(cfg, FIRMWARE_UNLOAD);
3755 * Retrieve parameters that are needed (or nice to have) very early.
3758 get_params__pre_init(struct adapter *sc)
3761 uint32_t param[2], val[2];
3763 t4_get_version_info(sc);
3765 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3766 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3767 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3768 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3769 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3771 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3772 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3773 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3774 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3775 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3777 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3778 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3779 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3780 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3781 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3783 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3784 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3785 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3786 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3787 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3789 param[0] = FW_PARAM_DEV(PORTVEC);
3790 param[1] = FW_PARAM_DEV(CCLK);
3791 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3793 device_printf(sc->dev,
3794 "failed to query parameters (pre_init): %d.\n", rc);
3798 sc->params.portvec = val[0];
3799 sc->params.nports = bitcount32(val[0]);
3800 sc->params.vpd.cclk = val[1];
3802 /* Read device log parameters. */
3803 rc = -t4_init_devlog_params(sc, 1);
3805 fixup_devlog_params(sc);
3807 device_printf(sc->dev,
3808 "failed to get devlog parameters: %d.\n", rc);
3809 rc = 0; /* devlog isn't critical for device operation */
3816 * Retrieve various parameters that are of interest to the driver. The device
3817 * has been initialized by the firmware at this point.
3820 get_params__post_init(struct adapter *sc)
3823 uint32_t param[7], val[7];
3824 struct fw_caps_config_cmd caps;
3826 param[0] = FW_PARAM_PFVF(IQFLINT_START);
3827 param[1] = FW_PARAM_PFVF(EQ_START);
3828 param[2] = FW_PARAM_PFVF(FILTER_START);
3829 param[3] = FW_PARAM_PFVF(FILTER_END);
3830 param[4] = FW_PARAM_PFVF(L2T_START);
3831 param[5] = FW_PARAM_PFVF(L2T_END);
3832 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3833 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
3834 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
3835 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
3837 device_printf(sc->dev,
3838 "failed to query parameters (post_init): %d.\n", rc);
3842 sc->sge.iq_start = val[0];
3843 sc->sge.eq_start = val[1];
3844 sc->tids.ftid_base = val[2];
3845 sc->tids.nftids = val[3] - val[2] + 1;
3846 sc->params.ftid_min = val[2];
3847 sc->params.ftid_max = val[3];
3848 sc->vres.l2t.start = val[4];
3849 sc->vres.l2t.size = val[5] - val[4] + 1;
3850 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3851 ("%s: L2 table size (%u) larger than expected (%u)",
3852 __func__, sc->vres.l2t.size, L2T_SIZE));
3853 sc->params.core_vdd = val[6];
3856 * MPSBGMAP is queried separately because only recent firmwares support
3857 * it as a parameter and we don't want the compound query above to fail
3858 * on older firmwares.
3860 param[0] = FW_PARAM_DEV(MPSBGMAP);
3862 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3864 sc->params.mps_bg_map = val[0];
3866 sc->params.mps_bg_map = 0;
3869 * Determine whether the firmware supports the filter2 work request.
3870 * This is queried separately for the same reason as MPSBGMAP above.
3872 param[0] = FW_PARAM_DEV(FILTER2_WR);
3874 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3876 sc->params.filter2_wr_support = val[0] != 0;
3878 sc->params.filter2_wr_support = 0;
3880 /* get capabilities */
3881 bzero(&caps, sizeof(caps));
3882 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3883 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3884 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3885 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3887 device_printf(sc->dev,
3888 "failed to get card capabilities: %d.\n", rc);
3892 #define READ_CAPS(x) do { \
3893 sc->x = be16toh(caps.x); \
3896 READ_CAPS(linkcaps);
3897 READ_CAPS(switchcaps);
3900 READ_CAPS(rdmacaps);
3901 READ_CAPS(cryptocaps);
3902 READ_CAPS(iscsicaps);
3903 READ_CAPS(fcoecaps);
3905 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
3906 MPASS(chip_id(sc) > CHELSIO_T4);
3907 MPASS(sc->toecaps == 0);
3910 param[0] = FW_PARAM_DEV(NTID);
3911 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3913 device_printf(sc->dev,
3914 "failed to query HASHFILTER parameters: %d.\n", rc);
3917 sc->tids.ntids = val[0];
3918 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3919 sc->params.hash_filter = 1;
3921 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3922 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3923 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3924 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3925 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3927 device_printf(sc->dev,
3928 "failed to query NIC parameters: %d.\n", rc);
3931 sc->tids.etid_base = val[0];
3932 sc->params.etid_min = val[0];
3933 sc->params.etid_max = val[1];
3934 sc->tids.netids = val[1] - val[0] + 1;
3935 sc->params.eo_wr_cred = val[2];
3936 sc->params.ethoffload = 1;
3939 /* query offload-related parameters */
3940 param[0] = FW_PARAM_DEV(NTID);
3941 param[1] = FW_PARAM_PFVF(SERVER_START);
3942 param[2] = FW_PARAM_PFVF(SERVER_END);
3943 param[3] = FW_PARAM_PFVF(TDDP_START);
3944 param[4] = FW_PARAM_PFVF(TDDP_END);
3945 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3946 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3948 device_printf(sc->dev,
3949 "failed to query TOE parameters: %d.\n", rc);
3952 sc->tids.ntids = val[0];
3953 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3954 sc->tids.stid_base = val[1];
3955 sc->tids.nstids = val[2] - val[1] + 1;
3956 sc->vres.ddp.start = val[3];
3957 sc->vres.ddp.size = val[4] - val[3] + 1;
3958 sc->params.ofldq_wr_cred = val[5];
3959 sc->params.offload = 1;
3962 * The firmware attempts memfree TOE configuration for -SO cards
3963 * and will report toecaps=0 if it runs out of resources (this
3964 * depends on the config file). It may not report 0 for other
3965 * capabilities dependent on the TOE in this case. Set them to
3966 * 0 here so that the driver doesn't bother tracking resources
3967 * that will never be used.
3973 param[0] = FW_PARAM_PFVF(STAG_START);
3974 param[1] = FW_PARAM_PFVF(STAG_END);
3975 param[2] = FW_PARAM_PFVF(RQ_START);
3976 param[3] = FW_PARAM_PFVF(RQ_END);
3977 param[4] = FW_PARAM_PFVF(PBL_START);
3978 param[5] = FW_PARAM_PFVF(PBL_END);
3979 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3981 device_printf(sc->dev,
3982 "failed to query RDMA parameters(1): %d.\n", rc);
3985 sc->vres.stag.start = val[0];
3986 sc->vres.stag.size = val[1] - val[0] + 1;
3987 sc->vres.rq.start = val[2];
3988 sc->vres.rq.size = val[3] - val[2] + 1;
3989 sc->vres.pbl.start = val[4];
3990 sc->vres.pbl.size = val[5] - val[4] + 1;
3992 param[0] = FW_PARAM_PFVF(SQRQ_START);
3993 param[1] = FW_PARAM_PFVF(SQRQ_END);
3994 param[2] = FW_PARAM_PFVF(CQ_START);
3995 param[3] = FW_PARAM_PFVF(CQ_END);
3996 param[4] = FW_PARAM_PFVF(OCQ_START);
3997 param[5] = FW_PARAM_PFVF(OCQ_END);
3998 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4000 device_printf(sc->dev,
4001 "failed to query RDMA parameters(2): %d.\n", rc);
4004 sc->vres.qp.start = val[0];
4005 sc->vres.qp.size = val[1] - val[0] + 1;
4006 sc->vres.cq.start = val[2];
4007 sc->vres.cq.size = val[3] - val[2] + 1;
4008 sc->vres.ocq.start = val[4];
4009 sc->vres.ocq.size = val[5] - val[4] + 1;
4011 param[0] = FW_PARAM_PFVF(SRQ_START);
4012 param[1] = FW_PARAM_PFVF(SRQ_END);
4013 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4014 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4015 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4017 device_printf(sc->dev,
4018 "failed to query RDMA parameters(3): %d.\n", rc);
4021 sc->vres.srq.start = val[0];
4022 sc->vres.srq.size = val[1] - val[0] + 1;
4023 sc->params.max_ordird_qp = val[2];
4024 sc->params.max_ird_adapter = val[3];
4026 if (sc->iscsicaps) {
4027 param[0] = FW_PARAM_PFVF(ISCSI_START);
4028 param[1] = FW_PARAM_PFVF(ISCSI_END);
4029 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4031 device_printf(sc->dev,
4032 "failed to query iSCSI parameters: %d.\n", rc);
4035 sc->vres.iscsi.start = val[0];
4036 sc->vres.iscsi.size = val[1] - val[0] + 1;
4038 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4039 param[0] = FW_PARAM_PFVF(TLS_START);
4040 param[1] = FW_PARAM_PFVF(TLS_END);
4041 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4043 device_printf(sc->dev,
4044 "failed to query TLS parameters: %d.\n", rc);
4047 sc->vres.key.start = val[0];
4048 sc->vres.key.size = val[1] - val[0] + 1;
4051 t4_init_sge_params(sc);
4054 * We've got the params we wanted to query via the firmware. Now grab
4055 * some others directly from the chip.
4057 rc = t4_read_chip_settings(sc);
4063 set_params__post_init(struct adapter *sc)
4065 uint32_t param, val;
4070 /* ask for encapsulated CPLs */
4071 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4073 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4077 * Override the TOE timers with user-provided tunables. This is not the
4078 * recommended way to change the timers (the firmware config file is), so
4079 * these tunables are not documented.
4081 * All the timer tunables are in microseconds.
4083 if (t4_toe_keepalive_idle != 0) {
4084 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4085 v &= M_KEEPALIVEIDLE;
4086 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4087 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4089 if (t4_toe_keepalive_interval != 0) {
4090 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4091 v &= M_KEEPALIVEINTVL;
4092 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4093 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4095 if (t4_toe_keepalive_count != 0) {
4096 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4097 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4098 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4099 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4100 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4102 if (t4_toe_rexmt_min != 0) {
4103 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4105 t4_set_reg_field(sc, A_TP_RXT_MIN,
4106 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4108 if (t4_toe_rexmt_max != 0) {
4109 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4111 t4_set_reg_field(sc, A_TP_RXT_MAX,
4112 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4114 if (t4_toe_rexmt_count != 0) {
4115 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4116 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4117 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4118 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4119 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
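/*
 * Each A_TP_TCP_BACKOFF_REG holds four 8-bit backoff indices, so the
 * register is selected with (i & ~3) and the field within it with a shift
 * of (i & 3) * 8.
 */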
4121 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4122 if (t4_toe_rexmt_backoff[i] != -1) {
4123 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4124 shift = (i & 3) << 3;
4125 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4126 M_TIMERBACKOFFINDEX0 << shift, v << shift);
4133 #undef FW_PARAM_PFVF
4137 t4_set_desc(struct adapter *sc)
4140 struct adapter_params *p = &sc->params;
4142 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4144 device_set_desc_copy(sc->dev, buf);
4148 ifmedia_add4(struct ifmedia *ifm, int m)
4151 ifmedia_add(ifm, m, 0, NULL);
4152 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4153 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4154 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
4158 set_current_media(struct port_info *pi, struct ifmedia *ifm)
4160 struct link_config *lc;
4163 PORT_LOCK_ASSERT_OWNED(pi);
4165 /* Leave current media alone if it's already set to IFM_NONE. */
4166 if (ifm->ifm_cur != NULL &&
4167 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4171 if (lc->requested_aneg == AUTONEG_ENABLE &&
4172 lc->supported & FW_PORT_CAP_ANEG) {
4173 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4176 mword = IFM_ETHER | IFM_FDX;
4177 if (lc->requested_fc & PAUSE_TX)
4178 mword |= IFM_ETH_TXPAUSE;
4179 if (lc->requested_fc & PAUSE_RX)
4180 mword |= IFM_ETH_RXPAUSE;
4181 mword |= port_mword(pi, speed_to_fwspeed(lc->requested_speed));
4182 ifmedia_set(ifm, mword);
4186 build_medialist(struct port_info *pi, struct ifmedia *ifm)
4189 int unknown, mword, bit;
4190 struct link_config *lc;
4192 PORT_LOCK_ASSERT_OWNED(pi);
4194 if (pi->flags & FIXED_IFMEDIA)
4198 * First set up all the requested_ fields so that they comply with what's
4199 * supported by the port + transceiver. Note that this clobbers any
4200 * user preferences set via sysctl_pause_settings or sysctl_autoneg.
4205 * Now (re)build the ifmedia list.
4207 ifmedia_removeall(ifm);
4209 ss = G_FW_PORT_CAP_SPEED(lc->supported); /* Supported Speeds */
4210 if (__predict_false(ss == 0)) { /* not supposed to happen. */
4213 MPASS(LIST_EMPTY(&ifm->ifm_list));
4214 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
4215 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
4220 for (bit = 0; bit < fls(ss); bit++) {
4222 MPASS(speed & M_FW_PORT_CAP_SPEED);
4224 mword = port_mword(pi, speed);
4225 if (mword == IFM_NONE) {
4227 } else if (mword == IFM_UNKNOWN)
4230 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
4233 if (unknown > 0) /* Add one unknown for all unknown media types. */
4234 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
4235 if (lc->supported & FW_PORT_CAP_ANEG)
4236 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
4238 set_current_media(pi, ifm);
4242 * Update all the requested_* fields in the link config to something valid (and
4246 init_l1cfg(struct port_info *pi)
4248 struct link_config *lc = &pi->link_cfg;
4250 PORT_LOCK_ASSERT_OWNED(pi);
4253 lc->requested_speed = port_top_speed(pi) * 1000;
4255 if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) {
4256 lc->requested_aneg = AUTONEG_ENABLE;
4258 lc->requested_aneg = AUTONEG_DISABLE;
4261 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX);
4264 if (t4_fec & FEC_RS && lc->supported & FW_PORT_CAP_FEC_RS) {
4265 lc->requested_fec = FEC_RS;
4266 } else if (t4_fec & FEC_BASER_RS &&
4267 lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
4268 lc->requested_fec = FEC_BASER_RS;
4270 lc->requested_fec = 0;
4273 /* Use the suggested value provided by the firmware in acaps */
4274 if (lc->advertising & FW_PORT_CAP_FEC_RS &&
4275 lc->supported & FW_PORT_CAP_FEC_RS) {
4276 lc->requested_fec = FEC_RS;
4277 } else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS &&
4278 lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
4279 lc->requested_fec = FEC_BASER_RS;
4281 lc->requested_fec = 0;
4287 * Apply the settings in requested_* to the hardware. The parameters are
4288 * expected to be sane.
4291 apply_l1cfg(struct port_info *pi)
4293 struct adapter *sc = pi->adapter;
4294 struct link_config *lc = &pi->link_cfg;
4299 ASSERT_SYNCHRONIZED_OP(sc);
4300 PORT_LOCK_ASSERT_OWNED(pi);
4302 if (lc->requested_aneg == AUTONEG_ENABLE)
4303 MPASS(lc->supported & FW_PORT_CAP_ANEG);
4304 if (lc->requested_fc & PAUSE_TX)
4305 MPASS(lc->supported & FW_PORT_CAP_FC_TX);
4306 if (lc->requested_fc & PAUSE_RX)
4307 MPASS(lc->supported & FW_PORT_CAP_FC_RX);
4308 if (lc->requested_fec == FEC_RS)
4309 MPASS(lc->supported & FW_PORT_CAP_FEC_RS);
4310 if (lc->requested_fec == FEC_BASER_RS)
4311 MPASS(lc->supported & FW_PORT_CAP_FEC_BASER_RS);
4312 fwspeed = speed_to_fwspeed(lc->requested_speed);
4313 MPASS(fwspeed != 0);
4314 MPASS(lc->supported & fwspeed);
4316 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
4318 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
4320 lc->fc = lc->requested_fc;
4321 lc->fec = lc->requested_fec;
4326 #define FW_MAC_EXACT_CHUNK 7
4329 * Program the port's XGMAC based on parameters in ifnet. The caller also
4330 * indicates which parameters should be programmed (the rest are left alone).
4333 update_mac_settings(struct ifnet *ifp, int flags)
4336 struct vi_info *vi = ifp->if_softc;
4337 struct port_info *pi = vi->pi;
4338 struct adapter *sc = pi->adapter;
4339 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
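/*
 * Settings the caller did not ask to update are left at -1, which
 * t4_set_rxmode() is expected to treat as "no change".
 */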
4341 ASSERT_SYNCHRONIZED_OP(sc);
4342 KASSERT(flags, ("%s: not told what to update.", __func__));
4344 if (flags & XGMAC_MTU)
4347 if (flags & XGMAC_PROMISC)
4348 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
4350 if (flags & XGMAC_ALLMULTI)
4351 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
4353 if (flags & XGMAC_VLANEX)
4354 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
4356 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
4357 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
4358 allmulti, 1, vlanex, false);
4360 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
4366 if (flags & XGMAC_UCADDR) {
4367 uint8_t ucaddr[ETHER_ADDR_LEN];
4369 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
4370 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
4371 ucaddr, true, true);
4374 if_printf(ifp, "change_mac failed: %d\n", rc);
4377 vi->xact_addr_filt = rc;
4382 if (flags & XGMAC_MCADDRS) {
4383 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
4386 struct ifmultiaddr *ifma;
4389 if_maddr_rlock(ifp);
4390 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4391 if (ifma->ifma_addr->sa_family != AF_LINK)
4394 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
4395 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
4398 if (i == FW_MAC_EXACT_CHUNK) {
4399 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
4400 del, i, mcaddr, NULL, &hash, 0);
4403 for (j = 0; j < i; j++) {
4405 "failed to add mc address"
4407 "%02x:%02x:%02x rc=%d\n",
4408 mcaddr[j][0], mcaddr[j][1],
4409 mcaddr[j][2], mcaddr[j][3],
4410 mcaddr[j][4], mcaddr[j][5],
4420 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
4421 mcaddr, NULL, &hash, 0);
4424 for (j = 0; j < i; j++) {
4426 "failed to add mc address"
4428 "%02x:%02x:%02x rc=%d\n",
4429 mcaddr[j][0], mcaddr[j][1],
4430 mcaddr[j][2], mcaddr[j][3],
4431 mcaddr[j][4], mcaddr[j][5],
4438 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
4440 if_printf(ifp, "failed to set mc address hash: %d", rc);
4442 if_maddr_runlock(ifp);
4449 * {begin|end}_synchronized_op must be called from the same thread.
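 * A typical pairing, as in cxgbe_media_status() above:
 *
 *	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
 *		return;
 *	... code that must not run concurrently with other synchronized ops ...
 *	end_synchronized_op(sc, 0);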
4452 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
4458 /* the caller thinks it's ok to sleep, but is it really? */
4459 if (flags & SLEEP_OK)
4460 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
4461 "begin_synchronized_op");
4472 if (vi && IS_DOOMED(vi)) {
4482 if (!(flags & SLEEP_OK)) {
4487 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
4493 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
4496 sc->last_op = wmesg;
4497 sc->last_op_thr = curthread;
4498 sc->last_op_flags = flags;
4502 if (!(flags & HOLD_LOCK) || rc)
4509 * Tell if_ioctl and if_init that the VI is going away. This is
4510 * a special variant of begin_synchronized_op and must be paired with a
4511 * call to end_synchronized_op.
4514 doom_vi(struct adapter *sc, struct vi_info *vi)
4521 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
4524 sc->last_op = "t4detach";
4525 sc->last_op_thr = curthread;
4526 sc->last_op_flags = 0;
4532 * {begin|end}_synchronized_op must be called from the same thread.
4535 end_synchronized_op(struct adapter *sc, int flags)
4538 if (flags & LOCK_HELD)
4539 ADAPTER_LOCK_ASSERT_OWNED(sc);
4543 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
4550 cxgbe_init_synchronized(struct vi_info *vi)
4552 struct port_info *pi = vi->pi;
4553 struct adapter *sc = pi->adapter;
4554 struct ifnet *ifp = vi->ifp;
4556 struct sge_txq *txq;
4558 ASSERT_SYNCHRONIZED_OP(sc);
4560 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4561 return (0); /* already running */
4563 if (!(sc->flags & FULL_INIT_DONE) &&
4564 ((rc = adapter_full_init(sc)) != 0))
4565 return (rc); /* error message displayed already */
4567 if (!(vi->flags & VI_INIT_DONE) &&
4568 ((rc = vi_full_init(vi)) != 0))
4569 return (rc); /* error message displayed already */
4571 rc = update_mac_settings(ifp, XGMAC_ALL);
4573 goto done; /* error message displayed already */
4575 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
4577 if_printf(ifp, "enable_vi failed: %d\n", rc);
4582 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
4586 for_each_txq(vi, i, txq) {
4588 txq->eq.flags |= EQ_ENABLED;
4593 * The first iq of the first port to come up is used for tracing.
4595 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
4596 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
4597 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
4598 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
4599 V_QUEUENUMBER(sc->traceq));
4600 pi->flags |= HAS_TRACEQ;
4605 if (pi->up_vis++ == 0) {
4606 t4_update_port_info(pi);
4607 build_medialist(pi, &pi->media);
4610 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4612 if (pi->nvi > 1 || sc->flags & IS_VF)
4613 callout_reset(&vi->tick, hz, vi_tick, vi);
4615 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
4619 cxgbe_uninit_synchronized(vi);
4628 cxgbe_uninit_synchronized(struct vi_info *vi)
4630 struct port_info *pi = vi->pi;
4631 struct adapter *sc = pi->adapter;
4632 struct ifnet *ifp = vi->ifp;
4634 struct sge_txq *txq;
4636 ASSERT_SYNCHRONIZED_OP(sc);
4638 if (!(vi->flags & VI_INIT_DONE)) {
4639 if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4640 KASSERT(0, ("uninited VI is running"));
4641 if_printf(ifp, "uninited VI with running ifnet. "
4642 "vi->flags 0x%016lx, if_flags 0x%08x, "
4643 "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
4650 * Disable the VI so that all its data in either direction is discarded
4651 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
4652 * tick) intact as the TP can deliver negative advice or data that it's
4653 holding in its RAM (for an offloaded connection) even after the VI is disabled.
4656 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
4658 if_printf(ifp, "disable_vi failed: %d\n", rc);
4662 for_each_txq(vi, i, txq) {
4664 txq->eq.flags &= ~EQ_ENABLED;
4669 if (pi->nvi > 1 || sc->flags & IS_VF)
4670 callout_stop(&vi->tick);
4672 callout_stop(&pi->tick);
4673 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4677 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4679 if (pi->up_vis > 0) {
4684 pi->link_cfg.link_ok = 0;
4685 pi->link_cfg.speed = 0;
4686 pi->link_cfg.link_down_rc = 255;
4687 t4_os_link_changed(pi);
4688 pi->old_link_cfg = pi->link_cfg;
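/*
 * Teardown mirrors bring-up: the VI is disabled in the firmware first so
 * the MPS discards its traffic, the tx queues are marked disabled, the
 * tick callout is stopped, and IFF_DRV_RUNNING is cleared.  When the last
 * VI on the port goes down the link state is cleared (link_ok 0, speed 0)
 * and t4_os_link_changed reports the change to the stack.
 */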
4695 * It is ok for this function to fail midway and return right away. t4_detach
4696 * will walk the entire sc->irq list and clean up whatever is valid.
4699 t4_setup_intr_handlers(struct adapter *sc)
4701 int rc, rid, p, q, v;
4704 struct port_info *pi;
4706 struct sge *sge = &sc->sge;
4707 struct sge_rxq *rxq;
4709 struct sge_ofld_rxq *ofld_rxq;
4712 struct sge_nm_rxq *nm_rxq;
4715 int nbuckets = rss_getnumbuckets();
4722 rid = sc->intr_type == INTR_INTX ? 0 : 1;
4723 if (forwarding_intr_to_fwq(sc))
4724 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
4726 /* Multiple interrupts. */
4727 if (sc->flags & IS_VF)
4728 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
4729 ("%s: too few intr.", __func__));
4731 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
4732 ("%s: too few intr.", __func__));
4734 /* The first one is always error intr on PFs */
4735 if (!(sc->flags & IS_VF)) {
4736 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
4743 /* The second one is always the firmware event queue (first on VFs) */
4744 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
4750 for_each_port(sc, p) {
4752 for_each_vi(pi, v, vi) {
4753 vi->first_intr = rid - 1;
4755 if (vi->nnmrxq > 0) {
4756 int n = max(vi->nrxq, vi->nnmrxq);
4758 rxq = &sge->rxq[vi->first_rxq];
4760 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
4762 for (q = 0; q < n; q++) {
4763 snprintf(s, sizeof(s), "%x%c%x", p,
4769 irq->nm_rxq = nm_rxq++;
4771 rc = t4_alloc_irq(sc, irq, rid,
4772 t4_vi_intr, irq, s);
4777 bus_bind_intr(sc->dev, irq->res,
4778 rss_getcpu(q % nbuckets));
4786 for_each_rxq(vi, q, rxq) {
4787 snprintf(s, sizeof(s), "%x%c%x", p,
4789 rc = t4_alloc_irq(sc, irq, rid,
4794 bus_bind_intr(sc->dev, irq->res,
4795 rss_getcpu(q % nbuckets));
4803 for_each_ofld_rxq(vi, q, ofld_rxq) {
4804 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
4805 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
4816 MPASS(irq == &sc->irq[sc->intr_count]);
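/*
 * Resulting vector layout: the first vector is the PF error interrupt
 * (VFs do not get one), the next services the firmware event queue (the
 * first on a VF), and the remaining vectors are assigned per-VI, one per
 * NIC rx queue (shared with the netmap rx queue of the same index and
 * handled by t4_vi_intr when the VI has netmap queues), plus one per
 * offload rx queue.  When the kernel is built with RSS each queue vector
 * is bound to a CPU chosen from the RSS buckets (rss_getcpu(q % nbuckets)).
 */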
4822 adapter_full_init(struct adapter *sc)
4826 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4827 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4830 ASSERT_SYNCHRONIZED_OP(sc);
4831 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4832 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
4833 ("%s: FULL_INIT_DONE already", __func__));
4836 * queues that belong to the adapter (not any particular port).
4838 rc = t4_setup_adapter_queues(sc);
4842 for (i = 0; i < nitems(sc->tq); i++) {
4843 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
4844 taskqueue_thread_enqueue, &sc->tq[i]);
4845 if (sc->tq[i] == NULL) {
4846 device_printf(sc->dev,
4847 "failed to allocate task queue %d\n", i);
4851 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
4852 device_get_nameunit(sc->dev), i);
4855 MPASS(RSS_KEYSIZE == 40);
4856 rss_getkey((void *)&raw_rss_key[0]);
4857 for (i = 0; i < nitems(rss_key); i++) {
4858 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
4860 t4_write_rss_key(sc, &rss_key[0], -1, 1);
4863 if (!(sc->flags & IS_VF))
4865 sc->flags |= FULL_INIT_DONE;
4868 adapter_full_uninit(sc);
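/*
 * adapter_full_init does the work that is needed once per adapter (not
 * per port): the adapter-wide queues, the per-adapter taskqueues (one
 * thread each at PI_NET priority), and, when the kernel is built with
 * RSS, programming the kernel's RSS key into the chip.  Note that the
 * key words are written in reverse order and byte-swapped to big endian
 * before t4_write_rss_key.
 */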
4874 adapter_full_uninit(struct adapter *sc)
4878 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4880 t4_teardown_adapter_queues(sc);
4882 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
4883 taskqueue_free(sc->tq[i]);
4887 sc->flags &= ~FULL_INIT_DONE;
4893 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
4894 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
4895 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
4896 RSS_HASHTYPE_RSS_UDP_IPV6)
4898 /* Translates kernel hash types to hardware. */
4900 hashconfig_to_hashen(int hashconfig)
4904 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
4905 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
4906 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
4907 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
4908 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
4909 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4910 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4912 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
4913 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4914 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4916 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
4917 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4918 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
4919 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4924 /* Translates hardware hash types to kernel. */
4926 hashen_to_hashconfig(int hashen)
4930 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
4932 * If UDP hashing was enabled it must have been enabled for
4933 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
4934 * enabling any 4-tuple hash is a nonsensical configuration.
4936 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4937 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
4939 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4940 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
4941 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4942 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
4944 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4945 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
4946 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4947 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
4948 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
4949 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
4950 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
4951 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
4953 return (hashconfig);
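/*
 * The two translations above are intentionally not perfect inverses: the
 * kernel's UDP hash types are expressed to the hardware as UDPEN plus the
 * corresponding 4-tuple enable, so asking for UDP/IPv4 or UDP/IPv6
 * hashing also turns on the matching TCP 4-tuple hash.  vi_full_init
 * below XORs the round-tripped config with the requested one ("extra")
 * and warns about every hash that had to be forced on.
 */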
4958 vi_full_init(struct vi_info *vi)
4960 struct adapter *sc = vi->pi->adapter;
4961 struct ifnet *ifp = vi->ifp;
4963 struct sge_rxq *rxq;
4964 int rc, i, j, hashen;
4966 int nbuckets = rss_getnumbuckets();
4967 int hashconfig = rss_gethashconfig();
4971 ASSERT_SYNCHRONIZED_OP(sc);
4972 KASSERT((vi->flags & VI_INIT_DONE) == 0,
4973 ("%s: VI_INIT_DONE already", __func__));
4975 sysctl_ctx_init(&vi->ctx);
4976 vi->flags |= VI_SYSCTL_CTX;
4979 * Allocate tx/rx/fl queues for this VI.
4981 rc = t4_setup_vi_queues(vi);
4983 goto done; /* error message displayed already */
4986 * Setup RSS for this VI. Save a copy of the RSS table for later use.
4988 if (vi->nrxq > vi->rss_size) {
4989 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
4990 "some queues will never receive traffic.\n", vi->nrxq,
4992 } else if (vi->rss_size % vi->nrxq) {
4993 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
4994 "expect uneven traffic distribution.\n", vi->nrxq,
4998 if (vi->nrxq != nbuckets) {
4999 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
5000 "performance will be impacted.\n", vi->nrxq, nbuckets);
5003 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
5004 for (i = 0; i < vi->rss_size;) {
5006 j = rss_get_indirection_to_bucket(i);
5008 rxq = &sc->sge.rxq[vi->first_rxq + j];
5009 rss[i++] = rxq->iq.abs_id;
5011 for_each_rxq(vi, j, rxq) {
5012 rss[i++] = rxq->iq.abs_id;
5013 if (i == vi->rss_size)
5019 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
5022 if_printf(ifp, "rss_config failed: %d\n", rc);
5027 hashen = hashconfig_to_hashen(hashconfig);
5030 * We may have had to enable some hashes even though the global config
5031 * wants them disabled. This is a potential problem that must be
5032 * reported to the user.
5034 extra = hashen_to_hashconfig(hashen) ^ hashconfig;
5037 * If we consider only the supported hash types, then the enabled hashes
5038 * are a superset of the requested hashes. In other words, there cannot
5039 * be any supported hash that was requested but not enabled, but there
5040 * can be hashes that were not requested but had to be enabled.
5042 extra &= SUPPORTED_RSS_HASHTYPES;
5043 MPASS((extra & hashconfig) == 0);
5047 "global RSS config (0x%x) cannot be accommodated.\n",
5050 if (extra & RSS_HASHTYPE_RSS_IPV4)
5051 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
5052 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
5053 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
5054 if (extra & RSS_HASHTYPE_RSS_IPV6)
5055 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
5056 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
5057 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
5058 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
5059 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
5060 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
5061 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
5063 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
5064 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
5065 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5066 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
5068 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
5070 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
5075 vi->flags |= VI_INIT_DONE;
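/*
 * Summary of vi_full_init: create the VI's queues, fill the RSS
 * indirection table (using the kernel's bucket-to-queue mapping when RSS
 * is compiled in, plain round-robin over the rx queues otherwise), write
 * it with t4_config_rss_range, then set the hash enables and the default
 * queue (rss[0]) with t4_config_vi_rss.  The table is kept around in
 * vi->rss for later use.
 */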
5087 vi_full_uninit(struct vi_info *vi)
5089 struct port_info *pi = vi->pi;
5090 struct adapter *sc = pi->adapter;
5092 struct sge_rxq *rxq;
5093 struct sge_txq *txq;
5095 struct sge_ofld_rxq *ofld_rxq;
5096 struct sge_wrq *ofld_txq;
5099 if (vi->flags & VI_INIT_DONE) {
5101 /* Need to quiesce queues. */
5103 /* XXX: Only for the first VI? */
5104 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
5105 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
5107 for_each_txq(vi, i, txq) {
5108 quiesce_txq(sc, txq);
5112 for_each_ofld_txq(vi, i, ofld_txq) {
5113 quiesce_wrq(sc, ofld_txq);
5117 for_each_rxq(vi, i, rxq) {
5118 quiesce_iq(sc, &rxq->iq);
5119 quiesce_fl(sc, &rxq->fl);
5123 for_each_ofld_rxq(vi, i, ofld_rxq) {
5124 quiesce_iq(sc, &ofld_rxq->iq);
5125 quiesce_fl(sc, &ofld_rxq->fl);
5128 free(vi->rss, M_CXGBE);
5129 free(vi->nm_rss, M_CXGBE);
5132 t4_teardown_vi_queues(vi);
5133 vi->flags &= ~VI_INIT_DONE;
5139 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
5141 struct sge_eq *eq = &txq->eq;
5142 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
5144 (void) sc; /* unused */
5148 MPASS((eq->flags & EQ_ENABLED) == 0);
5152 /* Wait for the mp_ring to empty. */
5153 while (!mp_ring_is_idle(txq->r)) {
5154 mp_ring_check_drainage(txq->r, 0);
5155 pause("rquiesce", 1);
5158 /* Then wait for the hardware to finish. */
5159 while (spg->cidx != htobe16(eq->pidx))
5160 pause("equiesce", 1);
5162 /* Finally, wait for the driver to reclaim all descriptors. */
5163 while (eq->cidx != eq->pidx)
5164 pause("dquiesce", 1);
5168 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
5175 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
5177 (void) sc; /* unused */
5179 /* Synchronize with the interrupt handler */
5180 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
5185 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
5187 mtx_lock(&sc->sfl_lock);
5189 fl->flags |= FL_DOOMED;
5191 callout_stop(&sc->sfl_callout);
5192 mtx_unlock(&sc->sfl_lock);
5194 KASSERT((fl->flags & FL_STARVING) == 0,
5195 ("%s: still starving", __func__));
5199 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
5200 driver_intr_t *handler, void *arg, char *name)
5205 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
5206 RF_SHAREABLE | RF_ACTIVE);
5207 if (irq->res == NULL) {
5208 device_printf(sc->dev,
5209 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
5213 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
5214 NULL, handler, arg, &irq->tag);
5216 device_printf(sc->dev,
5217 "failed to setup interrupt for rid %d, name %s: %d\n",
5220 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
5226 t4_free_irq(struct adapter *sc, struct irq *irq)
5229 bus_teardown_intr(sc->dev, irq->res, irq->tag);
5231 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
5233 bzero(irq, sizeof(*irq));
5239 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
5242 regs->version = chip_id(sc) | chip_rev(sc) << 10;
5243 t4_get_regs(sc, buf, regs->len);
5246 #define A_PL_INDIR_CMD 0x1f8
5248 #define S_PL_AUTOINC 31
5249 #define M_PL_AUTOINC 0x1U
5250 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
5251 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
5253 #define S_PL_VFID 20
5254 #define M_PL_VFID 0xffU
5255 #define V_PL_VFID(x) ((x) << S_PL_VFID)
5256 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
5259 #define M_PL_ADDR 0xfffffU
5260 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
5261 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
5263 #define A_PL_INDIR_DATA 0x1fc
5266 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
5270 mtx_assert(&sc->reg_lock, MA_OWNED);
5271 if (sc->flags & IS_VF) {
5272 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
5273 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
5275 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5276 V_PL_VFID(G_FW_VIID_VIN(viid)) |
5277 V_PL_ADDR(VF_MPS_REG(reg)));
5278 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
5279 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
5281 return (((uint64_t)stats[1]) << 32 | stats[0]);
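/*
 * read_vf_stat: a VF can read its own MPS statistics directly through
 * VF_MPS_REG.  The PF reads a VF's counters through the PL indirect
 * window instead: A_PL_INDIR_CMD selects the VF and register (with
 * auto-increment enabled) and two back-to-back reads of A_PL_INDIR_DATA
 * return the low and high halves of the 64-bit counter.  Callers must
 * hold sc->reg_lock, as asserted above.
 */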
5285 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
5286 struct fw_vi_stats_vf *stats)
5289 #define GET_STAT(name) \
5290 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
5292 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
5293 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
5294 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
5295 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
5296 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
5297 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
5298 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
5299 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
5300 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
5301 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
5302 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
5303 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
5304 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
5305 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
5306 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
5307 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
5313 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
5317 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5318 V_PL_VFID(G_FW_VIID_VIN(viid)) |
5319 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
5320 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
5321 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
5322 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
5326 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
5329 const struct timeval interval = {0, 250000}; /* 250ms */
5331 if (!(vi->flags & VI_INIT_DONE))
5335 timevalsub(&tv, &interval);
5336 if (timevalcmp(&tv, &vi->last_refreshed, <))
5339 mtx_lock(&sc->reg_lock);
5340 t4_get_vi_stats(sc, vi->viid, &vi->stats);
5341 getmicrotime(&vi->last_refreshed);
5342 mtx_unlock(&sc->reg_lock);
5346 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
5348 u_int i, v, tnl_cong_drops, bg_map;
5350 const struct timeval interval = {0, 250000}; /* 250ms */
5353 timevalsub(&tv, &interval);
5354 if (timevalcmp(&tv, &pi->last_refreshed, <))
5358 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
5359 bg_map = pi->mps_bg_map;
5361 i = ffs(bg_map) - 1;
5362 mtx_lock(&sc->reg_lock);
5363 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
5364 A_TP_MIB_TNL_CNG_DROP_0 + i);
5365 mtx_unlock(&sc->reg_lock);
5366 tnl_cong_drops += v;
5367 bg_map &= ~(1 << i);
5369 pi->tnl_cong_drops = tnl_cong_drops;
5370 getmicrotime(&pi->last_refreshed);
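/*
 * Both refresh routines are rate limited: the statistics are re-read at
 * most once every 250ms (see the interval check against last_refreshed).
 * For the port stats the per-buffer-group tunnel congestion drops are
 * accumulated from the TP MIB (A_TP_MIB_TNL_CNG_DROP_0 + i) for every
 * buffer group set in pi->mps_bg_map.
 */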
5374 cxgbe_tick(void *arg)
5376 struct port_info *pi = arg;
5377 struct adapter *sc = pi->adapter;
5379 PORT_LOCK_ASSERT_OWNED(pi);
5380 cxgbe_refresh_stats(sc, pi);
5382 callout_schedule(&pi->tick, hz);
5388 struct vi_info *vi = arg;
5389 struct adapter *sc = vi->pi->adapter;
5391 vi_refresh_stats(sc, vi);
5393 callout_schedule(&vi->tick, hz);
5397 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
5401 if (arg != ifp || ifp->if_type != IFT_ETHER)
5404 vlan = VLAN_DEVAT(ifp, vid);
5405 VLAN_SETCOOKIE(vlan, ifp);
5409 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
5411 static char *caps_decoder[] = {
5412 "\20\001IPMI\002NCSI", /* 0: NBM */
5413 "\20\001PPP\002QFC\003DCBX", /* 1: link */
5414 "\20\001INGRESS\002EGRESS", /* 2: switch */
5415 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
5416 "\006HASHFILTER\007ETHOFLD",
5417 "\20\001TOE", /* 4: TOE */
5418 "\20\001RDDP\002RDMAC", /* 5: RDMA */
5419 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
5420 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
5421 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
5423 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
5424 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */
5425 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
5426 "\004PO_INITIATOR\005PO_TARGET",
5430 t4_sysctls(struct adapter *sc)
5432 struct sysctl_ctx_list *ctx;
5433 struct sysctl_oid *oid;
5434 struct sysctl_oid_list *children, *c0;
5435 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
5437 ctx = device_get_sysctl_ctx(sc->dev);
5442 oid = device_get_sysctl_tree(sc->dev);
5443 c0 = children = SYSCTL_CHILDREN(oid);
5445 sc->sc_do_rxcopy = 1;
5446 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5447 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5449 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5450 sc->params.nports, "# of ports");
5452 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5453 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
5454 sysctl_bitfield, "A", "available doorbells");
5456 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
5457 sc->params.vpd.cclk, "core clock frequency (in kHz)");
5459 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5460 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
5461 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
5462 "interrupt holdoff timer values (us)");
5464 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5465 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
5466 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
5467 "interrupt holdoff packet counter values");
5469 t4_sge_sysctls(sc, ctx, children);
5471 sc->lro_timeout = 100;
5472 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5473 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5475 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
5476 &sc->debug_flags, 0, "flags to enable runtime debugging");
5478 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
5479 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
5481 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5482 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5484 if (sc->flags & IS_VF)
5487 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5488 NULL, chip_rev(sc), "chip hardware revision");
5490 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
5491 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
5493 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
5494 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
5496 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
5497 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
5499 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
5500 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
5502 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
5503 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
5505 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
5506 sc->er_version, 0, "expansion ROM version");
5508 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
5509 sc->bs_version, 0, "bootstrap firmware version");
5511 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
5512 NULL, sc->params.scfg_vers, "serial config version");
5514 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
5515 NULL, sc->params.vpd_vers, "VPD version");
5517 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5518 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5520 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5521 sc->cfcsum, "config file checksum");
5523 #define SYSCTL_CAP(name, n, text) \
5524 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
5525 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
5526 sysctl_bitfield, "A", "available " text " capabilities")
5528 SYSCTL_CAP(nbmcaps, 0, "NBM");
5529 SYSCTL_CAP(linkcaps, 1, "link");
5530 SYSCTL_CAP(switchcaps, 2, "switch");
5531 SYSCTL_CAP(niccaps, 3, "NIC");
5532 SYSCTL_CAP(toecaps, 4, "TCP offload");
5533 SYSCTL_CAP(rdmacaps, 5, "RDMA");
5534 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
5535 SYSCTL_CAP(cryptocaps, 7, "crypto");
5536 SYSCTL_CAP(fcoecaps, 8, "FCoE");
5539 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
5540 NULL, sc->tids.nftids, "number of filters");
5542 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
5543 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
5544 "chip temperature (in Celsius)");
5546 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING |
5547 CTLFLAG_RD, sc, 0, sysctl_loadavg, "A",
5548 "microprocessor load averages (debug firmwares only)");
5550 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD,
5551 &sc->params.core_vdd, 0, "core Vdd (in mV)");
5553 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
5554 CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS,
5555 sysctl_cpus, "A", "local CPUs");
5557 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
5558 CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS,
5559 sysctl_cpus, "A", "preferred CPUs for interrupts");
5562 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
5564 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
5565 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
5566 "logs and miscellaneous information");
5567 children = SYSCTL_CHILDREN(oid);
5569 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
5570 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5571 sysctl_cctrl, "A", "congestion control");
5573 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
5574 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5575 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
5577 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
5578 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
5579 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
5581 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
5582 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
5583 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
5585 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
5586 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
5587 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
5589 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
5590 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
5591 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
5593 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
5594 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
5595 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
5597 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
5598 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5599 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
5600 "A", "CIM logic analyzer");
5602 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5603 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5604 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5606 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5607 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5608 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5610 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5611 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5612 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5614 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5615 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5616 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5618 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5619 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5620 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5622 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5623 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5624 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5626 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5627 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5628 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5630 if (chip_id(sc) > CHELSIO_T4) {
5631 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5632 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5633 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5635 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5636 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5637 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5640 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5641 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5642 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5644 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5645 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5646 sysctl_cim_qcfg, "A", "CIM queue configuration");
5648 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5649 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5650 sysctl_cpl_stats, "A", "CPL statistics");
5652 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5653 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5654 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5656 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5657 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5658 sysctl_devlog, "A", "firmware's device log");
5660 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5661 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5662 sysctl_fcoe_stats, "A", "FCoE statistics");
5664 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5665 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5666 sysctl_hw_sched, "A", "hardware scheduler");
5668 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5669 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5670 sysctl_l2t, "A", "hardware L2 table");
5672 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
5673 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5674 sysctl_smt, "A", "hardware source MAC table");
5676 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5677 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5678 sysctl_lb_stats, "A", "loopback statistics");
5680 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5681 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5682 sysctl_meminfo, "A", "memory regions");
5684 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5685 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5686 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
5687 "A", "MPS TCAM entries");
5689 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5690 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5691 sysctl_path_mtus, "A", "path MTUs");
5693 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5694 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5695 sysctl_pm_stats, "A", "PM statistics");
5697 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5698 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5699 sysctl_rdma_stats, "A", "RDMA statistics");
5701 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5702 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5703 sysctl_tcp_stats, "A", "TCP statistics");
5705 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5706 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5707 sysctl_tids, "A", "TID information");
5709 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5710 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5711 sysctl_tp_err_stats, "A", "TP error statistics");
5713 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
5714 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
5715 "TP logic analyzer event capture mask");
5717 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5718 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5719 sysctl_tp_la, "A", "TP logic analyzer");
5721 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5722 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5723 sysctl_tx_rate, "A", "Tx rate");
5725 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5726 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5727 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5729 if (chip_id(sc) >= CHELSIO_T5) {
5730 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5731 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5732 sysctl_wcwr_stats, "A", "write combined work requests");
5736 if (is_offload(sc)) {
5743 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5744 NULL, "TOE parameters");
5745 children = SYSCTL_CHILDREN(oid);
5747 sc->tt.cong_algorithm = -1;
5748 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
5749 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
5750 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
5753 sc->tt.sndbuf = 256 * 1024;
5754 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5755 &sc->tt.sndbuf, 0, "max hardware send buffer size");
5758 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5759 &sc->tt.ddp, 0, "DDP allowed");
5761 sc->tt.rx_coalesce = 1;
5762 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5763 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5766 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW,
5767 &sc->tt.tls, 0, "Inline TLS allowed");
5769 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
5770 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
5771 "I", "TCP ports that use inline TLS+TOE RX");
5773 sc->tt.tx_align = 1;
5774 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5775 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5777 sc->tt.tx_zcopy = 0;
5778 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
5779 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
5780 "Enable zero-copy aio_write(2)");
5782 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
5783 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
5784 "cop_managed_offloading", CTLFLAG_RW,
5785 &sc->tt.cop_managed_offloading, 0,
5786 "COP (Connection Offload Policy) controls all TOE offload");
5788 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
5789 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
5790 "TP timer tick (us)");
5792 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
5793 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
5794 "TCP timestamp tick (us)");
5796 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
5797 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
5800 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
5801 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
5802 "IU", "DACK timer (us)");
5804 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
5805 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
5806 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)");
5808 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
5809 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
5810 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)");
5812 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
5813 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
5814 sysctl_tp_timer, "LU", "Persist timer min (us)");
5816 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
5817 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
5818 sysctl_tp_timer, "LU", "Persist timer max (us)");
5820 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
5821 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
5822 sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
5824 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
5825 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
5826 sysctl_tp_timer, "LU", "Keepalive interval timer (us)");
5828 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
5829 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
5830 sysctl_tp_timer, "LU", "Initial SRTT (us)");
5832 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
5833 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
5834 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
5836 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
5837 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
5838 sysctl_tp_shift_cnt, "IU",
5839 "Number of SYN retransmissions before abort");
5841 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
5842 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
5843 sysctl_tp_shift_cnt, "IU",
5844 "Number of retransmissions before abort");
5846 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
5847 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
5848 sysctl_tp_shift_cnt, "IU",
5849 "Number of keepalive probes before abort");
5851 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
5852 CTLFLAG_RD, NULL, "TOE retransmit backoffs");
5853 children = SYSCTL_CHILDREN(oid);
5854 for (i = 0; i < 16; i++) {
5855 snprintf(s, sizeof(s), "%u", i);
5856 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
5857 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
5858 "IU", "TOE retransmit backoff");
5865 vi_sysctls(struct vi_info *vi)
5867 struct sysctl_ctx_list *ctx;
5868 struct sysctl_oid *oid;
5869 struct sysctl_oid_list *children;
5871 ctx = device_get_sysctl_ctx(vi->dev);
5874 * dev.v?(cxgbe|cxl).X.
5876 oid = device_get_sysctl_tree(vi->dev);
5877 children = SYSCTL_CHILDREN(oid);
5879 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
5880 vi->viid, "VI identifer");
5881 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5882 &vi->nrxq, 0, "# of rx queues");
5883 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5884 &vi->ntxq, 0, "# of tx queues");
5885 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5886 &vi->first_rxq, 0, "index of first rx queue");
5887 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5888 &vi->first_txq, 0, "index of first tx queue");
5889 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
5890 vi->rss_size, "size of RSS indirection table");
5892 if (IS_MAIN_VI(vi)) {
5893 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
5894 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5895 "Reserve queue 0 for non-flowid packets");
5899 if (vi->nofldrxq != 0) {
5900 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5902 "# of rx queues for offloaded TCP connections");
5903 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5905 "# of tx queues for offloaded TCP connections");
5906 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5907 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5908 "index of first TOE rx queue");
5909 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5910 CTLFLAG_RD, &vi->first_ofld_txq, 0,
5911 "index of first TOE tx queue");
5912 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
5913 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
5914 sysctl_holdoff_tmr_idx_ofld, "I",
5915 "holdoff timer index for TOE queues");
5916 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
5917 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
5918 sysctl_holdoff_pktc_idx_ofld, "I",
5919 "holdoff packet counter index for TOE queues");
5923 if (vi->nnmrxq != 0) {
5924 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5925 &vi->nnmrxq, 0, "# of netmap rx queues");
5926 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5927 &vi->nnmtxq, 0, "# of netmap tx queues");
5928 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5929 CTLFLAG_RD, &vi->first_nm_rxq, 0,
5930 "index of first netmap rx queue");
5931 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5932 CTLFLAG_RD, &vi->first_nm_txq, 0,
5933 "index of first netmap tx queue");
5937 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5938 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5939 "holdoff timer index");
5940 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5941 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5942 "holdoff packet counter index");
5944 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5945 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5947 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5948 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
5953 cxgbe_sysctls(struct port_info *pi)
5955 struct sysctl_ctx_list *ctx;
5956 struct sysctl_oid *oid;
5957 struct sysctl_oid_list *children, *children2;
5958 struct adapter *sc = pi->adapter;
5962 ctx = device_get_sysctl_ctx(pi->dev);
5967 oid = device_get_sysctl_tree(pi->dev);
5968 children = SYSCTL_CHILDREN(oid);
5970 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5971 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5972 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5973 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5974 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5975 "PHY temperature (in Celsius)");
5976 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5977 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5978 "PHY firmware version");
5981 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5982 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
5983 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5984 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
5985 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
5986 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
5987 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
5988 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
5989 "autonegotiation (-1 = not supported)");
5991 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
5992 port_top_speed(pi), "max speed (in Gbps)");
5993 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
5994 pi->mps_bg_map, "MPS buffer group map");
5995 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
5996 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
5998 if (sc->flags & IS_VF)
6002 * dev.(cxgbe|cxl).X.tc.
6004 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
6005 "Tx scheduler traffic classes (cl_rl)");
6006 for (i = 0; i < sc->chip_params->nsched_cls; i++) {
6007 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
6009 snprintf(name, sizeof(name), "%d", i);
6010 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
6011 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
6013 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
6014 &tc->flags, 0, "flags");
6015 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
6016 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
6017 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
6018 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
6019 sysctl_tc_params, "A", "traffic class parameters");
6023 * dev.cxgbe.X.stats.
6025 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
6026 NULL, "port statistics");
6027 children = SYSCTL_CHILDREN(oid);
6028 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
6029 &pi->tx_parse_error, 0,
6030 "# of tx packets with invalid length or # of segments");
6032 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
6033 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
6034 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
6035 sysctl_handle_t4_reg64, "QU", desc)
6037 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
6038 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
6039 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
6040 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
6041 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
6042 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
6043 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
6044 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
6045 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
6046 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
6047 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
6048 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
6049 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
6050 "# of tx frames in this range",
6051 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
6052 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
6053 "# of tx frames in this range",
6054 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
6055 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
6056 "# of tx frames in this range",
6057 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
6058 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
6059 "# of tx frames in this range",
6060 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
6061 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
6062 "# of tx frames in this range",
6063 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
6064 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
6065 "# of tx frames in this range",
6066 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
6067 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
6068 "# of tx frames in this range",
6069 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
6070 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
6071 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
6072 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
6073 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
6074 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
6075 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
6076 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
6077 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
6078 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
6079 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
6080 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
6081 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
6082 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
6083 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
6084 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
6085 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
6086 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
6087 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
6088 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
6089 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
6091 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
6092 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
6093 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
6094 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
6095 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
6096 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
6097 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
6098 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
6099 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
6100 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
6101 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
6102 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
6103 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
6104 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
6105 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
6106 "# of frames received with bad FCS",
6107 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
6108 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
6109 "# of frames received with length error",
6110 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
6111 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
6112 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
6113 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
6114 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
6115 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
6116 "# of rx frames in this range",
6117 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
6118 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
6119 "# of rx frames in this range",
6120 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
6121 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
6122 "# of rx frames in this range",
6123 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
6124 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
6125 "# of rx frames in this range",
6126 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
6127 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
6128 "# of rx frames in this range",
6129 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
6130 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
6131 "# of rx frames in this range",
6132 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
6133 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
6134 "# of rx frames in this range",
6135 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
6136 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
6137 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
6138 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
6139 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
6140 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
6141 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
6142 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
6143 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
6144 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
6145 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
6146 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
6147 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
6148 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
6149 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
6150 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
6151 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
6152 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
6153 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
6155 #undef SYSCTL_ADD_T4_REG64
6157 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
6158 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
6159 &pi->stats.name, desc)
6161 /* We get these from port_stats and they may be stale by up to 1s */
6162 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
6163 "# drops due to buffer-group 0 overflows");
6164 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
6165 "# drops due to buffer-group 1 overflows");
6166 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
6167 "# drops due to buffer-group 2 overflows");
6168 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
6169 "# drops due to buffer-group 3 overflows");
6170 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
6171 "# of buffer-group 0 truncated packets");
6172 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
6173 "# of buffer-group 1 truncated packets");
6174 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
6175 "# of buffer-group 2 truncated packets");
6176 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
6177 "# of buffer-group 3 truncated packets");
6179 #undef SYSCTL_ADD_T4_PORTSTAT
6181 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records",
6182 CTLFLAG_RD, &pi->tx_tls_records,
6183 "# of TLS records transmitted");
6184 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets",
6185 CTLFLAG_RD, &pi->tx_tls_octets,
6186 "# of payload octets in transmitted TLS records");
6187 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records",
6188 CTLFLAG_RD, &pi->rx_tls_records,
6189 "# of TLS records received");
6190 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets",
6191 CTLFLAG_RD, &pi->rx_tls_octets,
6192 "# of payload octets in received TLS records");
6196 sysctl_int_array(SYSCTL_HANDLER_ARGS)
6198 int rc, *i, space = 0;
6201 sbuf_new_for_sysctl(&sb, NULL, 64, req);
6202 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
6204 sbuf_printf(&sb, " ");
6205 sbuf_printf(&sb, "%d", *i);
6208 rc = sbuf_finish(&sb);
6214 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
6219 rc = sysctl_wire_old_buffer(req, 0);
6223 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6227 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
6228 rc = sbuf_finish(sb);
6235 sysctl_btphy(SYSCTL_HANDLER_ARGS)
6237 struct port_info *pi = arg1;
6239 struct adapter *sc = pi->adapter;
6243 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
6246 /* XXX: magic numbers */
6247 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
6249 end_synchronized_op(sc, 0);
6255 rc = sysctl_handle_int(oidp, &v, 0, req);
6260 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
6262 struct vi_info *vi = arg1;
6265 val = vi->rsrv_noflowq;
6266 rc = sysctl_handle_int(oidp, &val, 0, req);
6267 if (rc != 0 || req->newptr == NULL)
6270 if ((val >= 1) && (vi->ntxq > 1))
6271 vi->rsrv_noflowq = 1;
6273 vi->rsrv_noflowq = 0;
6279 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
6281 struct vi_info *vi = arg1;
6282 struct adapter *sc = vi->pi->adapter;
6284 struct sge_rxq *rxq;
6289 rc = sysctl_handle_int(oidp, &idx, 0, req);
6290 if (rc != 0 || req->newptr == NULL)
6293 if (idx < 0 || idx >= SGE_NTIMERS)
6296 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6301 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
6302 for_each_rxq(vi, i, rxq) {
6303 #ifdef atomic_store_rel_8
6304 atomic_store_rel_8(&rxq->iq.intr_params, v);
6306 rxq->iq.intr_params = v;
6311 end_synchronized_op(sc, LOCK_HELD);
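/*
 * The writable VI sysctls all follow the same pattern: read the current
 * value, let sysctl_handle_int copy it out and (for a write) copy the new
 * value in, validate it, then apply it under a synchronized op taken with
 * HOLD_LOCK and released with LOCK_HELD.  holdoff_tmr_idx can be changed
 * at any time and is pushed into every rx queue's iq.intr_params (with a
 * release store where the platform provides atomic_store_rel_8); the
 * pktc_idx and qsize knobs below only take effect before the queues are
 * created and return EBUSY afterwards.
 */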
6316 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
6318 struct vi_info *vi = arg1;
6319 struct adapter *sc = vi->pi->adapter;
6324 rc = sysctl_handle_int(oidp, &idx, 0, req);
6325 if (rc != 0 || req->newptr == NULL)
6328 if (idx < -1 || idx >= SGE_NCOUNTERS)
6331 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6336 if (vi->flags & VI_INIT_DONE)
6337 rc = EBUSY; /* cannot be changed once the queues are created */
6341 end_synchronized_op(sc, LOCK_HELD);
6346 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
6348 struct vi_info *vi = arg1;
6349 struct adapter *sc = vi->pi->adapter;
6352 qsize = vi->qsize_rxq;
6354 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6355 if (rc != 0 || req->newptr == NULL)
6358 if (qsize < 128 || (qsize & 7))
6361 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6366 if (vi->flags & VI_INIT_DONE)
6367 rc = EBUSY; /* cannot be changed once the queues are created */
6369 vi->qsize_rxq = qsize;
6371 end_synchronized_op(sc, LOCK_HELD);
6376 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
6378 struct vi_info *vi = arg1;
6379 struct adapter *sc = vi->pi->adapter;
6382 qsize = vi->qsize_txq;
6384 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6385 if (rc != 0 || req->newptr == NULL)
6388 if (qsize < 128 || qsize > 65536)
6391 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6396 if (vi->flags & VI_INIT_DONE)
6397 rc = EBUSY; /* cannot be changed once the queues are created */
6399 vi->qsize_txq = qsize;
6401 end_synchronized_op(sc, LOCK_HELD);
6406 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
6408 struct port_info *pi = arg1;
6409 struct adapter *sc = pi->adapter;
6410 struct link_config *lc = &pi->link_cfg;
6413 if (req->newptr == NULL) {
6415 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
6417 rc = sysctl_wire_old_buffer(req, 0);
6421 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6425 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
6426 rc = sbuf_finish(sb);
6432 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
6435 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6441 if (s[0] < '0' || s[0] > '9')
6442 return (EINVAL); /* not a number */
6444 if (n & ~(PAUSE_TX | PAUSE_RX))
6445 return (EINVAL); /* some other bit is set too */
6447 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6452 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
6453 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
6454 lc->requested_fc |= n;
6455 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6457 lc->fc = lc->requested_fc;
6458 set_current_media(pi, &pi->media);
6462 end_synchronized_op(sc, 0);
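/*
 * pause_settings reads back as a %b-decoded bitfield (PAUSE_RX is bit 1,
 * PAUSE_TX is bit 2 per the "bits" string above) and is written as a
 * single digit combining those bits.  A change is pushed to the firmware
 * with t4_link_l1cfg and, on success, reflected in the current link
 * config and the ifmedia state.
 */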
6469 sysctl_fec(SYSCTL_HANDLER_ARGS)
6471 struct port_info *pi = arg1;
6472 struct adapter *sc = pi->adapter;
6473 struct link_config *lc = &pi->link_cfg;
6476 if (req->newptr == NULL) {
6478 static char *bits = "\20\1RS\2BASER_RS\3RESERVED";
6480 rc = sysctl_wire_old_buffer(req, 0);
6484 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6488 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
6489 rc = sbuf_finish(sb);
6495 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
6498 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6504 if (s[0] < '0' || s[0] > '9')
6505 return (EINVAL); /* not a number */
6507 if (n & ~M_FW_PORT_CAP_FEC)
6508 return (EINVAL); /* some other bit is set too */
6510 return (EINVAL); /* at most one bit can be set */
6512 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6517 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
6518 lc->requested_fec = n &
6519 G_FW_PORT_CAP_FEC(lc->supported);
6520 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6522 lc->fec = lc->requested_fec;
6526 end_synchronized_op(sc, 0);
6533 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
6535 struct port_info *pi = arg1;
6536 struct adapter *sc = pi->adapter;
6537 struct link_config *lc = &pi->link_cfg;
6540 if (lc->supported & FW_PORT_CAP_ANEG)
6541 val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0;
6544 rc = sysctl_handle_int(oidp, &val, 0, req);
6545 if (rc != 0 || req->newptr == NULL)
6548 val = AUTONEG_DISABLE;
6550 val = AUTONEG_ENABLE;
6554 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6559 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) {
6563 if (lc->requested_aneg == val) {
6564 rc = 0; /* no change, do nothing. */
6567 old = lc->requested_aneg;
6568 lc->requested_aneg = val;
6569 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6571 lc->requested_aneg = old;
6573 set_current_media(pi, &pi->media);
6576 end_synchronized_op(sc, 0);
6581 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
6583 struct adapter *sc = arg1;
6587 val = t4_read_reg64(sc, reg);
6589 return (sysctl_handle_64(oidp, &val, 0, req));
6593 sysctl_temperature(SYSCTL_HANDLER_ARGS)
6595 struct adapter *sc = arg1;
6597 uint32_t param, val;
6599 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
6602 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6603 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
6604 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
6605 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
6606 end_synchronized_op(sc, 0);
6610 /* unknown is returned as 0 but we display -1 in that case */
6611 t = val == 0 ? -1 : val;
6613 rc = sysctl_handle_int(oidp, &t, 0, req);
6618 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
6620 struct adapter *sc = arg1;
6623 uint32_t param, val;
6625 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
6628 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6629 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
6630 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
6631 end_synchronized_op(sc, 0);
6635 rc = sysctl_wire_old_buffer(req, 0);
6639 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6643 if (val == 0xffffffff) {
6644 /* Only debug and custom firmwares report load averages. */
6645 sbuf_printf(sb, "not available");
6647 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
6648 (val >> 16) & 0xff);
6650 rc = sbuf_finish(sb);
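/*
 * The firmware reports its three load averages packed one per byte in the
 * 32-bit query result; 0xffffffff means the running firmware does not
 * report them (only debug and custom firmwares do).
 */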
6657 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
6659 struct adapter *sc = arg1;
6662 uint16_t incr[NMTUS][NCCTRL_WIN];
6663 static const char *dec_fac[] = {
6664 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
6668 rc = sysctl_wire_old_buffer(req, 0);
6672 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6676 t4_read_cong_tbl(sc, incr);
6678 for (i = 0; i < NCCTRL_WIN; ++i) {
6679 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
6680 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6681 incr[5][i], incr[6][i], incr[7][i]);
6682 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6683 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6684 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6685 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6688 rc = sbuf_finish(sb);
6694 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6695 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
6696 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
6697 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
6701 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6703 struct adapter *sc = arg1;
6705 int rc, i, n, qid = arg2;
6708 u_int cim_num_obq = sc->chip_params->cim_num_obq;
6710 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6711 ("%s: bad qid %d\n", __func__, qid));
6713 if (qid < CIM_NUM_IBQ) {
6716 n = 4 * CIM_IBQ_SIZE;
6717 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6718 rc = t4_read_cim_ibq(sc, qid, buf, n);
6720 /* outbound queue */
6723 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6724 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6725 rc = t4_read_cim_obq(sc, qid, buf, n);
6732 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
6734 rc = sysctl_wire_old_buffer(req, 0);
6738 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6744 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
6745 for (i = 0, p = buf; i < n; i += 16, p += 4)
6746 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6749 rc = sbuf_finish(sb);
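/*
 * CIM logic analyzer capture (T4/T5 record format).  The fields that are
 * decoded depend on F_UPDBGLACAPTPCONLY in the LA config register; T6 uses a
 * wider record and is handled by sysctl_cim_la_t6() below.
 */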
6757 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6759 struct adapter *sc = arg1;
6765 MPASS(chip_id(sc) <= CHELSIO_T5);
6767 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6771 rc = sysctl_wire_old_buffer(req, 0);
6775 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6779 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6782 rc = -t4_cim_read_la(sc, buf, NULL);
6786 sbuf_printf(sb, "Status Data PC%s",
6787 cfg & F_UPDBGLACAPTPCONLY ? "" :
6788 " LS0Stat LS0Addr LS0Data");
6790 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
6791 if (cfg & F_UPDBGLACAPTPCONLY) {
6792 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
6794 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
6795 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6796 p[4] & 0xff, p[5] >> 8);
6797 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
6798 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6799 p[1] & 0xf, p[2] >> 4);
6802 "\n %02x %x%07x %x%07x %08x %08x "
6804 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6805 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6810 rc = sbuf_finish(sb);
6818 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
6820 struct adapter *sc = arg1;
6826 MPASS(chip_id(sc) > CHELSIO_T5);
6828 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6832 rc = sysctl_wire_old_buffer(req, 0);
6836 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6840 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6843 rc = -t4_cim_read_la(sc, buf, NULL);
6847 sbuf_printf(sb, "Status Inst Data PC%s",
6848 cfg & F_UPDBGLACAPTPCONLY ? "" :
6849 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
6851 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
6852 if (cfg & F_UPDBGLACAPTPCONLY) {
6853 sbuf_printf(sb, "\n %02x %08x %08x %08x",
6854 p[3] & 0xff, p[2], p[1], p[0]);
6855 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
6856 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
6857 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
6858 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
6859 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
6860 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
6863 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
6864 "%08x %08x %08x %08x %08x %08x",
6865 (p[9] >> 16) & 0xff,
6866 p[9] & 0xffff, p[8] >> 16,
6867 p[8] & 0xffff, p[7] >> 16,
6868 p[7] & 0xffff, p[6] >> 16,
6869 p[2], p[1], p[0], p[5], p[4], p[3]);
6873 rc = sbuf_finish(sb);
6881 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6883 struct adapter *sc = arg1;
6889 rc = sysctl_wire_old_buffer(req, 0);
6893 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6897 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6900 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6903 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6904 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6908 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
6909 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6910 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
6911 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6912 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6913 (p[1] >> 2) | ((p[2] & 3) << 30),
6914 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6918 rc = sbuf_finish(sb);
6925 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6927 struct adapter *sc = arg1;
6933 rc = sysctl_wire_old_buffer(req, 0);
6937 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6941 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6944 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6947 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
6948 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6949 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
6950 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6951 p[4], p[3], p[2], p[1], p[0]);
6954 sbuf_printf(sb, "\n\nCntl ID Data");
6955 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6956 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
6957 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6960 rc = sbuf_finish(sb);
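/*
 * CIM queue configuration: base, size, threshold, and read/write pointer
 * state for the inbound queues followed by the outbound queues.  Either the
 * direct or the shadow read-address registers are used, depending on the
 * chip.
 */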
6967 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
6969 struct adapter *sc = arg1;
6972 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6973 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6974 uint16_t thres[CIM_NUM_IBQ];
6975 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
6976 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
6977 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
6979 cim_num_obq = sc->chip_params->cim_num_obq;
6981 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
6982 obq_rdaddr = A_UP_OBQ_0_REALADDR;
6984 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
6985 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
6987 nq = CIM_NUM_IBQ + cim_num_obq;
6989 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
6991 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
6995 t4_read_cimq_cfg(sc, base, size, thres);
6997 rc = sysctl_wire_old_buffer(req, 0);
7001 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7006 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
7008 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
7009 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
7010 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
7011 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7012 G_QUEREMFLITS(p[2]) * 16);
7013 for ( ; i < nq; i++, p += 4, wr += 2)
7014 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
7015 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
7016 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7017 G_QUEREMFLITS(p[2]) * 16);
7019 rc = sbuf_finish(sb);
7026 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
7028 struct adapter *sc = arg1;
7031 struct tp_cpl_stats stats;
7033 rc = sysctl_wire_old_buffer(req, 0);
7037 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7041 mtx_lock(&sc->reg_lock);
7042 t4_tp_get_cpl_stats(sc, &stats, 0);
7043 mtx_unlock(&sc->reg_lock);
7045 if (sc->chip_params->nchan > 2) {
7046 sbuf_printf(sb, " channel 0 channel 1"
7047 " channel 2 channel 3");
7048 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
7049 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
7050 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
7051 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
7053 sbuf_printf(sb, " channel 0 channel 1");
7054 sbuf_printf(sb, "\nCPL requests: %10u %10u",
7055 stats.req[0], stats.req[1]);
7056 sbuf_printf(sb, "\nCPL responses: %10u %10u",
7057 stats.rsp[0], stats.rsp[1]);
7060 rc = sbuf_finish(sb);
7067 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
7069 struct adapter *sc = arg1;
7072 struct tp_usm_stats stats;
7074 rc = sysctl_wire_old_buffer(req, 0);
7078 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7082 t4_get_usm_stats(sc, &stats, 1);
7084 sbuf_printf(sb, "Frames: %u\n", stats.frames);
7085 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
7086 sbuf_printf(sb, "Drops: %u", stats.drops);
7088 rc = sbuf_finish(sb);
7094 static const char * const devlog_level_strings[] = {
7095 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
7096 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
7097 [FW_DEVLOG_LEVEL_ERR] = "ERR",
7098 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
7099 [FW_DEVLOG_LEVEL_INFO] = "INFO",
7100 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
7103 static const char * const devlog_facility_strings[] = {
7104 [FW_DEVLOG_FACILITY_CORE] = "CORE",
7105 [FW_DEVLOG_FACILITY_CF] = "CF",
7106 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
7107 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
7108 [FW_DEVLOG_FACILITY_RES] = "RES",
7109 [FW_DEVLOG_FACILITY_HW] = "HW",
7110 [FW_DEVLOG_FACILITY_FLR] = "FLR",
7111 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
7112 [FW_DEVLOG_FACILITY_PHY] = "PHY",
7113 [FW_DEVLOG_FACILITY_MAC] = "MAC",
7114 [FW_DEVLOG_FACILITY_PORT] = "PORT",
7115 [FW_DEVLOG_FACILITY_VI] = "VI",
7116 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
7117 [FW_DEVLOG_FACILITY_ACL] = "ACL",
7118 [FW_DEVLOG_FACILITY_TM] = "TM",
7119 [FW_DEVLOG_FACILITY_QFC] = "QFC",
7120 [FW_DEVLOG_FACILITY_DCB] = "DCB",
7121 [FW_DEVLOG_FACILITY_ETH] = "ETH",
7122 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
7123 [FW_DEVLOG_FACILITY_RI] = "RI",
7124 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
7125 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
7126 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
7127 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
7128 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
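/*
 * Firmware device log.  The log is a circular buffer in card memory: entries
 * are read via a memory window, byte-swapped to host order, the oldest entry
 * is located by timestamp, and the log is printed from there forward.
 */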
7132 sysctl_devlog(SYSCTL_HANDLER_ARGS)
7134 struct adapter *sc = arg1;
7135 struct devlog_params *dparams = &sc->params.devlog;
7136 struct fw_devlog_e *buf, *e;
7137 int i, j, rc, nentries, first = 0;
7139 uint64_t ftstamp = UINT64_MAX;
7141 if (dparams->addr == 0)
7144 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
7148 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
7152 nentries = dparams->size / sizeof(struct fw_devlog_e);
7153 for (i = 0; i < nentries; i++) {
7156 if (e->timestamp == 0)
7159 e->timestamp = be64toh(e->timestamp);
7160 e->seqno = be32toh(e->seqno);
7161 for (j = 0; j < 8; j++)
7162 e->params[j] = be32toh(e->params[j]);
7164 if (e->timestamp < ftstamp) {
7165 ftstamp = e->timestamp;
7170 if (buf[first].timestamp == 0)
7171 goto done; /* nothing in the log */
7173 rc = sysctl_wire_old_buffer(req, 0);
7177 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7182 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
7183 "Seq#", "Tstamp", "Level", "Facility", "Message");
7188 if (e->timestamp == 0)
7191 sbuf_printf(sb, "%10d %15ju %8s %8s ",
7192 e->seqno, e->timestamp,
7193 (e->level < nitems(devlog_level_strings) ?
7194 devlog_level_strings[e->level] : "UNKNOWN"),
7195 (e->facility < nitems(devlog_facility_strings) ?
7196 devlog_facility_strings[e->facility] : "UNKNOWN"));
7197 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
7198 e->params[2], e->params[3], e->params[4],
7199 e->params[5], e->params[6], e->params[7]);
7201 if (++i == nentries)
7203 } while (i != first);
7205 rc = sbuf_finish(sb);
7213 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
7215 struct adapter *sc = arg1;
7218 struct tp_fcoe_stats stats[MAX_NCHAN];
7219 int i, nchan = sc->chip_params->nchan;
7221 rc = sysctl_wire_old_buffer(req, 0);
7225 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7229 for (i = 0; i < nchan; i++)
7230 t4_get_fcoe_stats(sc, i, &stats[i], 1);
7233 sbuf_printf(sb, " channel 0 channel 1"
7234 " channel 2 channel 3");
7235 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
7236 stats[0].octets_ddp, stats[1].octets_ddp,
7237 stats[2].octets_ddp, stats[3].octets_ddp);
7238 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
7239 stats[0].frames_ddp, stats[1].frames_ddp,
7240 stats[2].frames_ddp, stats[3].frames_ddp);
7241 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
7242 stats[0].frames_drop, stats[1].frames_drop,
7243 stats[2].frames_drop, stats[3].frames_drop);
7245 sbuf_printf(sb, " channel 0 channel 1");
7246 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
7247 stats[0].octets_ddp, stats[1].octets_ddp);
7248 sbuf_printf(sb, "\nframesDDP: %16u %16u",
7249 stats[0].frames_ddp, stats[1].frames_ddp);
7250 sbuf_printf(sb, "\nframesDrop: %16u %16u",
7251 stats[0].frames_drop, stats[1].frames_drop);
7254 rc = sbuf_finish(sb);
7261 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
7263 struct adapter *sc = arg1;
7266 unsigned int map, kbps, ipg, mode;
7267 unsigned int pace_tab[NTX_SCHED];
7269 rc = sysctl_wire_old_buffer(req, 0);
7273 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7277 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
7278 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
7279 t4_read_pace_tbl(sc, pace_tab);
7281 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
7282 "Class IPG (0.1 ns) Flow IPG (us)");
7284 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
7285 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
7286 sbuf_printf(sb, "\n %u %-5s %u ", i,
7287 (mode & (1 << i)) ? "flow" : "class", map & 3);
7289 sbuf_printf(sb, "%9u ", kbps);
7291 sbuf_printf(sb, " disabled ");
7294 sbuf_printf(sb, "%13u ", ipg);
7296 sbuf_printf(sb, " disabled ");
7299 sbuf_printf(sb, "%10u", pace_tab[i]);
7301 sbuf_printf(sb, " disabled");
7304 rc = sbuf_finish(sb);
7311 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
7313 struct adapter *sc = arg1;
7317 struct lb_port_stats s[2];
7318 static const char *stat_name[] = {
7319 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
7320 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
7321 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
7322 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
7323 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
7324 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
7325 "BG2FramesTrunc:", "BG3FramesTrunc:"
7328 rc = sysctl_wire_old_buffer(req, 0);
7332 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7336 memset(s, 0, sizeof(s));
7338 for (i = 0; i < sc->chip_params->nchan; i += 2) {
7339 t4_get_lb_stats(sc, i, &s[0]);
7340 t4_get_lb_stats(sc, i + 1, &s[1]);
7344 sbuf_printf(sb, "%s Loopback %u"
7345 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
7347 for (j = 0; j < nitems(stat_name); j++)
7348 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
7352 rc = sbuf_finish(sb);
7359 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
7362 struct port_info *pi = arg1;
7363 struct link_config *lc = &pi->link_cfg;
7366 rc = sysctl_wire_old_buffer(req, 0);
7369 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
7373 if (lc->link_ok || lc->link_down_rc == 255)
7374 sbuf_printf(sb, "n/a");
7376 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
7378 rc = sbuf_finish(sb);
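/*
 * Helpers for the meminfo sysctl below: memory regions are collected into
 * mem_desc entries, sorted by base address with mem_desc_cmp(), and printed
 * one per line by mem_region_show().
 */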
7391 mem_desc_cmp(const void *a, const void *b)
7393 return ((const struct mem_desc *)a)->base -
7394 ((const struct mem_desc *)b)->base;
7398 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
7406 size = to - from + 1;
7410 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
7411 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
7415 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
7417 struct adapter *sc = arg1;
7420 uint32_t lo, hi, used, alloc;
7421 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
7422 static const char *region[] = {
7423 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
7424 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
7425 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
7426 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
7427 "RQUDP region:", "PBL region:", "TXPBL region:",
7428 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
7429 "On-chip queues:", "TLS keys:",
7431 struct mem_desc avail[4];
7432 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
7433 struct mem_desc *md = mem;
7435 rc = sysctl_wire_old_buffer(req, 0);
7439 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7443 for (i = 0; i < nitems(mem); i++) {
7448 /* Find and sort the populated memory ranges */
7450 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
7451 if (lo & F_EDRAM0_ENABLE) {
7452 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
7453 avail[i].base = G_EDRAM0_BASE(hi) << 20;
7454 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
7458 if (lo & F_EDRAM1_ENABLE) {
7459 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
7460 avail[i].base = G_EDRAM1_BASE(hi) << 20;
7461 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
7465 if (lo & F_EXT_MEM_ENABLE) {
7466 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
7467 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
7468 avail[i].limit = avail[i].base +
7469 (G_EXT_MEM_SIZE(hi) << 20);
7470 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
7473 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
7474 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
7475 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
7476 avail[i].limit = avail[i].base +
7477 (G_EXT_MEM1_SIZE(hi) << 20);
7481 if (!i) /* no memory available */
7483 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
7485 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
7486 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
7487 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
7488 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7489 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
7490 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
7491 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
7492 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
7493 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
7495 /* the next few have explicit upper bounds */
7496 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
7497 md->limit = md->base - 1 +
7498 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
7499 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
7502 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
7503 md->limit = md->base - 1 +
7504 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
7505 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
7508 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7509 if (chip_id(sc) <= CHELSIO_T5)
7510 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
7512 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
7516 md->idx = nitems(region); /* hide it */
7520 #define ulp_region(reg) \
7521 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
7522 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
7524 ulp_region(RX_ISCSI);
7525 ulp_region(RX_TDDP);
7527 ulp_region(RX_STAG);
7529 ulp_region(RX_RQUDP);
7535 md->idx = nitems(region);
7538 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
7539 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
7542 if (sge_ctrl & F_VFIFO_ENABLE)
7543 size = G_DBVFIFO_SIZE(fifo_size);
7545 size = G_T6_DBVFIFO_SIZE(fifo_size);
7548 md->base = G_BASEADDR(t4_read_reg(sc,
7549 A_SGE_DBVFIFO_BADDR));
7550 md->limit = md->base + (size << 2) - 1;
7555 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
7558 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
7562 md->base = sc->vres.ocq.start;
7563 if (sc->vres.ocq.size)
7564 md->limit = md->base + sc->vres.ocq.size - 1;
7566 md->idx = nitems(region); /* hide it */
7569 md->base = sc->vres.key.start;
7570 if (sc->vres.key.size)
7571 md->limit = md->base + sc->vres.key.size - 1;
7573 md->idx = nitems(region); /* hide it */
7576 /* add any address-space holes, there can be up to 3 */
7577 for (n = 0; n < i - 1; n++)
7578 if (avail[n].limit < avail[n + 1].base)
7579 (md++)->base = avail[n].limit;
7581 (md++)->base = avail[n].limit;
7584 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
7586 for (lo = 0; lo < i; lo++)
7587 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
7588 avail[lo].limit - 1);
7590 sbuf_printf(sb, "\n");
7591 for (i = 0; i < n; i++) {
7592 if (mem[i].idx >= nitems(region))
7593 continue; /* skip holes */
7595 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
7596 mem_region_show(sb, region[mem[i].idx], mem[i].base,
7600 sbuf_printf(sb, "\n");
7601 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
7602 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
7603 mem_region_show(sb, "uP RAM:", lo, hi);
7605 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
7606 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
7607 mem_region_show(sb, "uP Extmem2:", lo, hi);
7609 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
7610 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
7612 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
7613 (lo & F_PMRXNUMCHN) ? 2 : 1);
7615 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
7616 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
7617 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
7619 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
7620 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
7621 sbuf_printf(sb, "%u p-structs\n",
7622 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
7624 for (i = 0; i < 4; i++) {
7625 if (chip_id(sc) > CHELSIO_T5)
7626 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
7628 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
7630 used = G_T5_USED(lo);
7631 alloc = G_T5_ALLOC(lo);
7634 alloc = G_ALLOC(lo);
7636 /* For T6 these are MAC buffer groups */
7637 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
7640 for (i = 0; i < sc->chip_params->nchan; i++) {
7641 if (chip_id(sc) > CHELSIO_T5)
7642 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
7644 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
7646 used = G_T5_USED(lo);
7647 alloc = G_T5_ALLOC(lo);
7650 alloc = G_ALLOC(lo);
7652 /* For T6 these are MAC buffer groups */
7654 "\nLoopback %d using %u pages out of %u allocated",
7658 rc = sbuf_finish(sb);
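/*
 * Converts the X/Y encoding read from the MPS TCAM into the value/mask form
 * used for display; the Ethernet address bytes are extracted from the Y word.
 */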
7665 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
7669 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
7673 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
7675 struct adapter *sc = arg1;
7679 MPASS(chip_id(sc) <= CHELSIO_T5);
7681 rc = sysctl_wire_old_buffer(req, 0);
7685 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7690 "Idx Ethernet address Mask Vld Ports PF"
7691 " VF Replication P0 P1 P2 P3 ML");
7692 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7693 uint64_t tcamx, tcamy, mask;
7694 uint32_t cls_lo, cls_hi;
7695 uint8_t addr[ETHER_ADDR_LEN];
7697 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
7698 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
7701 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7702 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7703 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7704 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
7705 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
7706 addr[3], addr[4], addr[5], (uintmax_t)mask,
7707 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
7708 G_PORTMAP(cls_hi), G_PF(cls_lo),
7709 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
7711 if (cls_lo & F_REPLICATE) {
7712 struct fw_ldst_cmd ldst_cmd;
7714 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7715 ldst_cmd.op_to_addrspace =
7716 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7717 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7718 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7719 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7720 ldst_cmd.u.mps.rplc.fid_idx =
7721 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7722 V_FW_LDST_CMD_IDX(i));
7724 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7728 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7729 sizeof(ldst_cmd), &ldst_cmd);
7730 end_synchronized_op(sc, 0);
7733 sbuf_printf(sb, "%36d", rc);
7736 sbuf_printf(sb, " %08x %08x %08x %08x",
7737 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7738 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7739 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7740 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7743 sbuf_printf(sb, "%36s", "");
7745 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
7746 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
7747 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
7751 (void) sbuf_finish(sb);
7753 rc = sbuf_finish(sb);
7760 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
7762 struct adapter *sc = arg1;
7766 MPASS(chip_id(sc) > CHELSIO_T5);
7768 rc = sysctl_wire_old_buffer(req, 0);
7772 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7776 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
7777 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
7779 " P0 P1 P2 P3 ML\n");
7781 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7782 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
7784 uint64_t tcamx, tcamy, val, mask;
7785 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
7786 uint8_t addr[ETHER_ADDR_LEN];
7788 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
7790 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
7792 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
7793 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7794 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7795 tcamy = G_DMACH(val) << 32;
7796 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7797 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7798 lookup_type = G_DATALKPTYPE(data2);
7799 port_num = G_DATAPORTNUM(data2);
7800 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7801 /* Inner header VNI */
7802 vniy = ((data2 & F_DATAVIDH2) << 23) |
7803 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7804 dip_hit = data2 & F_DATADIPHIT;
7809 vlan_vld = data2 & F_DATAVIDH2;
7810 ivlan = G_VIDL(val);
7813 ctl |= V_CTLXYBITSEL(1);
7814 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7815 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7816 tcamx = G_DMACH(val) << 32;
7817 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7818 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7819 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7820 /* Inner header VNI mask */
7821 vnix = ((data2 & F_DATAVIDH2) << 23) |
7822 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7828 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7830 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7831 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7833 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7834 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7835 "%012jx %06x %06x - - %3c"
7836 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
7837 addr[1], addr[2], addr[3], addr[4], addr[5],
7838 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
7839 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7840 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7841 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7843 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7844 "%012jx - - ", i, addr[0], addr[1],
7845 addr[2], addr[3], addr[4], addr[5],
7849 sbuf_printf(sb, "%4u Y ", ivlan);
7851 sbuf_printf(sb, " - N ");
7853 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
7854 lookup_type ? 'I' : 'O', port_num,
7855 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7856 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7857 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7861 if (cls_lo & F_T6_REPLICATE) {
7862 struct fw_ldst_cmd ldst_cmd;
7864 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7865 ldst_cmd.op_to_addrspace =
7866 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7867 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7868 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7869 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7870 ldst_cmd.u.mps.rplc.fid_idx =
7871 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7872 V_FW_LDST_CMD_IDX(i));
7874 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7878 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7879 sizeof(ldst_cmd), &ldst_cmd);
7880 end_synchronized_op(sc, 0);
7883 sbuf_printf(sb, "%72d", rc);
7886 sbuf_printf(sb, " %08x %08x %08x %08x"
7887 " %08x %08x %08x %08x",
7888 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
7889 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
7890 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
7891 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
7892 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7893 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7894 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7895 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7898 sbuf_printf(sb, "%72s", "");
7900 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
7901 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
7902 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
7903 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
7907 (void) sbuf_finish(sb);
7909 rc = sbuf_finish(sb);
7916 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
7918 struct adapter *sc = arg1;
7921 uint16_t mtus[NMTUS];
7923 rc = sysctl_wire_old_buffer(req, 0);
7927 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7931 t4_read_mtu_tbl(sc, mtus, NULL);
7933 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
7934 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
7935 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
7936 mtus[14], mtus[15]);
7938 rc = sbuf_finish(sb);
7945 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7947 struct adapter *sc = arg1;
7950 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
7951 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
7952 static const char *tx_stats[MAX_PM_NSTATS] = {
7953 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
7954 "Tx FIFO wait", NULL, "Tx latency"
7956 static const char *rx_stats[MAX_PM_NSTATS] = {
7957 "Read:", "Write bypass:", "Write mem:", "Flush:",
7958 "Rx FIFO wait", NULL, "Rx latency"
7961 rc = sysctl_wire_old_buffer(req, 0);
7965 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7969 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
7970 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
7972 sbuf_printf(sb, " Tx pcmds Tx bytes");
7973 for (i = 0; i < 4; i++) {
7974 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7978 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
7979 for (i = 0; i < 4; i++) {
7980 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7984 if (chip_id(sc) > CHELSIO_T5) {
7986 "\n Total wait Total occupancy");
7987 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7989 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7993 MPASS(i < nitems(tx_stats));
7996 "\n Reads Total wait");
7997 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7999 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8003 rc = sbuf_finish(sb);
8010 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
8012 struct adapter *sc = arg1;
8015 struct tp_rdma_stats stats;
8017 rc = sysctl_wire_old_buffer(req, 0);
8021 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8025 mtx_lock(&sc->reg_lock);
8026 t4_tp_get_rdma_stats(sc, &stats, 0);
8027 mtx_unlock(&sc->reg_lock);
8029 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
8030 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
8032 rc = sbuf_finish(sb);
8039 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
8041 struct adapter *sc = arg1;
8044 struct tp_tcp_stats v4, v6;
8046 rc = sysctl_wire_old_buffer(req, 0);
8050 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8054 mtx_lock(&sc->reg_lock);
8055 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
8056 mtx_unlock(&sc->reg_lock);
8060 sbuf_printf(sb, "OutRsts: %20u %20u\n",
8061 v4.tcp_out_rsts, v6.tcp_out_rsts);
8062 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
8063 v4.tcp_in_segs, v6.tcp_in_segs);
8064 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
8065 v4.tcp_out_segs, v6.tcp_out_segs);
8066 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
8067 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
8069 rc = sbuf_finish(sb);
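/*
 * Summary of the TID ranges (ATID, TID, STID, FTID, ETID) and how many of
 * each are in use.  When the LE hash is enabled the TID range is reported in
 * two pieces: the regular range and the hash region starting at the hash
 * base.
 */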
8076 sysctl_tids(SYSCTL_HANDLER_ARGS)
8078 struct adapter *sc = arg1;
8081 struct tid_info *t = &sc->tids;
8083 rc = sysctl_wire_old_buffer(req, 0);
8087 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8092 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
8097 sbuf_printf(sb, "TID range: ");
8098 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8101 if (chip_id(sc) <= CHELSIO_T5) {
8102 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
8103 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
8105 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
8106 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
8110 sbuf_printf(sb, "0-%u, ", b - 1);
8111 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
8113 sbuf_printf(sb, "0-%u", t->ntids - 1);
8114 sbuf_printf(sb, ", in use: %u\n",
8115 atomic_load_acq_int(&t->tids_in_use));
8119 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
8120 t->stid_base + t->nstids - 1, t->stids_in_use);
8124 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
8125 t->ftid_base + t->nftids - 1);
8129 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
8130 t->etid_base + t->netids - 1, t->etids_in_use);
8133 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
8134 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
8135 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
8137 rc = sbuf_finish(sb);
8144 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
8146 struct adapter *sc = arg1;
8149 struct tp_err_stats stats;
8151 rc = sysctl_wire_old_buffer(req, 0);
8155 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8159 mtx_lock(&sc->reg_lock);
8160 t4_tp_get_err_stats(sc, &stats, 0);
8161 mtx_unlock(&sc->reg_lock);
8163 if (sc->chip_params->nchan > 2) {
8164 sbuf_printf(sb, " channel 0 channel 1"
8165 " channel 2 channel 3\n");
8166 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
8167 stats.mac_in_errs[0], stats.mac_in_errs[1],
8168 stats.mac_in_errs[2], stats.mac_in_errs[3]);
8169 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
8170 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
8171 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
8172 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
8173 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
8174 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
8175 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
8176 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
8177 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
8178 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
8179 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
8180 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
8181 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
8182 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
8183 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
8184 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
8185 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
8186 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
8187 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
8188 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
8189 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
8191 sbuf_printf(sb, " channel 0 channel 1\n");
8192 sbuf_printf(sb, "macInErrs: %10u %10u\n",
8193 stats.mac_in_errs[0], stats.mac_in_errs[1]);
8194 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
8195 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
8196 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
8197 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
8198 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
8199 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
8200 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
8201 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
8202 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
8203 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
8204 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
8205 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
8206 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
8207 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
8210 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
8211 stats.ofld_no_neigh, stats.ofld_cong_defer);
8213 rc = sbuf_finish(sb);
8220 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
8222 struct adapter *sc = arg1;
8223 struct tp_params *tpp = &sc->params.tp;
8227 mask = tpp->la_mask >> 16;
8228 rc = sysctl_handle_int(oidp, &mask, 0, req);
8229 if (rc != 0 || req->newptr == NULL)
8233 tpp->la_mask = mask << 16;
8234 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
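/*
 * The TP logic analyzer records below are decoded with field_desc tables;
 * field_desc_show() prints "name: value" pairs and wraps the output at
 * roughly 80 columns.
 */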
8246 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
8252 uint64_t mask = (1ULL << f->width) - 1;
8253 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
8254 ((uintmax_t)v >> f->start) & mask);
8256 if (line_size + len >= 79) {
8258 sbuf_printf(sb, "\n ");
8260 sbuf_printf(sb, "%s ", buf);
8261 line_size += len + 1;
8264 sbuf_printf(sb, "\n");
8267 static const struct field_desc tp_la0[] = {
8268 { "RcfOpCodeOut", 60, 4 },
8270 { "WcfState", 52, 4 },
8271 { "RcfOpcSrcOut", 50, 2 },
8272 { "CRxError", 49, 1 },
8273 { "ERxError", 48, 1 },
8274 { "SanityFailed", 47, 1 },
8275 { "SpuriousMsg", 46, 1 },
8276 { "FlushInputMsg", 45, 1 },
8277 { "FlushInputCpl", 44, 1 },
8278 { "RssUpBit", 43, 1 },
8279 { "RssFilterHit", 42, 1 },
8281 { "InitTcb", 31, 1 },
8282 { "LineNumber", 24, 7 },
8284 { "EdataOut", 22, 1 },
8286 { "CdataOut", 20, 1 },
8287 { "EreadPdu", 19, 1 },
8288 { "CreadPdu", 18, 1 },
8289 { "TunnelPkt", 17, 1 },
8290 { "RcfPeerFin", 16, 1 },
8291 { "RcfReasonOut", 12, 4 },
8292 { "TxCchannel", 10, 2 },
8293 { "RcfTxChannel", 8, 2 },
8294 { "RxEchannel", 6, 2 },
8295 { "RcfRxChannel", 5, 1 },
8296 { "RcfDataOutSrdy", 4, 1 },
8298 { "RxOoDvld", 2, 1 },
8299 { "RxCongestion", 1, 1 },
8300 { "TxCongestion", 0, 1 },
8304 static const struct field_desc tp_la1[] = {
8305 { "CplCmdIn", 56, 8 },
8306 { "CplCmdOut", 48, 8 },
8307 { "ESynOut", 47, 1 },
8308 { "EAckOut", 46, 1 },
8309 { "EFinOut", 45, 1 },
8310 { "ERstOut", 44, 1 },
8315 { "DataIn", 39, 1 },
8316 { "DataInVld", 38, 1 },
8318 { "RxBufEmpty", 36, 1 },
8320 { "RxFbCongestion", 34, 1 },
8321 { "TxFbCongestion", 33, 1 },
8322 { "TxPktSumSrdy", 32, 1 },
8323 { "RcfUlpType", 28, 4 },
8325 { "Ebypass", 26, 1 },
8327 { "Static0", 24, 1 },
8329 { "Cbypass", 22, 1 },
8331 { "CPktOut", 20, 1 },
8332 { "RxPagePoolFull", 18, 2 },
8333 { "RxLpbkPkt", 17, 1 },
8334 { "TxLpbkPkt", 16, 1 },
8335 { "RxVfValid", 15, 1 },
8336 { "SynLearned", 14, 1 },
8337 { "SetDelEntry", 13, 1 },
8338 { "SetInvEntry", 12, 1 },
8339 { "CpcmdDvld", 11, 1 },
8340 { "CpcmdSave", 10, 1 },
8341 { "RxPstructsFull", 8, 2 },
8342 { "EpcmdDvld", 7, 1 },
8343 { "EpcmdFlush", 6, 1 },
8344 { "EpcmdTrimPrefix", 5, 1 },
8345 { "EpcmdTrimPostfix", 4, 1 },
8346 { "ERssIp4Pkt", 3, 1 },
8347 { "ERssIp6Pkt", 2, 1 },
8348 { "ERssTcpUdpPkt", 1, 1 },
8349 { "ERssFceFipPkt", 0, 1 },
8353 static const struct field_desc tp_la2[] = {
8354 { "CplCmdIn", 56, 8 },
8355 { "MpsVfVld", 55, 1 },
8362 { "DataIn", 39, 1 },
8363 { "DataInVld", 38, 1 },
8365 { "RxBufEmpty", 36, 1 },
8367 { "RxFbCongestion", 34, 1 },
8368 { "TxFbCongestion", 33, 1 },
8369 { "TxPktSumSrdy", 32, 1 },
8370 { "RcfUlpType", 28, 4 },
8372 { "Ebypass", 26, 1 },
8374 { "Static0", 24, 1 },
8376 { "Cbypass", 22, 1 },
8378 { "CPktOut", 20, 1 },
8379 { "RxPagePoolFull", 18, 2 },
8380 { "RxLpbkPkt", 17, 1 },
8381 { "TxLpbkPkt", 16, 1 },
8382 { "RxVfValid", 15, 1 },
8383 { "SynLearned", 14, 1 },
8384 { "SetDelEntry", 13, 1 },
8385 { "SetInvEntry", 12, 1 },
8386 { "CpcmdDvld", 11, 1 },
8387 { "CpcmdSave", 10, 1 },
8388 { "RxPstructsFull", 8, 2 },
8389 { "EpcmdDvld", 7, 1 },
8390 { "EpcmdFlush", 6, 1 },
8391 { "EpcmdTrimPrefix", 5, 1 },
8392 { "EpcmdTrimPostfix", 4, 1 },
8393 { "ERssIp4Pkt", 3, 1 },
8394 { "ERssIp6Pkt", 2, 1 },
8395 { "ERssTcpUdpPkt", 1, 1 },
8396 { "ERssFceFipPkt", 0, 1 },
8401 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
8404 field_desc_show(sb, *p, tp_la0);
8408 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
8412 sbuf_printf(sb, "\n");
8413 field_desc_show(sb, p[0], tp_la0);
8414 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8415 field_desc_show(sb, p[1], tp_la0);
8419 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
8423 sbuf_printf(sb, "\n");
8424 field_desc_show(sb, p[0], tp_la0);
8425 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8426 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
8430 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
8432 struct adapter *sc = arg1;
8437 void (*show_func)(struct sbuf *, uint64_t *, int);
8439 rc = sysctl_wire_old_buffer(req, 0);
8443 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8447 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
8449 t4_tp_read_la(sc, buf, NULL);
8452 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
8455 show_func = tp_la_show2;
8459 show_func = tp_la_show3;
8463 show_func = tp_la_show;
8466 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
8467 (*show_func)(sb, p, i);
8469 rc = sbuf_finish(sb);
8476 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
8478 struct adapter *sc = arg1;
8481 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
8483 rc = sysctl_wire_old_buffer(req, 0);
8487 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8491 t4_get_chan_txrate(sc, nrate, orate);
8493 if (sc->chip_params->nchan > 2) {
8494 sbuf_printf(sb, " channel 0 channel 1"
8495 " channel 2 channel 3\n");
8496 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
8497 nrate[0], nrate[1], nrate[2], nrate[3]);
8498 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
8499 orate[0], orate[1], orate[2], orate[3]);
8501 sbuf_printf(sb, " channel 0 channel 1\n");
8502 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
8503 nrate[0], nrate[1]);
8504 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
8505 orate[0], orate[1]);
8508 rc = sbuf_finish(sb);
8515 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
8517 struct adapter *sc = arg1;
8522 rc = sysctl_wire_old_buffer(req, 0);
8526 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8530 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
8533 t4_ulprx_read_la(sc, buf);
8536 sbuf_printf(sb, " Pcmd Type Message"
8538 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
8539 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
8540 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
8543 rc = sbuf_finish(sb);
8550 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
8552 struct adapter *sc = arg1;
8556 MPASS(chip_id(sc) >= CHELSIO_T5);
8558 rc = sysctl_wire_old_buffer(req, 0);
8562 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8566 v = t4_read_reg(sc, A_SGE_STAT_CFG);
8567 if (G_STATSOURCE_T5(v) == 7) {
8570 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
8572 sbuf_printf(sb, "total %d, incomplete %d",
8573 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8574 t4_read_reg(sc, A_SGE_STAT_MATCH));
8575 } else if (mode == 1) {
8576 sbuf_printf(sb, "total %d, data overflow %d",
8577 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8578 t4_read_reg(sc, A_SGE_STAT_MATCH));
8580 sbuf_printf(sb, "unknown mode %d", mode);
8583 rc = sbuf_finish(sb);
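/*
 * Parameters of one tx scheduling class: the rate limit in the appropriate
 * unit (% of port speed, Gbps/Mbps/Kbps, or pps) and whether the class
 * shapes traffic in aggregate or per-flow.  The port id and class index are
 * encoded in arg2.
 */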
8590 sysctl_tc_params(SYSCTL_HANDLER_ARGS)
8592 struct adapter *sc = arg1;
8593 struct tx_cl_rl_params tc;
8595 int i, rc, port_id, mbps, gbps;
8597 rc = sysctl_wire_old_buffer(req, 0);
8601 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8605 port_id = arg2 >> 16;
8606 MPASS(port_id < sc->params.nports);
8607 MPASS(sc->port[port_id] != NULL);
8609 MPASS(i < sc->chip_params->nsched_cls);
8611 mtx_lock(&sc->tc_lock);
8612 tc = sc->port[port_id]->sched_params->cl_rl[i];
8613 mtx_unlock(&sc->tc_lock);
8615 switch (tc.rateunit) {
8616 case SCHED_CLASS_RATEUNIT_BITS:
8617 switch (tc.ratemode) {
8618 case SCHED_CLASS_RATEMODE_REL:
8619 /* XXX: top speed or actual link speed? */
8620 gbps = port_top_speed(sc->port[port_id]);
8621 sbuf_printf(sb, "%u%% of %uGbps", tc.maxrate, gbps);
8623 case SCHED_CLASS_RATEMODE_ABS:
8624 mbps = tc.maxrate / 1000;
8625 gbps = tc.maxrate / 1000000;
8626 if (tc.maxrate == gbps * 1000000)
8627 sbuf_printf(sb, "%uGbps", gbps);
8628 else if (tc.maxrate == mbps * 1000)
8629 sbuf_printf(sb, "%uMbps", mbps);
8631 sbuf_printf(sb, "%uKbps", tc.maxrate);
8638 case SCHED_CLASS_RATEUNIT_PKTS:
8639 sbuf_printf(sb, "%upps", tc.maxrate);
8647 case SCHED_CLASS_MODE_CLASS:
8648 sbuf_printf(sb, " aggregate");
8650 case SCHED_CLASS_MODE_FLOW:
8651 sbuf_printf(sb, " per-flow");
8660 rc = sbuf_finish(sb);
8667 sysctl_cpus(SYSCTL_HANDLER_ARGS)
8669 struct adapter *sc = arg1;
8670 enum cpu_sets op = arg2;
8675 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
8678 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
8682 rc = sysctl_wire_old_buffer(req, 0);
8686 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8691 sbuf_printf(sb, "%d ", i);
8692 rc = sbuf_finish(sb);
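/*
 * Read/write handler for sc->tt.tls_rx_ports, the list of TCP ports treated
 * as TLS on receive.  Writing a single -1 clears the list; otherwise every
 * port must be between 1 and IPPORT_MAX.
 */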
8700 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
8702 struct adapter *sc = arg1;
8703 int *old_ports, *new_ports;
8704 int i, new_count, rc;
8706 if (req->newptr == NULL && req->oldptr == NULL)
8707 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
8708 sizeof(sc->tt.tls_rx_ports[0])));
8710 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
8714 if (sc->tt.num_tls_rx_ports == 0) {
8716 rc = SYSCTL_OUT(req, &i, sizeof(i));
8718 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
8719 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
8720 if (rc == 0 && req->newptr != NULL) {
8721 new_count = req->newlen / sizeof(new_ports[0]);
8722 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
8724 rc = SYSCTL_IN(req, new_ports, new_count *
8725 sizeof(new_ports[0]));
8729 /* Allow setting to a single '-1' to clear the list. */
8730 if (new_count == 1 && new_ports[0] == -1) {
8732 old_ports = sc->tt.tls_rx_ports;
8733 sc->tt.tls_rx_ports = NULL;
8734 sc->tt.num_tls_rx_ports = 0;
8736 free(old_ports, M_CXGBE);
8738 for (i = 0; i < new_count; i++) {
8739 if (new_ports[i] < 1 ||
8740 new_ports[i] > IPPORT_MAX) {
8747 old_ports = sc->tt.tls_rx_ports;
8748 sc->tt.tls_rx_ports = new_ports;
8749 sc->tt.num_tls_rx_ports = new_count;
8751 free(old_ports, M_CXGBE);
8755 free(new_ports, M_CXGBE);
8757 end_synchronized_op(sc, 0);
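/*
 * Formats val / factor as a decimal string, appending the remainder (with
 * trailing zeroes stripped) only when the division is inexact.
 */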
8762 unit_conv(char *buf, size_t len, u_int val, u_int factor)
8764 u_int rem = val % factor;
8767 snprintf(buf, len, "%u", val / factor);
8769 while (rem % 10 == 0)
8771 snprintf(buf, len, "%u.%u", val / factor, rem);
8776 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
8778 struct adapter *sc = arg1;
8781 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8783 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8787 re = G_TIMERRESOLUTION(res);
8790 /* TCP timestamp tick */
8791 re = G_TIMESTAMPRESOLUTION(res);
8795 re = G_DELAYEDACKRESOLUTION(res);
8801 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
8803 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
8807 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
8809 struct adapter *sc = arg1;
8810 u_int res, dack_re, v;
8811 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8813 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8814 dack_re = G_DELAYEDACKRESOLUTION(res);
8815 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
8817 return (sysctl_handle_int(oidp, &v, 0, req));
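/*
 * Converts a TP timer register to microseconds:
 *   tp_tick_us = (core clock period in ps << TIMERRESOLUTION) / 1000000
 * and the value reported is tp_tick_us times the register contents (the
 * INITSRTT field in the case of A_TP_INIT_SRTT).
 */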
8821 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
8823 struct adapter *sc = arg1;
8826 u_long tp_tick_us, v;
8827 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8829 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
8830 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
8831 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
8832 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
8834 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
8835 tp_tick_us = (cclk_ps << tre) / 1000000;
8837 if (reg == A_TP_INIT_SRTT)
8838 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
8840 v = tp_tick_us * t4_read_reg(sc, reg);
8842 return (sysctl_handle_long(oidp, &v, 0, req));
8846 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
8847 * passed to this function.
8850 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
8852 struct adapter *sc = arg1;
8856 MPASS(idx >= 0 && idx <= 24);
8858 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
8860 return (sysctl_handle_int(oidp, &v, 0, req));
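/*
 * Returns one entry of the TCP backoff table.  Four entries are packed per
 * backoff register, one per byte; idx selects the register (idx & ~3) and
 * the byte within it.
 */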
8864 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
8866 struct adapter *sc = arg1;
8870 MPASS(idx >= 0 && idx < 16);
8872 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
8873 shift = (idx & 3) << 3;
8874 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
8876 return (sysctl_handle_int(oidp, &v, 0, req));
8880 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
8882 struct vi_info *vi = arg1;
8883 struct adapter *sc = vi->pi->adapter;
8885 struct sge_ofld_rxq *ofld_rxq;
8888 idx = vi->ofld_tmr_idx;
8890 rc = sysctl_handle_int(oidp, &idx, 0, req);
8891 if (rc != 0 || req->newptr == NULL)
8894 if (idx < 0 || idx >= SGE_NTIMERS)
8897 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8902 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
8903 for_each_ofld_rxq(vi, i, ofld_rxq) {
8904 #ifdef atomic_store_rel_8
8905 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
8907 ofld_rxq->iq.intr_params = v;
8910 vi->ofld_tmr_idx = idx;
8912 end_synchronized_op(sc, LOCK_HELD);
8917 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
8919 struct vi_info *vi = arg1;
8920 struct adapter *sc = vi->pi->adapter;
8923 idx = vi->ofld_pktc_idx;
8925 rc = sysctl_handle_int(oidp, &idx, 0, req);
8926 if (rc != 0 || req->newptr == NULL)
8929 if (idx < -1 || idx >= SGE_NCOUNTERS)
8932 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8937 if (vi->flags & VI_INIT_DONE)
8938 rc = EBUSY; /* cannot be changed once the queues are created */
8940 vi->ofld_pktc_idx = idx;
8942 end_synchronized_op(sc, LOCK_HELD);
8948 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
8952 if (cntxt->cid > M_CTXTQID)
8955 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
8956 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
8959 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
8963 if (sc->flags & FW_OK) {
8964 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
8971 * Read via firmware failed or wasn't even attempted. Read directly via
8974 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
8976 end_synchronized_op(sc, 0);
8981 load_fw(struct adapter *sc, struct t4_data *fw)
8986 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
8991 * The firmware, with the sole exception of the memory parity error
8992 * handler, runs from memory and not flash. It is almost always safe to
8993 * install a new firmware on a running system. Just set bit 1 in
8994 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
8996 if (sc->flags & FULL_INIT_DONE &&
8997 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
9002 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
9003 if (fw_data == NULL) {
9008 rc = copyin(fw->data, fw_data, fw->len);
9010 rc = -t4_load_fw(sc, fw_data, fw->len);
9012 free(fw_data, M_CXGBE);
9014 end_synchronized_op(sc, 0);
9019 load_cfg(struct adapter *sc, struct t4_data *cfg)
9022 uint8_t *cfg_data = NULL;
9024 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9028 if (cfg->len == 0) {
9030 rc = -t4_load_cfg(sc, NULL, 0);
9034 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
9035 if (cfg_data == NULL) {
9040 rc = copyin(cfg->data, cfg_data, cfg->len);
9042 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
9044 free(cfg_data, M_CXGBE);
9046 end_synchronized_op(sc, 0);
9051 load_boot(struct adapter *sc, struct t4_bootrom *br)
9054 uint8_t *br_data = NULL;
9057 if (br->len > 1024 * 1024)
9060 if (br->pf_offset == 0) {
9062 if (br->pfidx_addr > 7)
9064 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
9065 A_PCIE_PF_EXPROM_OFST)));
9066 } else if (br->pf_offset == 1) {
9068 offset = G_OFFSET(br->pfidx_addr);
9073 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
9079 rc = -t4_load_boot(sc, NULL, offset, 0);
9083 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
9084 if (br_data == NULL) {
9089 rc = copyin(br->data, br_data, br->len);
9091 rc = -t4_load_boot(sc, br_data, offset, br->len);
9093 free(br_data, M_CXGBE);
9095 end_synchronized_op(sc, 0);
9100 load_bootcfg(struct adapter *sc, struct t4_data *bc)
9103 uint8_t *bc_data = NULL;
9105 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9111 rc = -t4_load_bootcfg(sc, NULL, 0);
9115 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
9116 if (bc_data == NULL) {
9121 rc = copyin(bc->data, bc_data, bc->len);
9123 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
9125 free(bc_data, M_CXGBE);
9127 end_synchronized_op(sc, 0);
9132 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
9135 struct cudbg_init *cudbg;
9138 /* buf is large, don't block if no memory is available */
9139 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
9143 handle = cudbg_alloc_handle();
9144 if (handle == NULL) {
9149 cudbg = cudbg_get_init(handle);
9151 cudbg->print = (cudbg_print_cb)printf;
9154 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
9155 __func__, dump->wr_flash, dump->len, dump->data);
9159 cudbg->use_flash = 1;
9160 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
9161 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
9163 rc = cudbg_collect(handle, buf, &dump->len);
9167 rc = copyout(buf, dump->data, dump->len);
9169 cudbg_free_handle(handle);
9175 free_offload_policy(struct t4_offload_policy *op)
9177 struct offload_rule *r;
9184 for (i = 0; i < op->nrules; i++, r++) {
9185 free(r->bpf_prog.bf_insns, M_CXGBE);
9187 free(op->rule, M_CXGBE);
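/*
 * Installs a new offload policy: the rules are copied in from userspace,
 * each rule's open_type, settings, and BPF filter are validated (including
 * bpf_validate()), and the new policy replaces the old one under the
 * policy_lock write lock.  The previous policy is then freed.
 */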
9192 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
9195 struct t4_offload_policy *op, *old;
9196 struct bpf_program *bf;
9197 const struct offload_settings *s;
9198 struct offload_rule *r;
9201 if (!is_offload(sc))
9204 if (uop->nrules == 0) {
9205 /* Delete installed policies. */
9208 } else if (uop->nrules > 256) { /* arbitrary */
9212 /* Copy userspace offload policy to kernel */
9213 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
9214 op->nrules = uop->nrules;
9215 len = op->nrules * sizeof(struct offload_rule);
9216 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9217 rc = copyin(uop->rule, op->rule, len);
9219 free(op->rule, M_CXGBE);
9225 for (i = 0; i < op->nrules; i++, r++) {
9227 /* Validate open_type */
9228 if (r->open_type != OPEN_TYPE_LISTEN &&
9229 r->open_type != OPEN_TYPE_ACTIVE &&
9230 r->open_type != OPEN_TYPE_PASSIVE &&
9231 r->open_type != OPEN_TYPE_DONTCARE) {
9234 * Rules 0 to i have malloc'd filters that need to be
9235 * freed. Rules i+1 to nrules have userspace pointers
9236 * and should be left alone.
9239 free_offload_policy(op);
9243 /* Validate settings */
9245 if ((s->offload != 0 && s->offload != 1) ||
9246 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
9247 s->sched_class < -1 ||
9248 s->sched_class >= sc->chip_params->nsched_cls) {
9254 u = bf->bf_insns; /* userspace ptr */
9255 bf->bf_insns = NULL;
9256 if (bf->bf_len == 0) {
9257 /* legal, matches everything */
9260 len = bf->bf_len * sizeof(*bf->bf_insns);
9261 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9262 rc = copyin(u, bf->bf_insns, len);
9266 if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
9272 rw_wlock(&sc->policy_lock);
9275 rw_wunlock(&sc->policy_lock);
9276 free_offload_policy(old);
9281 #define MAX_READ_BUF_SIZE (128 * 1024)
9283 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
9285 uint32_t addr, remaining, n;
9290 rc = validate_mem_range(sc, mr->addr, mr->len);
9294 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
9296 remaining = mr->len;
9297 dst = (void *)mr->data;
9300 n = min(remaining, MAX_READ_BUF_SIZE);
9301 read_via_memwin(sc, 2, addr, buf, n);
9303 rc = copyout(buf, dst, n);
9315 #undef MAX_READ_BUF_SIZE
9318 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
9322 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
9325 if (i2cd->len > sizeof(i2cd->data))
9328 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
9331 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
9332 i2cd->offset, i2cd->len, &i2cd->data[0]);
9333 end_synchronized_op(sc, 0);
9339 t4_os_find_pci_capability(struct adapter *sc, int cap)
9343 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
9347 t4_os_pci_save_state(struct adapter *sc)
9350 struct pci_devinfo *dinfo;
9353 dinfo = device_get_ivars(dev);
9355 pci_cfg_save(dev, dinfo, 0);
9360 t4_os_pci_restore_state(struct adapter *sc)
9363 struct pci_devinfo *dinfo;
9366 dinfo = device_get_ivars(dev);
9368 pci_cfg_restore(dev, dinfo);
9373 t4_os_portmod_changed(struct port_info *pi)
9375 struct adapter *sc = pi->adapter;
9378 static const char *mod_str[] = {
9379 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
9382 MPASS((pi->flags & FIXED_IFMEDIA) == 0);
9385 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
9387 build_medialist(pi, &pi->media);
9390 end_synchronized_op(sc, LOCK_HELD);
9394 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
9395 if_printf(ifp, "transceiver unplugged.\n");
9396 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
9397 if_printf(ifp, "unknown transceiver inserted.\n");
9398 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
9399 if_printf(ifp, "unsupported transceiver inserted.\n");
9400 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
9401 if_printf(ifp, "%dGbps %s transceiver inserted.\n",
9402 port_top_speed(pi), mod_str[pi->mod_type]);
9404 if_printf(ifp, "transceiver (type %d) inserted.\n",
9410 t4_os_link_changed(struct port_info *pi)
9414 struct link_config *lc;
9417 PORT_LOCK_ASSERT_OWNED(pi);
9419 for_each_vi(pi, v, vi) {
9426 ifp->if_baudrate = IF_Mbps(lc->speed);
9427 if_link_state_change(ifp, LINK_STATE_UP);
9429 if_link_state_change(ifp, LINK_STATE_DOWN);
9435 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
9439 sx_slock(&t4_list_lock);
9440 SLIST_FOREACH(sc, &t4_list, link) {
9442 * func should not make any assumptions about what state sc is
9443 * in - the only guarantee is that sc->sc_lock is a valid lock.
9447 sx_sunlock(&t4_list_lock);
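/*
 * Illustrative use of t4_iterate() (hypothetical callback, not part of the
 * driver): count the attached adapters.  Per the contract above, the callback
 * must not assume anything about each adapter's state.
 *
 *	static void
 *	count_adapter(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapter, &n);
 */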
9451 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
9455 struct adapter *sc = dev->si_drv1;
9457 rc = priv_check(td, PRIV_DRIVER);
9462 case CHELSIO_T4_GETREG: {
9463 struct t4_reg *edata = (struct t4_reg *)data;
9465 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9468 if (edata->size == 4)
9469 edata->val = t4_read_reg(sc, edata->addr);
9470 else if (edata->size == 8)
9471 edata->val = t4_read_reg64(sc, edata->addr);
9477 case CHELSIO_T4_SETREG: {
9478 struct t4_reg *edata = (struct t4_reg *)data;
9480 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9483 if (edata->size == 4) {
9484 if (edata->val & 0xffffffff00000000)
9486 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
9487 } else if (edata->size == 8)
9488 t4_write_reg64(sc, edata->addr, edata->val);
9493 case CHELSIO_T4_REGDUMP: {
9494 struct t4_regdump *regs = (struct t4_regdump *)data;
9495 int reglen = t4_get_regs_len(sc);
9498 if (regs->len < reglen) {
9499 regs->len = reglen; /* hint to the caller */
9504 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
9505 get_regs(sc, regs, buf);
9506 rc = copyout(buf, regs->data, reglen);
9510 case CHELSIO_T4_GET_FILTER_MODE:
9511 rc = get_filter_mode(sc, (uint32_t *)data);
9513 case CHELSIO_T4_SET_FILTER_MODE:
9514 rc = set_filter_mode(sc, *(uint32_t *)data);
9516 case CHELSIO_T4_GET_FILTER:
9517 rc = get_filter(sc, (struct t4_filter *)data);
9519 case CHELSIO_T4_SET_FILTER:
9520 rc = set_filter(sc, (struct t4_filter *)data);
9522 case CHELSIO_T4_DEL_FILTER:
9523 rc = del_filter(sc, (struct t4_filter *)data);
9525 case CHELSIO_T4_GET_SGE_CONTEXT:
9526 rc = get_sge_context(sc, (struct t4_sge_context *)data);
9528 case CHELSIO_T4_LOAD_FW:
9529 rc = load_fw(sc, (struct t4_data *)data);
9531 case CHELSIO_T4_GET_MEM:
9532 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
9534 case CHELSIO_T4_GET_I2C:
9535 rc = read_i2c(sc, (struct t4_i2c_data *)data);
9537 case CHELSIO_T4_CLEAR_STATS: {
9539 u_int port_id = *(uint32_t *)data;
9540 struct port_info *pi;
9543 if (port_id >= sc->params.nports)
9545 pi = sc->port[port_id];
9550 t4_clr_port_stats(sc, pi->tx_chan);
9551 pi->tx_parse_error = 0;
9552 pi->tnl_cong_drops = 0;
9553 mtx_lock(&sc->reg_lock);
9554 for_each_vi(pi, v, vi) {
9555 if (vi->flags & VI_INIT_DONE)
9556 t4_clr_vi_stats(sc, vi->viid);
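/*
 * Clear the TP MIB tunnel congestion drop counters for this port: bg_map is
 * a bitmap of the buffer groups the port uses, and each set bit selects one
 * A_TP_MIB_TNL_CNG_DROP_* counter to write below.
 */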
9558 bg_map = pi->mps_bg_map;
9561 i = ffs(bg_map) - 1;
9562 t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
9563 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
9564 bg_map &= ~(1 << i);
9566 mtx_unlock(&sc->reg_lock);
9569 * Since this command accepts a port, clear stats for
9570 * all VIs on this port.
9572 for_each_vi(pi, v, vi) {
9573 if (vi->flags & VI_INIT_DONE) {
9574 struct sge_rxq *rxq;
9575 struct sge_txq *txq;
9576 struct sge_wrq *wrq;
9578 for_each_rxq(vi, i, rxq) {
9579 #if defined(INET) || defined(INET6)
9580 rxq->lro.lro_queued = 0;
9581 rxq->lro.lro_flushed = 0;
9584 rxq->vlan_extraction = 0;
9587 for_each_txq(vi, i, txq) {
9590 txq->vlan_insertion = 0;
9594 txq->txpkts0_wrs = 0;
9595 txq->txpkts1_wrs = 0;
9596 txq->txpkts0_pkts = 0;
9597 txq->txpkts1_pkts = 0;
9598 mp_ring_reset_stats(txq->r);
9602 /* nothing to clear for each ofld_rxq */
9604 for_each_ofld_txq(vi, i, wrq) {
9605 wrq->tx_wrs_direct = 0;
9606 wrq->tx_wrs_copied = 0;
9610 if (IS_MAIN_VI(vi)) {
9611 wrq = &sc->sge.ctrlq[pi->port_id];
9612 wrq->tx_wrs_direct = 0;
9613 wrq->tx_wrs_copied = 0;
9619 case CHELSIO_T4_SCHED_CLASS:
9620 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
9622 case CHELSIO_T4_SCHED_QUEUE:
9623 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
9625 case CHELSIO_T4_GET_TRACER:
9626 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
9628 case CHELSIO_T4_SET_TRACER:
9629 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
9631 case CHELSIO_T4_LOAD_CFG:
9632 rc = load_cfg(sc, (struct t4_data *)data);
9634 case CHELSIO_T4_LOAD_BOOT:
9635 rc = load_boot(sc, (struct t4_bootrom *)data);
9637 case CHELSIO_T4_LOAD_BOOTCFG:
9638 rc = load_bootcfg(sc, (struct t4_data *)data);
9640 case CHELSIO_T4_CUDBG_DUMP:
9641 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
9643 case CHELSIO_T4_SET_OFLD_POLICY:
9644 rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
9654 t4_db_full(struct adapter *sc)
9657 CXGBE_UNIMPLEMENTED(__func__);
9661 t4_db_dropped(struct adapter *sc)
9664 CXGBE_UNIMPLEMENTED(__func__);
9669 toe_capability(struct vi_info *vi, int enable)
9672 struct port_info *pi = vi->pi;
9673 struct adapter *sc = pi->adapter;
9675 ASSERT_SYNCHRONIZED_OP(sc);
9677 if (!is_offload(sc))
9681 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
9682 /* TOE is already enabled. */
9687 * We need the port's queues around so that we're able to send
9688 * and receive CPLs to/from the TOE even if the ifnet for this
9689 * port has never been UP'd administratively.
9691 if (!(vi->flags & VI_INIT_DONE)) {
9692 rc = vi_full_init(vi);
9696 if (!(pi->vi[0].flags & VI_INIT_DONE)) {
9697 rc = vi_full_init(&pi->vi[0]);
9702 if (isset(&sc->offload_map, pi->port_id)) {
9703 /* TOE is enabled on another VI of this port. */
9708 if (!uld_active(sc, ULD_TOM)) {
9709 rc = t4_activate_uld(sc, ULD_TOM);
9712 "You must kldload t4_tom.ko before trying "
9713 "to enable TOE on a cxgbe interface.\n");
9717 KASSERT(sc->tom_softc != NULL,
9718 ("%s: TOM activated but softc NULL", __func__));
9719 KASSERT(uld_active(sc, ULD_TOM),
9720 ("%s: TOM activated but flag not set", __func__));
9723 /* Activate iWARP and iSCSI too, if the modules are loaded. */
9724 if (!uld_active(sc, ULD_IWARP))
9725 (void) t4_activate_uld(sc, ULD_IWARP);
9726 if (!uld_active(sc, ULD_ISCSI))
9727 (void) t4_activate_uld(sc, ULD_ISCSI);
9730 setbit(&sc->offload_map, pi->port_id);
9734 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
9737 KASSERT(uld_active(sc, ULD_TOM),
9738 ("%s: TOM never initialized?", __func__));
9739 clrbit(&sc->offload_map, pi->port_id);
9746 * Add an upper layer driver to the global list.
9749 t4_register_uld(struct uld_info *ui)
9754 sx_xlock(&t4_uld_list_lock);
9755 SLIST_FOREACH(u, &t4_uld_list, link) {
9756 if (u->uld_id == ui->uld_id) {
9762 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
9765 sx_xunlock(&t4_uld_list_lock);
9770 t4_unregister_uld(struct uld_info *ui)
9775 sx_xlock(&t4_uld_list_lock);
9777 SLIST_FOREACH(u, &t4_uld_list, link) {
9779 if (ui->refcount > 0) {
9784 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
9790 sx_xunlock(&t4_uld_list_lock);
9795 t4_activate_uld(struct adapter *sc, int id)
9798 struct uld_info *ui;
9800 ASSERT_SYNCHRONIZED_OP(sc);
9802 if (id < 0 || id > ULD_MAX)
9804 rc = EAGAIN; /* kldload the module with this ULD and try again. */
9806 sx_slock(&t4_uld_list_lock);
9808 SLIST_FOREACH(ui, &t4_uld_list, link) {
9809 if (ui->uld_id == id) {
9810 if (!(sc->flags & FULL_INIT_DONE)) {
9811 rc = adapter_full_init(sc);
9816 rc = ui->activate(sc);
9818 setbit(&sc->active_ulds, id);
9825 sx_sunlock(&t4_uld_list_lock);
9831 t4_deactivate_uld(struct adapter *sc, int id)
9834 struct uld_info *ui;
9836 ASSERT_SYNCHRONIZED_OP(sc);
9838 if (id < 0 || id > ULD_MAX)
9842 sx_slock(&t4_uld_list_lock);
9844 SLIST_FOREACH(ui, &t4_uld_list, link) {
9845 if (ui->uld_id == id) {
9846 rc = ui->deactivate(sc);
9848 clrbit(&sc->active_ulds, id);
9855 sx_sunlock(&t4_uld_list_lock);
9861 uld_active(struct adapter *sc, int uld_id)
9864 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9866 return (isset(&sc->active_ulds, uld_id));
9871 * t = ptr to tunable.
9872 * nc = number of CPUs.
9873 * c = compiled in default for that tunable.
9876 calculate_nqueues(int *t, int nc, const int c)
9882 nq = *t < 0 ? -*t : c;
9887 * Come up with reasonable defaults for some of the tunables, provided they're
9888 * not set by the user (in which case we'll use the values as is).
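*
* For example (illustrative; assumes the hw.cxgbe.* tunable names exported by
* this driver), the defaults can be overridden from /boot/loader.conf:
*
*	hw.cxgbe.ntxq="8"
*	hw.cxgbe.nrxq="8"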
9891 tweak_tunables(void)
9893 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
9897 t4_ntxq = rss_getnumbuckets();
9899 calculate_nqueues(&t4_ntxq, nc, NTXQ);
9903 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
9907 t4_nrxq = rss_getnumbuckets();
9909 calculate_nqueues(&t4_nrxq, nc, NRXQ);
9913 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
9915 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
9916 calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
9917 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
9920 calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
9921 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
9923 if (t4_toecaps_allowed == -1)
9924 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
9926 if (t4_rdmacaps_allowed == -1) {
9927 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
9928 FW_CAPS_CONFIG_RDMA_RDMAC;
9931 if (t4_iscsicaps_allowed == -1) {
9932 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
9933 FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
9934 FW_CAPS_CONFIG_ISCSI_T10DIF;
9937 if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
9938 t4_tmr_idx_ofld = TMR_IDX_OFLD;
9940 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
9941 t4_pktc_idx_ofld = PKTC_IDX_OFLD;
9943 if (t4_toecaps_allowed == -1)
9944 t4_toecaps_allowed = 0;
9946 if (t4_rdmacaps_allowed == -1)
9947 t4_rdmacaps_allowed = 0;
9949 if (t4_iscsicaps_allowed == -1)
9950 t4_iscsicaps_allowed = 0;
9954 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
9955 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
9958 if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
9959 t4_tmr_idx = TMR_IDX;
9961 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
9962 t4_pktc_idx = PKTC_IDX;
9964 if (t4_qsize_txq < 128)
9967 if (t4_qsize_rxq < 128)
9969 while (t4_qsize_rxq & 7)
9972 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
9975 * Number of VIs to create per-port. The first VI is the "main" regular
9976 * VI for the port. The rest are additional virtual interfaces on the
9977 * same physical port. Note that the main VI does not have native
9978 * netmap support but the extra VIs do.
9980 * Limit the number of VIs per port to the number of available
9981 * MAC addresses per port.
9985 if (t4_num_vis > nitems(vi_mac_funcs)) {
9986 t4_num_vis = nitems(vi_mac_funcs);
9987 printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
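/*
 * An out-of-range tunable gets a default: enable relaxed ordering, except on
 * Intel CPUs where it is disabled (assumption: some Intel root complexes are
 * known to handle relaxed-ordered TLPs poorly).
 */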
9990 if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
9991 pcie_relaxed_ordering = 1;
9992 #if defined(__i386__) || defined(__amd64__)
9993 if (cpu_vendor_id == CPU_VENDOR_INTEL)
9994 pcie_relaxed_ordering = 0;
10001 t4_dump_tcb(struct adapter *sc, int tid)
10003 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
10005 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
10006 save = t4_read_reg(sc, reg);
10007 base = sc->memwin[2].mw_base;
10009 /* Dump TCB for the tid */
10010 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
10011 tcb_addr += tid * TCB_SIZE;
10015 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
10017 pf = V_PFNUM(sc->pf);
10018 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
10020 t4_write_reg(sc, reg, win_pos | pf);
10021 t4_read_reg(sc, reg);
10023 off = tcb_addr - win_pos;
10024 for (i = 0; i < 4; i++) {
10026 for (j = 0; j < 8; j++, off += 4)
10027 buf[j] = htonl(t4_read_reg(sc, base + off));
10029 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
10030 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
10034 t4_write_reg(sc, reg, save);
10035 t4_read_reg(sc, reg);
10039 t4_dump_devlog(struct adapter *sc)
10041 struct devlog_params *dparams = &sc->params.devlog;
10042 struct fw_devlog_e e;
10043 int i, first, j, m, nentries, rc;
10044 uint64_t ftstamp = UINT64_MAX;
10046 if (dparams->start == 0) {
10047 db_printf("devlog params not valid\n");
10051 nentries = dparams->size / sizeof(struct fw_devlog_e);
10052 m = fwmtype_to_hwmtype(dparams->memtype);
10054 /* Find the first entry. */
10056 for (i = 0; i < nentries && !db_pager_quit; i++) {
10057 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
10058 sizeof(e), (void *)&e);
10062 if (e.timestamp == 0)
10065 e.timestamp = be64toh(e.timestamp);
10066 if (e.timestamp < ftstamp) {
10067 ftstamp = e.timestamp;
10077 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
10078 sizeof(e), (void *)&e);
10082 if (e.timestamp == 0)
10085 e.timestamp = be64toh(e.timestamp);
10086 e.seqno = be32toh(e.seqno);
10087 for (j = 0; j < 8; j++)
10088 e.params[j] = be32toh(e.params[j]);
10090 db_printf("%10d %15ju %8s %8s ",
10091 e.seqno, e.timestamp,
10092 (e.level < nitems(devlog_level_strings) ?
10093 devlog_level_strings[e.level] : "UNKNOWN"),
10094 (e.facility < nitems(devlog_facility_strings) ?
10095 devlog_facility_strings[e.facility] : "UNKNOWN"));
10096 db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
10097 e.params[3], e.params[4], e.params[5], e.params[6],
10100 if (++i == nentries)
10102 } while (i != first && !db_pager_quit);
10105 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
10106 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
10108 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
10115 t = db_read_token();
10117 dev = device_lookup_by_name(db_tok_string);
10122 db_printf("usage: show t4 devlog <nexus>\n");
10127 db_printf("device not found\n");
10131 t4_dump_devlog(device_get_softc(dev));
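/*
 * Example (from the ddb prompt): "show t4 devlog t4nex0" dumps the firmware
 * device log of the t4nex0 nexus.
 */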
10134 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
10143 t = db_read_token();
10145 dev = device_lookup_by_name(db_tok_string);
10146 t = db_read_token();
10147 if (t == tNUMBER) {
10148 tid = db_tok_number;
10155 db_printf("usage: show t4 tcb <nexus> <tid>\n");
10160 db_printf("device not found\n");
10164 db_printf("invalid tid\n");
10168 t4_dump_tcb(device_get_softc(dev), tid);
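/*
 * Example (from the ddb prompt): "show t4 tcb t4nex0 1234" dumps the TCB of
 * tid 1234 as raw 32-bit words.
 */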
10173 * Borrowed from cesa_prep_aes_key().
10175 * NB: The crypto engine wants the words in the decryption key in reverse
10179 t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
10181 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
10185 rijndaelKeySetupEnc(ek, enc_key, kbits);
10187 dkey += (kbits / 8) / 4;
10191 for (i = 0; i < 4; i++)
10192 *--dkey = htobe32(ek[4 * 10 + i]);
10195 for (i = 0; i < 2; i++)
10196 *--dkey = htobe32(ek[4 * 11 + 2 + i]);
10197 for (i = 0; i < 4; i++)
10198 *--dkey = htobe32(ek[4 * 12 + i]);
10201 for (i = 0; i < 4; i++)
10202 *--dkey = htobe32(ek[4 * 13 + i]);
10203 for (i = 0; i < 4; i++)
10204 *--dkey = htobe32(ek[4 * 14 + i]);
10207 MPASS(dkey == dec_key);
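/*
 * Worked example (illustrative): for a 128-bit key, dkey starts at
 * dec_key + 4 and the 128-bit case above stores the last round key
 * (words ek[40..43]) in reverse order:
 *
 *	dec_key[0] = htobe32(ek[43])
 *	dec_key[1] = htobe32(ek[42])
 *	dec_key[2] = htobe32(ek[41])
 *	dec_key[3] = htobe32(ek[40])
 *
 * which is the word order the crypto engine expects, per the NB above.
 */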
10210 static struct sx mlu; /* mod load unload */
10211 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
10214 mod_event(module_t mod, int cmd, void *arg)
10217 static int loaded = 0;
10222 if (loaded++ == 0) {
10224 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
10225 t4_filter_rpl, CPL_COOKIE_FILTER);
10226 t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
10227 do_l2t_write_rpl, CPL_COOKIE_FILTER);
10228 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
10229 t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
10230 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
10231 t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
10232 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
10233 t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
10234 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
10235 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
10236 t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
10238 sx_init(&t4_list_lock, "T4/T5 adapters");
10239 SLIST_INIT(&t4_list);
10241 sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
10242 SLIST_INIT(&t4_uld_list);
10244 t4_tracer_modload();
10252 if (--loaded == 0) {
10255 sx_slock(&t4_list_lock);
10256 if (!SLIST_EMPTY(&t4_list)) {
10258 sx_sunlock(&t4_list_lock);
10262 sx_slock(&t4_uld_list_lock);
10263 if (!SLIST_EMPTY(&t4_uld_list)) {
10265 sx_sunlock(&t4_uld_list_lock);
10266 sx_sunlock(&t4_list_lock);
10271 while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
10272 uprintf("%ju clusters with custom free routine "
10273 "still is use.\n", t4_sge_extfree_refs());
10274 pause("t4unload", 2 * hz);
10277 sx_sunlock(&t4_uld_list_lock);
10279 sx_sunlock(&t4_list_lock);
10281 if (t4_sge_extfree_refs() == 0) {
10282 t4_tracer_modunload();
10284 sx_destroy(&t4_uld_list_lock);
10286 sx_destroy(&t4_list_lock);
10287 t4_sge_modunload();
10291 loaded++; /* undo earlier decrement */
10302 static devclass_t t4_devclass, t5_devclass, t6_devclass;
10303 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
10304 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
10306 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
10307 MODULE_VERSION(t4nex, 1);
10308 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
10310 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
10311 #endif /* DEV_NETMAP */
10313 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
10314 MODULE_VERSION(t5nex, 1);
10315 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
10317 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
10318 #endif /* DEV_NETMAP */
10320 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
10321 MODULE_VERSION(t6nex, 1);
10322 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
10324 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
10325 #endif /* DEV_NETMAP */
10327 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
10328 MODULE_VERSION(cxgbe, 1);
10330 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
10331 MODULE_VERSION(cxl, 1);
10333 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
10334 MODULE_VERSION(cc, 1);
10336 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
10337 MODULE_VERSION(vcxgbe, 1);
10339 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
10340 MODULE_VERSION(vcxl, 1);
10342 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
10343 MODULE_VERSION(vcc, 1);