/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};

/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	DEVMETHOD_END
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};

/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif
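
/*
 * Example (added for illustration; not part of the original file): walking
 * t4_list while honoring the documented lock order.  t4_list_lock comes
 * first, then each adapter's lock via ADAPTER_LOCK.  The function itself is
 * hypothetical and compiled out.
 */
#ifdef notyet
static void
t4_example_lock_order(void)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);		/* list lock first */
	SLIST_FOREACH(sc, &t4_list, link) {
		ADAPTER_LOCK(sc);		/* then the adapter lock */
		/* ... inspect per-adapter state here ... */
		ADAPTER_UNLOCK(sc);
	}
	sx_sunlock(&t4_list_lock);
}
#endif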

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
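
/*
 * Sketch of the -n convention described above (hypothetical helper, not in
 * the original file): a negative value means "no explicit setting" and the
 * driver later substitutes a reasonable default of at most -n.
 */
#ifdef notyet
static int
example_default_nqueues(int tunable, int ncpus)
{
	if (tunable > 0)
		return (tunable);	/* administrator's explicit choice */
	return (min(-tunable, ncpus));	/* default: up to n, capped by CPUs */
}
#endif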

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -NTXQ_10G;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -NRXQ_10G;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -NTXQ_1G;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -NRXQ_1G;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -NOFLDTXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -NOFLDRXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -NOFLDTXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -NOFLDRXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_ofld", &t4_tmr_idx_ofld);

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_ofld", &t4_pktc_idx_ofld);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);

/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 1;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static void init_l1cfg(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Chelsio T6225 80"},
	{0x6481, "Chelsio T62100 81"},
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
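		/*
		 * Note (added): PCIEM_CTL_MAX_PAYLOAD is a 3-bit encoded
		 * field in the PCIe device control register; shifting it
		 * down by 5 and using it as a power-of-two multiplier of
		 * 128 yields the max payload size in bytes (128, 256, ...).
		 */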
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main" regular
	 * VI for the port.  The rest are additional virtual interfaces on the
	 * same physical port.  Note that the main VI does not have native
	 * netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (port_top_speed(pi) >= 10) {
			n10g++;
		} else {
			n1g++;
		}

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_tx_sched(sc);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
			} else {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
			}
			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
				    iaq.nofldtxq_vi;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
				    iaq.nofldtxq_vi;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			vi->first_nm_rxq = nm_rqidx;
			vi->first_nm_txq = nm_tqidx;
			vi->nnmrxq = iaq.nnmrxq_vi;
			vi->nnmtxq = iaq.nnmtxq_vi;
			nm_rqidx += vi->nnmrxq;
			nm_tqidx += vi->nnmtxq;
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}

	return (error);
}

static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc != 0) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		ifp->if_capabilities |= IFCAP_NETMAP;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc != 0)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);
	ifmedia_removeall(&pi->media);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
1741 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1745 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1746 if (mask & IFCAP_TXCSUM) {
1747 ifp->if_capenable ^= IFCAP_TXCSUM;
1748 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1750 if (IFCAP_TSO4 & ifp->if_capenable &&
1751 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1752 ifp->if_capenable &= ~IFCAP_TSO4;
1754 "tso4 disabled due to -txcsum.\n");
1757 if (mask & IFCAP_TXCSUM_IPV6) {
1758 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1759 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1761 if (IFCAP_TSO6 & ifp->if_capenable &&
1762 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1763 ifp->if_capenable &= ~IFCAP_TSO6;
1765 "tso6 disabled due to -txcsum6.\n");
1768 if (mask & IFCAP_RXCSUM)
1769 ifp->if_capenable ^= IFCAP_RXCSUM;
1770 if (mask & IFCAP_RXCSUM_IPV6)
1771 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1774 * Note that we leave CSUM_TSO alone (it is always set). The
1775 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1776 * sending a TSO request our way, so it's sufficient to toggle
1779 if (mask & IFCAP_TSO4) {
1780 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1781 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1782 if_printf(ifp, "enable txcsum first.\n");
1786 ifp->if_capenable ^= IFCAP_TSO4;
1788 if (mask & IFCAP_TSO6) {
1789 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1790 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1791 if_printf(ifp, "enable txcsum6 first.\n");
1795 ifp->if_capenable ^= IFCAP_TSO6;
1797 if (mask & IFCAP_LRO) {
1798 #if defined(INET) || defined(INET6)
1800 struct sge_rxq *rxq;
1802 ifp->if_capenable ^= IFCAP_LRO;
1803 for_each_rxq(vi, i, rxq) {
1804 if (ifp->if_capenable & IFCAP_LRO)
1805 rxq->iq.flags |= IQ_LRO_ENABLED;
1807 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1812 if (mask & IFCAP_TOE) {
1813 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1815 rc = toe_capability(vi, enable);
1819 ifp->if_capenable ^= mask;
1822 if (mask & IFCAP_VLAN_HWTAGGING) {
1823 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1824 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1825 rc = update_mac_settings(ifp, XGMAC_VLANEX);
1827 if (mask & IFCAP_VLAN_MTU) {
1828 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1830 /* Need to find out how to disable auto-mtu-inflation */
1832 if (mask & IFCAP_VLAN_HWTSO)
1833 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1834 if (mask & IFCAP_VLAN_HWCSUM)
1835 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1837 #ifdef VLAN_CAPABILITIES
1838 VLAN_CAPABILITIES(ifp);
1841 end_synchronized_op(sc, 0);
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			break;
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags |= EQ_QFLUSH;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_QFLUSH;
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);
	}
	default:
		return (if_get_counter_default(ifp, c));
	}
}

static uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	if (pi->nvi > 1 || sc->flags & IS_VF)
		return (vi_get_counter(ifp, c));

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);
	}
	default:
		return (if_get_counter_default(ifp, c));
	}
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;

	device_printf(vi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct ifmedia_entry *cur;
	struct link_config *lc = &pi->link_cfg;

	/*
	 * If all the interfaces are administratively down the firmware does not
	 * report transceiver changes.  Refresh port info here so that ifconfig
	 * displays accurate information at all times.
	 */
	if (begin_synchronized_op(pi->adapter, NULL, SLEEP_OK | INTR_OK,
	    "t4med") == 0) {
		if (pi->up_vis == 0) {
			t4_update_port_info(pi);
			build_medialist(pi, &pi->media);
		}

		end_synchronized_op(pi->adapter, 0);
	}

	ifmr->ifm_status = IFM_AVALID;
	if (lc->link_ok == 0)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
	if (lc->fc & PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (lc->fc & PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	/* active and current will differ iff current media is autoselect. */
	cur = pi->media.ifm_cur;
	if (cur != NULL && IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (lc->fc & PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (lc->fc & PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	switch (lc->speed) {
	case 10000:
		ifmr->ifm_active |= IFM_10G_T;
		break;
	case 1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case 100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case 10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	default:
		device_printf(vi->dev, "link up but speed unknown (%u)\n",
		    lc->speed);
	}
}

static int
vcxgbe_probe(device_t dev)
{
	char buf[128];
	struct vi_info *vi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
	    vi - vi->pi->vi);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

static int
alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
{
	int func, index, rc;
	uint32_t param, val;

	ASSERT_SYNCHRONIZED_OP(sc);

	index = vi - pi->vi;
	MPASS(index > 0);	/* This function deals with _extra_ VIs only */
	KASSERT(index < nitems(vi_mac_funcs),
	    ("%s: VI %s doesn't have a MAC func", __func__,
	    device_get_nameunit(vi->dev)));
	func = vi_mac_funcs[index];
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, func, 0);
	if (rc < 0) {
		device_printf(vi->dev, "failed to allocate virtual interface %d"
		    "for port %d: %d\n", index, pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;
	if (chip_id(sc) <= CHELSIO_T5)
		vi->smt_idx = (rc & 0x7f) << 1;
	else
		vi->smt_idx = (rc & 0x7f);

	if (vi->rss_size == 1) {
		/*
		 * This VI didn't get a slice of the RSS table.  Reduce the
		 * number of VIs being created (hw.cxgbe.num_vis) or modify the
		 * configuration file (nvi, rssnvi for this PF) if this is a
		 * problem.
		 */
		device_printf(vi->dev, "RSS table not available.\n");
		vi->rss_base = 0xffff;

		return (0);
	}

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc)
		vi->rss_base = 0xffff;
	else {
		MPASS((val >> 16) == vi->rss_size);
		vi->rss_base = val & 0xffff;
	}

	return (0);
}

static int
vcxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	int rc;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
	if (rc)
		return (rc);
	rc = alloc_extra_vi(sc, pi, vi);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	rc = cxgbe_vi_attach(dev, vi);
	if (rc) {
		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
		return (rc);
	}

	return (0);
}

static int
vcxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->pi->adapter;

	doom_vi(sc, vi);

	cxgbe_vi_detach(vi);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

	end_synchronized_op(sc, 0);

	return (0);
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

void
2259 t4_add_adapter(struct adapter *sc)
2261 sx_xlock(&t4_list_lock);
2262 SLIST_INSERT_HEAD(&t4_list, sc, link);
2263 sx_xunlock(&t4_list_lock);
2267 t4_map_bars_0_and_4(struct adapter *sc)
2269 sc->regs_rid = PCIR_BAR(0);
2270 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2271 &sc->regs_rid, RF_ACTIVE);
2272 if (sc->regs_res == NULL) {
2273 device_printf(sc->dev, "cannot map registers.\n");
2276 sc->bt = rman_get_bustag(sc->regs_res);
2277 sc->bh = rman_get_bushandle(sc->regs_res);
2278 sc->mmio_len = rman_get_size(sc->regs_res);
2279 setbit(&sc->doorbells, DOORBELL_KDB);
2281 sc->msix_rid = PCIR_BAR(4);
2282 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2283 &sc->msix_rid, RF_ACTIVE);
2284 if (sc->msix_res == NULL) {
2285 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2293 t4_map_bar_2(struct adapter *sc)
2297 * T4: only the iWARP driver uses the userspace doorbells. There is no
2298 * need to map the BAR if RDMA is disabled.
2300 if (is_t4(sc) && sc->rdmacaps == 0)
2303 sc->udbs_rid = PCIR_BAR(2);
2304 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2305 &sc->udbs_rid, RF_ACTIVE);
2306 if (sc->udbs_res == NULL) {
2307 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2310 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2312 if (chip_id(sc) >= CHELSIO_T5) {
2313 setbit(&sc->doorbells, DOORBELL_UDB);
2314 #if defined(__i386__) || defined(__amd64__)
2315 if (t5_write_combine) {
2319 * Enable write combining on BAR2. This is the
2320 * userspace doorbell BAR and is split into 128B
2321 * (UDBS_SEG_SIZE) doorbell regions, each associated
2322 * with an egress queue. The first 64B has the doorbell
2323 * and the second 64B can be used to submit a tx work
2324 * request with an implicit doorbell.
2327 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2328 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2330 clrbit(&sc->doorbells, DOORBELL_UDB);
2331 setbit(&sc->doorbells, DOORBELL_WCWR);
2332 setbit(&sc->doorbells, DOORBELL_UDBWC);
2334 t5_write_combine = 0;
2335 device_printf(sc->dev,
2336 "couldn't enable write combining: %d\n",
2340 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2341 t4_write_reg(sc, A_SGE_STAT_CFG,
2342 V_STATSOURCE_T5(7) | mode);
2345 t5_write_combine = 0;
2347 sc->iwt.wc_en = t5_write_combine;
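/*
 * Illustrative sketch (not part of the driver): the doorbells bitmap
 * filled in above is consumed elsewhere (e.g. by the tx path) roughly
 * as below.  example_pick_doorbell() is a hypothetical helper; only the
 * DOORBELL_* names and the setbit()/clrbit()/isset() usage come from
 * this file.
 */
static inline int
example_pick_doorbell(struct adapter *sc)
{

	/* Prefer write-combined access to the user doorbell region. */
	if (isset(&sc->doorbells, DOORBELL_WCWR))
		return (DOORBELL_WCWR);
	if (isset(&sc->doorbells, DOORBELL_UDB))
		return (DOORBELL_UDB);
	return (DOORBELL_KDB);	/* the kernel doorbell is always set up */
}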
2353 struct memwin_init {
2358 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2359 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2360 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2361 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2364 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2365 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2366 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2367 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2371 setup_memwin(struct adapter *sc)
2373 const struct memwin_init *mw_init;
2380 * Read low 32b of bar0 indirectly via the hardware backdoor
2381 * mechanism. Works from within PCI passthrough environments
2382 * too, where rman_get_start() can return a different value. We
2383 * need to program the T4 memory window decoders with the actual
2384 * addresses that will be coming across the PCIe link.
2386 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2387 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2389 mw_init = &t4_memwin[0];
2391 /* T5+ use the relative offset inside the PCIe BAR */
2394 mw_init = &t5_memwin[0];
2397 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2398 rw_init(&mw->mw_lock, "memory window access");
2399 mw->mw_base = mw_init->base;
2400 mw->mw_aperture = mw_init->aperture;
2403 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2404 (mw->mw_base + bar0) | V_BIR(0) |
2405 V_WINDOW(ilog2(mw->mw_aperture) - 10));
2406 rw_wlock(&mw->mw_lock);
2407 position_memwin(sc, i, 0);
2408 rw_wunlock(&mw->mw_lock);
2412 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2416 * Positions the memory window at the given address in the card's address space.
2417 * There are some alignment requirements and the actual position may be at an
2418 * address prior to the requested address. mw->mw_curpos always has the actual
2419 * position of the window.
2422 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2428 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2429 mw = &sc->memwin[idx];
2430 rw_assert(&mw->mw_lock, RA_WLOCKED);
2434 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
2436 pf = V_PFNUM(sc->pf);
2437 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
2439 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2440 t4_write_reg(sc, reg, mw->mw_curpos | pf);
2441 t4_read_reg(sc, reg); /* flush */
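/*
 * Illustrative example of the alignment rule above (not driver code):
 * on T5+ the start is rounded down to a 128B boundary, so positioning a
 * window at 0x12345 leaves mw_curpos at 0x12300 and the requested byte
 * at offset 0x45 inside the aperture.  example_memwin_offset() is a
 * hypothetical helper.
 */
static inline uint32_t
example_memwin_offset(const struct memwin *mw, uint32_t addr)
{

	/* Caller holds mw_lock and addr lies within the aperture. */
	return (addr - mw->mw_curpos);
}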
2445 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2451 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2453 /* Memory can only be accessed in naturally aligned 4 byte units */
2454 if (addr & 3 || len & 3 || len <= 0)
2457 mw = &sc->memwin[idx];
2459 rw_rlock(&mw->mw_lock);
2460 mw_end = mw->mw_curpos + mw->mw_aperture;
2461 if (addr >= mw_end || addr < mw->mw_curpos) {
2462 /* Will need to reposition the window */
2463 if (!rw_try_upgrade(&mw->mw_lock)) {
2464 rw_runlock(&mw->mw_lock);
2465 rw_wlock(&mw->mw_lock);
2467 rw_assert(&mw->mw_lock, RA_WLOCKED);
2468 position_memwin(sc, idx, addr);
2469 rw_downgrade(&mw->mw_lock);
2470 mw_end = mw->mw_curpos + mw->mw_aperture;
2472 rw_assert(&mw->mw_lock, RA_RLOCKED);
2473 while (addr < mw_end && len > 0) {
2475 v = t4_read_reg(sc, mw->mw_base + addr -
2477 *val++ = le32toh(v);
2480 t4_write_reg(sc, mw->mw_base + addr -
2481 mw->mw_curpos, htole32(v));
2486 rw_runlock(&mw->mw_lock);
2493 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2497 return (rw_via_memwin(sc, idx, addr, val, len, 0));
2501 write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
2502 const uint32_t *val, int len)
2505 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
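/*
 * Usage sketch (hypothetical caller, not driver code): read four 32-bit
 * words of adapter memory through memory window 0.  The address and
 * length must be naturally aligned 4 byte units, as enforced by
 * rw_via_memwin() above.
 */
static int
example_read_adapter_mem(struct adapter *sc, uint32_t addr)
{
	uint32_t buf[4];

	return (read_via_memwin(sc, 0, addr, buf, sizeof(buf)));
}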
2509 t4_range_cmp(const void *a, const void *b)
2511 return ((const struct t4_range *)a)->start -
2512 ((const struct t4_range *)b)->start;
2516 * Verify that the memory range specified by the addr/len pair is valid within
2517 * the card's address space.
2520 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2522 struct t4_range mem_ranges[4], *r, *next;
2523 uint32_t em, addr_len;
2524 int i, n, remaining;
2526 /* Memory can only be accessed in naturally aligned 4 byte units */
2527 if (addr & 3 || len & 3 || len <= 0)
2530 /* Enabled memories */
2531 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2535 bzero(r, sizeof(mem_ranges));
2536 if (em & F_EDRAM0_ENABLE) {
2537 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2538 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2540 r->start = G_EDRAM0_BASE(addr_len) << 20;
2541 if (addr >= r->start &&
2542 addr + len <= r->start + r->size)
2548 if (em & F_EDRAM1_ENABLE) {
2549 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2550 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2552 r->start = G_EDRAM1_BASE(addr_len) << 20;
2553 if (addr >= r->start &&
2554 addr + len <= r->start + r->size)
2560 if (em & F_EXT_MEM_ENABLE) {
2561 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2562 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2564 r->start = G_EXT_MEM_BASE(addr_len) << 20;
2565 if (addr >= r->start &&
2566 addr + len <= r->start + r->size)
2572 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2573 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2574 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2576 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2577 if (addr >= r->start &&
2578 addr + len <= r->start + r->size)
2584 MPASS(n <= nitems(mem_ranges));
2587 /* Sort and merge the ranges. */
2588 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2590 /* Start from index 0 and examine the next n - 1 entries. */
2592 for (remaining = n - 1; remaining > 0; remaining--, r++) {
2594 MPASS(r->size > 0); /* r is a valid entry. */
2596 MPASS(next->size > 0); /* and so is the next one. */
2598 while (r->start + r->size >= next->start) {
2599 /* Merge the next one into the current entry. */
2600 r->size = max(r->start + r->size,
2601 next->start + next->size) - r->start;
2602 n--; /* One fewer entry in total. */
2603 if (--remaining == 0)
2604 goto done; /* short circuit */
2607 if (next != r + 1) {
2609 * Some entries were merged into r and next
2610 * points to the first valid entry that couldn't
2613 MPASS(next->size > 0); /* must be valid */
2614 memcpy(r + 1, next, remaining * sizeof(*r));
2617 * This is so that the size assertions in the
2618 * next iteration of the loop do the right
2619 * thing for entries that were pulled up and are
2622 MPASS(n < nitems(mem_ranges));
2623 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2624 sizeof(struct t4_range));
2629 /* Done merging the ranges. */
2632 for (i = 0; i < n; i++, r++) {
2633 if (addr >= r->start &&
2634 addr + len <= r->start + r->size)
2643 fwmtype_to_hwmtype(int mtype)
2647 case FW_MEMTYPE_EDC0:
2649 case FW_MEMTYPE_EDC1:
2651 case FW_MEMTYPE_EXTMEM:
2653 case FW_MEMTYPE_EXTMEM1:
2656 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2661 * Verify that the memory range specified by the memtype/offset/len pair is
2662 * valid and lies entirely within the memtype specified. The global address of
2663 * the start of the range is returned in addr.
2666 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2669 uint32_t em, addr_len, maddr;
2671 /* Memory can only be accessed in naturally aligned 4 byte units */
2672 if (off & 3 || len & 3 || len == 0)
2675 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2676 switch (fwmtype_to_hwmtype(mtype)) {
2678 if (!(em & F_EDRAM0_ENABLE))
2680 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2681 maddr = G_EDRAM0_BASE(addr_len) << 20;
2684 if (!(em & F_EDRAM1_ENABLE))
2686 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2687 maddr = G_EDRAM1_BASE(addr_len) << 20;
2690 if (!(em & F_EXT_MEM_ENABLE))
2692 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2693 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2696 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2698 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2699 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2705 *addr = maddr + off; /* global address */
2706 return (validate_mem_range(sc, *addr, len));
2710 fixup_devlog_params(struct adapter *sc)
2712 struct devlog_params *dparams = &sc->params.devlog;
2715 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2716 dparams->size, &dparams->addr);
2722 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
2723 struct intrs_and_queues *iaq)
2725 int rc, itype, navail, nrxq10g, nrxq1g, n;
2726 int nofldrxq10g = 0, nofldrxq1g = 0;
2728 bzero(iaq, sizeof(*iaq));
2730 iaq->ntxq10g = t4_ntxq10g;
2731 iaq->ntxq1g = t4_ntxq1g;
2732 iaq->ntxq_vi = t4_ntxq_vi;
2733 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
2734 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
2735 iaq->nrxq_vi = t4_nrxq_vi;
2736 iaq->rsrv_noflowq = t4_rsrv_noflowq;
2738 if (is_offload(sc)) {
2739 iaq->nofldtxq10g = t4_nofldtxq10g;
2740 iaq->nofldtxq1g = t4_nofldtxq1g;
2741 iaq->nofldtxq_vi = t4_nofldtxq_vi;
2742 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
2743 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
2744 iaq->nofldrxq_vi = t4_nofldrxq_vi;
2748 iaq->nnmtxq_vi = t4_nnmtxq_vi;
2749 iaq->nnmrxq_vi = t4_nnmrxq_vi;
2752 for (itype = INTR_MSIX; itype; itype >>= 1) {
2754 if ((itype & t4_intr_types) == 0)
2755 continue; /* not allowed */
2757 if (itype == INTR_MSIX)
2758 navail = pci_msix_count(sc->dev);
2759 else if (itype == INTR_MSI)
2760 navail = pci_msi_count(sc->dev);
2767 iaq->intr_type = itype;
2768 iaq->intr_flags_10g = 0;
2769 iaq->intr_flags_1g = 0;
2772 * Best option: an interrupt vector for errors, one for the
2773 * firmware event queue, and one for every rxq (NIC and TOE) of
2774 * every VI. The VIs that support netmap use the same
2775 * interrupts for the NIC rx queues and the netmap rx queues
2776 * because only one set of queues is active at a time.
2778 iaq->nirq = T4_EXTRA_INTR;
2779 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2780 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2781 iaq->nirq += (n10g + n1g) * (num_vis - 1) *
2782 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */
2783 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
2784 if (iaq->nirq <= navail &&
2785 (itype != INTR_MSI || powerof2(iaq->nirq))) {
2786 iaq->intr_flags_10g = INTR_ALL;
2787 iaq->intr_flags_1g = INTR_ALL;
2791 /* Disable the VIs (and netmap) if there aren't enough intrs */
2793 device_printf(sc->dev, "virtual interfaces disabled "
2794 "because num_vis=%u with current settings "
2795 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
2796 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
2797 "nnmrxq_vi=%u) would need %u interrupts but "
2798 "only %u are available.\n", num_vis, nrxq10g,
2799 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
2800 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
2803 iaq->ntxq_vi = iaq->nrxq_vi = 0;
2804 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
2805 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
2810 * Second best option: a vector for errors, one for the firmware
2811 * event queue, and vectors for either all the NIC rx queues or
2812 * all the TOE rx queues. The queues that don't get vectors
2813 * will forward their interrupts to those that do.
2815 iaq->nirq = T4_EXTRA_INTR;
2816 if (nrxq10g >= nofldrxq10g) {
2817 iaq->intr_flags_10g = INTR_RXQ;
2818 iaq->nirq += n10g * nrxq10g;
2820 iaq->intr_flags_10g = INTR_OFLD_RXQ;
2821 iaq->nirq += n10g * nofldrxq10g;
2823 if (nrxq1g >= nofldrxq1g) {
2824 iaq->intr_flags_1g = INTR_RXQ;
2825 iaq->nirq += n1g * nrxq1g;
2827 iaq->intr_flags_1g = INTR_OFLD_RXQ;
2828 iaq->nirq += n1g * nofldrxq1g;
2830 if (iaq->nirq <= navail &&
2831 (itype != INTR_MSI || powerof2(iaq->nirq)))
2835 * Next best option: an interrupt vector for errors, one for the
2836 * firmware event queue, and at least one per main-VI. At this
2837 * point we know we'll have to downsize nrxq and/or nofldrxq to
2838 * fit what's available to us.
2840 iaq->nirq = T4_EXTRA_INTR;
2841 iaq->nirq += n10g + n1g;
2842 if (iaq->nirq <= navail) {
2843 int leftover = navail - iaq->nirq;
2846 int target = max(nrxq10g, nofldrxq10g);
2848 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2849 INTR_RXQ : INTR_OFLD_RXQ;
2852 while (n < target && leftover >= n10g) {
2857 iaq->nrxq10g = min(n, nrxq10g);
2859 iaq->nofldrxq10g = min(n, nofldrxq10g);
2864 int target = max(nrxq1g, nofldrxq1g);
2866 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2867 INTR_RXQ : INTR_OFLD_RXQ;
2870 while (n < target && leftover >= n1g) {
2875 iaq->nrxq1g = min(n, nrxq1g);
2877 iaq->nofldrxq1g = min(n, nofldrxq1g);
2881 if (itype != INTR_MSI || powerof2(iaq->nirq))
2886 * Least desirable option: one interrupt vector for everything.
2888 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2889 iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2892 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2897 if (itype == INTR_MSIX)
2898 rc = pci_alloc_msix(sc->dev, &navail);
2899 else if (itype == INTR_MSI)
2900 rc = pci_alloc_msi(sc->dev, &navail);
2903 if (navail == iaq->nirq)
2907 * Didn't get the number requested. Use whatever number
2908 * the kernel is willing to allocate (it's in navail).
2910 device_printf(sc->dev, "fewer vectors than requested, "
2911 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2912 itype, iaq->nirq, navail);
2913 pci_release_msi(sc->dev);
2917 device_printf(sc->dev,
2918 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2919 itype, rc, iaq->nirq, navail);
2922 device_printf(sc->dev,
2923 "failed to find a usable interrupt type. "
2924 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2925 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
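/*
 * Worked example of the "best option" arithmetic above, with
 * illustrative numbers only: a 2-port 10G adapter with nrxq10g = 8,
 * nofldrxq10g = 2, and num_vis = 1 requests T4_EXTRA_INTR vectors (the
 * error interrupt and the firmware event queue) plus 2 * (8 + 2) rx
 * queue vectors.  If that exceeds navail, the code above falls through
 * to the progressively cheaper options.
 */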
2930 #define FW_VERSION(chip) ( \
2931 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2932 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2933 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2934 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2935 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
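/*
 * For illustration: FW_VERSION(T5) packs T5FW_VERSION_MAJOR, _MINOR,
 * _MICRO, and _BUILD into a single 32-bit word using the
 * V_FW_HDR_FW_VER_* shifts.  Assuming the usual 8-bit fields, e.g.
 * firmware 1.16.63.0 packs to 0x01103f00.
 */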
2941 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
2945 .kld_name = "t4fw_cfg",
2946 .fw_mod_name = "t4fw",
2948 .chip = FW_HDR_CHIP_T4,
2949 .fw_ver = htobe32_const(FW_VERSION(T4)),
2950 .intfver_nic = FW_INTFVER(T4, NIC),
2951 .intfver_vnic = FW_INTFVER(T4, VNIC),
2952 .intfver_ofld = FW_INTFVER(T4, OFLD),
2953 .intfver_ri = FW_INTFVER(T4, RI),
2954 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2955 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
2956 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2957 .intfver_fcoe = FW_INTFVER(T4, FCOE),
2961 .kld_name = "t5fw_cfg",
2962 .fw_mod_name = "t5fw",
2964 .chip = FW_HDR_CHIP_T5,
2965 .fw_ver = htobe32_const(FW_VERSION(T5)),
2966 .intfver_nic = FW_INTFVER(T5, NIC),
2967 .intfver_vnic = FW_INTFVER(T5, VNIC),
2968 .intfver_ofld = FW_INTFVER(T5, OFLD),
2969 .intfver_ri = FW_INTFVER(T5, RI),
2970 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2971 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
2972 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2973 .intfver_fcoe = FW_INTFVER(T5, FCOE),
2977 .kld_name = "t6fw_cfg",
2978 .fw_mod_name = "t6fw",
2980 .chip = FW_HDR_CHIP_T6,
2981 .fw_ver = htobe32_const(FW_VERSION(T6)),
2982 .intfver_nic = FW_INTFVER(T6, NIC),
2983 .intfver_vnic = FW_INTFVER(T6, VNIC),
2984 .intfver_ofld = FW_INTFVER(T6, OFLD),
2985 .intfver_ri = FW_INTFVER(T6, RI),
2986 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
2987 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
2988 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
2989 .intfver_fcoe = FW_INTFVER(T6, FCOE),
2994 static struct fw_info *
2995 find_fw_info(int chip)
2999 for (i = 0; i < nitems(fw_info); i++) {
3000 if (fw_info[i].chip == chip)
3001 return (&fw_info[i]);
3007 * Is the given firmware API compatible with the one the driver was compiled
3011 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3014 /* short circuit if it's the exact same firmware version */
3015 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3019 * XXX: Is this too conservative? Perhaps I should limit this to the
3020 * features that are supported in the driver.
3022 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3023 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3024 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3025 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3033 * The firmware in the KLD is usable, but should it be installed? This routine
3034 * explains itself in detail if it indicates the KLD firmware should be
3038 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
3042 if (!card_fw_usable) {
3043 reason = "incompatible or unusable";
3048 reason = "older than the version bundled with this driver";
3052 if (t4_fw_install == 2 && k != c) {
3053 reason = "different than the version bundled with this driver";
3060 if (t4_fw_install == 0) {
3061 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3062 "but the driver is prohibited from installing a different "
3063 "firmware on the card.\n",
3064 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3065 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3070 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3071 "installing firmware %u.%u.%u.%u on card.\n",
3072 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3073 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3074 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3075 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3081 * Establish contact with the firmware and determine if we are the master driver
3082 * or not, and whether we are responsible for chip initialization.
3085 prep_firmware(struct adapter *sc)
3087 const struct firmware *fw = NULL, *default_cfg;
3088 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
3089 enum dev_state state;
3090 struct fw_info *fw_info;
3091 struct fw_hdr *card_fw; /* fw on the card */
3092 const struct fw_hdr *kld_fw; /* fw in the KLD */
3093 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
3096 /* This is the firmware whose headers the driver was compiled against */
3097 fw_info = find_fw_info(chip_id(sc));
3098 if (fw_info == NULL) {
3099 device_printf(sc->dev,
3100 "unable to look up firmware information for chip %d.\n",
3104 drv_fw = &fw_info->fw_hdr;
3107 * The firmware KLD contains many modules. The KLD name is also the
3108 * name of the module that contains the default config file.
3110 default_cfg = firmware_get(fw_info->kld_name);
3112 /* This is the firmware in the KLD */
3113 fw = firmware_get(fw_info->fw_mod_name);
3115 kld_fw = (const void *)fw->data;
3116 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3122 /* Read the header of the firmware on the card */
3123 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3124 rc = -t4_read_flash(sc, FLASH_FW_START,
3125 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
3127 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
3128 if (card_fw->fw_ver == be32toh(0xffffffff)) {
3129 uint32_t d = be32toh(kld_fw->fw_ver);
3131 if (!kld_fw_usable) {
3132 device_printf(sc->dev,
3133 "no firmware on the card and no usable "
3134 "firmware bundled with the driver.\n");
3137 } else if (t4_fw_install == 0) {
3138 device_printf(sc->dev,
3139 "no firmware on the card and the driver "
3140 "is prohibited from installing new "
3146 device_printf(sc->dev, "no firmware on the card, "
3147 "installing firmware %d.%d.%d.%d\n",
3148 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3149 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3150 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize);
3153 device_printf(sc->dev,
3154 "firmware install failed: %d.\n", rc);
3157 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3162 device_printf(sc->dev,
3163 "Unable to read card's firmware header: %d\n", rc);
3167 /* Contact firmware. */
3168 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3169 if (rc < 0 || state == DEV_STATE_ERR) {
3171 device_printf(sc->dev,
3172 "failed to connect to the firmware: %d, %d.\n", rc, state);
3177 sc->flags |= MASTER_PF;
3178 else if (state == DEV_STATE_UNINIT) {
3180 * We didn't get to be the master so we definitely won't be
3181 * configuring the chip. It's a bug if someone else hasn't
3182 * configured it already.
3184 device_printf(sc->dev, "couldn't be master(%d), "
3185 "device not already initialized either(%d).\n", rc, state);
3190 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3191 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3193 * Common case: the firmware on the card is an exact match and
3194 * the KLD is an exact match too, or the KLD is
3195 * absent/incompatible. Note that t4_fw_install = 2 is ignored
3196 * here -- use cxgbetool loadfw if you want to reinstall the
3197 * same firmware as the one on the card.
3199 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3200 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3201 be32toh(card_fw->fw_ver))) {
3203 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3205 device_printf(sc->dev,
3206 "failed to install firmware: %d\n", rc);
3210 /* Installed successfully, update the cached header too. */
3211 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3213 need_fw_reset = 0; /* already reset as part of load_fw */
3216 if (!card_fw_usable) {
3219 d = ntohl(drv_fw->fw_ver);
3220 c = ntohl(card_fw->fw_ver);
3221 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3223 device_printf(sc->dev, "Cannot find a usable firmware: "
3224 "fw_install %d, chip state %d, "
3225 "driver compiled with %d.%d.%d.%d, "
3226 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3227 t4_fw_install, state,
3228 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3229 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3230 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3231 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3232 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3233 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3239 if (need_fw_reset &&
3240 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3241 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3242 if (rc != ETIMEDOUT && rc != EIO)
3243 t4_fw_bye(sc, sc->mbox);
3248 rc = get_params__pre_init(sc);
3250 goto done; /* error message displayed already */
3252 /* Partition adapter resources as specified in the config file. */
3253 if (state == DEV_STATE_UNINIT) {
3255 KASSERT(sc->flags & MASTER_PF,
3256 ("%s: trying to change chip settings when not master.",
3259 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3261 goto done; /* error message displayed already */
3263 t4_tweak_chip_settings(sc);
3265 /* get basic stuff going */
3266 rc = -t4_fw_initialize(sc, sc->mbox);
3268 device_printf(sc->dev, "fw init failed: %d.\n", rc);
3272 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3277 free(card_fw, M_CXGBE);
3279 firmware_put(fw, FIRMWARE_UNLOAD);
3280 if (default_cfg != NULL)
3281 firmware_put(default_cfg, FIRMWARE_UNLOAD);
3286 #define FW_PARAM_DEV(param) \
3287 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3288 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3289 #define FW_PARAM_PFVF(param) \
3290 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3291 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
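/*
 * Usage sketch (hypothetical, not driver code): a single-parameter
 * query built with the helpers above, mirroring the calls in
 * get_params__post_init() below.
 */
static int
example_query_l2t_start(struct adapter *sc, uint32_t *start)
{
	uint32_t param, val;
	int rc;

	param = FW_PARAM_PFVF(L2T_START);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc == 0)
		*start = val;
	return (rc);
}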
3294 * Partition chip resources for use between various PFs, VFs, etc.
3297 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
3298 const char *name_prefix)
3300 const struct firmware *cfg = NULL;
3302 struct fw_caps_config_cmd caps;
3303 uint32_t mtype, moff, finicsum, cfcsum;
3306 * Figure out what configuration file to use. Pick the default config
3307 * file for the card if the user hasn't specified one explicitly.
3309 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
3310 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3311 /* Card specific overrides go here. */
3312 if (pci_get_device(sc->dev) == 0x440a)
3313 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
3315 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
3319 * We need to load another module if the profile is anything except
3320 * "default" or "flash".
3322 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
3323 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3326 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
3327 cfg = firmware_get(s);
3329 if (default_cfg != NULL) {
3330 device_printf(sc->dev,
3331 "unable to load module \"%s\" for "
3332 "configuration profile \"%s\", will use "
3333 "the default config file instead.\n",
3335 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3338 device_printf(sc->dev,
3339 "unable to load module \"%s\" for "
3340 "configuration profile \"%s\", will use "
3341 "the config file on the card's flash "
3342 "instead.\n", s, sc->cfg_file);
3343 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
3349 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
3350 default_cfg == NULL) {
3351 device_printf(sc->dev,
3352 "default config file not available, will use the config "
3353 "file on the card's flash instead.\n");
3354 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
3357 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
3359 const uint32_t *cfdata;
3360 uint32_t param, val, addr;
3362 KASSERT(cfg != NULL || default_cfg != NULL,
3363 ("%s: no config to upload", __func__));
3366 * Ask the firmware where it wants us to upload the config file.
3368 param = FW_PARAM_DEV(CF);
3369 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3371 /* No support for config file? Shouldn't happen. */
3372 device_printf(sc->dev,
3373 "failed to query config file location: %d.\n", rc);
3376 mtype = G_FW_PARAMS_PARAM_Y(val);
3377 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3380 * XXX: sheer laziness. We deliberately added 4 bytes of
3381 * useless stuffing/comments at the end of the config file so
3382 * it's ok to simply throw away the last remaining bytes when
3383 * the config file is not an exact multiple of 4. This also
3384 * helps with the validate_mt_off_len check.
3387 cflen = cfg->datasize & ~3;
3390 cflen = default_cfg->datasize & ~3;
3391 cfdata = default_cfg->data;
3394 if (cflen > FLASH_CFG_MAX_SIZE) {
3395 device_printf(sc->dev,
3396 "config file too long (%d, max allowed is %d). "
3397 "Will try to use the config on the card, if any.\n",
3398 cflen, FLASH_CFG_MAX_SIZE);
3399 goto use_config_on_flash;
3402 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3404 device_printf(sc->dev,
3405 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
3406 "Will try to use the config on the card, if any.\n",
3407 __func__, mtype, moff, cflen, rc);
3408 goto use_config_on_flash;
3410 write_via_memwin(sc, 2, addr, cfdata, cflen);
3412 use_config_on_flash:
3413 mtype = FW_MEMTYPE_FLASH;
3414 moff = t4_flash_cfg_addr(sc);
3417 bzero(&caps, sizeof(caps));
3418 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3419 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3420 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3421 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3422 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
3423 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3425 device_printf(sc->dev,
3426 "failed to pre-process config file: %d "
3427 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
3431 finicsum = be32toh(caps.finicsum);
3432 cfcsum = be32toh(caps.cfcsum);
3433 if (finicsum != cfcsum) {
3434 device_printf(sc->dev,
3435 "WARNING: config file checksum mismatch: %08x %08x\n",
3438 sc->cfcsum = cfcsum;
3440 #define LIMIT_CAPS(x) do { \
3441 caps.x &= htobe16(t4_##x##_allowed); \
3445 * Let the firmware know what features will (not) be used so it can tune
3446 * things accordingly.
3448 LIMIT_CAPS(nbmcaps);
3449 LIMIT_CAPS(linkcaps);
3450 LIMIT_CAPS(switchcaps);
3451 LIMIT_CAPS(niccaps);
3452 LIMIT_CAPS(toecaps);
3453 LIMIT_CAPS(rdmacaps);
3454 LIMIT_CAPS(cryptocaps);
3455 LIMIT_CAPS(iscsicaps);
3456 LIMIT_CAPS(fcoecaps);
3459 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3460 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3461 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3462 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3464 device_printf(sc->dev,
3465 "failed to process config file: %d.\n", rc);
3469 firmware_put(cfg, FIRMWARE_UNLOAD);
3474 * Retrieve parameters that are needed (or nice to have) very early.
3477 get_params__pre_init(struct adapter *sc)
3480 uint32_t param[2], val[2];
3482 t4_get_version_info(sc);
3484 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3485 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3486 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3487 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3488 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3490 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3491 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3492 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3493 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3494 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3496 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3497 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3498 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3499 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3500 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3502 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3503 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3504 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3505 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3506 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3508 param[0] = FW_PARAM_DEV(PORTVEC);
3509 param[1] = FW_PARAM_DEV(CCLK);
3510 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3512 device_printf(sc->dev,
3513 "failed to query parameters (pre_init): %d.\n", rc);
3517 sc->params.portvec = val[0];
3518 sc->params.nports = bitcount32(val[0]);
3519 sc->params.vpd.cclk = val[1];
3521 /* Read device log parameters. */
3522 rc = -t4_init_devlog_params(sc, 1);
3524 fixup_devlog_params(sc);
3526 device_printf(sc->dev,
3527 "failed to get devlog parameters: %d.\n", rc);
3528 rc = 0; /* devlog isn't critical for device operation */
3535 * Retrieve various parameters that are of interest to the driver. The device
3536 * has been initialized by the firmware at this point.
3539 get_params__post_init(struct adapter *sc)
3542 uint32_t param[7], val[7];
3543 struct fw_caps_config_cmd caps;
3545 param[0] = FW_PARAM_PFVF(IQFLINT_START);
3546 param[1] = FW_PARAM_PFVF(EQ_START);
3547 param[2] = FW_PARAM_PFVF(FILTER_START);
3548 param[3] = FW_PARAM_PFVF(FILTER_END);
3549 param[4] = FW_PARAM_PFVF(L2T_START);
3550 param[5] = FW_PARAM_PFVF(L2T_END);
3551 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3553 device_printf(sc->dev,
3554 "failed to query parameters (post_init): %d.\n", rc);
3558 sc->sge.iq_start = val[0];
3559 sc->sge.eq_start = val[1];
3560 sc->tids.ftid_base = val[2];
3561 sc->tids.nftids = val[3] - val[2] + 1;
3562 sc->params.ftid_min = val[2];
3563 sc->params.ftid_max = val[3];
3564 sc->vres.l2t.start = val[4];
3565 sc->vres.l2t.size = val[5] - val[4] + 1;
3566 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
3567 ("%s: L2 table size (%u) larger than expected (%u)",
3568 __func__, sc->vres.l2t.size, L2T_SIZE));
3571 * MPSBGMAP is queried separately because only recent firmwares support
3572 * it as a parameter and we don't want the compound query above to fail
3573 * on older firmwares.
3575 param[0] = FW_PARAM_DEV(MPSBGMAP);
3577 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
3579 sc->params.mps_bg_map = val[0];
3581 sc->params.mps_bg_map = 0;
3583 /* get capabilities */
3584 bzero(&caps, sizeof(caps));
3585 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3586 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3587 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3588 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3590 device_printf(sc->dev,
3591 "failed to get card capabilities: %d.\n", rc);
3595 #define READ_CAPS(x) do { \
3596 sc->x = be16toh(caps.x); /* fw replies are big-endian */ \
3599 READ_CAPS(linkcaps);
3600 READ_CAPS(switchcaps);
3603 READ_CAPS(rdmacaps);
3604 READ_CAPS(cryptocaps);
3605 READ_CAPS(iscsicaps);
3606 READ_CAPS(fcoecaps);
3609 * The firmware attempts memfree TOE configuration for -SO cards and
3610 * will report toecaps=0 if it runs out of resources (this depends on
3611 * the config file). It may not report 0 for other capabilities
3612 * dependent on the TOE in this case. Set them to 0 here so that the
3613 * driver doesn't bother tracking resources that will never be used.
3615 if (sc->toecaps == 0) {
3620 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
3621 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
3622 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
3623 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3624 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
3626 device_printf(sc->dev,
3627 "failed to query NIC parameters: %d.\n", rc);
3630 sc->tids.etid_base = val[0];
3631 sc->params.etid_min = val[0];
3632 sc->tids.netids = val[1] - val[0] + 1;
3633 sc->params.netids = sc->tids.netids;
3634 sc->params.eo_wr_cred = val[2];
3635 sc->params.ethoffload = 1;
3639 /* query offload-related parameters */
3640 param[0] = FW_PARAM_DEV(NTID);
3641 param[1] = FW_PARAM_PFVF(SERVER_START);
3642 param[2] = FW_PARAM_PFVF(SERVER_END);
3643 param[3] = FW_PARAM_PFVF(TDDP_START);
3644 param[4] = FW_PARAM_PFVF(TDDP_END);
3645 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3646 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3648 device_printf(sc->dev,
3649 "failed to query TOE parameters: %d.\n", rc);
3652 sc->tids.ntids = val[0];
3653 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
3654 sc->tids.stid_base = val[1];
3655 sc->tids.nstids = val[2] - val[1] + 1;
3656 sc->vres.ddp.start = val[3];
3657 sc->vres.ddp.size = val[4] - val[3] + 1;
3658 sc->params.ofldq_wr_cred = val[5];
3659 sc->params.offload = 1;
3662 param[0] = FW_PARAM_PFVF(STAG_START);
3663 param[1] = FW_PARAM_PFVF(STAG_END);
3664 param[2] = FW_PARAM_PFVF(RQ_START);
3665 param[3] = FW_PARAM_PFVF(RQ_END);
3666 param[4] = FW_PARAM_PFVF(PBL_START);
3667 param[5] = FW_PARAM_PFVF(PBL_END);
3668 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3670 device_printf(sc->dev,
3671 "failed to query RDMA parameters(1): %d.\n", rc);
3674 sc->vres.stag.start = val[0];
3675 sc->vres.stag.size = val[1] - val[0] + 1;
3676 sc->vres.rq.start = val[2];
3677 sc->vres.rq.size = val[3] - val[2] + 1;
3678 sc->vres.pbl.start = val[4];
3679 sc->vres.pbl.size = val[5] - val[4] + 1;
3681 param[0] = FW_PARAM_PFVF(SQRQ_START);
3682 param[1] = FW_PARAM_PFVF(SQRQ_END);
3683 param[2] = FW_PARAM_PFVF(CQ_START);
3684 param[3] = FW_PARAM_PFVF(CQ_END);
3685 param[4] = FW_PARAM_PFVF(OCQ_START);
3686 param[5] = FW_PARAM_PFVF(OCQ_END);
3687 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3689 device_printf(sc->dev,
3690 "failed to query RDMA parameters(2): %d.\n", rc);
3693 sc->vres.qp.start = val[0];
3694 sc->vres.qp.size = val[1] - val[0] + 1;
3695 sc->vres.cq.start = val[2];
3696 sc->vres.cq.size = val[3] - val[2] + 1;
3697 sc->vres.ocq.start = val[4];
3698 sc->vres.ocq.size = val[5] - val[4] + 1;
3700 param[0] = FW_PARAM_PFVF(SRQ_START);
3701 param[1] = FW_PARAM_PFVF(SRQ_END);
3702 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
3703 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
3704 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
3706 device_printf(sc->dev,
3707 "failed to query RDMA parameters(3): %d.\n", rc);
3710 sc->vres.srq.start = val[0];
3711 sc->vres.srq.size = val[1] - val[0] + 1;
3712 sc->params.max_ordird_qp = val[2];
3713 sc->params.max_ird_adapter = val[3];
3715 if (sc->iscsicaps) {
3716 param[0] = FW_PARAM_PFVF(ISCSI_START);
3717 param[1] = FW_PARAM_PFVF(ISCSI_END);
3718 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3720 device_printf(sc->dev,
3721 "failed to query iSCSI parameters: %d.\n", rc);
3724 sc->vres.iscsi.start = val[0];
3725 sc->vres.iscsi.size = val[1] - val[0] + 1;
3728 t4_init_sge_params(sc);
3731 * We've got the params we wanted to query via the firmware. Now grab
3732 * some others directly from the chip.
3734 rc = t4_read_chip_settings(sc);
3740 set_params__post_init(struct adapter *sc)
3742 uint32_t param, val;
3747 /* ask for encapsulated CPLs */
3748 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3750 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3754 * Override the TOE timers with user-provided tunables. This is not the
3755 * recommended way to change the timers (the firmware config file is), so
3756 * these tunables are not documented.
3758 * All the timer tunables are in microseconds.
3760 if (t4_toe_keepalive_idle != 0) {
3761 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
3762 v &= M_KEEPALIVEIDLE;
3763 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
3764 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
3766 if (t4_toe_keepalive_interval != 0) {
3767 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
3768 v &= M_KEEPALIVEINTVL;
3769 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
3770 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
3772 if (t4_toe_keepalive_count != 0) {
3773 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
3774 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
3775 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
3776 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
3777 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
3779 if (t4_toe_rexmt_min != 0) {
3780 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
3782 t4_set_reg_field(sc, A_TP_RXT_MIN,
3783 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
3785 if (t4_toe_rexmt_max != 0) {
3786 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
3788 t4_set_reg_field(sc, A_TP_RXT_MAX,
3789 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
3791 if (t4_toe_rexmt_count != 0) {
3792 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
3793 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
3794 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
3795 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
3796 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
3798 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
3799 if (t4_toe_rexmt_backoff[i] != -1) {
3800 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
3801 shift = (i & 3) << 3;
3802 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
3803 M_TIMERBACKOFFINDEX0 << shift, v << shift);
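/*
 * For illustration: each 32-bit A_TP_TCP_BACKOFF_REG holds four 8-bit
 * backoff indices, so entry i lands in the register at byte offset
 * (i & ~3) from REG0, in byte lane (i & 3).  E.g. i = 5 targets
 * A_TP_TCP_BACKOFF_REG0 + 4 with a shift of 8 bits.
 */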
3810 #undef FW_PARAM_PFVF
3814 t4_set_desc(struct adapter *sc)
3817 struct adapter_params *p = &sc->params;
3819 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
3821 device_set_desc_copy(sc->dev, buf);
3825 build_medialist(struct port_info *pi, struct ifmedia *media)
3829 PORT_LOCK_ASSERT_OWNED(pi);
3831 ifmedia_removeall(media);
3834 * XXX: Would it be better to ifmedia_add all 4 combinations of pause
3835 * settings for every speed instead of just txpause|rxpause? ifconfig
3836 * media display looks much better if autoselect is the only case where
3837 * ifm_current is different from ifm_active. If the user picks anything
3838 * except txpause|rxpause the display is ugly.
3840 m = IFM_ETHER | IFM_FDX | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3842 switch(pi->port_type) {
3843 case FW_PORT_TYPE_BT_XFI:
3844 case FW_PORT_TYPE_BT_XAUI:
3845 ifmedia_add(media, m | IFM_10G_T, 0, NULL);
3848 case FW_PORT_TYPE_BT_SGMII:
3849 ifmedia_add(media, m | IFM_1000_T, 0, NULL);
3850 ifmedia_add(media, m | IFM_100_TX, 0, NULL);
3851 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
3852 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
3855 case FW_PORT_TYPE_CX4:
3856 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
3857 ifmedia_set(media, m | IFM_10G_CX4);
3860 case FW_PORT_TYPE_QSFP_10G:
3861 case FW_PORT_TYPE_SFP:
3862 case FW_PORT_TYPE_FIBER_XFI:
3863 case FW_PORT_TYPE_FIBER_XAUI:
3864 switch (pi->mod_type) {
3866 case FW_PORT_MOD_TYPE_LR:
3867 ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
3868 ifmedia_set(media, m | IFM_10G_LR);
3871 case FW_PORT_MOD_TYPE_SR:
3872 ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
3873 ifmedia_set(media, m | IFM_10G_SR);
3876 case FW_PORT_MOD_TYPE_LRM:
3877 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
3878 ifmedia_set(media, m | IFM_10G_LRM);
3881 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3882 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3883 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
3884 ifmedia_set(media, m | IFM_10G_TWINAX);
3887 case FW_PORT_MOD_TYPE_NONE:
3889 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3890 ifmedia_set(media, m | IFM_NONE);
3893 case FW_PORT_MOD_TYPE_NA:
3894 case FW_PORT_MOD_TYPE_ER:
3896 device_printf(pi->dev,
3897 "unknown port_type (%d), mod_type (%d)\n",
3898 pi->port_type, pi->mod_type);
3899 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3900 ifmedia_set(media, m | IFM_UNKNOWN);
3905 case FW_PORT_TYPE_CR_QSFP:
3906 case FW_PORT_TYPE_SFP28:
3907 case FW_PORT_TYPE_KR_SFP28:
3908 switch (pi->mod_type) {
3910 case FW_PORT_MOD_TYPE_SR:
3911 ifmedia_add(media, m | IFM_25G_SR, 0, NULL);
3912 ifmedia_set(media, m | IFM_25G_SR);
3915 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3916 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3917 ifmedia_add(media, m | IFM_25G_CR, 0, NULL);
3918 ifmedia_set(media, m | IFM_25G_CR);
3921 case FW_PORT_MOD_TYPE_NONE:
3923 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3924 ifmedia_set(media, m | IFM_NONE);
3928 device_printf(pi->dev,
3929 "unknown port_type (%d), mod_type (%d)\n",
3930 pi->port_type, pi->mod_type);
3931 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3932 ifmedia_set(media, m | IFM_UNKNOWN);
3937 case FW_PORT_TYPE_QSFP:
3938 switch (pi->mod_type) {
3940 case FW_PORT_MOD_TYPE_LR:
3941 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
3942 ifmedia_set(media, m | IFM_40G_LR4);
3945 case FW_PORT_MOD_TYPE_SR:
3946 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
3947 ifmedia_set(media, m | IFM_40G_SR4);
3950 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3951 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3952 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
3953 ifmedia_set(media, m | IFM_40G_CR4);
3956 case FW_PORT_MOD_TYPE_NONE:
3958 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3959 ifmedia_set(media, m | IFM_NONE);
3963 device_printf(pi->dev,
3964 "unknown port_type (%d), mod_type (%d)\n",
3965 pi->port_type, pi->mod_type);
3966 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
3967 ifmedia_set(media, m | IFM_UNKNOWN);
3972 case FW_PORT_TYPE_KR4_100G:
3973 case FW_PORT_TYPE_CR4_QSFP:
3974 switch (pi->mod_type) {
3976 case FW_PORT_MOD_TYPE_LR:
3977 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL);
3978 ifmedia_set(media, m | IFM_100G_LR4);
3981 case FW_PORT_MOD_TYPE_SR:
3982 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL);
3983 ifmedia_set(media, m | IFM_100G_SR4);
3986 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3987 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3988 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL);
3989 ifmedia_set(media, m | IFM_100G_CR4);
3992 case FW_PORT_MOD_TYPE_NONE:
3994 ifmedia_add(media, m | IFM_NONE, 0, NULL);
3995 ifmedia_set(media, m | IFM_NONE);
3999 device_printf(pi->dev,
4000 "unknown port_type (%d), mod_type (%d)\n",
4001 pi->port_type, pi->mod_type);
4002 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
4003 ifmedia_set(media, m | IFM_UNKNOWN);
4009 device_printf(pi->dev,
4010 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
4012 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
4013 ifmedia_set(media, m | IFM_UNKNOWN);
4019 * Update all the requested_* fields in the link config and then send a mailbox
4020 * command to apply the settings.
4023 init_l1cfg(struct port_info *pi)
4025 struct adapter *sc = pi->adapter;
4026 struct link_config *lc = &pi->link_cfg;
4029 ASSERT_SYNCHRONIZED_OP(sc);
4031 if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) {
4032 lc->requested_aneg = AUTONEG_ENABLE;
4033 lc->requested_speed = 0;
4035 lc->requested_aneg = AUTONEG_DISABLE;
4036 lc->requested_speed = port_top_speed(pi); /* in Gbps */
4039 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX);
4042 lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS |
4045 /* Use the suggested value provided by the firmware in acaps */
4046 if (lc->advertising & FW_PORT_CAP_FEC_RS)
4047 lc->requested_fec = FEC_RS;
4048 else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS)
4049 lc->requested_fec = FEC_BASER_RS;
4050 else if (lc->advertising & FW_PORT_CAP_FEC_RESERVED)
4051 lc->requested_fec = FEC_RESERVED;
4053 lc->requested_fec = 0;
4056 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
4058 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
4060 lc->fc = lc->requested_fc;
4061 lc->fec = lc->requested_fec;
4065 #define FW_MAC_EXACT_CHUNK 7
4068 * Program the port's XGMAC based on parameters in ifnet. The caller also
4069 * indicates which parameters should be programmed (the rest are left alone).
4072 update_mac_settings(struct ifnet *ifp, int flags)
4075 struct vi_info *vi = ifp->if_softc;
4076 struct port_info *pi = vi->pi;
4077 struct adapter *sc = pi->adapter;
4078 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
4080 ASSERT_SYNCHRONIZED_OP(sc);
4081 KASSERT(flags, ("%s: not told what to update.", __func__));
4083 if (flags & XGMAC_MTU)
4086 if (flags & XGMAC_PROMISC)
4087 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
4089 if (flags & XGMAC_ALLMULTI)
4090 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
4092 if (flags & XGMAC_VLANEX)
4093 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
4095 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
4096 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
4097 allmulti, 1, vlanex, false);
4099 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
4105 if (flags & XGMAC_UCADDR) {
4106 uint8_t ucaddr[ETHER_ADDR_LEN];
4108 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
4109 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
4110 ucaddr, true, true);
4113 if_printf(ifp, "change_mac failed: %d\n", rc);
4116 vi->xact_addr_filt = rc;
4121 if (flags & XGMAC_MCADDRS) {
4122 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
4125 struct ifmultiaddr *ifma;
4128 if_maddr_rlock(ifp);
4129 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4130 if (ifma->ifma_addr->sa_family != AF_LINK)
4133 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
4134 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
4137 if (i == FW_MAC_EXACT_CHUNK) {
4138 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
4139 del, i, mcaddr, NULL, &hash, 0);
4142 for (j = 0; j < i; j++) {
4144 "failed to add mc address"
4146 "%02x:%02x:%02x rc=%d\n",
4147 mcaddr[j][0], mcaddr[j][1],
4148 mcaddr[j][2], mcaddr[j][3],
4149 mcaddr[j][4], mcaddr[j][5],
4159 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
4160 mcaddr, NULL, &hash, 0);
4163 for (j = 0; j < i; j++) {
4165 "failed to add mc address"
4167 "%02x:%02x:%02x rc=%d\n",
4168 mcaddr[j][0], mcaddr[j][1],
4169 mcaddr[j][2], mcaddr[j][3],
4170 mcaddr[j][4], mcaddr[j][5],
4177 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
4179 if_printf(ifp, "failed to set mc address hash: %d", rc);
4181 if_maddr_runlock(ifp);
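/*
 * Usage sketch (hypothetical call site, not driver code): after an
 * SIOCSIFFLAGS ioctl only the rx-mode bits need to be reprogrammed, so
 * a caller passes just those XGMAC_* flags.
 */
static int
example_handle_ifflags(struct ifnet *ifp)
{

	return (update_mac_settings(ifp, XGMAC_PROMISC | XGMAC_ALLMULTI));
}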
4188 * {begin|end}_synchronized_op must be called from the same thread.
4191 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
4197 /* the caller thinks it's ok to sleep, but is it really? */
4198 if (flags & SLEEP_OK)
4199 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
4200 "begin_synchronized_op");
4211 if (vi && IS_DOOMED(vi)) {
4221 if (!(flags & SLEEP_OK)) {
4226 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
4232 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
4235 sc->last_op = wmesg;
4236 sc->last_op_thr = curthread;
4237 sc->last_op_flags = flags;
4241 if (!(flags & HOLD_LOCK) || rc)
4248 * Tell if_ioctl and if_init that the VI is going away. This is a
4249 * special variant of begin_synchronized_op and must be paired with a
4250 * call to end_synchronized_op.
4253 doom_vi(struct adapter *sc, struct vi_info *vi)
4260 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
4263 sc->last_op = "t4detach";
4264 sc->last_op_thr = curthread;
4265 sc->last_op_flags = 0;
4271 * {begin|end}_synchronized_op must be called from the same thread.
4274 end_synchronized_op(struct adapter *sc, int flags)
4277 if (flags & LOCK_HELD)
4278 ADAPTER_LOCK_ASSERT_OWNED(sc);
4282 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
4289 cxgbe_init_synchronized(struct vi_info *vi)
4291 struct port_info *pi = vi->pi;
4292 struct adapter *sc = pi->adapter;
4293 struct ifnet *ifp = vi->ifp;
4295 struct sge_txq *txq;
4297 ASSERT_SYNCHRONIZED_OP(sc);
4299 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4300 return (0); /* already running */
4302 if (!(sc->flags & FULL_INIT_DONE) &&
4303 ((rc = adapter_full_init(sc)) != 0))
4304 return (rc); /* error message displayed already */
4306 if (!(vi->flags & VI_INIT_DONE) &&
4307 ((rc = vi_full_init(vi)) != 0))
4308 return (rc); /* error message displayed already */
4310 rc = update_mac_settings(ifp, XGMAC_ALL);
4312 goto done; /* error message displayed already */
4314 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
4316 if_printf(ifp, "enable_vi failed: %d\n", rc);
4321 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
4325 for_each_txq(vi, i, txq) {
4327 txq->eq.flags |= EQ_ENABLED;
4332 * The first iq of the first port to come up is used for tracing.
4334 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
4335 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
4336 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
4337 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
4338 V_QUEUENUMBER(sc->traceq));
4339 pi->flags |= HAS_TRACEQ;
4344 if (pi->up_vis++ == 0) {
4345 t4_update_port_info(pi);
4346 build_medialist(pi, &pi->media);
4349 ifp->if_drv_flags |= IFF_DRV_RUNNING;
4351 if (pi->nvi > 1 || sc->flags & IS_VF)
4352 callout_reset(&vi->tick, hz, vi_tick, vi);
4354 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
4358 cxgbe_uninit_synchronized(vi);
4367 cxgbe_uninit_synchronized(struct vi_info *vi)
4369 struct port_info *pi = vi->pi;
4370 struct adapter *sc = pi->adapter;
4371 struct ifnet *ifp = vi->ifp;
4373 struct sge_txq *txq;
4375 ASSERT_SYNCHRONIZED_OP(sc);
4377 if (!(vi->flags & VI_INIT_DONE)) {
4378 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
4379 ("uninited VI is running"));
4384 * Disable the VI so that all its data in either direction is discarded
4385 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
4386 * tick) intact as the TP can deliver negative advice or data that it's
4387 * holding in its RAM (for an offloaded connection) even after the VI is
4390 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
4392 if_printf(ifp, "disable_vi failed: %d\n", rc);
4396 for_each_txq(vi, i, txq) {
4398 txq->eq.flags &= ~EQ_ENABLED;
4403 if (pi->nvi > 1 || sc->flags & IS_VF)
4404 callout_stop(&vi->tick);
4406 callout_stop(&pi->tick);
4407 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4411 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
4413 if (pi->up_vis > 0) {
4419 pi->link_cfg.link_ok = 0;
4420 pi->link_cfg.speed = 0;
4421 pi->link_cfg.link_down_rc = 255;
4422 t4_os_link_changed(pi);
4423 pi->old_link_cfg = pi->link_cfg;
4429 * It is ok for this function to fail midway and return right away. t4_detach
4430 * will walk the entire sc->irq list and clean up whatever is valid.
4433 t4_setup_intr_handlers(struct adapter *sc)
4435 int rc, rid, p, q, v;
4438 struct port_info *pi;
4440 struct sge *sge = &sc->sge;
4441 struct sge_rxq *rxq;
4443 struct sge_ofld_rxq *ofld_rxq;
4446 struct sge_nm_rxq *nm_rxq;
4449 int nbuckets = rss_getnumbuckets();
4456 rid = sc->intr_type == INTR_INTX ? 0 : 1;
4457 if (sc->intr_count == 1)
4458 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
4460 /* Multiple interrupts. */
4461 if (sc->flags & IS_VF)
4462 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
4463 ("%s: too few intr.", __func__));
4465 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
4466 ("%s: too few intr.", __func__));
4468 /* The first one is always error intr on PFs */
4469 if (!(sc->flags & IS_VF)) {
4470 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
4477 /* The second one is always the firmware event queue (first on VFs) */
4478 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
4484 for_each_port(sc, p) {
4486 for_each_vi(pi, v, vi) {
4487 vi->first_intr = rid - 1;
4489 if (vi->nnmrxq > 0) {
4490 int n = max(vi->nrxq, vi->nnmrxq);
4492 MPASS(vi->flags & INTR_RXQ);
4494 rxq = &sge->rxq[vi->first_rxq];
4496 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
4498 for (q = 0; q < n; q++) {
4499 snprintf(s, sizeof(s), "%x%c%x", p,
4505 irq->nm_rxq = nm_rxq++;
4507 rc = t4_alloc_irq(sc, irq, rid,
4508 t4_vi_intr, irq, s);
4515 } else if (vi->flags & INTR_RXQ) {
4516 for_each_rxq(vi, q, rxq) {
4517 snprintf(s, sizeof(s), "%x%c%x", p,
4519 rc = t4_alloc_irq(sc, irq, rid,
4524 bus_bind_intr(sc->dev, irq->res,
4525 rss_getcpu(q % nbuckets));
4533 if (vi->flags & INTR_OFLD_RXQ) {
4534 for_each_ofld_rxq(vi, q, ofld_rxq) {
4535 snprintf(s, sizeof(s), "%x%c%x", p,
4537 rc = t4_alloc_irq(sc, irq, rid,
4538 t4_intr, ofld_rxq, s);
4549 MPASS(irq == &sc->irq[sc->intr_count]);
4555 adapter_full_init(struct adapter *sc)
4559 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4560 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
4563 ASSERT_SYNCHRONIZED_OP(sc);
4564 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4565 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
4566 ("%s: FULL_INIT_DONE already", __func__));
4569 * queues that belong to the adapter (not any particular port).
4571 rc = t4_setup_adapter_queues(sc);
4575 for (i = 0; i < nitems(sc->tq); i++) {
4576 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
4577 taskqueue_thread_enqueue, &sc->tq[i]);
4578 if (sc->tq[i] == NULL) {
4579 device_printf(sc->dev,
4580 "failed to allocate task queue %d\n", i);
4584 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
4585 device_get_nameunit(sc->dev), i);
4588 MPASS(RSS_KEYSIZE == 40);
4589 rss_getkey((void *)&raw_rss_key[0]);
4590 for (i = 0; i < nitems(rss_key); i++) {
4591 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
4593 t4_write_rss_key(sc, &rss_key[0], -1, 1);
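/*
 * For illustration: the loop above hands the 40-byte kernel RSS key to
 * the hardware word-reversed, each word byte-swapped to big-endian,
 * i.e. rss_key[0] = htobe32(raw_rss_key[9]) through
 * rss_key[9] = htobe32(raw_rss_key[0]).
 */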
4596 if (!(sc->flags & IS_VF))
4598 sc->flags |= FULL_INIT_DONE;
4601 adapter_full_uninit(sc);
4607 adapter_full_uninit(struct adapter *sc)
4611 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4613 t4_teardown_adapter_queues(sc);
4615 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
4616 taskqueue_free(sc->tq[i]);
4620 sc->flags &= ~FULL_INIT_DONE;
4626 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
4627 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
4628 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
4629 RSS_HASHTYPE_RSS_UDP_IPV6)
4631 /* Translates kernel hash types to hardware. */
4633 hashconfig_to_hashen(int hashconfig)
4637 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
4638 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
4639 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
4640 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
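/*
 * There is no UDP-only hash control: UDPEN rides on the same 4-tuple
 * enable bit that governs TCP, so requesting UDP hashing also turns on
 * TCP 4-tuple hashing for that IP version.  vi_full_init reports any
 * hashes that were forced on this way.
 */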
4641 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
4642 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4643 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4645 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
4646 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4647 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4649 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
4650 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4651 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
4652 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4657 /* Translates hardware hash types to kernel. */
4659 hashen_to_hashconfig(int hashen)
4663 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
4665 * If UDP hashing was enabled it must have been enabled for
4666 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
4667 * enabling any 4-tuple hash is a nonsensical configuration.
4669 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4670 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
4672 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4673 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
4674 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4675 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
4677 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4678 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
4679 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4680 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
4681 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
4682 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
4683 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
4684 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
4686 return (hashconfig);
4691 vi_full_init(struct vi_info *vi)
4693 struct adapter *sc = vi->pi->adapter;
4694 struct ifnet *ifp = vi->ifp;
4696 struct sge_rxq *rxq;
4697 int rc, i, j, hashen;
4699 int nbuckets = rss_getnumbuckets();
4700 int hashconfig = rss_gethashconfig();
4704 ASSERT_SYNCHRONIZED_OP(sc);
4705 KASSERT((vi->flags & VI_INIT_DONE) == 0,
4706 ("%s: VI_INIT_DONE already", __func__));
4708 sysctl_ctx_init(&vi->ctx);
4709 vi->flags |= VI_SYSCTL_CTX;
4712 /* Allocate tx/rx/fl queues for this VI. */
4714 rc = t4_setup_vi_queues(vi);
4716 goto done; /* error message displayed already */
4719 /* Set up RSS for this VI.  Save a copy of the RSS table for later use. */
4721 if (vi->nrxq > vi->rss_size) {
4722 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
4723 "some queues will never receive traffic.\n", vi->nrxq,
4725 } else if (vi->rss_size % vi->nrxq) {
4726 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
4727 "expect uneven traffic distribution.\n", vi->nrxq,
4731 if (vi->nrxq != nbuckets) {
4732 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);"
4733 "performance will be impacted.\n", vi->nrxq, nbuckets);
4736 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
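/*
 * Populate the indirection table.  With kernel RSS each slot maps to the
 * abs_id of the rx queue backing that RSS bucket; otherwise the slots are
 * spread round-robin across this VI's rx queues.
 */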
4737 for (i = 0; i < vi->rss_size;) {
4739 j = rss_get_indirection_to_bucket(i);
4741 rxq = &sc->sge.rxq[vi->first_rxq + j];
4742 rss[i++] = rxq->iq.abs_id;
4744 for_each_rxq(vi, j, rxq) {
4745 rss[i++] = rxq->iq.abs_id;
4746 if (i == vi->rss_size)
4752 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
4755 if_printf(ifp, "rss_config failed: %d\n", rc);
4760 hashen = hashconfig_to_hashen(hashconfig);
4763 * We may have had to enable some hashes even though the global config
4764 * wants them disabled. This is a potential problem that must be
4765 * reported to the user.
4767 extra = hashen_to_hashconfig(hashen) ^ hashconfig;
4770 * If we consider only the supported hash types, then the enabled hashes
4771 * are a superset of the requested hashes. In other words, there cannot
4772 * be any supported hash that was requested but not enabled, but there
4773 * can be hashes that were not requested but had to be enabled.
4775 extra &= SUPPORTED_RSS_HASHTYPES;
4776 MPASS((extra & hashconfig) == 0);
4780 "global RSS config (0x%x) cannot be accommodated.\n",
4783 if (extra & RSS_HASHTYPE_RSS_IPV4)
4784 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
4785 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
4786 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
4787 if (extra & RSS_HASHTYPE_RSS_IPV6)
4788 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
4789 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
4790 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
4791 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
4792 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
4793 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
4794 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
4796 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
4797 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
4798 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4799 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
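/*
 * The first entry of the indirection table (rss[0]) is also passed to the
 * firmware as the default queue.
 */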
4801 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
4803 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
4808 vi->flags |= VI_INIT_DONE;
4820 vi_full_uninit(struct vi_info *vi)
4822 struct port_info *pi = vi->pi;
4823 struct adapter *sc = pi->adapter;
4825 struct sge_rxq *rxq;
4826 struct sge_txq *txq;
4828 struct sge_ofld_rxq *ofld_rxq;
4829 struct sge_wrq *ofld_txq;
4832 if (vi->flags & VI_INIT_DONE) {
4834 /* Need to quiesce queues. */
4836 /* XXX: Only for the first VI? */
4837 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
4838 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
4840 for_each_txq(vi, i, txq) {
4841 quiesce_txq(sc, txq);
4845 for_each_ofld_txq(vi, i, ofld_txq) {
4846 quiesce_wrq(sc, ofld_txq);
4850 for_each_rxq(vi, i, rxq) {
4851 quiesce_iq(sc, &rxq->iq);
4852 quiesce_fl(sc, &rxq->fl);
4856 for_each_ofld_rxq(vi, i, ofld_rxq) {
4857 quiesce_iq(sc, &ofld_rxq->iq);
4858 quiesce_fl(sc, &ofld_rxq->fl);
4861 free(vi->rss, M_CXGBE);
4862 free(vi->nm_rss, M_CXGBE);
4865 t4_teardown_vi_queues(vi);
4866 vi->flags &= ~VI_INIT_DONE;
4872 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
4874 struct sge_eq *eq = &txq->eq;
4875 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
4877 (void) sc; /* unused */
4881 MPASS((eq->flags & EQ_ENABLED) == 0);
4885 /* Wait for the mp_ring to empty. */
4886 while (!mp_ring_is_idle(txq->r)) {
4887 mp_ring_check_drainage(txq->r, 0);
4888 pause("rquiesce", 1);
4891 /* Then wait for the hardware to finish. */
4892 while (spg->cidx != htobe16(eq->pidx))
4893 pause("equiesce", 1);
4895 /* Finally, wait for the driver to reclaim all descriptors. */
4896 while (eq->cidx != eq->pidx)
4897 pause("dquiesce", 1);
4901 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
4908 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
4910 (void) sc; /* unused */
4912 /* Synchronize with the interrupt handler */
4913 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
4918 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
4920 mtx_lock(&sc->sfl_lock);
4922 fl->flags |= FL_DOOMED;
4924 callout_stop(&sc->sfl_callout);
4925 mtx_unlock(&sc->sfl_lock);
4927 KASSERT((fl->flags & FL_STARVING) == 0,
4928 ("%s: still starving", __func__));
4932 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
4933 driver_intr_t *handler, void *arg, char *name)
4938 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
4939 RF_SHAREABLE | RF_ACTIVE);
4940 if (irq->res == NULL) {
4941 device_printf(sc->dev,
4942 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
4946 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
4947 NULL, handler, arg, &irq->tag);
4949 device_printf(sc->dev,
4950 "failed to setup interrupt for rid %d, name %s: %d\n",
4953 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
4959 t4_free_irq(struct adapter *sc, struct irq *irq)
4962 bus_teardown_intr(sc->dev, irq->res, irq->tag);
4964 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
4966 bzero(irq, sizeof(*irq));
4972 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
4975 regs->version = chip_id(sc) | chip_rev(sc) << 10;
4976 t4_get_regs(sc, buf, regs->len);
4979 #define A_PL_INDIR_CMD 0x1f8
4981 #define S_PL_AUTOINC 31
4982 #define M_PL_AUTOINC 0x1U
4983 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
4984 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
4986 #define S_PL_VFID 20
4987 #define M_PL_VFID 0xffU
4988 #define V_PL_VFID(x) ((x) << S_PL_VFID)
4989 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
4992 #define M_PL_ADDR 0xfffffU
4993 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
4994 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
4996 #define A_PL_INDIR_DATA 0x1fc
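/*
 * Each VF MPS statistic is a 64-bit counter exposed as a lo/hi pair of
 * 32-bit registers.  A VF reads its own copy directly; a PF reaches into
 * the VF's register space through the PL indirect window, whose
 * auto-increment mode makes consecutive reads of A_PL_INDIR_DATA return
 * consecutive registers.
 */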
4999 read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
5003 mtx_assert(&sc->reg_lock, MA_OWNED);
5004 if (sc->flags & IS_VF) {
5005 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
5006 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
5008 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5009 V_PL_VFID(G_FW_VIID_VIN(viid)) |
5010 V_PL_ADDR(VF_MPS_REG(reg)));
5011 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
5012 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
5014 return (((uint64_t)stats[1]) << 32 | stats[0]);
5018 t4_get_vi_stats(struct adapter *sc, unsigned int viid,
5019 struct fw_vi_stats_vf *stats)
5022 #define GET_STAT(name) \
5023 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
5025 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
5026 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
5027 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
5028 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
5029 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
5030 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
5031 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
5032 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
5033 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
5034 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
5035 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
5036 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
5037 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
5038 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
5039 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
5040 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
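/*
 * Clear a VI's MPS statistics by writing zeros through the same
 * auto-incrementing PL indirect window that read_vf_stat uses.
 */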
5046 t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
5050 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5051 V_PL_VFID(G_FW_VIID_VIN(viid)) |
5052 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
5053 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
5054 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
5055 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
5059 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
5062 const struct timeval interval = {0, 250000}; /* 250ms */
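/* Rate-limit hardware stat reads: do nothing if the last refresh was less than 250ms ago. */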
5064 if (!(vi->flags & VI_INIT_DONE))
5068 timevalsub(&tv, &interval);
5069 if (timevalcmp(&tv, &vi->last_refreshed, <))
5072 mtx_lock(&sc->reg_lock);
5073 t4_get_vi_stats(sc, vi->viid, &vi->stats);
5074 getmicrotime(&vi->last_refreshed);
5075 mtx_unlock(&sc->reg_lock);
5079 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
5081 u_int i, v, tnl_cong_drops, bg_map;
5083 const struct timeval interval = {0, 250000}; /* 250ms */
5086 timevalsub(&tv, &interval);
5087 if (timevalcmp(&tv, &pi->last_refreshed, <))
5091 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
5092 bg_map = pi->mps_bg_map;
5094 i = ffs(bg_map) - 1;
5095 mtx_lock(&sc->reg_lock);
5096 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
5097 A_TP_MIB_TNL_CNG_DROP_0 + i);
5098 mtx_unlock(&sc->reg_lock);
5099 tnl_cong_drops += v;
5100 bg_map &= ~(1 << i);
5102 pi->tnl_cong_drops = tnl_cong_drops;
5103 getmicrotime(&pi->last_refreshed);
5107 cxgbe_tick(void *arg)
5109 struct port_info *pi = arg;
5110 struct adapter *sc = pi->adapter;
5112 PORT_LOCK_ASSERT_OWNED(pi);
5113 cxgbe_refresh_stats(sc, pi);
5115 callout_schedule(&pi->tick, hz);
5121 struct vi_info *vi = arg;
5122 struct adapter *sc = vi->pi->adapter;
5124 vi_refresh_stats(sc, vi);
5126 callout_schedule(&vi->tick, hz);
5130 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
5134 if (arg != ifp || ifp->if_type != IFT_ETHER)
5137 vlan = VLAN_DEVAT(ifp, vid);
5138 VLAN_SETCOOKIE(vlan, ifp);
5142 /* Should match the fw_caps_config_<foo> enums in t4fw_interface.h. */
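/*
 * These are printf %b format strings: the leading \20 selects base-16
 * output, and each subsequent entry is a 1-based bit number (as an octal
 * escape) followed by the name printed when that bit is set.
 */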
5144 static char *caps_decoder[] = {
5145 "\20\001IPMI\002NCSI", /* 0: NBM */
5146 "\20\001PPP\002QFC\003DCBX", /* 1: link */
5147 "\20\001INGRESS\002EGRESS", /* 2: switch */
5148 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
5149 "\006HASHFILTER\007ETHOFLD",
5150 "\20\001TOE", /* 4: TOE */
5151 "\20\001RDDP\002RDMAC", /* 5: RDMA */
5152 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
5153 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
5154 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
5156 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
5157 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */
5158 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
5159 "\004PO_INITIATOR\005PO_TARGET",
5163 t4_sysctls(struct adapter *sc)
5165 struct sysctl_ctx_list *ctx;
5166 struct sysctl_oid *oid;
5167 struct sysctl_oid_list *children, *c0;
5168 static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
5170 ctx = device_get_sysctl_ctx(sc->dev);
5175 oid = device_get_sysctl_tree(sc->dev);
5176 c0 = children = SYSCTL_CHILDREN(oid);
5178 sc->sc_do_rxcopy = 1;
5179 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5180 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5182 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5183 sc->params.nports, "# of ports");
5185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5186 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
5187 sysctl_bitfield, "A", "available doorbells");
5189 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
5190 sc->params.vpd.cclk, "core clock frequency (in kHz)");
5192 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5193 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
5194 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
5195 "interrupt holdoff timer values (us)");
5197 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5198 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
5199 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
5200 "interrupt holdoff packet counter values");
5202 t4_sge_sysctls(sc, ctx, children);
5204 sc->lro_timeout = 100;
5205 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5206 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5208 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
5209 &sc->debug_flags, 0, "flags to enable runtime debugging");
5211 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
5212 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
5214 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5215 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5217 if (sc->flags & IS_VF)
5220 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5221 NULL, chip_rev(sc), "chip hardware revision");
5223 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
5224 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
5226 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
5227 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
5229 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
5230 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
5232 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
5233 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
5235 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
5236 sc->er_version, 0, "expansion ROM version");
5238 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
5239 sc->bs_version, 0, "bootstrap firmware version");
5241 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
5242 NULL, sc->params.scfg_vers, "serial config version");
5244 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
5245 NULL, sc->params.vpd_vers, "VPD version");
5247 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5248 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5250 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5251 sc->cfcsum, "config file checksum");
5253 #define SYSCTL_CAP(name, n, text) \
5254 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
5255 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
5256 sysctl_bitfield, "A", "available " text " capabilities")
5258 SYSCTL_CAP(nbmcaps, 0, "NBM");
5259 SYSCTL_CAP(linkcaps, 1, "link");
5260 SYSCTL_CAP(switchcaps, 2, "switch");
5261 SYSCTL_CAP(niccaps, 3, "NIC");
5262 SYSCTL_CAP(toecaps, 4, "TCP offload");
5263 SYSCTL_CAP(rdmacaps, 5, "RDMA");
5264 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
5265 SYSCTL_CAP(cryptocaps, 7, "crypto");
5266 SYSCTL_CAP(fcoecaps, 8, "FCoE");
5269 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
5270 NULL, sc->tids.nftids, "number of filters");
5272 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
5273 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
5274 "chip temperature (in Celsius)");
5278 /* dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload. */
5280 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
5281 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
5282 "logs and miscellaneous information");
5283 children = SYSCTL_CHILDREN(oid);
5285 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
5286 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5287 sysctl_cctrl, "A", "congestion control");
5289 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
5290 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5291 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
5293 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
5294 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
5295 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
5297 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
5298 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
5299 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
5301 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
5302 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
5303 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
5305 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
5306 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
5307 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
5309 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
5310 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
5311 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
5313 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
5314 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5315 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
5316 "A", "CIM logic analyzer");
5318 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
5319 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5320 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
5322 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
5323 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
5324 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
5326 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
5327 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
5328 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
5330 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
5331 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
5332 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
5334 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
5335 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
5336 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
5338 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
5339 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
5340 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
5342 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
5343 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
5344 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
5346 if (chip_id(sc) > CHELSIO_T4) {
5347 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
5348 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
5349 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
5351 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
5352 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
5353 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
5356 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
5357 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5358 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
5360 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
5361 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5362 sysctl_cim_qcfg, "A", "CIM queue configuration");
5364 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
5365 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5366 sysctl_cpl_stats, "A", "CPL statistics");
5368 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
5369 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5370 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
5372 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
5373 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5374 sysctl_devlog, "A", "firmware's device log");
5376 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
5377 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5378 sysctl_fcoe_stats, "A", "FCoE statistics");
5380 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
5381 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5382 sysctl_hw_sched, "A", "hardware scheduler");
5384 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
5385 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5386 sysctl_l2t, "A", "hardware L2 table");
5388 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
5389 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5390 sysctl_lb_stats, "A", "loopback statistics");
5392 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
5393 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5394 sysctl_meminfo, "A", "memory regions");
5396 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
5397 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5398 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
5399 "A", "MPS TCAM entries");
5401 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
5402 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5403 sysctl_path_mtus, "A", "path MTUs");
5405 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
5406 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5407 sysctl_pm_stats, "A", "PM statistics");
5409 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
5410 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5411 sysctl_rdma_stats, "A", "RDMA statistics");
5413 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
5414 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5415 sysctl_tcp_stats, "A", "TCP statistics");
5417 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
5418 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5419 sysctl_tids, "A", "TID information");
5421 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
5422 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5423 sysctl_tp_err_stats, "A", "TP error statistics");
5425 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
5426 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
5427 "TP logic analyzer event capture mask");
5429 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
5430 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5431 sysctl_tp_la, "A", "TP logic analyzer");
5433 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
5434 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5435 sysctl_tx_rate, "A", "Tx rate");
5437 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
5438 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5439 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
5441 if (chip_id(sc) >= CHELSIO_T5) {
5442 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
5443 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
5444 sysctl_wcwr_stats, "A", "write combined work requests");
5449 if (is_offload(sc)) {
5456 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
5457 NULL, "TOE parameters");
5458 children = SYSCTL_CHILDREN(oid);
5460 sc->tt.cong_algorithm = -1;
5461 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
5462 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
5463 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
5466 sc->tt.sndbuf = 256 * 1024;
5467 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
5468 &sc->tt.sndbuf, 0, "max hardware send buffer size");
5471 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
5472 &sc->tt.ddp, 0, "DDP allowed");
5474 sc->tt.rx_coalesce = 1;
5475 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
5476 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
5478 sc->tt.tx_align = 1;
5479 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
5480 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
5482 sc->tt.tx_zcopy = 0;
5483 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
5484 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
5485 "Enable zero-copy aio_write(2)");
5487 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
5488 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
5489 "TP timer tick (us)");
5491 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
5492 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
5493 "TCP timestamp tick (us)");
5495 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
5496 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
5499 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
5500 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
5501 "IU", "DACK timer (us)");
5503 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
5504 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
5505 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)");
5507 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
5508 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
5509 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)");
5511 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
5512 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
5513 sysctl_tp_timer, "LU", "Persist timer min (us)");
5515 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
5516 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
5517 sysctl_tp_timer, "LU", "Persist timer max (us)");
5519 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
5520 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
5521 sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
5523 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
5524 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
5525 sysctl_tp_timer, "LU", "Keepalive interval timer (us)");
5527 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
5528 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
5529 sysctl_tp_timer, "LU", "Initial SRTT (us)");
5531 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
5532 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
5533 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
5535 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
5536 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
5537 sysctl_tp_shift_cnt, "IU",
5538 "Number of SYN retransmissions before abort");
5540 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
5541 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
5542 sysctl_tp_shift_cnt, "IU",
5543 "Number of retransmissions before abort");
5545 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
5546 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
5547 sysctl_tp_shift_cnt, "IU",
5548 "Number of keepalive probes before abort");
5550 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
5551 CTLFLAG_RD, NULL, "TOE retransmit backoffs");
5552 children = SYSCTL_CHILDREN(oid);
5553 for (i = 0; i < 16; i++) {
5554 snprintf(s, sizeof(s), "%u", i);
5555 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
5556 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
5557 "IU", "TOE retransmit backoff");
5564 vi_sysctls(struct vi_info *vi)
5566 struct sysctl_ctx_list *ctx;
5567 struct sysctl_oid *oid;
5568 struct sysctl_oid_list *children;
5570 ctx = device_get_sysctl_ctx(vi->dev);
5573 /* dev.v?(cxgbe|cxl).X. */
5575 oid = device_get_sysctl_tree(vi->dev);
5576 children = SYSCTL_CHILDREN(oid);
5578 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
5579 vi->viid, "VI identifier");
5580 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
5581 &vi->nrxq, 0, "# of rx queues");
5582 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
5583 &vi->ntxq, 0, "# of tx queues");
5584 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
5585 &vi->first_rxq, 0, "index of first rx queue");
5586 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
5587 &vi->first_txq, 0, "index of first tx queue");
5588 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
5589 vi->rss_size, "size of RSS indirection table");
5591 if (IS_MAIN_VI(vi)) {
5592 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
5593 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
5594 "Reserve queue 0 for non-flowid packets");
5598 if (vi->nofldrxq != 0) {
5599 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5601 "# of rx queues for offloaded TCP connections");
5602 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5604 "# of tx queues for offloaded TCP connections");
5605 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5606 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
5607 "index of first TOE rx queue");
5608 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5609 CTLFLAG_RD, &vi->first_ofld_txq, 0,
5610 "index of first TOE tx queue");
5611 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
5612 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
5613 sysctl_holdoff_tmr_idx_ofld, "I",
5614 "holdoff timer index for TOE queues");
5615 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
5616 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
5617 sysctl_holdoff_pktc_idx_ofld, "I",
5618 "holdoff packet counter index for TOE queues");
5622 if (vi->nnmrxq != 0) {
5623 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5624 &vi->nnmrxq, 0, "# of netmap rx queues");
5625 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5626 &vi->nnmtxq, 0, "# of netmap tx queues");
5627 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5628 CTLFLAG_RD, &vi->first_nm_rxq, 0,
5629 "index of first netmap rx queue");
5630 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5631 CTLFLAG_RD, &vi->first_nm_txq, 0,
5632 "index of first netmap tx queue");
5636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5637 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
5638 "holdoff timer index");
5639 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5640 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
5641 "holdoff packet counter index");
5643 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5644 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
5646 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5647 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
5652 cxgbe_sysctls(struct port_info *pi)
5654 struct sysctl_ctx_list *ctx;
5655 struct sysctl_oid *oid;
5656 struct sysctl_oid_list *children, *children2;
5657 struct adapter *sc = pi->adapter;
5661 ctx = device_get_sysctl_ctx(pi->dev);
5666 oid = device_get_sysctl_tree(pi->dev);
5667 children = SYSCTL_CHILDREN(oid);
5669 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5670 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5671 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5672 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5673 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5674 "PHY temperature (in Celsius)");
5675 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5676 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5677 "PHY firmware version");
5680 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5681 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
5682 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5683 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
5684 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
5685 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
5686 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
5687 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
5688 "autonegotiation (-1 = not supported)");
5690 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
5691 port_top_speed(pi), "max speed (in Gbps)");
5692 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
5693 pi->mps_bg_map, "MPS buffer group map");
5694 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
5695 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
5697 if (sc->flags & IS_VF)
5701 /* dev.(cxgbe|cxl).X.tc. */
5703 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
5704 "Tx scheduler traffic classes (cl_rl)");
5705 for (i = 0; i < sc->chip_params->nsched_cls; i++) {
5706 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
5708 snprintf(name, sizeof(name), "%d", i);
5709 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
5710 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
5712 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
5713 &tc->flags, 0, "flags");
5714 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
5715 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
5717 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
5718 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
5719 sysctl_tc_params, "A", "traffic class parameters");
5724 /* dev.cxgbe.X.stats. */
5726 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5727 NULL, "port statistics");
5728 children = SYSCTL_CHILDREN(oid);
5729 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5730 &pi->tx_parse_error, 0,
5731 "# of tx packets with invalid length or # of segments");
5733 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5734 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5735 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5736 sysctl_handle_t4_reg64, "QU", desc)
5738 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5739 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5740 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5741 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5742 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5743 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5744 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5745 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5746 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5747 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5748 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5749 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5750 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5751 "# of tx frames in this range",
5752 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5753 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5754 "# of tx frames in this range",
5755 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5756 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5757 "# of tx frames in this range",
5758 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5759 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5760 "# of tx frames in this range",
5761 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5762 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5763 "# of tx frames in this range",
5764 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5765 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5766 "# of tx frames in this range",
5767 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5768 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5769 "# of tx frames in this range",
5770 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5771 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5772 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5773 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5774 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5775 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5776 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5777 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5778 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5779 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5780 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5781 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5782 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5783 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5784 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5785 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5786 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5787 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5788 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5789 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5790 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5792 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5793 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5794 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5795 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5796 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5797 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5798 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5799 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5800 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5801 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5802 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5803 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5804 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5805 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5806 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5807 "# of frames received with bad FCS",
5808 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5809 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5810 "# of frames received with length error",
5811 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5812 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5813 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5814 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5815 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5816 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5817 "# of rx frames in this range",
5818 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5819 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5820 "# of rx frames in this range",
5821 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5822 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5823 "# of rx frames in this range",
5824 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5825 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5826 "# of rx frames in this range",
5827 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5828 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5829 "# of rx frames in this range",
5830 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5831 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5832 "# of rx frames in this range",
5833 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5834 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5835 "# of rx frames in this range",
5836 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5837 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5838 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5839 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5840 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5841 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5842 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5843 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5844 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5845 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5846 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5847 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5848 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5849 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5850 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5851 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5852 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5853 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5854 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5856 #undef SYSCTL_ADD_T4_REG64
5858 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5859 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5860 &pi->stats.name, desc)
5862 /* We get these from port_stats and they may be stale by up to 1s */
5863 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5864 "# drops due to buffer-group 0 overflows");
5865 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5866 "# drops due to buffer-group 1 overflows");
5867 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5868 "# drops due to buffer-group 2 overflows");
5869 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5870 "# drops due to buffer-group 3 overflows");
5871 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5872 "# of buffer-group 0 truncated packets");
5873 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5874 "# of buffer-group 1 truncated packets");
5875 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5876 "# of buffer-group 2 truncated packets");
5877 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5878 "# of buffer-group 3 truncated packets");
5880 #undef SYSCTL_ADD_T4_PORTSTAT
5884 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5886 int rc, *i, space = 0;
5889 sbuf_new_for_sysctl(&sb, NULL, 64, req);
5890 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5892 sbuf_printf(&sb, " ");
5893 sbuf_printf(&sb, "%d", *i);
5896 rc = sbuf_finish(&sb);
5902 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5907 rc = sysctl_wire_old_buffer(req, 0);
5911 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5915 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5916 rc = sbuf_finish(sb);
5923 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5925 struct port_info *pi = arg1;
5927 struct adapter *sc = pi->adapter;
5931 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
5934 /* XXX: magic numbers */
5935 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5937 end_synchronized_op(sc, 0);
5943 rc = sysctl_handle_int(oidp, &v, 0, req);
5948 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5950 struct vi_info *vi = arg1;
5953 val = vi->rsrv_noflowq;
5954 rc = sysctl_handle_int(oidp, &val, 0, req);
5955 if (rc != 0 || req->newptr == NULL)
5958 if ((val >= 1) && (vi->ntxq > 1))
5959 vi->rsrv_noflowq = 1;
5961 vi->rsrv_noflowq = 0;
5967 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5969 struct vi_info *vi = arg1;
5970 struct adapter *sc = vi->pi->adapter;
5972 struct sge_rxq *rxq;
5977 rc = sysctl_handle_int(oidp, &idx, 0, req);
5978 if (rc != 0 || req->newptr == NULL)
5981 if (idx < 0 || idx >= SGE_NTIMERS)
5984 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5989 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
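/*
 * intr_params is a single byte consumed by the rx interrupt path, so use
 * an atomic release store where the platform provides one; a running
 * handler then observes a consistent value for the new holdoff settings.
 */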
5990 for_each_rxq(vi, i, rxq) {
5991 #ifdef atomic_store_rel_8
5992 atomic_store_rel_8(&rxq->iq.intr_params, v);
5994 rxq->iq.intr_params = v;
5999 end_synchronized_op(sc, LOCK_HELD);
6004 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
6006 struct vi_info *vi = arg1;
6007 struct adapter *sc = vi->pi->adapter;
6012 rc = sysctl_handle_int(oidp, &idx, 0, req);
6013 if (rc != 0 || req->newptr == NULL)
6016 if (idx < -1 || idx >= SGE_NCOUNTERS)
6019 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6024 if (vi->flags & VI_INIT_DONE)
6025 rc = EBUSY; /* cannot be changed once the queues are created */
6029 end_synchronized_op(sc, LOCK_HELD);
6034 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
6036 struct vi_info *vi = arg1;
6037 struct adapter *sc = vi->pi->adapter;
6040 qsize = vi->qsize_rxq;
6042 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6043 if (rc != 0 || req->newptr == NULL)
6046 if (qsize < 128 || (qsize & 7))
6049 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6054 if (vi->flags & VI_INIT_DONE)
6055 rc = EBUSY; /* cannot be changed once the queues are created */
6057 vi->qsize_rxq = qsize;
6059 end_synchronized_op(sc, LOCK_HELD);
6064 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
6066 struct vi_info *vi = arg1;
6067 struct adapter *sc = vi->pi->adapter;
6070 qsize = vi->qsize_txq;
6072 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6073 if (rc != 0 || req->newptr == NULL)
6076 if (qsize < 128 || qsize > 65536)
6079 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6084 if (vi->flags & VI_INIT_DONE)
6085 rc = EBUSY; /* cannot be changed once the queues are created */
6087 vi->qsize_txq = qsize;
6089 end_synchronized_op(sc, LOCK_HELD);
6094 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
6096 struct port_info *pi = arg1;
6097 struct adapter *sc = pi->adapter;
6098 struct link_config *lc = &pi->link_cfg;
6101 if (req->newptr == NULL) {
6103 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
6105 rc = sysctl_wire_old_buffer(req, 0);
6109 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6113 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
6114 rc = sbuf_finish(sb);
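/* Writes take a single digit 0-3: bit 0 = rx pause, bit 1 = tx pause. */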
6120 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
6123 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6129 if (s[0] < '0' || s[0] > '9')
6130 return (EINVAL); /* not a number */
6132 if (n & ~(PAUSE_TX | PAUSE_RX))
6133 return (EINVAL); /* some other bit is set too */
6135 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6139 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
6140 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
6141 lc->requested_fc |= n;
6142 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6144 lc->fc = lc->requested_fc;
6147 end_synchronized_op(sc, 0);
6154 sysctl_fec(SYSCTL_HANDLER_ARGS)
6156 struct port_info *pi = arg1;
6157 struct adapter *sc = pi->adapter;
6158 struct link_config *lc = &pi->link_cfg;
6161 if (req->newptr == NULL) {
6163 static char *bits = "\20\1RS\2BASER_RS\3RESERVED";
6165 rc = sysctl_wire_old_buffer(req, 0);
6169 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6173 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
6174 rc = sbuf_finish(sb);
6180 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
6183 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6189 if (s[0] < '0' || s[0] > '9')
6190 return (EINVAL); /* not a number */
6192 if (n & ~M_FW_PORT_CAP_FEC)
6193 return (EINVAL); /* some other bit is set too */
6195 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6199 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
6200 lc->requested_fec = n &
6201 G_FW_PORT_CAP_FEC(lc->supported);
6202 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6204 lc->fec = lc->requested_fec;
6207 end_synchronized_op(sc, 0);
6214 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
6216 struct port_info *pi = arg1;
6217 struct adapter *sc = pi->adapter;
6218 struct link_config *lc = &pi->link_cfg;
6221 if (lc->supported & FW_PORT_CAP_ANEG)
6222 val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0;
6225 rc = sysctl_handle_int(oidp, &val, 0, req);
6226 if (rc != 0 || req->newptr == NULL)
6228 if ((lc->supported & FW_PORT_CAP_ANEG) == 0)
6232 val = AUTONEG_DISABLE;
6234 val = AUTONEG_ENABLE;
6237 if (lc->requested_aneg == val)
6238 return (0); /* no change */
6240 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6244 old = lc->requested_aneg;
6245 lc->requested_aneg = val;
6246 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6248 lc->requested_aneg = old;
6249 end_synchronized_op(sc, 0);
6254 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
6256 struct adapter *sc = arg1;
6260 val = t4_read_reg64(sc, reg);
6262 return (sysctl_handle_64(oidp, &val, 0, req));
6266 sysctl_temperature(SYSCTL_HANDLER_ARGS)
6268 struct adapter *sc = arg1;
6270 uint32_t param, val;
6272 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
6275 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6276 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
6277 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
6278 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6279 end_synchronized_op(sc, 0);
6283 /* unknown is returned as 0 but we display -1 in that case */
6284 t = val == 0 ? -1 : val;
6286 rc = sysctl_handle_int(oidp, &t, 0, req);
6292 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
6294 struct adapter *sc = arg1;
6297 uint16_t incr[NMTUS][NCCTRL_WIN];
6298 static const char *dec_fac[] = {
6299 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
6303 rc = sysctl_wire_old_buffer(req, 0);
6307 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6311 t4_read_cong_tbl(sc, incr);
6313 for (i = 0; i < NCCTRL_WIN; ++i) {
6314 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
6315 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
6316 incr[5][i], incr[6][i], incr[7][i]);
6317 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
6318 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
6319 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
6320 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
6323 rc = sbuf_finish(sb);
6329 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
6330 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
6331 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
6332 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
6336 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
6338 struct adapter *sc = arg1;
6340 int rc, i, n, qid = arg2;
6343 u_int cim_num_obq = sc->chip_params->cim_num_obq;
6345 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
6346 ("%s: bad qid %d\n", __func__, qid));
6348 if (qid < CIM_NUM_IBQ) {
6351 n = 4 * CIM_IBQ_SIZE;
6352 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6353 rc = t4_read_cim_ibq(sc, qid, buf, n);
6355 /* outbound queue */
6358 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
6359 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
6360 rc = t4_read_cim_obq(sc, qid, buf, n);
6367 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
6369 rc = sysctl_wire_old_buffer(req, 0);
6373 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6379 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
6380 for (i = 0, p = buf; i < n; i += 16, p += 4)
6381 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
6384 rc = sbuf_finish(sb);
6392 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6394 struct adapter *sc = arg1;
6400 MPASS(chip_id(sc) <= CHELSIO_T5);
6402 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6406 rc = sysctl_wire_old_buffer(req, 0);
6410 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6414 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6417 rc = -t4_cim_read_la(sc, buf, NULL);
6421 sbuf_printf(sb, "Status Data PC%s",
6422 cfg & F_UPDBGLACAPTPCONLY ? "" :
6423 " LS0Stat LS0Addr LS0Data");
6425 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
6426 if (cfg & F_UPDBGLACAPTPCONLY) {
6427 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
6429 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
6430 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6431 p[4] & 0xff, p[5] >> 8);
6432 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
6433 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6434 p[1] & 0xf, p[2] >> 4);
6437 "\n %02x %x%07x %x%07x %08x %08x "
6439 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6440 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6445 rc = sbuf_finish(sb);
6453 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
6455 struct adapter *sc = arg1;
6461 MPASS(chip_id(sc) > CHELSIO_T5);
6463 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6467 rc = sysctl_wire_old_buffer(req, 0);
6471 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6475 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6478 rc = -t4_cim_read_la(sc, buf, NULL);
6482 sbuf_printf(sb, "Status Inst Data PC%s",
6483 cfg & F_UPDBGLACAPTPCONLY ? "" :
6484 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
6486 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
6487 if (cfg & F_UPDBGLACAPTPCONLY) {
6488 sbuf_printf(sb, "\n %02x %08x %08x %08x",
6489 p[3] & 0xff, p[2], p[1], p[0]);
6490 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
6491 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
6492 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
6493 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
6494 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
6495 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
6498 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
6499 "%08x %08x %08x %08x %08x %08x",
6500 (p[9] >> 16) & 0xff,
6501 p[9] & 0xffff, p[8] >> 16,
6502 p[8] & 0xffff, p[7] >> 16,
6503 p[7] & 0xffff, p[6] >> 16,
6504 p[2], p[1], p[0], p[5], p[4], p[3]);
6508 rc = sbuf_finish(sb);
6516 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
6518 struct adapter *sc = arg1;
6524 rc = sysctl_wire_old_buffer(req, 0);
6528 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6532 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
6535 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
6538 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6539 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
6543 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
6544 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
6545 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
6546 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
6547 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
6548 (p[1] >> 2) | ((p[2] & 3) << 30),
6549 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
6553 rc = sbuf_finish(sb);
6560 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
6562 struct adapter *sc = arg1;
6568 rc = sysctl_wire_old_buffer(req, 0);
6572 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6576 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
6579 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
6582 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
6583 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6584 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
6585 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
6586 p[4], p[3], p[2], p[1], p[0]);
6589 sbuf_printf(sb, "\n\nCntl ID Data");
6590 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
6591 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
6592 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
6595 rc = sbuf_finish(sb);
6602 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
6604 struct adapter *sc = arg1;
6607 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6608 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
6609 uint16_t thres[CIM_NUM_IBQ];
6610 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
6611 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
6612 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
6614 cim_num_obq = sc->chip_params->cim_num_obq;
6616 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
6617 obq_rdaddr = A_UP_OBQ_0_REALADDR;
6619 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
6620 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
6622 nq = CIM_NUM_IBQ + cim_num_obq;
6624 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
6626 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
6630 t4_read_cimq_cfg(sc, base, size, thres);
6632 rc = sysctl_wire_old_buffer(req, 0);
6636 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
6641 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
6643 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
6644 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
6645 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
6646 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6647 G_QUEREMFLITS(p[2]) * 16);
6648 for ( ; i < nq; i++, p += 4, wr += 2)
6649 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
6650 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
6651 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
6652 G_QUEREMFLITS(p[2]) * 16);
6654 rc = sbuf_finish(sb);
6661 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
6663 struct adapter *sc = arg1;
6666 struct tp_cpl_stats stats;
6668 rc = sysctl_wire_old_buffer(req, 0);
6672 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6676 mtx_lock(&sc->reg_lock);
6677 t4_tp_get_cpl_stats(sc, &stats, 0);
6678 mtx_unlock(&sc->reg_lock);
6680 if (sc->chip_params->nchan > 2) {
6681 sbuf_printf(sb, " channel 0 channel 1"
6682 " channel 2 channel 3");
6683 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
6684 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
6685 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
6686 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
6688 sbuf_printf(sb, " channel 0 channel 1");
6689 sbuf_printf(sb, "\nCPL requests: %10u %10u",
6690 stats.req[0], stats.req[1]);
6691 sbuf_printf(sb, "\nCPL responses: %10u %10u",
6692 stats.rsp[0], stats.rsp[1]);
6695 rc = sbuf_finish(sb);
6702 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
6704 struct adapter *sc = arg1;
6707 struct tp_usm_stats stats;
6709 rc = sysctl_wire_old_buffer(req, 0);
6713 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6717 t4_get_usm_stats(sc, &stats, 1);
6719 sbuf_printf(sb, "Frames: %u\n", stats.frames);
6720 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
6721 sbuf_printf(sb, "Drops: %u", stats.drops);
6723 rc = sbuf_finish(sb);
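/*
 * String tables for the firmware device log.  Designated initializers keep
 * each name at the index of its FW_DEVLOG_* constant; the lookups in
 * sysctl_devlog() guard with nitems() and fall back to "UNKNOWN" for
 * out-of-range values.
 */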
6729 static const char * const devlog_level_strings[] = {
6730 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
6731 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
6732 [FW_DEVLOG_LEVEL_ERR] = "ERR",
6733 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
6734 [FW_DEVLOG_LEVEL_INFO] = "INFO",
6735 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
6738 static const char * const devlog_facility_strings[] = {
6739 [FW_DEVLOG_FACILITY_CORE] = "CORE",
6740 [FW_DEVLOG_FACILITY_CF] = "CF",
6741 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
6742 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
6743 [FW_DEVLOG_FACILITY_RES] = "RES",
6744 [FW_DEVLOG_FACILITY_HW] = "HW",
6745 [FW_DEVLOG_FACILITY_FLR] = "FLR",
6746 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
6747 [FW_DEVLOG_FACILITY_PHY] = "PHY",
6748 [FW_DEVLOG_FACILITY_MAC] = "MAC",
6749 [FW_DEVLOG_FACILITY_PORT] = "PORT",
6750 [FW_DEVLOG_FACILITY_VI] = "VI",
6751 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
6752 [FW_DEVLOG_FACILITY_ACL] = "ACL",
6753 [FW_DEVLOG_FACILITY_TM] = "TM",
6754 [FW_DEVLOG_FACILITY_QFC] = "QFC",
6755 [FW_DEVLOG_FACILITY_DCB] = "DCB",
6756 [FW_DEVLOG_FACILITY_ETH] = "ETH",
6757 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
6758 [FW_DEVLOG_FACILITY_RI] = "RI",
6759 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
6760 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
6761 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
6762 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
6763 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
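/*
 * The devlog is a fixed-size ring in adapter memory.  The handler snapshots
 * the whole ring through memory window 1, byte-swaps each entry, locates
 * the oldest entry (smallest timestamp) and prints from there, wrapping
 * until it is back at the starting index.  Typical usage from userland is
 * something like the following (sysctl node name assumed, not taken from
 * this file):
 *
 *	# sysctl -n dev.t4nex.0.misc.devlog
 */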
6767 sysctl_devlog(SYSCTL_HANDLER_ARGS)
6769 struct adapter *sc = arg1;
6770 struct devlog_params *dparams = &sc->params.devlog;
6771 struct fw_devlog_e *buf, *e;
6772 int i, j, rc, nentries, first = 0;
6774 uint64_t ftstamp = UINT64_MAX;
6776 if (dparams->addr == 0)
6779 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
6783 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
6787 nentries = dparams->size / sizeof(struct fw_devlog_e);
6788 for (i = 0; i < nentries; i++) {
6791 if (e->timestamp == 0)
6794 e->timestamp = be64toh(e->timestamp);
6795 e->seqno = be32toh(e->seqno);
6796 for (j = 0; j < 8; j++)
6797 e->params[j] = be32toh(e->params[j]);
6799 if (e->timestamp < ftstamp) {
6800 ftstamp = e->timestamp;
6805 if (buf[first].timestamp == 0)
6806 goto done; /* nothing in the log */
6808 rc = sysctl_wire_old_buffer(req, 0);
6812 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6817 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
6818 "Seq#", "Tstamp", "Level", "Facility", "Message");
6823 if (e->timestamp == 0)
6826 sbuf_printf(sb, "%10d %15ju %8s %8s ",
6827 e->seqno, e->timestamp,
6828 (e->level < nitems(devlog_level_strings) ?
6829 devlog_level_strings[e->level] : "UNKNOWN"),
6830 (e->facility < nitems(devlog_facility_strings) ?
6831 devlog_facility_strings[e->facility] : "UNKNOWN"));
6832 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6833 e->params[2], e->params[3], e->params[4],
6834 e->params[5], e->params[6], e->params[7]);
6836 if (++i == nentries)
6838 } while (i != first);
6840 rc = sbuf_finish(sb);
6848 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6850 struct adapter *sc = arg1;
6853 struct tp_fcoe_stats stats[MAX_NCHAN];
6854 int i, nchan = sc->chip_params->nchan;
6856 rc = sysctl_wire_old_buffer(req, 0);
6860 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6864 for (i = 0; i < nchan; i++)
6865 t4_get_fcoe_stats(sc, i, &stats[i], 1);
6868 sbuf_printf(sb, " channel 0 channel 1"
6869 " channel 2 channel 3");
6870 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
6871 stats[0].octets_ddp, stats[1].octets_ddp,
6872 stats[2].octets_ddp, stats[3].octets_ddp);
6873 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
6874 stats[0].frames_ddp, stats[1].frames_ddp,
6875 stats[2].frames_ddp, stats[3].frames_ddp);
6876 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
6877 stats[0].frames_drop, stats[1].frames_drop,
6878 stats[2].frames_drop, stats[3].frames_drop);
6880 sbuf_printf(sb, " channel 0 channel 1");
6881 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
6882 stats[0].octets_ddp, stats[1].octets_ddp);
6883 sbuf_printf(sb, "\nframesDDP: %16u %16u",
6884 stats[0].frames_ddp, stats[1].frames_ddp);
6885 sbuf_printf(sb, "\nframesDrop: %16u %16u",
6886 stats[0].frames_drop, stats[1].frames_drop);
6889 rc = sbuf_finish(sb);
6896 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6898 struct adapter *sc = arg1;
6901 unsigned int map, kbps, ipg, mode;
6902 unsigned int pace_tab[NTX_SCHED];
6904 rc = sysctl_wire_old_buffer(req, 0);
6908 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6912 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6913 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6914 t4_read_pace_tbl(sc, pace_tab);
6916 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
6917 "Class IPG (0.1 ns) Flow IPG (us)");
6919 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6920 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
6921 sbuf_printf(sb, "\n %u %-5s %u ", i,
6922 (mode & (1 << i)) ? "flow" : "class", map & 3);
6924 sbuf_printf(sb, "%9u ", kbps);
6926 sbuf_printf(sb, " disabled ");
6929 sbuf_printf(sb, "%13u ", ipg);
6931 sbuf_printf(sb, " disabled ");
6934 sbuf_printf(sb, "%10u", pace_tab[i]);
6936 sbuf_printf(sb, " disabled");
6939 rc = sbuf_finish(sb);
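/*
 * Loopback port statistics.  t4_get_lb_stats() fills one lb_port_stats per
 * channel, so the loop walks the channels two at a time and prints each
 * pair side by side under "Loopback i" / "Loopback i+1" headings.
 */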
6946 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6948 struct adapter *sc = arg1;
6952 struct lb_port_stats s[2];
6953 static const char *stat_name[] = {
6954 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6955 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6956 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
6957 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6958 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6959 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6960 "BG2FramesTrunc:", "BG3FramesTrunc:"
6963 rc = sysctl_wire_old_buffer(req, 0);
6967 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6971 memset(s, 0, sizeof(s));
6973 for (i = 0; i < sc->chip_params->nchan; i += 2) {
6974 t4_get_lb_stats(sc, i, &s[0]);
6975 t4_get_lb_stats(sc, i + 1, &s[1]);
6979 sbuf_printf(sb, "%s Loopback %u"
6980 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6982 for (j = 0; j < nitems(stat_name); j++)
6983 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6987 rc = sbuf_finish(sb);
6994 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6997 struct port_info *pi = arg1;
6998 struct link_config *lc = &pi->link_cfg;
7001 rc = sysctl_wire_old_buffer(req, 0);
7004 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
7008 if (lc->link_ok || lc->link_down_rc == 255)
7009 sbuf_printf(sb, "n/a");
7011 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
7013 rc = sbuf_finish(sb);
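/*
 * Card memory layout.  sysctl_meminfo() collects the enabled memories
 * (EDC0/EDC1/MC/MC1) from the MA BAR registers (base and size are in MB,
 * hence the << 20), gathers the base address of every hardware region, and
 * qsorts both lists with mem_desc_cmp() so that each region's limit can be
 * inferred from the next region's base.  Gaps between available memories
 * are inserted as explicit holes and kept out of the output by setting
 * idx = nitems(region).
 */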
7026 mem_desc_cmp(const void *a, const void *b)
7028 return ((const struct mem_desc *)a)->base -
7029 ((const struct mem_desc *)b)->base;
7033 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
7041 size = to - from + 1;
7045 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
7046 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
7050 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
7052 struct adapter *sc = arg1;
7055 uint32_t lo, hi, used, alloc;
7056 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
7057 static const char *region[] = {
7058 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
7059 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
7060 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
7061 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
7062 "RQUDP region:", "PBL region:", "TXPBL region:",
7063 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
7066 struct mem_desc avail[4];
7067 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
7068 struct mem_desc *md = mem;
7070 rc = sysctl_wire_old_buffer(req, 0);
7074 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7078 for (i = 0; i < nitems(mem); i++) {
7083 /* Find and sort the populated memory ranges */
7085 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
7086 if (lo & F_EDRAM0_ENABLE) {
7087 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
7088 avail[i].base = G_EDRAM0_BASE(hi) << 20;
7089 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
7093 if (lo & F_EDRAM1_ENABLE) {
7094 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
7095 avail[i].base = G_EDRAM1_BASE(hi) << 20;
7096 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
7100 if (lo & F_EXT_MEM_ENABLE) {
7101 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
7102 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
7103 avail[i].limit = avail[i].base +
7104 (G_EXT_MEM_SIZE(hi) << 20);
7105 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
7108 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
7109 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
7110 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
7111 avail[i].limit = avail[i].base +
7112 (G_EXT_MEM1_SIZE(hi) << 20);
7116 if (!i) /* no memory available */
7118 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
7120 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
7121 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
7122 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
7123 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7124 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
7125 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
7126 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
7127 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
7128 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
7130 /* the next few have explicit upper bounds */
7131 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
7132 md->limit = md->base - 1 +
7133 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
7134 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
7137 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
7138 md->limit = md->base - 1 +
7139 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
7140 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
7143 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7144 if (chip_id(sc) <= CHELSIO_T5)
7145 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
7147 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
7151 md->idx = nitems(region); /* hide it */
7155 #define ulp_region(reg) \
7156 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
7157 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
7159 ulp_region(RX_ISCSI);
7160 ulp_region(RX_TDDP);
7162 ulp_region(RX_STAG);
7164 ulp_region(RX_RQUDP);
7170 md->idx = nitems(region);
7173 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
7174 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
7177 if (sge_ctrl & F_VFIFO_ENABLE)
7178 size = G_DBVFIFO_SIZE(fifo_size);
7180 size = G_T6_DBVFIFO_SIZE(fifo_size);
7183 md->base = G_BASEADDR(t4_read_reg(sc,
7184 A_SGE_DBVFIFO_BADDR));
7185 md->limit = md->base + (size << 2) - 1;
7190 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
7193 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
7197 md->base = sc->vres.ocq.start;
7198 if (sc->vres.ocq.size)
7199 md->limit = md->base + sc->vres.ocq.size - 1;
7201 md->idx = nitems(region); /* hide it */
7204 /* add any address-space holes; there can be up to 3 */
7205 for (n = 0; n < i - 1; n++)
7206 if (avail[n].limit < avail[n + 1].base)
7207 (md++)->base = avail[n].limit;
7209 (md++)->base = avail[n].limit;
7212 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
7214 for (lo = 0; lo < i; lo++)
7215 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
7216 avail[lo].limit - 1);
7218 sbuf_printf(sb, "\n");
7219 for (i = 0; i < n; i++) {
7220 if (mem[i].idx >= nitems(region))
7221 continue; /* skip holes */
7223 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
7224 mem_region_show(sb, region[mem[i].idx], mem[i].base,
7228 sbuf_printf(sb, "\n");
7229 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
7230 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
7231 mem_region_show(sb, "uP RAM:", lo, hi);
7233 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
7234 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
7235 mem_region_show(sb, "uP Extmem2:", lo, hi);
7237 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
7238 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
7240 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
7241 (lo & F_PMRXNUMCHN) ? 2 : 1);
7243 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
7244 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
7245 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
7247 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
7248 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
7249 sbuf_printf(sb, "%u p-structs\n",
7250 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
7252 for (i = 0; i < 4; i++) {
7253 if (chip_id(sc) > CHELSIO_T5)
7254 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
7256 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
7258 used = G_T5_USED(lo);
7259 alloc = G_T5_ALLOC(lo);
7262 alloc = G_ALLOC(lo);
7264 /* For T6 these are MAC buffer groups */
7265 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
7268 for (i = 0; i < sc->chip_params->nchan; i++) {
7269 if (chip_id(sc) > CHELSIO_T5)
7270 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
7272 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
7274 used = G_T5_USED(lo);
7275 alloc = G_T5_ALLOC(lo);
7278 alloc = G_ALLOC(lo);
7280 /* For T6 these are MAC buffer groups */
7282 "\nLoopback %d using %u pages out of %u allocated",
7286 rc = sbuf_finish(sb);
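/*
 * An MPS TCAM entry is stored as an (x, y) pair per bit: y carries the
 * value to match and the pair together the care mask, with x = y = 0
 * meaning don't-care.  tcamxy2valmask() copies the Ethernet address bytes
 * out of y; the mask it hands back is assumed here to be x | y, the usual
 * decode for this style of TCAM.
 */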
7293 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
7297 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
7301 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
7303 struct adapter *sc = arg1;
7307 MPASS(chip_id(sc) <= CHELSIO_T5);
7309 rc = sysctl_wire_old_buffer(req, 0);
7313 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7318 "Idx Ethernet address Mask Vld Ports PF"
7319 " VF Replication P0 P1 P2 P3 ML");
7320 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7321 uint64_t tcamx, tcamy, mask;
7322 uint32_t cls_lo, cls_hi;
7323 uint8_t addr[ETHER_ADDR_LEN];
7325 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
7326 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
7329 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7330 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7331 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7332 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
7333 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
7334 addr[3], addr[4], addr[5], (uintmax_t)mask,
7335 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
7336 G_PORTMAP(cls_hi), G_PF(cls_lo),
7337 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
7339 if (cls_lo & F_REPLICATE) {
7340 struct fw_ldst_cmd ldst_cmd;
7342 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7343 ldst_cmd.op_to_addrspace =
7344 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7345 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7346 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7347 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7348 ldst_cmd.u.mps.rplc.fid_idx =
7349 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7350 V_FW_LDST_CMD_IDX(i));
7352 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7356 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7357 sizeof(ldst_cmd), &ldst_cmd);
7358 end_synchronized_op(sc, 0);
7361 sbuf_printf(sb, "%36d", rc);
7364 sbuf_printf(sb, " %08x %08x %08x %08x",
7365 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7366 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7367 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7368 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7371 sbuf_printf(sb, "%36s", "");
7373 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
7374 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
7375 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
7379 (void) sbuf_finish(sb);
7381 rc = sbuf_finish(sb);
7388 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
7390 struct adapter *sc = arg1;
7394 MPASS(chip_id(sc) > CHELSIO_T5);
7396 rc = sysctl_wire_old_buffer(req, 0);
7400 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7404 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
7405 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
7407 " P0 P1 P2 P3 ML\n");
7409 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
7410 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
7412 uint64_t tcamx, tcamy, val, mask;
7413 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
7414 uint8_t addr[ETHER_ADDR_LEN];
7416 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
7418 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
7420 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
7421 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7422 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7423 tcamy = G_DMACH(val) << 32;
7424 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7425 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7426 lookup_type = G_DATALKPTYPE(data2);
7427 port_num = G_DATAPORTNUM(data2);
7428 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7429 /* Inner header VNI */
7430 vniy = ((data2 & F_DATAVIDH2) << 23) |
7431 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7432 dip_hit = data2 & F_DATADIPHIT;
7437 vlan_vld = data2 & F_DATAVIDH2;
7438 ivlan = G_VIDL(val);
7441 ctl |= V_CTLXYBITSEL(1);
7442 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
7443 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
7444 tcamx = G_DMACH(val) << 32;
7445 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
7446 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
7447 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7448 /* Inner header VNI mask */
7449 vnix = ((data2 & F_DATAVIDH2) << 23) |
7450 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
7456 tcamxy2valmask(tcamx, tcamy, addr, &mask);
7458 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
7459 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
7461 if (lookup_type && lookup_type != M_DATALKPTYPE) {
7462 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7463 "%012jx %06x %06x - - %3c"
7464 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
7465 addr[1], addr[2], addr[3], addr[4], addr[5],
7466 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
7467 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7468 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7469 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7471 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
7472 "%012jx - - ", i, addr[0], addr[1],
7473 addr[2], addr[3], addr[4], addr[5],
7477 sbuf_printf(sb, "%4u Y ", ivlan);
7479 sbuf_printf(sb, " - N ");
7481 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
7482 lookup_type ? 'I' : 'O', port_num,
7483 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
7484 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
7485 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
7489 if (cls_lo & F_T6_REPLICATE) {
7490 struct fw_ldst_cmd ldst_cmd;
7492 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
7493 ldst_cmd.op_to_addrspace =
7494 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
7495 F_FW_CMD_REQUEST | F_FW_CMD_READ |
7496 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
7497 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
7498 ldst_cmd.u.mps.rplc.fid_idx =
7499 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
7500 V_FW_LDST_CMD_IDX(i));
7502 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7506 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
7507 sizeof(ldst_cmd), &ldst_cmd);
7508 end_synchronized_op(sc, 0);
7511 sbuf_printf(sb, "%72d", rc);
7514 sbuf_printf(sb, " %08x %08x %08x %08x"
7515 " %08x %08x %08x %08x",
7516 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
7517 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
7518 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
7519 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
7520 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
7521 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
7522 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
7523 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
7526 sbuf_printf(sb, "%72s", "");
7528 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
7529 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
7530 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
7531 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
7535 (void) sbuf_finish(sb);
7537 rc = sbuf_finish(sb);
7544 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
7546 struct adapter *sc = arg1;
7549 uint16_t mtus[NMTUS];
7551 rc = sysctl_wire_old_buffer(req, 0);
7555 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7559 t4_read_mtu_tbl(sc, mtus, NULL);
7561 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
7562 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
7563 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
7564 mtus[14], mtus[15]);
7566 rc = sbuf_finish(sb);
7573 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
7575 struct adapter *sc = arg1;
7578 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
7579 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
7580 static const char *tx_stats[MAX_PM_NSTATS] = {
7581 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
7582 "Tx FIFO wait", NULL, "Tx latency"
7584 static const char *rx_stats[MAX_PM_NSTATS] = {
7585 "Read:", "Write bypass:", "Write mem:", "Flush:",
7586 "Rx FIFO wait", NULL, "Rx latency"
7589 rc = sysctl_wire_old_buffer(req, 0);
7593 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7597 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
7598 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
7600 sbuf_printf(sb, " Tx pcmds Tx bytes");
7601 for (i = 0; i < 4; i++) {
7602 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7606 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
7607 for (i = 0; i < 4; i++) {
7608 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7612 if (chip_id(sc) > CHELSIO_T5) {
7614 "\n Total wait Total occupancy");
7615 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7617 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7621 MPASS(i < nitems(tx_stats));
7624 "\n Reads Total wait");
7625 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
7627 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
7631 rc = sbuf_finish(sb);
7638 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
7640 struct adapter *sc = arg1;
7643 struct tp_rdma_stats stats;
7645 rc = sysctl_wire_old_buffer(req, 0);
7649 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7653 mtx_lock(&sc->reg_lock);
7654 t4_tp_get_rdma_stats(sc, &stats, 0);
7655 mtx_unlock(&sc->reg_lock);
7657 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
7658 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
7660 rc = sbuf_finish(sb);
7667 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
7669 struct adapter *sc = arg1;
7672 struct tp_tcp_stats v4, v6;
7674 rc = sysctl_wire_old_buffer(req, 0);
7678 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7682 mtx_lock(&sc->reg_lock);
7683 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
7684 mtx_unlock(&sc->reg_lock);
7688 sbuf_printf(sb, "OutRsts: %20u %20u\n",
7689 v4.tcp_out_rsts, v6.tcp_out_rsts);
7690 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
7691 v4.tcp_in_segs, v6.tcp_in_segs);
7692 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
7693 v4.tcp_out_segs, v6.tcp_out_segs);
7694 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
7695 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
7697 rc = sbuf_finish(sb);
7704 sysctl_tids(SYSCTL_HANDLER_ARGS)
7706 struct adapter *sc = arg1;
7709 struct tid_info *t = &sc->tids;
7711 rc = sysctl_wire_old_buffer(req, 0);
7715 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7720 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
7725 sbuf_printf(sb, "TID range: ");
7726 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
7729 if (chip_id(sc) <= CHELSIO_T5) {
7730 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
7731 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
7733 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
7734 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
7738 sbuf_printf(sb, "0-%u, ", b - 1);
7739 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
7741 sbuf_printf(sb, "0-%u", t->ntids - 1);
7742 sbuf_printf(sb, ", in use: %u\n",
7743 atomic_load_acq_int(&t->tids_in_use));
7747 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
7748 t->stid_base + t->nstids - 1, t->stids_in_use);
7752 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
7753 t->ftid_base + t->nftids - 1);
7757 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
7758 t->etid_base + t->netids - 1);
7761 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
7762 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
7763 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
7765 rc = sbuf_finish(sb);
7772 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
7774 struct adapter *sc = arg1;
7777 struct tp_err_stats stats;
7779 rc = sysctl_wire_old_buffer(req, 0);
7783 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7787 mtx_lock(&sc->reg_lock);
7788 t4_tp_get_err_stats(sc, &stats, 0);
7789 mtx_unlock(&sc->reg_lock);
7791 if (sc->chip_params->nchan > 2) {
7792 sbuf_printf(sb, " channel 0 channel 1"
7793 " channel 2 channel 3\n");
7794 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
7795 stats.mac_in_errs[0], stats.mac_in_errs[1],
7796 stats.mac_in_errs[2], stats.mac_in_errs[3]);
7797 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
7798 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
7799 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
7800 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
7801 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
7802 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
7803 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
7804 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
7805 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
7806 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
7807 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
7808 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
7809 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
7810 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
7811 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
7812 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
7813 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
7814 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
7815 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
7816 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
7817 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
7819 sbuf_printf(sb, " channel 0 channel 1\n");
7820 sbuf_printf(sb, "macInErrs: %10u %10u\n",
7821 stats.mac_in_errs[0], stats.mac_in_errs[1]);
7822 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
7823 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
7824 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
7825 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
7826 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
7827 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
7828 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
7829 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
7830 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
7831 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
7832 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
7833 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
7834 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
7835 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
7838 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
7839 stats.ofld_no_neigh, stats.ofld_cong_defer);
7841 rc = sbuf_finish(sb);
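/*
 * The TP logic analyzer mask lives in the top 16 bits of
 * A_TP_DBG_LA_CONFIG, so the handler exposes it shifted down: a write of
 * 0x3 from userland becomes la_mask = 0x30000 and lands in bits 16 and 17
 * of the register via t4_set_reg_field().
 */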
7848 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
7850 struct adapter *sc = arg1;
7851 struct tp_params *tpp = &sc->params.tp;
7855 mask = tpp->la_mask >> 16;
7856 rc = sysctl_handle_int(oidp, &mask, 0, req);
7857 if (rc != 0 || req->newptr == NULL)
7861 tpp->la_mask = mask << 16;
7862 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
7874 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
7880 uint64_t mask = (1ULL << f->width) - 1;
7881 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
7882 ((uintmax_t)v >> f->start) & mask);
7884 if (line_size + len >= 79) {
7886 sbuf_printf(sb, "\n ");
7888 sbuf_printf(sb, "%s ", buf);
7889 line_size += len + 1;
7892 sbuf_printf(sb, "\n");
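/*
 * Field dictionaries for the TP logic analyzer.  Each entry is assumed to
 * be { name, start bit, width } (consistent with f->start/f->width in
 * field_desc_show() above).  Which dictionary applies to the second word
 * of an entry depends on the capture mode: tp_la_show3() selects tp_la2 or
 * tp_la1 based on bit 17 of the first word.
 */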
7895 static const struct field_desc tp_la0[] = {
7896 { "RcfOpCodeOut", 60, 4 },
7898 { "WcfState", 52, 4 },
7899 { "RcfOpcSrcOut", 50, 2 },
7900 { "CRxError", 49, 1 },
7901 { "ERxError", 48, 1 },
7902 { "SanityFailed", 47, 1 },
7903 { "SpuriousMsg", 46, 1 },
7904 { "FlushInputMsg", 45, 1 },
7905 { "FlushInputCpl", 44, 1 },
7906 { "RssUpBit", 43, 1 },
7907 { "RssFilterHit", 42, 1 },
7909 { "InitTcb", 31, 1 },
7910 { "LineNumber", 24, 7 },
7912 { "EdataOut", 22, 1 },
7914 { "CdataOut", 20, 1 },
7915 { "EreadPdu", 19, 1 },
7916 { "CreadPdu", 18, 1 },
7917 { "TunnelPkt", 17, 1 },
7918 { "RcfPeerFin", 16, 1 },
7919 { "RcfReasonOut", 12, 4 },
7920 { "TxCchannel", 10, 2 },
7921 { "RcfTxChannel", 8, 2 },
7922 { "RxEchannel", 6, 2 },
7923 { "RcfRxChannel", 5, 1 },
7924 { "RcfDataOutSrdy", 4, 1 },
7926 { "RxOoDvld", 2, 1 },
7927 { "RxCongestion", 1, 1 },
7928 { "TxCongestion", 0, 1 },
7932 static const struct field_desc tp_la1[] = {
7933 { "CplCmdIn", 56, 8 },
7934 { "CplCmdOut", 48, 8 },
7935 { "ESynOut", 47, 1 },
7936 { "EAckOut", 46, 1 },
7937 { "EFinOut", 45, 1 },
7938 { "ERstOut", 44, 1 },
7943 { "DataIn", 39, 1 },
7944 { "DataInVld", 38, 1 },
7946 { "RxBufEmpty", 36, 1 },
7948 { "RxFbCongestion", 34, 1 },
7949 { "TxFbCongestion", 33, 1 },
7950 { "TxPktSumSrdy", 32, 1 },
7951 { "RcfUlpType", 28, 4 },
7953 { "Ebypass", 26, 1 },
7955 { "Static0", 24, 1 },
7957 { "Cbypass", 22, 1 },
7959 { "CPktOut", 20, 1 },
7960 { "RxPagePoolFull", 18, 2 },
7961 { "RxLpbkPkt", 17, 1 },
7962 { "TxLpbkPkt", 16, 1 },
7963 { "RxVfValid", 15, 1 },
7964 { "SynLearned", 14, 1 },
7965 { "SetDelEntry", 13, 1 },
7966 { "SetInvEntry", 12, 1 },
7967 { "CpcmdDvld", 11, 1 },
7968 { "CpcmdSave", 10, 1 },
7969 { "RxPstructsFull", 8, 2 },
7970 { "EpcmdDvld", 7, 1 },
7971 { "EpcmdFlush", 6, 1 },
7972 { "EpcmdTrimPrefix", 5, 1 },
7973 { "EpcmdTrimPostfix", 4, 1 },
7974 { "ERssIp4Pkt", 3, 1 },
7975 { "ERssIp6Pkt", 2, 1 },
7976 { "ERssTcpUdpPkt", 1, 1 },
7977 { "ERssFceFipPkt", 0, 1 },
7981 static const struct field_desc tp_la2[] = {
7982 { "CplCmdIn", 56, 8 },
7983 { "MpsVfVld", 55, 1 },
7990 { "DataIn", 39, 1 },
7991 { "DataInVld", 38, 1 },
7993 { "RxBufEmpty", 36, 1 },
7995 { "RxFbCongestion", 34, 1 },
7996 { "TxFbCongestion", 33, 1 },
7997 { "TxPktSumSrdy", 32, 1 },
7998 { "RcfUlpType", 28, 4 },
8000 { "Ebypass", 26, 1 },
8002 { "Static0", 24, 1 },
8004 { "Cbypass", 22, 1 },
8006 { "CPktOut", 20, 1 },
8007 { "RxPagePoolFull", 18, 2 },
8008 { "RxLpbkPkt", 17, 1 },
8009 { "TxLpbkPkt", 16, 1 },
8010 { "RxVfValid", 15, 1 },
8011 { "SynLearned", 14, 1 },
8012 { "SetDelEntry", 13, 1 },
8013 { "SetInvEntry", 12, 1 },
8014 { "CpcmdDvld", 11, 1 },
8015 { "CpcmdSave", 10, 1 },
8016 { "RxPstructsFull", 8, 2 },
8017 { "EpcmdDvld", 7, 1 },
8018 { "EpcmdFlush", 6, 1 },
8019 { "EpcmdTrimPrefix", 5, 1 },
8020 { "EpcmdTrimPostfix", 4, 1 },
8021 { "ERssIp4Pkt", 3, 1 },
8022 { "ERssIp6Pkt", 2, 1 },
8023 { "ERssTcpUdpPkt", 1, 1 },
8024 { "ERssFceFipPkt", 0, 1 },
8029 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
8032 field_desc_show(sb, *p, tp_la0);
8036 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
8040 sbuf_printf(sb, "\n");
8041 field_desc_show(sb, p[0], tp_la0);
8042 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8043 field_desc_show(sb, p[1], tp_la0);
8047 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
8051 sbuf_printf(sb, "\n");
8052 field_desc_show(sb, p[0], tp_la0);
8053 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8054 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
8058 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
8060 struct adapter *sc = arg1;
8065 void (*show_func)(struct sbuf *, uint64_t *, int);
8067 rc = sysctl_wire_old_buffer(req, 0);
8071 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8075 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
8077 t4_tp_read_la(sc, buf, NULL);
8080 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
8083 show_func = tp_la_show2;
8087 show_func = tp_la_show3;
8091 show_func = tp_la_show;
8094 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
8095 (*show_func)(sb, p, i);
8097 rc = sbuf_finish(sb);
8104 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
8106 struct adapter *sc = arg1;
8109 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
8111 rc = sysctl_wire_old_buffer(req, 0);
8115 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8119 t4_get_chan_txrate(sc, nrate, orate);
8121 if (sc->chip_params->nchan > 2) {
8122 sbuf_printf(sb, " channel 0 channel 1"
8123 " channel 2 channel 3\n");
8124 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
8125 nrate[0], nrate[1], nrate[2], nrate[3]);
8126 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
8127 orate[0], orate[1], orate[2], orate[3]);
8129 sbuf_printf(sb, " channel 0 channel 1\n");
8130 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
8131 nrate[0], nrate[1]);
8132 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
8133 orate[0], orate[1]);
8136 rc = sbuf_finish(sb);
8143 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
8145 struct adapter *sc = arg1;
8150 rc = sysctl_wire_old_buffer(req, 0);
8154 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8158 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
8161 t4_ulprx_read_la(sc, buf);
8164 sbuf_printf(sb, " Pcmd Type Message"
8166 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
8167 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
8168 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
8171 rc = sbuf_finish(sb);
8178 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
8180 struct adapter *sc = arg1;
8184 MPASS(chip_id(sc) >= CHELSIO_T5);
8186 rc = sysctl_wire_old_buffer(req, 0);
8190 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8194 v = t4_read_reg(sc, A_SGE_STAT_CFG);
8195 if (G_STATSOURCE_T5(v) == 7) {
8198 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
8200 sbuf_printf(sb, "total %d, incomplete %d",
8201 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8202 t4_read_reg(sc, A_SGE_STAT_MATCH));
8203 } else if (mode == 1) {
8204 sbuf_printf(sb, "total %d, data overflow %d",
8205 t4_read_reg(sc, A_SGE_STAT_TOTAL),
8206 t4_read_reg(sc, A_SGE_STAT_MATCH));
8208 sbuf_printf(sb, "unknown mode %d", mode);
8211 rc = sbuf_finish(sb);
8218 sysctl_tc_params(SYSCTL_HANDLER_ARGS)
8220 struct adapter *sc = arg1;
8221 struct tx_cl_rl_params tc;
8223 int i, rc, port_id, mbps, gbps;
8225 rc = sysctl_wire_old_buffer(req, 0);
8229 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8233 port_id = arg2 >> 16;
8234 MPASS(port_id < sc->params.nports);
8235 MPASS(sc->port[port_id] != NULL);
8237 MPASS(i < sc->chip_params->nsched_cls);
8239 mtx_lock(&sc->tc_lock);
8240 tc = sc->port[port_id]->sched_params->cl_rl[i];
8241 mtx_unlock(&sc->tc_lock);
8243 if (tc.flags & TX_CLRL_ERROR) {
8244 sbuf_printf(sb, "error");
8248 if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) {
8249 /* XXX: top speed or actual link speed? */
8250 gbps = port_top_speed(sc->port[port_id]);
8251 sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps);
8252 } else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) {
8253 switch (tc.rateunit) {
8254 case SCHED_CLASS_RATEUNIT_BITS:
8255 mbps = tc.maxrate / 1000;
8256 gbps = tc.maxrate / 1000000;
8257 if (tc.maxrate == gbps * 1000000)
8258 sbuf_printf(sb, " %uGbps", gbps);
8259 else if (tc.maxrate == mbps * 1000)
8260 sbuf_printf(sb, " %uMbps", mbps);
8262 sbuf_printf(sb, " %uKbps", tc.maxrate);
8264 case SCHED_CLASS_RATEUNIT_PKTS:
8265 sbuf_printf(sb, " %upps", tc.maxrate);
8274 case SCHED_CLASS_MODE_CLASS:
8275 sbuf_printf(sb, " aggregate");
8277 case SCHED_CLASS_MODE_FLOW:
8278 sbuf_printf(sb, " per-flow");
8287 rc = sbuf_finish(sb);
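/*
 * unit_conv() below renders val/factor with trailing zeros trimmed from
 * the remainder.  One caveat: leading zeros of the fraction are lost, so
 * val = 32000 with factor = 1000000 (0.032) comes out as "0.32".  The
 * callers here use it only for coarse timer ticks.
 */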
8296 unit_conv(char *buf, size_t len, u_int val, u_int factor)
8298 u_int rem = val % factor;
8301 snprintf(buf, len, "%u", val / factor);
8303 while (rem % 10 == 0)
8305 snprintf(buf, len, "%u.%u", val / factor, rem);
8310 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
8312 struct adapter *sc = arg1;
8315 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8317 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8321 re = G_TIMERRESOLUTION(res);
8324 /* TCP timestamp tick */
8325 re = G_TIMESTAMPRESOLUTION(res);
8329 re = G_DELAYEDACKRESOLUTION(res);
8335 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
8337 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
8341 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
8343 struct adapter *sc = arg1;
8344 u_int res, dack_re, v;
8345 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8347 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
8348 dack_re = G_DELAYEDACKRESOLUTION(res);
8349 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
8351 return (sysctl_handle_int(oidp, &v, 0, req));
8355 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
8357 struct adapter *sc = arg1;
8360 u_long tp_tick_us, v;
8361 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
8363 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
8364 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
8365 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
8366 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
8368 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
8369 tp_tick_us = (cclk_ps << tre) / 1000000;
8371 if (reg == A_TP_INIT_SRTT)
8372 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
8374 v = tp_tick_us * t4_read_reg(sc, reg);
8376 return (sysctl_handle_long(oidp, &v, 0, req));
8380 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
8381 * passed to this function.
8384 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
8386 struct adapter *sc = arg1;
8390 MPASS(idx >= 0 && idx <= 24);
8392 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
8394 return (sysctl_handle_int(oidp, &v, 0, req));
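/*
 * The sixteen TCP backoff indices are packed four per register starting at
 * A_TP_TCP_BACKOFF_REG0, one per byte, each masked to the field width with
 * M_TIMERBACKOFFINDEX0.  For index idx the register is REG0 + (idx & ~3)
 * and the shift is (idx & 3) * 8; idx = 5, for example, reads the field at
 * bit 8 of the second register.
 */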
8398 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
8400 struct adapter *sc = arg1;
8404 MPASS(idx >= 0 && idx < 16);
8406 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
8407 shift = (idx & 3) << 3;
8408 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
8410 return (sysctl_handle_int(oidp, &v, 0, req));
8414 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
8416 struct vi_info *vi = arg1;
8417 struct adapter *sc = vi->pi->adapter;
8419 struct sge_ofld_rxq *ofld_rxq;
8422 idx = vi->ofld_tmr_idx;
8424 rc = sysctl_handle_int(oidp, &idx, 0, req);
8425 if (rc != 0 || req->newptr == NULL)
8428 if (idx < 0 || idx >= SGE_NTIMERS)
8431 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8436 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
8437 for_each_ofld_rxq(vi, i, ofld_rxq) {
8438 #ifdef atomic_store_rel_8
8439 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
8441 ofld_rxq->iq.intr_params = v;
8444 vi->ofld_tmr_idx = idx;
8446 end_synchronized_op(sc, LOCK_HELD);
8451 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
8453 struct vi_info *vi = arg1;
8454 struct adapter *sc = vi->pi->adapter;
8457 idx = vi->ofld_pktc_idx;
8459 rc = sysctl_handle_int(oidp, &idx, 0, req);
8460 if (rc != 0 || req->newptr == NULL)
8463 if (idx < -1 || idx >= SGE_NCOUNTERS)
8466 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8471 if (vi->flags & VI_INIT_DONE)
8472 rc = EBUSY; /* cannot be changed once the queues are created */
8474 vi->ofld_pktc_idx = idx;
8476 end_synchronized_op(sc, LOCK_HELD);
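/*
 * Three views of the filter mode.  "mode" is the ioctl-level T4_FILTER_*
 * bitmask, "fconf" the compressed-header selection in TP_VLAN_PRI_MAP, and
 * "iconf" the VNIC-vs-outer-VLAN choice in TP_INGRESS_CONFIG.  The
 * converters below translate between them over the supported bits, and
 * check_fspec_against_fconf_iconf() rejects any filter spec that needs a
 * header field the current mode does not capture.
 */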
8482 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
8486 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
8487 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
8489 if (fconf & F_FRAGMENTATION)
8490 mode |= T4_FILTER_IP_FRAGMENT;
8492 if (fconf & F_MPSHITTYPE)
8493 mode |= T4_FILTER_MPS_HIT_TYPE;
8495 if (fconf & F_MACMATCH)
8496 mode |= T4_FILTER_MAC_IDX;
8498 if (fconf & F_ETHERTYPE)
8499 mode |= T4_FILTER_ETH_TYPE;
8501 if (fconf & F_PROTOCOL)
8502 mode |= T4_FILTER_IP_PROTO;
8505 mode |= T4_FILTER_IP_TOS;
8508 mode |= T4_FILTER_VLAN;
8510 if (fconf & F_VNIC_ID) {
8511 mode |= T4_FILTER_VNIC;
8513 mode |= T4_FILTER_IC_VNIC;
8517 mode |= T4_FILTER_PORT;
8520 mode |= T4_FILTER_FCoE;
8526 mode_to_fconf(uint32_t mode)
8530 if (mode & T4_FILTER_IP_FRAGMENT)
8531 fconf |= F_FRAGMENTATION;
8533 if (mode & T4_FILTER_MPS_HIT_TYPE)
8534 fconf |= F_MPSHITTYPE;
8536 if (mode & T4_FILTER_MAC_IDX)
8537 fconf |= F_MACMATCH;
8539 if (mode & T4_FILTER_ETH_TYPE)
8540 fconf |= F_ETHERTYPE;
8542 if (mode & T4_FILTER_IP_PROTO)
8543 fconf |= F_PROTOCOL;
8545 if (mode & T4_FILTER_IP_TOS)
8548 if (mode & T4_FILTER_VLAN)
8551 if (mode & T4_FILTER_VNIC)
8554 if (mode & T4_FILTER_PORT)
8557 if (mode & T4_FILTER_FCoE)
8564 mode_to_iconf(uint32_t mode)
8567 if (mode & T4_FILTER_IC_VNIC)
8572 static int check_fspec_against_fconf_iconf(struct adapter *sc,
8573 struct t4_filter_specification *fs)
8575 struct tp_params *tpp = &sc->params.tp;
8578 if (fs->val.frag || fs->mask.frag)
8579 fconf |= F_FRAGMENTATION;
8581 if (fs->val.matchtype || fs->mask.matchtype)
8582 fconf |= F_MPSHITTYPE;
8584 if (fs->val.macidx || fs->mask.macidx)
8585 fconf |= F_MACMATCH;
8587 if (fs->val.ethtype || fs->mask.ethtype)
8588 fconf |= F_ETHERTYPE;
8590 if (fs->val.proto || fs->mask.proto)
8591 fconf |= F_PROTOCOL;
8593 if (fs->val.tos || fs->mask.tos)
8596 if (fs->val.vlan_vld || fs->mask.vlan_vld)
8599 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
8601 if (tpp->ingress_config & F_VNIC)
8605 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
8607 if ((tpp->ingress_config & F_VNIC) == 0)
8611 if (fs->val.iport || fs->mask.iport)
8614 if (fs->val.fcoe || fs->mask.fcoe)
8617 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
8624 get_filter_mode(struct adapter *sc, uint32_t *mode)
8626 struct tp_params *tpp = &sc->params.tp;
8629 * We trust the cached values of the relevant TP registers. This means
8630 * things work reliably only if writes to those registers are always via
8631 * t4_set_filter_mode.
8633 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);
8639 set_filter_mode(struct adapter *sc, uint32_t mode)
8641 struct tp_params *tpp = &sc->params.tp;
8642 uint32_t fconf, iconf;
8645 iconf = mode_to_iconf(mode);
8646 if ((iconf ^ tpp->ingress_config) & F_VNIC) {
8648 * For now we just complain if A_TP_INGRESS_CONFIG is not
8649 * already set to the correct value for the requested filter
8650 * mode. It's not clear if it's safe to write to this register
8651 * on the fly. (And we trust the cached value of the register).
8656 fconf = mode_to_fconf(mode);
8658 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
8663 if (sc->tids.ftids_in_use > 0) {
8669 if (uld_active(sc, ULD_TOM)) {
8675 rc = -t4_set_filter_mode(sc, fconf, true);
8677 end_synchronized_op(sc, LOCK_HELD);
8681 static inline uint64_t
8682 get_filter_hits(struct adapter *sc, uint32_t fid)
8686 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
8687 (fid + sc->tids.ftid_base) * TCB_SIZE;
8692 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
8693 return (be64toh(hits));
8697 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
8698 return (be32toh(hits));
8703 get_filter(struct adapter *sc, struct t4_filter *t)
8705 int i, rc, nfilters = sc->tids.nftids;
8706 struct filter_entry *f;
8708 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
8713 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
8714 t->idx >= nfilters) {
8715 t->idx = 0xffffffff;
8719 f = &sc->tids.ftid_tab[t->idx];
8720 for (i = t->idx; i < nfilters; i++, f++) {
8723 t->l2tidx = f->l2t ? f->l2t->idx : 0;
8724 t->smtidx = f->smtidx;
8726 t->hits = get_filter_hits(sc, t->idx);
8728 t->hits = UINT64_MAX;
8735 t->idx = 0xffffffff;
8737 end_synchronized_op(sc, LOCK_HELD);
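/*
 * Filter programming is asynchronous.  set_filter() validates the spec
 * against the global filter mode, allocates ftid_tab on first use, marks
 * the entry pending and ships a FW_FILTER_WR via set_filter_wr(), then
 * sleeps on ftid_tab until the firmware's reply (see t4_filter_rpl())
 * clears the pending flag and wakes it.  del_filter() follows the same
 * pattern with del_filter_wr().
 */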
8742 set_filter(struct adapter *sc, struct t4_filter *t)
8744 unsigned int nfilters, nports;
8745 struct filter_entry *f;
8748 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
8752 nfilters = sc->tids.nftids;
8753 nports = sc->params.nports;
8755 if (nfilters == 0) {
8760 if (t->idx >= nfilters) {
8765 /* Validate against the global filter mode and ingress config */
8766 rc = check_fspec_against_fconf_iconf(sc, &t->fs);
8770 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
8775 if (t->fs.val.iport >= nports) {
8780 /* Can't specify an iq if not steering to it */
8781 if (!t->fs.dirsteer && t->fs.iq) {
8786 /* IPv6 filter idx must be 4-aligned */
8787 if (t->fs.type == 1 &&
8788 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
8793 if (!(sc->flags & FULL_INIT_DONE) &&
8794 ((rc = adapter_full_init(sc)) != 0))
8797 if (sc->tids.ftid_tab == NULL) {
8798 KASSERT(sc->tids.ftids_in_use == 0,
8799 ("%s: no memory allocated but filters_in_use > 0",
8802 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
8803 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
8804 if (sc->tids.ftid_tab == NULL) {
8808 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
8811 for (i = 0; i < 4; i++) {
8812 f = &sc->tids.ftid_tab[t->idx + i];
8814 if (f->pending || f->valid) {
8823 if (t->fs.type == 0)
8827 f = &sc->tids.ftid_tab[t->idx];
8830 rc = set_filter_wr(sc, t->idx);
8832 end_synchronized_op(sc, 0);
8835 mtx_lock(&sc->tids.ftid_lock);
8837 if (f->pending == 0) {
8838 rc = f->valid ? 0 : EIO;
8842 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
8843 PCATCH, "t4setfw", 0)) {
8848 mtx_unlock(&sc->tids.ftid_lock);
8854 del_filter(struct adapter *sc, struct t4_filter *t)
8856 unsigned int nfilters;
8857 struct filter_entry *f;
8860 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
8864 nfilters = sc->tids.nftids;
8866 if (nfilters == 0) {
8871 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
8872 t->idx >= nfilters) {
8877 if (!(sc->flags & FULL_INIT_DONE)) {
8882 f = &sc->tids.ftid_tab[t->idx];
8894 t->fs = f->fs; /* extra info for the caller */
8895 rc = del_filter_wr(sc, t->idx);
8899 end_synchronized_op(sc, 0);
8902 mtx_lock(&sc->tids.ftid_lock);
8904 if (f->pending == 0) {
8905 rc = f->valid ? EIO : 0;
8909 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
8910 PCATCH, "t4delfw", 0)) {
8915 mtx_unlock(&sc->tids.ftid_lock);
8922 clear_filter(struct filter_entry *f)
8925 t4_l2t_release(f->l2t);
8927 bzero(f, sizeof (*f));
8931 set_filter_wr(struct adapter *sc, int fidx)
8933 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
8934 struct fw_filter_wr *fwr;
8935 unsigned int ftid, vnic_vld, vnic_vld_mask;
8936 struct wrq_cookie cookie;
8938 ASSERT_SYNCHRONIZED_OP(sc);
8940 if (f->fs.newdmac || f->fs.newvlan) {
8941 /* This filter needs an L2T entry; allocate one. */
8942 f->l2t = t4_l2t_alloc_switching(sc->l2t);
8945 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
8947 t4_l2t_release(f->l2t);
8953 /* Already validated against fconf, iconf */
8954 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
8955 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
8956 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
8960 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
8965 ftid = sc->tids.ftid_base + fidx;
8967 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
8970 bzero(fwr, sizeof(*fwr));
8972 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
8973 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
8975 htobe32(V_FW_FILTER_WR_TID(ftid) |
8976 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
8977 V_FW_FILTER_WR_NOREPLY(0) |
8978 V_FW_FILTER_WR_IQ(f->fs.iq));
8979 fwr->del_filter_to_l2tix =
8980 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
8981 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
8982 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
8983 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
8984 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
8985 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
8986 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
8987 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
8988 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
8989 f->fs.newvlan == VLAN_REWRITE) |
8990 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
8991 f->fs.newvlan == VLAN_REWRITE) |
8992 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
8993 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
8994 V_FW_FILTER_WR_PRIO(f->fs.prio) |
8995 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
8996 fwr->ethtype = htobe16(f->fs.val.ethtype);
8997 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
8998 fwr->frag_to_ovlan_vldm =
8999 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
9000 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
9001 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
9002 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
9003 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
9004 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
9006 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
9007 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
9008 fwr->maci_to_matchtypem =
9009 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
9010 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
9011 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
9012 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
9013 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
9014 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
9015 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
9016 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
9017 fwr->ptcl = f->fs.val.proto;
9018 fwr->ptclm = f->fs.mask.proto;
9019 fwr->ttyp = f->fs.val.tos;
9020 fwr->ttypm = f->fs.mask.tos;
9021 fwr->ivlan = htobe16(f->fs.val.vlan);
9022 fwr->ivlanm = htobe16(f->fs.mask.vlan);
9023 fwr->ovlan = htobe16(f->fs.val.vnic);
9024 fwr->ovlanm = htobe16(f->fs.mask.vnic);
9025 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
9026 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
9027 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
9028 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
9029 fwr->lp = htobe16(f->fs.val.dport);
9030 fwr->lpm = htobe16(f->fs.mask.dport);
9031 fwr->fp = htobe16(f->fs.val.sport);
9032 fwr->fpm = htobe16(f->fs.mask.sport);
9034 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
9037 sc->tids.ftids_in_use++;
9039 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
9044 del_filter_wr(struct adapter *sc, int fidx)
9046 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
9047 struct fw_filter_wr *fwr;
9049 struct wrq_cookie cookie;
9051 ftid = sc->tids.ftid_base + fidx;
9053 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
9056 bzero(fwr, sizeof (*fwr));
9058 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
9061 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
9066 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
9068 struct adapter *sc = iq->adapter;
9069 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
9070 unsigned int idx = GET_TID(rpl);
9072 struct filter_entry *f;
9074 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
9076 MPASS(iq == &sc->sge.fwq);
9077 MPASS(is_ftid(sc, idx));
9079 idx -= sc->tids.ftid_base;
9080 f = &sc->tids.ftid_tab[idx];
9081 rc = G_COOKIE(rpl->cookie);
9083 mtx_lock(&sc->tids.ftid_lock);
9084 if (rc == FW_FILTER_WR_FLT_ADDED) {
9085 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
9087 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
9088 f->pending = 0; /* asynchronous setup completed */
9091 if (rc != FW_FILTER_WR_FLT_DELETED) {
9092 /* Add or delete failed; report the error. */
9094 "filter %u setup failed with error %u\n",
9099 sc->tids.ftids_in_use--;
9101 wakeup(&sc->tids.ftid_tab);
9102 mtx_unlock(&sc->tids.ftid_lock);
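/*
 * t4_filter_rpl() runs in response to the firmware's CPL_SET_TCB_RPL.  The
 * cookie distinguishes a successful add (record the SMT index from the old
 * TCB value and clear pending) from a successful delete (clear the entry
 * and drop ftids_in_use); any other code is reported and the entry
 * cleared.  The final wakeup() unblocks whichever of set_filter() or
 * del_filter() is sleeping on the table.
 */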
9108 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
9111 MPASS(iq->set_tcb_rpl != NULL);
9112 return (iq->set_tcb_rpl(iq, rss, m));
9116 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
9119 MPASS(iq->l2t_write_rpl != NULL);
9120 return (iq->l2t_write_rpl(iq, rss, m));
9124 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
9128 if (cntxt->cid > M_CTXTQID)
9131 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
9132 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
9135 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
9139 if (sc->flags & FW_OK) {
9140 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
9147 * Read via firmware failed or wasn't even attempted. Read directly via
9150 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
9152 end_synchronized_op(sc, 0);
9157 load_fw(struct adapter *sc, struct t4_data *fw)
9162 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
9167 * The firmware, with the sole exception of the memory parity error
9168 * handler, runs from memory and not flash. It is almost always safe to
9169 * install a new firmware on a running system. Just set bit 1 in
9170 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
9172 if (sc->flags & FULL_INIT_DONE &&
9173 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
9178 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
9179 if (fw_data == NULL) {
9184 rc = copyin(fw->data, fw_data, fw->len);
9186 rc = -t4_load_fw(sc, fw_data, fw->len);
9188 free(fw_data, M_CXGBE);
9190 end_synchronized_op(sc, 0);
9195 load_cfg(struct adapter *sc, struct t4_data *cfg)
9198 uint8_t *cfg_data = NULL;
9200 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9204 if (cfg->len == 0) {
9206 rc = -t4_load_cfg(sc, NULL, 0);
9210 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
9211 if (cfg_data == NULL) {
9216 rc = copyin(cfg->data, cfg_data, cfg->len);
9218 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
9220 free(cfg_data, M_CXGBE);
9222 end_synchronized_op(sc, 0);
9227 load_boot(struct adapter *sc, struct t4_bootrom *br)
9230 uint8_t *br_data = NULL;
9233 if (br->len > 1024 * 1024)
9236 if (br->pf_offset == 0) {
9238 if (br->pfidx_addr > 7)
9240 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
9241 A_PCIE_PF_EXPROM_OFST)));
9242 } else if (br->pf_offset == 1) {
9244 offset = G_OFFSET(br->pfidx_addr);
9249 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
9255 rc = -t4_load_boot(sc, NULL, offset, 0);
9259 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
9260 if (br_data == NULL) {
9265 rc = copyin(br->data, br_data, br->len);
9267 rc = -t4_load_boot(sc, br_data, offset, br->len);
9269 free(br_data, M_CXGBE);
9271 end_synchronized_op(sc, 0);
9276 load_bootcfg(struct adapter *sc, struct t4_data *bc)
9279 uint8_t *bc_data = NULL;
9281 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9287 rc = -t4_load_bootcfg(sc, NULL, 0);
9291 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
9292 if (bc_data == NULL) {
9297 rc = copyin(bc->data, bc_data, bc->len);
9299 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
9301 free(bc_data, M_CXGBE);
9303 end_synchronized_op(sc, 0);
9308 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
9311 struct cudbg_init *cudbg;
9314 /* buf is large; don't block if no memory is available */
9315 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
9319 handle = cudbg_alloc_handle();
9320 if (handle == NULL) {
9325 cudbg = cudbg_get_init(handle);
9327 cudbg->print = (cudbg_print_cb)printf;
9330 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
9331 __func__, dump->wr_flash, dump->len, dump->data);
9335 cudbg->use_flash = 1;
9336 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
9337 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
9339 rc = cudbg_collect(handle, buf, &dump->len);
9343 rc = copyout(buf, dump->data, dump->len);
9345 cudbg_free_handle(handle);
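/*
 * Card memory reads are bounced through a bounded kernel buffer: the
 * requested range is validated first and then copied out in
 * MAX_READ_BUF_SIZE (128KB) chunks via memory window 2, so an arbitrarily
 * large request never holds more than 128KB of kernel memory at a time.
 */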
9350 #define MAX_READ_BUF_SIZE (128 * 1024)
9352 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
9354 uint32_t addr, remaining, n;
9359 rc = validate_mem_range(sc, mr->addr, mr->len);
9363 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
9365 remaining = mr->len;
9366 dst = (void *)mr->data;
9369 n = min(remaining, MAX_READ_BUF_SIZE);
9370 read_via_memwin(sc, 2, addr, buf, n);
9372 rc = copyout(buf, dst, n);
9384 #undef MAX_READ_BUF_SIZE
9387 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
9391 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
9394 if (i2cd->len > sizeof(i2cd->data))
9397 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
9400 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
9401 i2cd->offset, i2cd->len, &i2cd->data[0]);
9402 end_synchronized_op(sc, 0);

int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}

int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	struct ifnet *ifp;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	PORT_LOCK(pi);
	build_medialist(pi, &pi->media);
	PORT_UNLOCK(pi);
	vi = &pi->vi[0];
	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
		init_l1cfg(pi);
		end_synchronized_op(sc, LOCK_HELD);
	}

	ifp = vi->ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
		    port_top_speed(pi), mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct port_info *pi)
{
	struct vi_info *vi;
	struct ifnet *ifp;
	struct link_config *lc;
	int v;

	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)
			continue;

		lc = &pi->link_cfg;
		if (lc->link_ok) {
			ifp->if_baudrate = IF_Mbps(lc->speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}

void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}
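
/*
 * Example (illustrative only, not part of this driver): a caller-supplied
 * callback for t4_iterate().  Per the note above, the callback must not
 * assume anything about the adapter's state.  "count_adapters" is a
 * made-up name.
 *
 *	static void
 *	count_adapters(struct adapter *sc __unused, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 */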

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = t4_get_regs_len(sc);
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];
		if (pi == NULL)
			return (EIO);

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		mtx_unlock(&sc->reg_lock);

		/*
		 * Since this command accepts a port, clear stats for
		 * all VIs on this port.
		 */
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE) {
				struct sge_rxq *rxq;
				struct sge_txq *txq;
				struct sge_wrq *wrq;

				for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
					rxq->lro.lro_queued = 0;
					rxq->lro.lro_flushed = 0;
#endif
					rxq->rxcsum = 0;
					rxq->vlan_extraction = 0;
				}

				for_each_txq(vi, i, txq) {
					txq->txcsum = 0;
					txq->tso_wrs = 0;
					txq->vlan_insertion = 0;
					txq->imm_wrs = 0;
					txq->sgl_wrs = 0;
					txq->txpkt_wrs = 0;
					txq->txpkts0_wrs = 0;
					txq->txpkts1_wrs = 0;
					txq->txpkts0_pkts = 0;
					txq->txpkts1_pkts = 0;
					mp_ring_reset_stats(txq->r);
				}
#ifdef TCP_OFFLOAD
				/* nothing to clear for each ofld_rxq */

				for_each_ofld_txq(vi, i, wrq) {
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
#endif
				if (IS_MAIN_VI(vi)) {
					wrq = &sc->sge.ctrlq[pi->port_id];
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
			}
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_LOAD_CFG:
		rc = load_cfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_LOAD_BOOT:
		rc = load_boot(sc, (struct t4_bootrom *)data);
		break;
	case CHELSIO_T4_LOAD_BOOTCFG:
		rc = load_bootcfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_CUDBG_DUMP:
		rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
		break;
	default:
		rc = ENOTTY;
	}

	return (rc);
}
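
/*
 * Example (illustrative sketch, not part of this file): reading a register
 * from userland through CHELSIO_T4_GETREG above.  The device path and the
 * register address are assumptions; any 4-byte-aligned address inside the
 * BAR works.  struct t4_reg's addr/size/val fields are the ones used above.
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */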

void
t4_db_full(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_db_dropped(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

#ifdef TCP_OFFLOAD
static int
toe_capability(struct vi_info *vi, int enable)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
			/* TOE is already enabled. */
			return (0);
		}

		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(vi->flags & VI_INIT_DONE)) {
			rc = vi_full_init(vi);
			if (rc)
				return (rc);
		}
		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
			rc = vi_full_init(&pi->vi[0]);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;
		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
#endif
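
/*
 * Usage note (an assumption here, consistent with the EAGAIN message
 * above): TOE is toggled per interface from userland once the TOM module
 * is loaded, e.g.:
 *
 *	# kldload t4_tom
 *	# ifconfig cxgbe0 toe
 */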

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
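
/*
 * Example (illustrative sketch, not this driver's actual TOM code): how a
 * ULD module plugs into the list above.  The callback names are made up;
 * uld_id, activate, and deactivate are the struct uld_info members this
 * file actually uses.
 *
 *	static struct uld_info my_uld = {
 *		.uld_id = ULD_TOM,
 *		.activate = my_activate,
 *		.deactivate = my_deactivate,
 *	};
 *
 *	error = t4_register_uld(&my_uld);	(at module load)
 *	error = t4_unregister_uld(&my_uld);	(at module unload)
 */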

/*
 * t  = ptr to tunable.
 * nc = number of CPUs.
 * c  = compiled in default for that tunable.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
	int nq;

	if (*t > 0)
		return;
	nq = *t < 0 ? -*t : c;
	*t = min(nq, nc);
}

/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1) {
#ifdef RSS
		t4_ntxq10g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G);
#endif
	}

	if (t4_ntxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_ntxq1g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G);
#endif
	}

	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);

	if (t4_nrxq10g < 1) {
#ifdef RSS
		t4_nrxq10g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G);
#endif
	}

	if (t4_nrxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_nrxq1g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G);
#endif
	}

	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#ifdef TCP_OFFLOAD
	calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G);
	calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G);
	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
	calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G);
	calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G);
	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;

	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}

	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
		t4_tmr_idx_ofld = TMR_IDX_OFLD;

	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;

	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
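
/*
 * Example (an assumption: tunable names per the hw.cxgbe knobs this driver
 * registers): overriding these defaults from /boot/loader.conf instead of
 * relying on tweak_tunables():
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="2048"
 */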

#ifdef DDB
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);

	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];

		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}

static void
t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)
		return;

	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			return;

		if (e.timestamp == 0)
			return;

		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d %15ju %8s %8s ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}

static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);

DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t;
	bool valid;

	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}

DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int radix, tid, t;
	bool valid;

	valid = false;
	radix = db_radix;
	db_radix = 10;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
		if (t == tNUMBER) {
			tid = db_tok_number;
			valid = true;
		}
	}
	db_radix = radix;
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}
#endif
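
/*
 * Example DDB session for the commands above (assuming a t4nex0 nexus is
 * attached; the tid value is arbitrary):
 *
 *	db> show t4 devlog t4nex0
 *	db> show t4 tcb t4nex0 1234
 */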

static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
			t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries = 0;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}
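
/*
 * Usage note (per cxgbe(4); an assumption here, not stated in this file):
 * all of the drivers declared below are built into a single module and
 * loaded together:
 *
 *	# kldload if_cxgbe
 */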

static devclass_t t4_devclass, t5_devclass, t6_devclass;
static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
MODULE_VERSION(cc, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
MODULE_VERSION(vcc, 1);