/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#include <net/rss_config.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#endif
#include <crypto/rijndael/rijndael.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_mp_ring.h"
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_child_location_str(device_t, device_t, char *, size_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD(bus_child_location_str, t4_child_location_str),

        DEVMETHOD(t4_is_main_ready,     t4_ready),
        DEVMETHOD(t4_read_port_device,  t4_read_port_device),

        DEVMETHOD_END
};
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)
};
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
        DEVMETHOD(device_probe,         cxgbe_probe),
        DEVMETHOD(device_attach,        cxgbe_attach),
        DEVMETHOD(device_detach,        cxgbe_detach),
        DEVMETHOD_END
};
static driver_t cxgbe_driver = {
        "cxgbe",
        cxgbe_methods,
        sizeof(struct port_info)
};
/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
        DEVMETHOD(device_probe,         vcxgbe_probe),
        DEVMETHOD(device_attach,        vcxgbe_attach),
        DEVMETHOD(device_detach,        vcxgbe_detach),
        DEVMETHOD_END
};
static driver_t vcxgbe_driver = {
        "vcxgbe",
        vcxgbe_methods,
        sizeof(struct vi_info)
};
static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = t4_ioctl,
        .d_name = "t4nex",
};
/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD(bus_child_location_str, t4_child_location_str),

        DEVMETHOD(t4_is_main_ready,     t4_ready),
        DEVMETHOD(t4_read_port_device,  t4_read_port_device),

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};
/* T5 port (cxl) interface */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
        "vcxl",
        vcxgbe_methods,
        sizeof(struct vi_info)
};
/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
        DEVMETHOD(device_probe,         t6_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD(bus_child_location_str, t4_child_location_str),

        DEVMETHOD(t4_is_main_ready,     t4_ready),
        DEVMETHOD(t4_read_port_device,  t4_read_port_device),

        DEVMETHOD_END
};
static driver_t t6_driver = {
        "t6nex",
        t6_methods,
        sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
        "cc",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
        "vcc",
        vcxgbe_methods,
        sizeof(struct vi_info)
};
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
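/*
 * A minimal sketch of that order from a hypothetical caller (the lock names
 * and the ADAPTER_LOCK/ADAPTER_UNLOCK macros are real; the surrounding code
 * is illustrative only):
 *
 *      sx_slock(&t4_list_lock);
 *      ADAPTER_LOCK(sc);
 *      sx_slock(&t4_uld_list_lock);
 *      ... work that needs all three locks ...
 *      sx_sunlock(&t4_uld_list_lock);
 *      ADAPTER_UNLOCK(sc);
 *      sx_sunlock(&t4_list_lock);
 */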
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;

static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it
 * should provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
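/*
 * For example (hypothetical values), these tunables can be set from
 * /boot/loader.conf before the driver is loaded:
 *
 *      hw.cxgbe.ntxq="8"
 *      hw.cxgbe.nrxq="8"
 *      hw.cxgbe.num_vis="2"
 */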
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe(4) parameters");
SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD, 0, "cxgbe(4) T5+ parameters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD, 0, "cxgbe(4) TOE parameters");

/*
 * Number of queues for tx and rx, NIC and offload.
 */
static int t4_ntxq = -NTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
    "Number of TX queues per port");
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);      /* Old name, undocumented */

static int t4_nrxq = -NRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
    "Number of RX queues per port");
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);      /* Old name, undocumented */
static int t4_ntxq_vi = -NTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
    "Number of TX queues per VI");

static int t4_nrxq_vi = -NRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
    "Number of RX queues per VI");

static int t4_rsrv_noflowq = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
    0, "Reserve TX queue 0 of each VI for non-flowid packets");
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
    "Number of offload TX queues per port");

static int t4_nofldrxq = -NOFLDRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
    "Number of offload RX queues per port");

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
    "Number of offload TX queues per VI");

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
    "Number of offload RX queues per VI");

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
    &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
    &t4_pktc_idx_ofld, 0, "Holdoff packet counter index for offload queues");
/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
    &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
    &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
    &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
    &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
    &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
    &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");
/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff, CTLFLAG_RD, 0,
    "cxgbe(4) TOE retransmit backoff values");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[0], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[1], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[2], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[3], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[4], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[5], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[6], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[7], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[8], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[9], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[10], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[11], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[12], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[13], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[14], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[15], 0, "");
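/*
 * Example (hypothetical values): override the first three backoff slots and
 * leave the rest at the chip/fw default:
 *
 *      hw.cxgbe.toe.rexmt_backoff.0="1"
 *      hw.cxgbe.toe.rexmt_backoff.1="2"
 *      hw.cxgbe.toe.rexmt_backoff.2="4"
 */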
static int t4_nnmtxq_vi = -NNMTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
    "Number of netmap TX queues per VI");

static int t4_nnmrxq_vi = -NNMRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
    "Number of netmap RX queues per VI");
/*
 * Holdoff parameters for ports.
 */
int t4_tmr_idx = TMR_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
    0, "Holdoff timer index");
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);     /* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
    0, "Holdoff packet counter index");
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);     /* Old name */
/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
    "Number of descriptors in each TX queue");

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
    "Number of descriptors in each RX queue");
/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
    0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");
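/*
 * Worked example: the default INTR_MSIX | INTR_MSI | INTR_INTX is
 * 4 + 2 + 1 = 7 and allows all three interrupt types.  Setting
 * hw.cxgbe.interrupt_types="4" would restrict the driver to MSI-X only.
 */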
/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF      "default"
#define BUILTIN_CF      "built-in"
#define FLASH_CF        "flash"
#define UWIRE_CF        "uwire"
#define FPGA_CF         "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
    sizeof(t4_cfg_file), "Firmware configuration file");
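/*
 * For example, hw.cxgbe.config_file="flash" selects the special FLASH_CF
 * name above, which tells the driver to use the configuration file stored
 * in the card's flash instead of the default configuration.
 */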
/*
 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg
 * respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
 *                 negotiated settings will override rx_pause/tx_pause.
 *                 Otherwise rx_pause/tx_pause are applied forcibly.
 */
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
    &t4_pause_settings, 0,
    "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
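/*
 * Worked example: the default PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG has bits
 * 0-2 all set, i.e. hw.cxgbe.pause_settings="7".  A value of "1" would heed
 * incoming PAUSE frames but never transmit any, with no autonegotiation of
 * flow control.
 */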
/*
 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
 * -1 to run with the firmware default.  Same as FEC_AUTO (bit 5).
 */
static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 */
static int t4_autoneg = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
    "Link autonegotiation");
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited,
 * allowed, encouraged respectively).  '-n' is the same as 'n' except the
 * firmware version used in the checks is read from the firmware bundled with
 * the driver.
 */
static int t4_fw_install = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
    "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");
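/*
 * For example, hw.cxgbe.fw_install="2" encourages the driver to install its
 * bundled firmware on the card, while "-2" behaves the same except that the
 * version used in the comparison is read from the bundled firmware image
 * itself.
 */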
/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
    &t4_nbmcaps_allowed, 0, "Default NBM capabilities");

static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
    &t4_linkcaps_allowed, 0, "Default link capabilities");

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
    &t4_switchcaps_allowed, 0, "Default switch capabilities");
#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
    &t4_niccaps_allowed, 0, "Default NIC capabilities");
static int t4_toecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
    &t4_toecaps_allowed, 0, "Default TCP offload capabilities");

static int t4_rdmacaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
    &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");

static int t4_cryptocaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
    &t4_cryptocaps_allowed, 0, "Default crypto capabilities");

static int t4_iscsicaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
    &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");

static int t4_fcoecaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
    &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");
static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
    0, "Use WC instead of UC for BAR2");

static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
    "Number of VIs per port");
/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 */
static int pcie_relaxed_ordering = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
    &pcie_relaxed_ordering, 0,
    "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");

static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RDTUN,
    &t4_panic_on_fatal_err, 0, "Panic on fatal errors");
#ifdef TCP_OFFLOAD
static int t4_cop_managed_offloading = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
    &t4_cop_managed_offloading, 0,
    "COP (Connection Offload Policy) controls all TOE offload");
#endif
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
        FW_VI_FUNC_ETH,
        FW_VI_FUNC_OFLD,
        FW_VI_FUNC_IWARP,
        FW_VI_FUNC_OPENISCSI,
        FW_VI_FUNC_OPENFCOE,
        FW_VI_FUNC_FOISCSI,
        FW_VI_FUNC_FCOE,
};
struct intrs_and_queues {
        uint16_t intr_type;     /* INTx, MSI, or MSI-X */
        uint16_t num_vis;       /* number of VIs for each port */
        uint16_t nirq;          /* Total # of vectors */
        uint16_t ntxq;          /* # of NIC txq's for each port */
        uint16_t nrxq;          /* # of NIC rxq's for each port */
        uint16_t nofldtxq;      /* # of TOE/ETHOFLD txq's for each port */
        uint16_t nofldrxq;      /* # of TOE rxq's for each port */

        /* The vcxgbe/vcxl interfaces use these and not the ones above. */
        uint16_t ntxq_vi;       /* # of NIC txq's */
        uint16_t nrxq_vi;       /* # of NIC rxq's */
        uint16_t nofldtxq_vi;   /* # of TOE txq's */
        uint16_t nofldrxq_vi;   /* # of TOE rxq's */
        uint16_t nnmtxq_vi;     /* # of netmap txq's */
        uint16_t nnmrxq_vi;     /* # of netmap rxq's */
};
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int contact_firmware(struct adapter *);
static int partition_resources(struct adapter *);
static int get_params__pre_init(struct adapter *);
static int set_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);
struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401, "Chelsio T520-CR"},            /* 2 x 10G */
        {0x5402, "Chelsio T522-CR"},            /* 2 x 10G, 2 X 1G */
        {0x5403, "Chelsio T540-CR"},            /* 4 x 10G */
        {0x5407, "Chelsio T520-SO"},            /* 2 x 10G, nomem */
        {0x5409, "Chelsio T520-BT"},            /* 2 x 10GBaseT */
        {0x540a, "Chelsio T504-BT"},            /* 4 x 1G */
        {0x540d, "Chelsio T580-CR"},            /* 2 x 40G */
        {0x540e, "Chelsio T540-LP-CR"},         /* 4 x 10G */
        {0x5410, "Chelsio T580-LP-CR"},         /* 2 x 40G */
        {0x5411, "Chelsio T520-LL-CR"},         /* 2 x 10G */
        {0x5412, "Chelsio T560-CR"},            /* 1 x 40G, 2 x 10G */
        {0x5414, "Chelsio T580-LP-SO-CR"},      /* 2 x 40G, nomem */
        {0x5415, "Chelsio T502-BT"},            /* 2 x 1G */
        {0x5418, "Chelsio T540-BT"},            /* 4 x 10GBaseT */
        {0x5419, "Chelsio T540-LP-BT"},         /* 4 x 10GBaseT */
        {0x541a, "Chelsio T540-SO-BT"},         /* 4 x 10GBaseT, nomem */
        {0x541b, "Chelsio T540-SO-CR"},         /* 4 x 10G, nomem */

        {0x5483, "Custom T540-CR"},
        {0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
        {0xc006, "Chelsio Terminator 6 FPGA"},  /* T6 PE10K6 FPGA (PF0) */
        {0x6400, "Chelsio T6-DBG-25"},          /* 2 x 10/25G, debug */
        {0x6401, "Chelsio T6225-CR"},           /* 2 x 10/25G */
        {0x6402, "Chelsio T6225-SO-CR"},        /* 2 x 10/25G, nomem */
        {0x6403, "Chelsio T6425-CR"},           /* 4 x 10/25G */
        {0x6404, "Chelsio T6425-SO-CR"},        /* 4 x 10/25G, nomem */
        {0x6405, "Chelsio T6225-OCP-SO"},       /* 2 x 10/25G, nomem */
        {0x6406, "Chelsio T62100-OCP-SO"},      /* 2 x 40/50/100G, nomem */
        {0x6407, "Chelsio T62100-LP-CR"},       /* 2 x 40/50/100G */
        {0x6408, "Chelsio T62100-SO-CR"},       /* 2 x 40/50/100G, nomem */
        {0x6409, "Chelsio T6210-BT"},           /* 2 x 10GBASE-T */
        {0x640d, "Chelsio T62100-CR"},          /* 2 x 40/50/100G */
        {0x6410, "Chelsio T6-DBG-100"},         /* 2 x 40/50/100G, debug */
        {0x6411, "Chelsio T6225-LL-CR"},        /* 2 x 10/25G */
        {0x6414, "Chelsio T61100-OCP-SO"},      /* 1 x 40/50/100G, nomem */
        {0x6415, "Chelsio T6201-BT"},           /* 2 x 1000BASE-T */

        {0x6480, "Custom T6225-CR"},
        {0x6481, "Custom T62100-CR"},
        {0x6482, "Custom T6225-CR"},
        {0x6483, "Custom T62100-CR"},
        {0x6484, "Custom T64100-CR"},
        {0x6485, "Custom T6240-SO"},
        {0x6486, "Custom T6225-SO-CR"},
        {0x6487, "Custom T6225-CR"},
};
/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
static int
t4_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);
        uint8_t f = pci_get_function(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        /* Attach only to PF0 of the FPGA */
        if (d == 0xa000 && f != 0)
                return (ENXIO);

        for (i = 0; i < nitems(t4_pciids); i++) {
                if (d == t4_pciids[i].device) {
                        device_set_desc(dev, t4_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}
static int
t5_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);
        uint8_t f = pci_get_function(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        /* Attach only to PF0 of the FPGA */
        if (d == 0xb000 && f != 0)
                return (ENXIO);

        for (i = 0; i < nitems(t5_pciids); i++) {
                if (d == t5_pciids[i].device) {
                        device_set_desc(dev, t5_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}
static int
t6_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        for (i = 0; i < nitems(t6_pciids); i++) {
                if (d == t6_pciids[i].device) {
                        device_set_desc(dev, t6_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}
static void
t5_attribute_workaround(device_t dev)
{
        device_t root_port;
        uint32_t v;

        /*
         * The T5 chips do not properly echo the No Snoop and Relaxed
         * Ordering attributes when replying to a TLP from a Root
         * Port.  As a workaround, find the parent Root Port and
         * disable No Snoop and Relaxed Ordering.  Note that this
         * affects all devices under this root port.
         */
        root_port = pci_find_pcie_root_port(dev);
        if (root_port == NULL) {
                device_printf(dev, "Unable to find parent root port\n");
                return;
        }

        v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
            PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
        if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
            0)
                device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
                    device_get_nameunit(root_port));
}
static const struct devnames devnames[] = {
        {
                .nexus_name = "t4nex",
                .ifnet_name = "cxgbe",
                .vi_ifnet_name = "vcxgbe",
                .pf03_drv_name = "t4iov",
                .vf_nexus_name = "t4vf",
                .vf_ifnet_name = "cxgbev"
        }, {
                .nexus_name = "t5nex",
                .ifnet_name = "cxl",
                .vi_ifnet_name = "vcxl",
                .pf03_drv_name = "t5iov",
                .vf_nexus_name = "t5vf",
                .vf_ifnet_name = "cxlv"
        }, {
                .nexus_name = "t6nex",
                .ifnet_name = "cc",
                .vi_ifnet_name = "vcc",
                .pf03_drv_name = "t6iov",
                .vf_nexus_name = "t6vf",
                .vf_ifnet_name = "ccv"
        }
};
void
t4_init_devnames(struct adapter *sc)
{
        int id;

        id = chip_id(sc);
        if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
                sc->names = &devnames[id - CHELSIO_T4];
        else {
                device_printf(sc->dev, "chip id %d is not supported.\n", id);
                sc->names = NULL;
        }
}
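/*
 * t4_ifnet_unit below consults kernel hints to wire an ifnet unit to a
 * specific port.  A hedged example (hypothetical unit and port numbers) of
 * the /boot/device.hints entries it honors, wiring cxgbe4 to port 2 of the
 * adapter at t4nex0:
 *
 *      hint.cxgbe.4.at="t4nex0"
 *      hint.cxgbe.4.port="2"
 */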
static int
t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
{
        const char *parent, *name;
        long value;
        int line, unit;

        line = 0;
        parent = device_get_nameunit(sc->dev);
        name = sc->names->ifnet_name;
        while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
                if (resource_long_value(name, unit, "port", &value) == 0 &&
                    value == pi->port_id)
                        return (unit);
        }

        return (-1);
}
static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, j, rqidx, tqidx, nports;
        struct make_dev_args mda;
        struct intrs_and_queues iaq;
        struct sge *s;
        char *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
        int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
        int nm_rqidx, nm_tqidx;
#endif
        int num_vis;

        sc = device_get_softc(dev);
        sc->dev = dev;
        TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

        if ((pci_get_device(dev) & 0xff00) == 0x5400)
                t5_attribute_workaround(dev);
        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                uint32_t v;

                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
                sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
                if (pcie_relaxed_ordering == 0 &&
                    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
                        v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
                        pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
                } else if (pcie_relaxed_ordering == 1 &&
                    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
                        v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
                        pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
                }
        }
        sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
        sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
        sc->traceq = -1;
        mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
        snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
            device_get_nameunit(dev));

        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

        mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

        rw_init(&sc->policy_lock, "connection offload policy");

        rc = t4_map_bars_0_and_4(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

        /* Prepare the adapter for operation. */
        buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_prep_adapter(sc, buf);
        free(buf, M_CXGBE);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /*
         * This is the real PF# to which we're attaching.  Works from within
         * PCI passthrough environments too, where pci_get_function() could
         * return a different PF# depending on the passthrough configuration.
         * We need to use the real PF# in all our communication with the
         * firmware.
         */
        j = t4_read_reg(sc, A_PL_WHOAMI);
        sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
        sc->mbox = sc->pf;
        t4_init_devnames(sc);
        if (sc->names == NULL) {
                rc = ENOTSUP;
                goto done; /* error message displayed already */
        }

        /*
         * Do this really early, with the memory windows set up even before the
         * character device.  The userland tool's register i/o and mem read
         * will work even in "recovery mode".
         */
        setup_memwin(sc);
        if (t4_init_devlog_params(sc, 0) == 0)
                fixup_devlog_params(sc);
        make_dev_args_init(&mda);
        mda.mda_devsw = &t4_cdevsw;
        mda.mda_uid = UID_ROOT;
        mda.mda_gid = GID_WHEEL;
        mda.mda_mode = 0600;
        mda.mda_si_drv1 = sc;
        rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
        if (rc != 0)
                device_printf(dev, "failed to create nexus char device: %d.\n",
                    rc);

        /* Go no further if recovery mode has been requested. */
        if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
                device_printf(dev, "recovery mode.\n");
                goto done;
        }
#if defined(__i386__)
        if ((cpu_feature & CPUID_CX8) == 0) {
                device_printf(dev, "64 bit atomics not available.\n");
                rc = ENXIO;
                goto done;
        }
#endif

        /* Contact the firmware and try to become the master driver. */
        rc = contact_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */
        MPASS(sc->flags & FW_OK);
        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        if (sc->flags & MASTER_PF) {
                rc = partition_resources(sc);
                if (rc != 0)
                        goto done; /* error message displayed already */
        }

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_map_bar_2(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */
        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.
         */
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;
                /*
                 * XXX: vi[0] is special so we can't delay this allocation
                 * until pi->nvi's final value is known.
                 */
                pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
                    M_ZERO | M_WAITOK);

                /*
                 * Allocate the "main" VI and initialize parameters
                 * like mac addr.
                 */
                rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
                sc->chan_map[pi->tx_chan] = i;

                /* All VIs on this port share this media. */
                ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
                    cxgbe_media_status);

                PORT_LOCK(pi);
                init_link_config(pi);
                fixup_link_config(pi);
                build_medialist(pi);
                if (fixed_ifmedia(pi))
                        pi->flags |= FIXED_IFMEDIA;
                PORT_UNLOCK(pi);

                pi->dev = device_add_child(dev, sc->names->ifnet_name,
                    t4_ifnet_unit(sc, pi));
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                pi->vi[0].dev = pi->dev;
                device_set_softc(pi->dev, pi);
        }
        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
        nports = sc->params.nports;
        rc = cfg_itype_and_nqueues(sc, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        num_vis = iaq.num_vis;
        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        s = &sc->sge;
        s->nrxq = nports * iaq.nrxq;
        s->ntxq = nports * iaq.ntxq;
        if (num_vis > 1) {
                s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
                s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
        }
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += nports;               /* ctrl queues: 1 per port */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
        if (is_offload(sc) || is_ethoffload(sc)) {
                s->nofldtxq = nports * iaq.nofldtxq;
                if (num_vis > 1)
                        s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
                s->neq += s->nofldtxq;

                s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif
#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                s->nofldrxq = nports * iaq.nofldrxq;
                if (num_vis > 1)
                        s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
                s->neq += s->nofldrxq;  /* free list */
                s->niq += s->nofldrxq;

                s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif
#ifdef DEV_NETMAP
        if (num_vis > 1) {
                s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi;
                s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi;
        }
        s->neq += s->nnmtxq + s->nnmrxq;
        s->niq += s->nnmrxq;

        s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
            M_CXGBE, M_ZERO | M_WAITOK);
        s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
            M_CXGBE, M_ZERO | M_WAITOK);
#endif

        s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_init_l2t(sc, M_WAITOK);
        t4_init_smt(sc, M_WAITOK);
        t4_init_tx_sched(sc);
#ifdef RATELIMIT
        t4_init_etid_table(sc);
#endif
#ifdef INET6
        t4_init_clip_table(sc);
#endif
        if (sc->vres.key.size != 0)
                sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
                    sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
        ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
        nm_rqidx = nm_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];
                struct vi_info *vi;

                if (pi == NULL)
                        continue;

                pi->nvi = num_vis;
                for_each_vi(pi, j, vi) {
                        vi->pi = pi;
                        vi->qsize_rxq = t4_qsize_rxq;
                        vi->qsize_txq = t4_qsize_txq;

                        vi->first_rxq = rqidx;
                        vi->first_txq = tqidx;
                        vi->tmr_idx = t4_tmr_idx;
                        vi->pktc_idx = t4_pktc_idx;
                        vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
                        vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

                        rqidx += vi->nrxq;
                        tqidx += vi->ntxq;

                        if (j == 0 && vi->ntxq > 1)
                                vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
                        else
                                vi->rsrv_noflowq = 0;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
                        vi->first_ofld_txq = ofld_tqidx;
                        vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
                        ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
                        vi->ofld_tmr_idx = t4_tmr_idx_ofld;
                        vi->ofld_pktc_idx = t4_pktc_idx_ofld;
                        vi->first_ofld_rxq = ofld_rqidx;
                        vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;

                        ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
                        if (j > 0) {
                                vi->first_nm_rxq = nm_rqidx;
                                vi->first_nm_txq = nm_tqidx;
                                vi->nnmrxq = iaq.nnmrxq_vi;
                                vi->nnmtxq = iaq.nnmtxq_vi;
                                nm_rqidx += vi->nnmrxq;
                                nm_tqidx += vi->nnmtxq;
                        }
#endif
                }
        }
        rc = t4_setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        rc = bus_generic_probe(dev);
        if (rc != 0) {
                device_printf(dev, "failed to probe child drivers: %d\n", rc);
                goto done;
        }

        /*
         * Ensure thread-safe mailbox access (in debug builds).
         *
         * So far this was the only thread accessing the mailbox but various
         * ifnets and sysctls are about to be created and their handlers/ioctls
         * will access the mailbox from different threads.
         */
        sc->flags |= CHK_MBOX_ACCESS;

        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
            sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
            (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
            sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

        t4_set_desc(sc);

        notify_siblings(dev, 0);

done:
        if (rc != 0 && sc->cdev) {
                /* cdev was created and so cxgbetool works; recover that way. */
                device_printf(dev,
                    "error during attach, adapter is now in recovery mode.\n");
                rc = 0;
        }

        if (rc != 0)
                t4_detach_common(dev);
        else
                t4_sysctls(sc);

        return (rc);
}
static int
t4_child_location_str(device_t bus, device_t dev, char *buf, size_t buflen)
{
        struct adapter *sc;
        struct port_info *pi;
        int i;

        sc = device_get_softc(bus);
        for_each_port(sc, i) {
                pi = sc->port[i];
                if (pi != NULL && pi->dev == dev) {
                        snprintf(buf, buflen, "port=%d", pi->port_id);
                        break;
                }
        }

        return (0);
}
static int
t4_ready(device_t dev)
{
        struct adapter *sc;

        sc = device_get_softc(dev);
        if (sc->flags & FW_OK)
                return (0);
        return (ENXIO);
}
static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
        struct adapter *sc;
        struct port_info *pi;

        sc = device_get_softc(dev);
        if (port < 0 || port >= MAX_NPORTS)
                return (EINVAL);
        pi = sc->port[port];
        if (pi == NULL || pi->dev == NULL)
                return (ENXIO);
        *child = pi->dev;
        return (0);
}
static int
notify_siblings(device_t dev, int detaching)
{
        device_t sibling;
        int error, i;

        error = 0;
        for (i = 0; i < PCI_FUNCMAX; i++) {
                if (i == pci_get_function(dev))
                        continue;
                sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
                    pci_get_slot(dev), i);
                if (sibling == NULL || !device_is_attached(sibling))
                        continue;
                if (detaching)
                        error = T4_DETACH_CHILD(sibling);
                else
                        (void)T4_ATTACH_CHILD(sibling);
                if (error)
                        break;
        }

        return (error);
}
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        int rc;

        sc = device_get_softc(dev);

        rc = notify_siblings(dev, 1);
        if (rc != 0) {
                device_printf(dev,
                    "failed to detach sibling devices: %d\n", rc);
                return (rc);
        }

        return (t4_detach_common(dev));
}
int
t4_detach_common(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        sc->flags &= ~CHK_MBOX_ACCESS;
        if (sc->flags & FULL_INIT_DONE) {
                if (!(sc->flags & IS_VF))
                        t4_intr_disable(sc);
        }

        if (device_is_attached(dev)) {
                rc = bus_generic_detach(dev);
                if (rc) {
                        device_printf(dev,
                            "failed to detach child devices: %d\n", rc);
                        return (rc);
                }
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
                t4_free_tx_sched(sc);

        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                }
        }

        device_delete_children(dev);

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);
        if (sc->smt)
                t4_free_smt(sc->smt);
#ifdef RATELIMIT
        t4_free_etid_table(sc);
#endif
        if (sc->key_map)
                vmem_destroy(sc->key_map);
#ifdef INET6
        t4_destroy_clip_table(sc);
#endif

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
        free(sc->sge.nm_rxq, M_CXGBE);
        free(sc->sge.nm_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        free(sc->tids.hpftid_tab, M_CXGBE);
        free_hftid_hash(&sc->tids);
        free(sc->tids.atid_tab, M_CXGBE);
        free(sc->tids.tid_tab, M_CXGBE);
        free(sc->tt.tls_rx_ports, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                sx_xlock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                sx_xunlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        callout_drain(&sc->sfl_callout);
        if (mtx_initialized(&sc->tids.ftid_lock)) {
                mtx_destroy(&sc->tids.ftid_lock);
                cv_destroy(&sc->tids.ftid_cv);
        }
        if (mtx_initialized(&sc->tids.atid_lock))
                mtx_destroy(&sc->tids.atid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);
        if (mtx_initialized(&sc->ifp_lock))
                mtx_destroy(&sc->ifp_lock);
        if (mtx_initialized(&sc->reg_lock))
                mtx_destroy(&sc->reg_lock);

        if (rw_initialized(&sc->policy_lock)) {
                rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
                if (sc->policy != NULL)
                        free_offload_policy(sc->policy);
#endif
        }

        for (i = 0; i < NUM_MEMWIN; i++) {
                struct memwin *mw = &sc->memwin[i];

                if (rw_initialized(&mw->mw_lock))
                        rw_destroy(&mw->mw_lock);
        }

        bzero(sc, sizeof(*sc));

        return (0);
}
static int
cxgbe_probe(device_t dev)
{
        char buf[128];
        struct port_info *pi = device_get_softc(dev);

        snprintf(buf, sizeof(buf), "port %d", pi->port_id);
        device_set_desc_copy(dev, buf);

        return (BUS_PROBE_DEFAULT);
}
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
    IFCAP_HWRXTSTMP)
#define T4_CAP_ENABLE (T4_CAP)
static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
        struct ifnet *ifp;
        struct sbuf *sb;

        vi->xact_addr_filt = -1;
        callout_init(&vi->tick, 1);

        /* Allocate an ifnet and set it up */
        ifp = if_alloc_dev(IFT_ETHER, dev);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        vi->ifp = ifp;
        ifp->if_softc = vi;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;
        ifp->if_get_counter = cxgbe_get_counter;
#ifdef RATELIMIT
        ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
        ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
        ifp->if_snd_tag_query = cxgbe_snd_tag_query;
        ifp->if_snd_tag_free = cxgbe_snd_tag_free;
#endif

        ifp->if_capabilities = T4_CAP;
        ifp->if_capenable = T4_CAP_ENABLE;
#ifdef TCP_OFFLOAD
        if (vi->nofldrxq != 0)
                ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef RATELIMIT
        if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0) {
                ifp->if_capabilities |= IFCAP_TXRTLMT;
                ifp->if_capenable |= IFCAP_TXRTLMT;
        }
#endif
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        ifp->if_hw_tsomax = IP_MAXPACKET;
        ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO;
#ifdef RATELIMIT
        if (is_ethoffload(vi->pi->adapter) && vi->nofldtxq != 0)
                ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO;
#endif
        ifp->if_hw_tsomaxsegsize = 65536;

        ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
        if (vi->nnmrxq != 0)
                cxgbe_nm_attach(vi);
#endif
        sb = sbuf_new_auto();
        sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
        switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
        case IFCAP_TOE:
                sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
                break;
        case IFCAP_TOE | IFCAP_TXRTLMT:
                sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
                break;
        case IFCAP_TXRTLMT:
                sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
                break;
        }
#endif
#ifdef TCP_OFFLOAD
        if (ifp->if_capabilities & IFCAP_TOE)
                sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
        if (ifp->if_capabilities & IFCAP_NETMAP)
                sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
                    vi->nnmtxq, vi->nnmrxq);
#endif
        sbuf_finish(sb);
        device_printf(dev, "%s\n", sbuf_data(sb));
        sbuf_delete(sb);

        vi_sysctls(vi);

        return (0);
}
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct vi_info *vi;
        int i, rc;

        callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

        rc = cxgbe_vi_attach(dev, &pi->vi[0]);
        if (rc)
                return (rc);

        for_each_vi(pi, i, vi) {
                if (i == 0)
                        continue;
                vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
                if (vi->dev == NULL) {
                        device_printf(dev, "failed to add VI %d\n", i);
                        continue;
                }
                device_set_softc(vi->dev, vi);
        }

        cxgbe_sysctls(pi);

        bus_generic_attach(dev);

        return (0);
}
static void
cxgbe_vi_detach(struct vi_info *vi)
{
        struct ifnet *ifp = vi->ifp;

        ether_ifdetach(ifp);

        /* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
        if (ifp->if_capabilities & IFCAP_NETMAP)
                cxgbe_nm_detach(vi);
#endif
        cxgbe_uninit_synchronized(vi);
        callout_drain(&vi->tick);
        vi_full_uninit(vi);

        if_free(vi->ifp);
        vi->ifp = NULL;
}
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        int rc;

        /* Detach the extra VIs first. */
        rc = bus_generic_detach(dev);
        if (rc)
                return (rc);
        device_delete_children(dev);

        doom_vi(sc, &pi->vi[0]);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        cxgbe_vi_detach(&pi->vi[0]);
        callout_drain(&pi->tick);
        ifmedia_removeall(&pi->media);

        end_synchronized_op(sc, 0);

        return (0);
}
static void
cxgbe_init(void *arg)
{
        struct vi_info *vi = arg;
        struct adapter *sc = vi->pi->adapter;

        if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
                return;
        cxgbe_init_synchronized(vi);
        end_synchronized_op(sc, 0);
}
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags;
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if (mtu < ETHERMIN || mtu > MAX_MTU)
                        return (EINVAL);

                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (vi->flags & VI_INIT_DONE) {
                        t4_update_fl_bufsize(ifp);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_MTU);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
                if (rc)
                        return (rc);

                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                flags = vi->if_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        rc = update_mac_settings(ifp,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else {
                                rc = cxgbe_init_synchronized(vi);
                        }
                        vi->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        rc = cxgbe_uninit_synchronized(vi);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK,
                    "t4multi");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(ifp, XGMAC_MCADDRS);
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFCAP:
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
                if (rc)
                        return (rc);

                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
                 * before sending a TSO request our way, so it's sufficient to
                 * toggle IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
                        int i;
                        struct sge_rxq *rxq;

                        ifp->if_capenable ^= IFCAP_LRO;
                        for_each_rxq(vi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_LRO)
                                        rxq->iq.flags |= IQ_LRO_ENABLED;
                                else
                                        rxq->iq.flags &= ~IQ_LRO_ENABLED;
                        }
#endif
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

                        rc = toe_capability(vi, enable);
                        if (rc != 0)
                                goto fail;

                        ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_VLANEX);
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;

                        /* Need to find out how to disable auto-mtu-inflation */
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
#ifdef RATELIMIT
                if (mask & IFCAP_TXRTLMT)
                        ifp->if_capenable ^= IFCAP_TXRTLMT;
#endif
                if (mask & IFCAP_HWRXTSTMP) {
                        int i;
                        struct sge_rxq *rxq;

                        ifp->if_capenable ^= IFCAP_HWRXTSTMP;
                        for_each_rxq(vi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_HWRXTSTMP)
                                        rxq->iq.flags |= IQ_RX_TIMESTAMP;
                                else
                                        rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
                        }
                }

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
fail:
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
        case SIOCGIFXMEDIA:
                ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
                break;

        case SIOCGI2C: {
                struct ifi2creq i2c;

                rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
                if (rc != 0)
                        break;
                if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
                        rc = EPERM;
                        break;
                }
                if (i2c.len > sizeof(i2c.data)) {
                        rc = EINVAL;
                        break;
                }
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
                if (rc)
                        return (rc);
                rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
                    i2c.offset, i2c.len, &i2c.data[0]);
                end_synchronized_op(sc, 0);
                if (rc == 0)
                        rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
                break;
        }

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq;
        void *items[1];
        int rc;

        M_ASSERTPKTHDR(m);
        MPASS(m->m_nextpkt == NULL);    /* not quite ready for this yet */

        if (__predict_false(pi->link_cfg.link_ok == false)) {
                m_freem(m);
                return (ENETDOWN);
        }

        rc = parse_pkt(sc, &m);
        if (__predict_false(rc != 0)) {
                MPASS(m == NULL);                       /* was freed already */
                atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
                return (rc);
        }
#ifdef RATELIMIT
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
                return (ethofld_transmit(ifp, m));
        }
#endif
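        /*
         * Select a txq.  Packets with a flowid are spread across the txq's
         * that are not reserved; e.g. (hypothetical configuration) with
         * ntxq = 8 and rsrv_noflowq = 1, flowid traffic is hashed onto
         * txq 1-7 while txq 0 is reserved for packets without a flowid.
         */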
        txq = &sc->sge.txq[vi->first_txq];
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
                txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
                    vi->rsrv_noflowq);

        items[0] = m;
        rc = mp_ring_enqueue(txq->r, items, 1, 4096);
        if (__predict_false(rc != 0))
                m_freem(m);

        return (rc);
}
static void
cxgbe_qflush(struct ifnet *ifp)
{
        struct vi_info *vi = ifp->if_softc;
        struct sge_txq *txq;
        int i;

        /* queues do not exist if !VI_INIT_DONE. */
        if (vi->flags & VI_INIT_DONE) {
                for_each_txq(vi, i, txq) {
                        TXQ_LOCK(txq);
                        txq->eq.flags |= EQ_QFLUSH;
                        TXQ_UNLOCK(txq);
                        while (!mp_ring_is_idle(txq->r)) {
                                mp_ring_check_drainage(txq->r, 0);
                                pause("qflush", 1);
                        }
                        TXQ_LOCK(txq);
                        txq->eq.flags &= ~EQ_QFLUSH;
                        TXQ_UNLOCK(txq);
                }
        }
        if_qflush(ifp);
}
static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
        struct vi_info *vi = ifp->if_softc;
        struct fw_vi_stats_vf *s = &vi->stats;

        vi_refresh_stats(vi->pi->adapter, vi);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (s->rx_bcast_frames + s->rx_mcast_frames +
                    s->rx_ucast_frames);
        case IFCOUNTER_IERRORS:
                return (s->rx_err_frames);
        case IFCOUNTER_OPACKETS:
                return (s->tx_bcast_frames + s->tx_mcast_frames +
                    s->tx_ucast_frames + s->tx_offload_frames);
        case IFCOUNTER_OERRORS:
                return (s->tx_drop_frames);
        case IFCOUNTER_IBYTES:
                return (s->rx_bcast_bytes + s->rx_mcast_bytes +
                    s->rx_ucast_bytes);
        case IFCOUNTER_OBYTES:
                return (s->tx_bcast_bytes + s->tx_mcast_bytes +
                    s->tx_ucast_bytes + s->tx_offload_bytes);
        case IFCOUNTER_IMCASTS:
                return (s->rx_mcast_frames);
        case IFCOUNTER_OMCASTS:
                return (s->tx_mcast_frames);
        case IFCOUNTER_OQDROPS: {
                uint64_t drops;

                drops = 0;
                if (vi->flags & VI_INIT_DONE) {
                        int i;
                        struct sge_txq *txq;

                        for_each_txq(vi, i, txq)
                                drops += counter_u64_fetch(txq->r->drops);
                }

                return (drops);
        }
        default:
                return (if_get_counter_default(ifp, c));
        }
}
2156 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
2158 struct vi_info *vi = ifp->if_softc;
2159 struct port_info *pi = vi->pi;
2160 struct adapter *sc = pi->adapter;
2161 struct port_stats *s = &pi->stats;
2163 if (pi->nvi > 1 || sc->flags & IS_VF)
2164 return (vi_get_counter(ifp, c));
2166 cxgbe_refresh_stats(sc, pi);
2169 case IFCOUNTER_IPACKETS:
2170 return (s->rx_frames);
2172 case IFCOUNTER_IERRORS:
2173 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
2174 s->rx_fcs_err + s->rx_len_err);
2176 case IFCOUNTER_OPACKETS:
2177 return (s->tx_frames);
2179 case IFCOUNTER_OERRORS:
2180 return (s->tx_error_frames);
2182 case IFCOUNTER_IBYTES:
2183 return (s->rx_octets);
2185 case IFCOUNTER_OBYTES:
2186 return (s->tx_octets);
2188 case IFCOUNTER_IMCASTS:
2189 return (s->rx_mcast_frames);
2191 case IFCOUNTER_OMCASTS:
2192 return (s->tx_mcast_frames);
2194 case IFCOUNTER_IQDROPS:
2195 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2196 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
2197 s->rx_trunc3 + pi->tnl_cong_drops);
2199 case IFCOUNTER_OQDROPS: {
2203 if (vi->flags & VI_INIT_DONE) {
2205 struct sge_txq *txq;
2207 for_each_txq(vi, i, txq)
2208 drops += counter_u64_fetch(txq->r->drops);
2216 return (if_get_counter_default(ifp, c));
2221 * The kernel picks a media from the list we provided, but we still validate
2225 cxgbe_media_change(struct ifnet *ifp)
2227 struct vi_info *vi = ifp->if_softc;
2228 struct port_info *pi = vi->pi;
2229 struct ifmedia *ifm = &pi->media;
2230 struct link_config *lc = &pi->link_cfg;
2231 struct adapter *sc = pi->adapter;
2234 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
2238 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2239 /* ifconfig .. media autoselect */
2240 if (!(lc->supported & FW_PORT_CAP32_ANEG)) {
2241 rc = ENOTSUP; /* AN not supported by transceiver */
2244 lc->requested_aneg = AUTONEG_ENABLE;
2245 lc->requested_speed = 0;
2246 lc->requested_fc |= PAUSE_AUTONEG;
2248 lc->requested_aneg = AUTONEG_DISABLE;
2249 lc->requested_speed =
2250 ifmedia_baudrate(ifm->ifm_media) / 1000000;
2251 lc->requested_fc = 0;
2252 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
2253 lc->requested_fc |= PAUSE_RX;
2254 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
2255 lc->requested_fc |= PAUSE_TX;
2257 if (pi->up_vis > 0) {
2258 fixup_link_config(pi);
2259 rc = apply_link_config(pi);
2263 end_synchronized_op(sc, 0);
2268 * Base media word (without ETHER, pause, link active, etc.) for the port at the
2272 port_mword(struct port_info *pi, uint32_t speed)
2275 MPASS(speed & M_FW_PORT_CAP32_SPEED);
2276 MPASS(powerof2(speed));
2278 switch (pi->port_type) {
2279 case FW_PORT_TYPE_BT_SGMII:
2280 case FW_PORT_TYPE_BT_XFI:
2281 case FW_PORT_TYPE_BT_XAUI:
2284 case FW_PORT_CAP32_SPEED_100M:
2285 return (IFM_100_TX);
2286 case FW_PORT_CAP32_SPEED_1G:
2287 return (IFM_1000_T);
2288 case FW_PORT_CAP32_SPEED_10G:
2289 return (IFM_10G_T);
2292 case FW_PORT_TYPE_KX4:
2293 if (speed == FW_PORT_CAP32_SPEED_10G)
2294 return (IFM_10G_KX4);
2296 case FW_PORT_TYPE_CX4:
2297 if (speed == FW_PORT_CAP32_SPEED_10G)
2298 return (IFM_10G_CX4);
2300 case FW_PORT_TYPE_KX:
2301 if (speed == FW_PORT_CAP32_SPEED_1G)
2302 return (IFM_1000_KX);
2304 case FW_PORT_TYPE_KR:
2305 case FW_PORT_TYPE_BP_AP:
2306 case FW_PORT_TYPE_BP4_AP:
2307 case FW_PORT_TYPE_BP40_BA:
2308 case FW_PORT_TYPE_KR4_100G:
2309 case FW_PORT_TYPE_KR_SFP28:
2310 case FW_PORT_TYPE_KR_XLAUI:
2312 case FW_PORT_CAP32_SPEED_1G:
2313 return (IFM_1000_KX);
2314 case FW_PORT_CAP32_SPEED_10G:
2315 return (IFM_10G_KR);
2316 case FW_PORT_CAP32_SPEED_25G:
2317 return (IFM_25G_KR);
2318 case FW_PORT_CAP32_SPEED_40G:
2319 return (IFM_40G_KR4);
2320 case FW_PORT_CAP32_SPEED_50G:
2321 return (IFM_50G_KR2);
2322 case FW_PORT_CAP32_SPEED_100G:
2323 return (IFM_100G_KR4);
2326 case FW_PORT_TYPE_FIBER_XFI:
2327 case FW_PORT_TYPE_FIBER_XAUI:
2328 case FW_PORT_TYPE_SFP:
2329 case FW_PORT_TYPE_QSFP_10G:
2330 case FW_PORT_TYPE_QSA:
2331 case FW_PORT_TYPE_QSFP:
2332 case FW_PORT_TYPE_CR4_QSFP:
2333 case FW_PORT_TYPE_CR_QSFP:
2334 case FW_PORT_TYPE_CR2_QSFP:
2335 case FW_PORT_TYPE_SFP28:
2336 /* Pluggable transceiver */
2337 switch (pi->mod_type) {
2338 case FW_PORT_MOD_TYPE_LR:
2340 case FW_PORT_CAP32_SPEED_1G:
2341 return (IFM_1000_LX);
2342 case FW_PORT_CAP32_SPEED_10G:
2343 return (IFM_10G_LR);
2344 case FW_PORT_CAP32_SPEED_25G:
2345 return (IFM_25G_LR);
2346 case FW_PORT_CAP32_SPEED_40G:
2347 return (IFM_40G_LR4);
2348 case FW_PORT_CAP32_SPEED_50G:
2349 return (IFM_50G_LR2);
2350 case FW_PORT_CAP32_SPEED_100G:
2351 return (IFM_100G_LR4);
2354 case FW_PORT_MOD_TYPE_SR:
2356 case FW_PORT_CAP32_SPEED_1G:
2357 return (IFM_1000_SX);
2358 case FW_PORT_CAP32_SPEED_10G:
2359 return (IFM_10G_SR);
2360 case FW_PORT_CAP32_SPEED_25G:
2361 return (IFM_25G_SR);
2362 case FW_PORT_CAP32_SPEED_40G:
2363 return (IFM_40G_SR4);
2364 case FW_PORT_CAP32_SPEED_50G:
2365 return (IFM_50G_SR2);
2366 case FW_PORT_CAP32_SPEED_100G:
2367 return (IFM_100G_SR4);
2370 case FW_PORT_MOD_TYPE_ER:
2371 if (speed == FW_PORT_CAP32_SPEED_10G)
2372 return (IFM_10G_ER);
2374 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2375 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2377 case FW_PORT_CAP32_SPEED_1G:
2378 return (IFM_1000_CX);
2379 case FW_PORT_CAP32_SPEED_10G:
2380 return (IFM_10G_TWINAX);
2381 case FW_PORT_CAP32_SPEED_25G:
2382 return (IFM_25G_CR);
2383 case FW_PORT_CAP32_SPEED_40G:
2384 return (IFM_40G_CR4);
2385 case FW_PORT_CAP32_SPEED_50G:
2386 return (IFM_50G_CR2);
2387 case FW_PORT_CAP32_SPEED_100G:
2388 return (IFM_100G_CR4);
2391 case FW_PORT_MOD_TYPE_LRM:
2392 if (speed == FW_PORT_CAP32_SPEED_10G)
2393 return (IFM_10G_LRM);
2395 case FW_PORT_MOD_TYPE_NA:
2396 MPASS(0); /* Not pluggable? */
2398 case FW_PORT_MOD_TYPE_ERROR:
2399 case FW_PORT_MOD_TYPE_UNKNOWN:
2400 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
2402 case FW_PORT_MOD_TYPE_NONE:
2406 case FW_PORT_TYPE_NONE:
2410 return (IFM_UNKNOWN);
2414 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2416 struct vi_info *vi = ifp->if_softc;
2417 struct port_info *pi = vi->pi;
2418 struct adapter *sc = pi->adapter;
2419 struct link_config *lc = &pi->link_cfg;
2421 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2425 if (pi->up_vis == 0) {
2427 * If all the interfaces are administratively down the firmware
2428 * does not report transceiver changes. Refresh port info here
2429 * so that ifconfig displays accurate ifmedia at all times.
2430 * This is the only reason we have a synchronized op in this
2431 * function. Just PORT_LOCK would have been enough otherwise.
2433 t4_update_port_info(pi);
2434 build_medialist(pi);
2438 ifmr->ifm_status = IFM_AVALID;
2439 if (lc->link_ok == false)
2441 ifmr->ifm_status |= IFM_ACTIVE;
2444 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2445 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2446 if (lc->fc & PAUSE_RX)
2447 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2448 if (lc->fc & PAUSE_TX)
2449 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2450 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
2453 end_synchronized_op(sc, 0);
2457 vcxgbe_probe(device_t dev)
2460 struct vi_info *vi = device_get_softc(dev);
2462 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2463 vi - vi->pi->vi);
2464 device_set_desc_copy(dev, buf);
2466 return (BUS_PROBE_DEFAULT);
2470 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2472 int func, index, rc;
2473 uint32_t param, val;
2475 ASSERT_SYNCHRONIZED_OP(sc);
2477 index = vi - pi->vi;
2478 MPASS(index > 0); /* This function deals with _extra_ VIs only */
2479 KASSERT(index < nitems(vi_mac_funcs),
2480 ("%s: VI %s doesn't have a MAC func", __func__,
2481 device_get_nameunit(vi->dev)));
2482 func = vi_mac_funcs[index];
2483 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2484 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
2486 device_printf(vi->dev, "failed to allocate virtual interface %d "
2487 "for port %d: %d\n", index, pi->port_id, -rc);
2492 if (vi->rss_size == 1) {
2494 * This VI didn't get a slice of the RSS table. Reduce the
2495 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2496 * configuration file (nvi, rssnvi for this PF) if this is a
2499 device_printf(vi->dev, "RSS table not available.\n");
2500 vi->rss_base = 0xffff;
2505 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2506 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2507 V_FW_PARAMS_PARAM_YZ(vi->viid);
2508 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2510 vi->rss_base = 0xffff;
2512 MPASS((val >> 16) == vi->rss_size);
2513 vi->rss_base = val & 0xffff;
2520 vcxgbe_attach(device_t dev)
2523 struct port_info *pi;
2527 vi = device_get_softc(dev);
2531 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2534 rc = alloc_extra_vi(sc, pi, vi);
2535 end_synchronized_op(sc, 0);
2539 rc = cxgbe_vi_attach(dev, vi);
2541 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2548 vcxgbe_detach(device_t dev)
2553 vi = device_get_softc(dev);
2554 sc = vi->pi->adapter;
2558 cxgbe_vi_detach(vi);
2559 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2561 end_synchronized_op(sc, 0);
2566 static struct callout fatal_callout;
2569 delayed_panic(void *arg)
2571 struct adapter *sc = arg;
2573 panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
2577 t4_fatal_err(struct adapter *sc, bool fw_error)
2580 t4_shutdown_adapter(sc);
2581 log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n",
2582 device_get_nameunit(sc->dev));
2584 ASSERT_SYNCHRONIZED_OP(sc);
2585 sc->flags |= ADAP_ERR;
2588 sc->flags |= ADAP_ERR;
2592 if (t4_panic_on_fatal_err) {
2593 log(LOG_ALERT, "%s: panic on fatal error after 30s\n",
2594 device_get_nameunit(sc->dev));
2595 callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
2600 t4_add_adapter(struct adapter *sc)
2602 sx_xlock(&t4_list_lock);
2603 SLIST_INSERT_HEAD(&t4_list, sc, link);
2604 sx_xunlock(&t4_list_lock);
2608 t4_map_bars_0_and_4(struct adapter *sc)
2610 sc->regs_rid = PCIR_BAR(0);
2611 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2612 &sc->regs_rid, RF_ACTIVE);
2613 if (sc->regs_res == NULL) {
2614 device_printf(sc->dev, "cannot map registers.\n");
2617 sc->bt = rman_get_bustag(sc->regs_res);
2618 sc->bh = rman_get_bushandle(sc->regs_res);
2619 sc->mmio_len = rman_get_size(sc->regs_res);
2620 setbit(&sc->doorbells, DOORBELL_KDB);
2622 sc->msix_rid = PCIR_BAR(4);
2623 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2624 &sc->msix_rid, RF_ACTIVE);
2625 if (sc->msix_res == NULL) {
2626 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2634 t4_map_bar_2(struct adapter *sc)
2638 * T4: only iWARP driver uses the userspace doorbells. There is no need
2639 * to map it if RDMA is disabled.
2641 if (is_t4(sc) && sc->rdmacaps == 0)
2644 sc->udbs_rid = PCIR_BAR(2);
2645 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2646 &sc->udbs_rid, RF_ACTIVE);
2647 if (sc->udbs_res == NULL) {
2648 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2651 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2653 if (chip_id(sc) >= CHELSIO_T5) {
2654 setbit(&sc->doorbells, DOORBELL_UDB);
2655 #if defined(__i386__) || defined(__amd64__)
2656 if (t5_write_combine) {
2660 * Enable write combining on BAR2. This is the
2661 * userspace doorbell BAR and is split into 128B
2662 * (UDBS_SEG_SIZE) doorbell regions, each associated
2663 * with an egress queue. The first 64B has the doorbell
2664 * and the second 64B can be used to submit a tx work
2665 * request with an implicit doorbell.
2668 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2669 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2671 clrbit(&sc->doorbells, DOORBELL_UDB);
2672 setbit(&sc->doorbells, DOORBELL_WCWR);
2673 setbit(&sc->doorbells, DOORBELL_UDBWC);
2675 device_printf(sc->dev,
2676 "couldn't enable write combining: %d\n",
2680 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2681 t4_write_reg(sc, A_SGE_STAT_CFG,
2682 V_STATSOURCE_T5(7) | mode);
2686 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
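/*
 * Layout sketch (restates the geometry described above; the exact
 * qid-to-segment mapping is computed by the SGE code elsewhere):
 *
 *	volatile uint8_t *udb = sc->udbs_base + qid * UDBS_SEG_SIZE;
 *	udb[0..63]	doorbell registers
 *	udb[64..127]	WC buffer for a tx WR with an implicit doorbell
 */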
2691 struct memwin_init {
2696 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2697 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2698 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2699 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2702 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
2703 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2704 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2705 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
2709 setup_memwin(struct adapter *sc)
2711 const struct memwin_init *mw_init;
2718 * Read low 32b of bar0 indirectly via the hardware backdoor
2719 * mechanism. Works from within PCI passthrough environments
2720 * too, where rman_get_start() can return a different value. We
2721 * need to program the T4 memory window decoders with the actual
2722 * addresses that will be coming across the PCIe link.
2724 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
2725 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
2727 mw_init = &t4_memwin[0];
2729 /* T5+ use the relative offset inside the PCIe BAR */
2732 mw_init = &t5_memwin[0];
2735 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
2736 rw_init(&mw->mw_lock, "memory window access");
2737 mw->mw_base = mw_init->base;
2738 mw->mw_aperture = mw_init->aperture;
2741 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
2742 (mw->mw_base + bar0) | V_BIR(0) |
2743 V_WINDOW(ilog2(mw->mw_aperture) - 10));
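/*
 * The window size is encoded as log2(aperture) - 10, e.g. a 64KB
 * aperture programs V_WINDOW(ilog2(65536) - 10) = V_WINDOW(6).
 */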
2744 rw_wlock(&mw->mw_lock);
2745 position_memwin(sc, i, 0);
2746 rw_wunlock(&mw->mw_lock);
2750 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
2754 * Positions the memory window at the given address in the card's address space.
2755 * There are some alignment requirements and the actual position may be at an
2756 * address prior to the requested address. mw->mw_curpos always has the actual
2757 * position of the window.
2760 position_memwin(struct adapter *sc, int idx, uint32_t addr)
2766 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2767 mw = &sc->memwin[idx];
2768 rw_assert(&mw->mw_lock, RA_WLOCKED);
2772 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
2774 pf = V_PFNUM(sc->pf);
2775 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
2777 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
2778 t4_write_reg(sc, reg, mw->mw_curpos | pf);
2779 t4_read_reg(sc, reg); /* flush */
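/*
 * Worked example: positioning a T4 window at 0x10007 sets mw_curpos to
 * 0x10000 (16B alignment); callers recover the residue by addressing
 * relative to mw_curpos, as rw_via_memwin() does below with
 * (addr - mw->mw_curpos).
 */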
2783 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2789 MPASS(idx >= 0 && idx < NUM_MEMWIN);
2791 /* Memory can only be accessed in naturally aligned 4 byte units */
2792 if (addr & 3 || len & 3 || len <= 0)
2795 mw = &sc->memwin[idx];
2797 rw_rlock(&mw->mw_lock);
2798 mw_end = mw->mw_curpos + mw->mw_aperture;
2799 if (addr >= mw_end || addr < mw->mw_curpos) {
2800 /* Will need to reposition the window */
2801 if (!rw_try_upgrade(&mw->mw_lock)) {
2802 rw_runlock(&mw->mw_lock);
2803 rw_wlock(&mw->mw_lock);
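/*
 * The upgrade can fail if there are other readers; fall back to
 * dropping the read lock and taking the write lock outright, then
 * reposition unconditionally since the window may have moved in
 * the meantime.
 */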
2805 rw_assert(&mw->mw_lock, RA_WLOCKED);
2806 position_memwin(sc, idx, addr);
2807 rw_downgrade(&mw->mw_lock);
2808 mw_end = mw->mw_curpos + mw->mw_aperture;
2810 rw_assert(&mw->mw_lock, RA_RLOCKED);
2811 while (addr < mw_end && len > 0) {
2813 v = t4_read_reg(sc, mw->mw_base + addr -
2814 mw->mw_curpos);
2815 *val++ = le32toh(v);
2818 t4_write_reg(sc, mw->mw_base + addr -
2819 mw->mw_curpos, htole32(v));
2824 rw_runlock(&mw->mw_lock);
2831 alloc_atid_tab(struct tid_info *t, int flags)
2835 MPASS(t->natids > 0);
2836 MPASS(t->atid_tab == NULL);
2838 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
2840 if (t->atid_tab == NULL)
2842 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
2843 t->afree = t->atid_tab;
2844 t->atids_in_use = 0;
2845 for (i = 1; i < t->natids; i++)
2846 t->atid_tab[i - 1].next = &t->atid_tab[i];
2847 t->atid_tab[t->natids - 1].next = NULL;
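/*
 * The atid free list is threaded through the table itself, e.g. with
 * natids = 4 after this loop:
 *
 *	afree -> tab[0] -> tab[1] -> tab[2] -> tab[3] -> NULL
 */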
2853 free_atid_tab(struct tid_info *t)
2856 KASSERT(t->atids_in_use == 0,
2857 ("%s: %d atids still in use.", __func__, t->atids_in_use));
2859 if (mtx_initialized(&t->atid_lock))
2860 mtx_destroy(&t->atid_lock);
2861 free(t->atid_tab, M_CXGBE);
2866 alloc_atid(struct adapter *sc, void *ctx)
2868 struct tid_info *t = &sc->tids;
2871 mtx_lock(&t->atid_lock);
2873 union aopen_entry *p = t->afree;
2875 atid = p - t->atid_tab;
2876 MPASS(atid <= M_TID_TID);
2881 mtx_unlock(&t->atid_lock);
2886 lookup_atid(struct adapter *sc, int atid)
2888 struct tid_info *t = &sc->tids;
2890 return (t->atid_tab[atid].data);
2894 free_atid(struct adapter *sc, int atid)
2896 struct tid_info *t = &sc->tids;
2897 union aopen_entry *p = &t->atid_tab[atid];
2899 mtx_lock(&t->atid_lock);
2903 mtx_unlock(&t->atid_lock);
2907 queue_tid_release(struct adapter *sc, int tid)
2910 CXGBE_UNIMPLEMENTED("deferred tid release");
2914 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
2917 struct cpl_tid_release *req;
2919 wr = alloc_wrqe(sizeof(*req), ctrlq);
2921 queue_tid_release(sc, tid); /* defer */
2926 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
2932 t4_range_cmp(const void *a, const void *b)
2934 return ((const struct t4_range *)a)->start -
2935 ((const struct t4_range *)b)->start;
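/*
 * Note: the comparator narrows a uint32_t difference to int, which
 * assumes any two range starts differ by less than 2^31; an assumption,
 * but one that holds for the card memory layouts seen in practice.
 */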
2939 * Verify that the memory range specified by the addr/len pair is valid within
2940 * the card's address space.
2943 validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
2945 struct t4_range mem_ranges[4], *r, *next;
2946 uint32_t em, addr_len;
2947 int i, n, remaining;
2949 /* Memory can only be accessed in naturally aligned 4 byte units */
2950 if (addr & 3 || len & 3 || len == 0)
2953 /* Enabled memories */
2954 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2958 bzero(r, sizeof(mem_ranges));
2959 if (em & F_EDRAM0_ENABLE) {
2960 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2961 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2963 r->start = G_EDRAM0_BASE(addr_len) << 20;
2964 if (addr >= r->start &&
2965 addr + len <= r->start + r->size)
2971 if (em & F_EDRAM1_ENABLE) {
2972 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2973 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2975 r->start = G_EDRAM1_BASE(addr_len) << 20;
2976 if (addr >= r->start &&
2977 addr + len <= r->start + r->size)
2983 if (em & F_EXT_MEM_ENABLE) {
2984 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2985 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2987 r->start = G_EXT_MEM_BASE(addr_len) << 20;
2988 if (addr >= r->start &&
2989 addr + len <= r->start + r->size)
2995 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2996 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2997 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2999 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
3000 if (addr >= r->start &&
3001 addr + len <= r->start + r->size)
3007 MPASS(n <= nitems(mem_ranges));
3010 /* Sort and merge the ranges. */
3011 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
3013 /* Start from index 0 and examine the next n - 1 entries. */
3015 for (remaining = n - 1; remaining > 0; remaining--, r++) {
3017 MPASS(r->size > 0); /* r is a valid entry. */
3019 MPASS(next->size > 0); /* and so is the next one. */
3021 while (r->start + r->size >= next->start) {
3022 /* Merge the next one into the current entry. */
3023 r->size = max(r->start + r->size,
3024 next->start + next->size) - r->start;
3025 n--; /* One fewer entry in total. */
3026 if (--remaining == 0)
3027 goto done; /* short circuit */
3030 if (next != r + 1) {
3032 * Some entries were merged into r and next
3033 * points to the first valid entry that couldn't
3036 MPASS(next->size > 0); /* must be valid */
3037 memcpy(r + 1, next, remaining * sizeof(*r));
3040 * This is so that the size assertions in the
3041 * next iteration of the loop do the right
3042 * thing for entries that were pulled up and are
3045 MPASS(n < nitems(mem_ranges));
3046 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
3047 sizeof(struct t4_range));
3052 /* Done merging the ranges. */
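/*
 * Example: adjacent ranges [0x0, 0x100000) and [0x100000, 0x300000)
 * satisfy r->start + r->size >= next->start and merge into the single
 * range [0x0, 0x300000), so a request that straddles the original
 * boundary still validates.
 */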
3055 for (i = 0; i < n; i++, r++) {
3056 if (addr >= r->start &&
3057 addr + len <= r->start + r->size)
3066 fwmtype_to_hwmtype(int mtype)
3070 case FW_MEMTYPE_EDC0:
3072 case FW_MEMTYPE_EDC1:
3074 case FW_MEMTYPE_EXTMEM:
3076 case FW_MEMTYPE_EXTMEM1:
3079 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
3084 * Verify that the memory range specified by the memtype/offset/len pair is
3085 * valid and lies entirely within the memtype specified. The global address of
3086 * the start of the range is returned in addr.
3089 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
3092 uint32_t em, addr_len, maddr;
3094 /* Memory can only be accessed in naturally aligned 4 byte units */
3095 if (off & 3 || len & 3 || len == 0)
3098 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3099 switch (fwmtype_to_hwmtype(mtype)) {
3101 if (!(em & F_EDRAM0_ENABLE))
3103 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3104 maddr = G_EDRAM0_BASE(addr_len) << 20;
3107 if (!(em & F_EDRAM1_ENABLE))
3109 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3110 maddr = G_EDRAM1_BASE(addr_len) << 20;
3113 if (!(em & F_EXT_MEM_ENABLE))
3115 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3116 maddr = G_EXT_MEM_BASE(addr_len) << 20;
3119 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
3121 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
3122 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
3128 *addr = maddr + off; /* global address */
3129 return (validate_mem_range(sc, *addr, len));
3133 fixup_devlog_params(struct adapter *sc)
3135 struct devlog_params *dparams = &sc->params.devlog;
3138 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
3139 dparams->size, &dparams->addr);
3145 update_nirq(struct intrs_and_queues *iaq, int nports)
3147 int extra = T4_EXTRA_INTR;
3150 iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq);
3151 iaq->nirq += nports * (iaq->num_vis - 1) *
3152 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
3153 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
3157 * Adjust requirements to fit the number of interrupts available.
3160 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
3164 const int nports = sc->params.nports;
3169 bzero(iaq, sizeof(*iaq));
3170 iaq->intr_type = itype;
3171 iaq->num_vis = t4_num_vis;
3172 iaq->ntxq = t4_ntxq;
3173 iaq->ntxq_vi = t4_ntxq_vi;
3174 iaq->nrxq = t4_nrxq;
3175 iaq->nrxq_vi = t4_nrxq_vi;
3176 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3177 if (is_offload(sc) || is_ethoffload(sc)) {
3178 iaq->nofldtxq = t4_nofldtxq;
3179 iaq->nofldtxq_vi = t4_nofldtxq_vi;
3183 if (is_offload(sc)) {
3184 iaq->nofldrxq = t4_nofldrxq;
3185 iaq->nofldrxq_vi = t4_nofldrxq_vi;
3189 iaq->nnmtxq_vi = t4_nnmtxq_vi;
3190 iaq->nnmrxq_vi = t4_nnmrxq_vi;
3193 update_nirq(iaq, nports);
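/*
 * Note: plain MSI is only allocatable in power-of-2 vector counts (per
 * the PCI spec), hence the extra powerof2(nirq) requirement for
 * INTR_MSI here and in the retry paths below.
 */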
3194 if (iaq->nirq <= navail &&
3195 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3197 * This is the normal case -- there are enough interrupts for
3204 * If extra VIs have been configured, try reducing their count and see if
3207 while (iaq->num_vis > 1) {
3209 update_nirq(iaq, nports);
3210 if (iaq->nirq <= navail &&
3211 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3212 device_printf(sc->dev, "virtual interfaces per port "
3213 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
3214 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
3215 "itype %d, navail %u, nirq %d.\n",
3216 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3217 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3218 itype, navail, iaq->nirq);
3224 * Extra VIs will not be created. Log a message if they were requested.
3226 MPASS(iaq->num_vis == 1);
3227 iaq->ntxq_vi = iaq->nrxq_vi = 0;
3228 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3229 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3230 if (iaq->num_vis != t4_num_vis) {
3231 device_printf(sc->dev, "extra virtual interfaces disabled. "
3232 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3233 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
3234 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3235 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3239 * Keep reducing the number of NIC rx queues to the next lower power of
3240 * 2 (for even RSS distribution) and halving the TOE rx queues and see
3244 if (iaq->nrxq > 1) {
3247 } while (!powerof2(iaq->nrxq));
3249 if (iaq->nofldrxq > 1)
3250 iaq->nofldrxq >>= 1;
3252 old_nirq = iaq->nirq;
3253 update_nirq(iaq, nports);
3254 if (iaq->nirq <= navail &&
3255 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3256 device_printf(sc->dev, "running with reduced number of "
3257 "rx queues because of shortage of interrupts. "
3258 "nrxq=%u, nofldrxq=%u. "
3259 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3260 iaq->nofldrxq, itype, navail, iaq->nirq);
3263 } while (old_nirq != iaq->nirq);
3265 /* One interrupt for everything. Ugh. */
3266 device_printf(sc->dev, "running with minimal number of queues. "
3267 "itype %d, navail %u.\n", itype, navail);
3269 MPASS(iaq->nrxq == 1);
3271 if (iaq->nofldrxq > 1)
3274 MPASS(iaq->num_vis > 0);
3275 if (iaq->num_vis > 1) {
3276 MPASS(iaq->nrxq_vi > 0);
3277 MPASS(iaq->ntxq_vi > 0);
3279 MPASS(iaq->nirq > 0);
3280 MPASS(iaq->nrxq > 0);
3281 MPASS(iaq->ntxq > 0);
3282 if (itype == INTR_MSI) {
3283 MPASS(powerof2(iaq->nirq));
3288 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3290 int rc, itype, navail, nalloc;
3292 for (itype = INTR_MSIX; itype; itype >>= 1) {
3294 if ((itype & t4_intr_types) == 0)
3295 continue; /* not allowed */
3297 if (itype == INTR_MSIX)
3298 navail = pci_msix_count(sc->dev);
3299 else if (itype == INTR_MSI)
3300 navail = pci_msi_count(sc->dev);
3307 calculate_iaq(sc, iaq, itype, navail);
3310 if (itype == INTR_MSIX)
3311 rc = pci_alloc_msix(sc->dev, &nalloc);
3312 else if (itype == INTR_MSI)
3313 rc = pci_alloc_msi(sc->dev, &nalloc);
3315 if (rc == 0 && nalloc > 0) {
3316 if (nalloc == iaq->nirq)
3320 * Didn't get the number requested. Use whatever number
3321 * the kernel is willing to allocate.
3323 device_printf(sc->dev, "fewer vectors than requested, "
3324 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3325 itype, iaq->nirq, nalloc);
3326 pci_release_msi(sc->dev);
3331 device_printf(sc->dev,
3332 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
3333 itype, rc, iaq->nirq, nalloc);
3336 device_printf(sc->dev,
3337 "failed to find a usable interrupt type. "
3338 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
3339 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3344 #define FW_VERSION(chip) ( \
3345 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3346 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3347 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3348 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3349 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
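/*
 * Example (assuming the usual 8-bit major/minor/micro/build fields):
 * FW_VERSION for a 1.20.1.0 firmware packs to 0x01140100.
 */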
3351 /* Just enough of fw_hdr to cover all version info. */
3357 __be32 tp_microcode_ver;
3362 __u8 intfver_iscsipdu;
3364 __u8 intfver_fcoepdu;
3367 /* Spot check a couple of fields. */
3368 CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
3369 CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
3370 CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
3380 .kld_name = "t4fw_cfg",
3381 .fw_mod_name = "t4fw",
3383 .chip = FW_HDR_CHIP_T4,
3384 .fw_ver = htobe32(FW_VERSION(T4)),
3385 .intfver_nic = FW_INTFVER(T4, NIC),
3386 .intfver_vnic = FW_INTFVER(T4, VNIC),
3387 .intfver_ofld = FW_INTFVER(T4, OFLD),
3388 .intfver_ri = FW_INTFVER(T4, RI),
3389 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3390 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3391 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3392 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3396 .kld_name = "t5fw_cfg",
3397 .fw_mod_name = "t5fw",
3399 .chip = FW_HDR_CHIP_T5,
3400 .fw_ver = htobe32(FW_VERSION(T5)),
3401 .intfver_nic = FW_INTFVER(T5, NIC),
3402 .intfver_vnic = FW_INTFVER(T5, VNIC),
3403 .intfver_ofld = FW_INTFVER(T5, OFLD),
3404 .intfver_ri = FW_INTFVER(T5, RI),
3405 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3406 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3407 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3408 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3412 .kld_name = "t6fw_cfg",
3413 .fw_mod_name = "t6fw",
3415 .chip = FW_HDR_CHIP_T6,
3416 .fw_ver = htobe32(FW_VERSION(T6)),
3417 .intfver_nic = FW_INTFVER(T6, NIC),
3418 .intfver_vnic = FW_INTFVER(T6, VNIC),
3419 .intfver_ofld = FW_INTFVER(T6, OFLD),
3420 .intfver_ri = FW_INTFVER(T6, RI),
3421 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3422 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3423 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3424 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3429 static struct fw_info *
3430 find_fw_info(int chip)
3434 for (i = 0; i < nitems(fw_info); i++) {
3435 if (fw_info[i].chip == chip)
3436 return (&fw_info[i]);
3442 * Is the given firmware API compatible with the one the driver was compiled
3446 fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
3449 /* short circuit if it's the exact same firmware version */
3450 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3454 * XXX: Is this too conservative? Perhaps I should limit this to the
3455 * features that are supported in the driver.
3457 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3458 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3459 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3460 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3468 load_fw_module(struct adapter *sc, const struct firmware **dcfg,
3469 const struct firmware **fw)
3471 struct fw_info *fw_info;
3477 fw_info = find_fw_info(chip_id(sc));
3478 if (fw_info == NULL) {
3479 device_printf(sc->dev,
3480 "unable to look up firmware information for chip %d.\n",
3485 *dcfg = firmware_get(fw_info->kld_name);
3486 if (*dcfg != NULL) {
3488 *fw = firmware_get(fw_info->fw_mod_name);
3496 unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
3497 const struct firmware *fw)
3501 firmware_put(fw, FIRMWARE_UNLOAD);
3503 firmware_put(dcfg, FIRMWARE_UNLOAD);
3508 * 0 means no firmware install attempted.
3509 * ERESTART means a firmware install was attempted and was successful.
3510 * +ve errno means a firmware install was attempted but failed.
3513 install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
3514 const struct fw_h *drv_fw, const char *reason, int *already)
3516 const struct firmware *cfg, *fw;
3517 const uint32_t c = be32toh(card_fw->fw_ver);
3520 struct fw_h bundled_fw;
3521 bool load_attempted;
3524 load_attempted = false;
3525 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;
3527 memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
3528 if (t4_fw_install < 0) {
3529 rc = load_fw_module(sc, &cfg, &fw);
3530 if (rc != 0 || fw == NULL) {
3531 device_printf(sc->dev,
3532 "failed to load firmware module: %d. cfg %p, fw %p;"
3533 " will use compiled-in firmware version for"
3534 "hw.cxgbe.fw_install checks.\n",
3537 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
3539 load_attempted = true;
3541 d = be32toh(bundled_fw.fw_ver);
3546 if ((sc->flags & FW_OK) == 0) {
3548 if (c == 0xffffffff) {
3557 if (!fw_compatible(card_fw, &bundled_fw)) {
3558 reason = "incompatible or unusable";
3563 reason = "older than the version bundled with this driver";
3567 if (fw_install == 2 && d != c) {
3568 reason = "different than the version bundled with this driver";
3572 /* No reason to do anything to the firmware already on the card. */
3581 if (fw_install == 0) {
3582 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3583 "but the driver is prohibited from installing a firmware "
3585 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3586 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3592 * We'll attempt to install a firmware. Load the module first (if it
3593 * hasn't been loaded already).
3595 if (!load_attempted) {
3596 rc = load_fw_module(sc, &cfg, &fw);
3597 if (rc != 0 || fw == NULL) {
3598 device_printf(sc->dev,
3599 "failed to load firmware module: %d. cfg %p, fw %p\n",
3605 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3606 "but the driver cannot take corrective action because it "
3607 "is unable to load the firmware module.\n",
3608 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3609 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3610 rc = sc->flags & FW_OK ? 0 : ENOENT;
3613 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
3615 MPASS(t4_fw_install > 0);
3616 device_printf(sc->dev,
3617 "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
3618 "expecting (%u.%u.%u.%u) and will not be used.\n",
3619 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3620 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
3621 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3622 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3623 rc = sc->flags & FW_OK ? 0 : EINVAL;
3627 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3628 "installing firmware %u.%u.%u.%u on card.\n",
3629 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3630 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3631 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3632 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3634 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3636 device_printf(sc->dev, "failed to install firmware: %d\n", rc);
3638 /* Installed successfully, update the cached header too. */
3640 memcpy(card_fw, fw->data, sizeof(*card_fw));
3643 unload_fw_module(sc, cfg, fw);
3649 * Establish contact with the firmware and attempt to become the master driver.
3651 * A firmware will be installed to the card if needed (if the driver is allowed
3655 contact_firmware(struct adapter *sc)
3657 int rc, already = 0;
3658 enum dev_state state;
3659 struct fw_info *fw_info;
3660 struct fw_hdr *card_fw; /* fw on the card */
3661 const struct fw_h *drv_fw;
3663 fw_info = find_fw_info(chip_id(sc));
3664 if (fw_info == NULL) {
3665 device_printf(sc->dev,
3666 "unable to look up firmware information for chip %d.\n",
3670 drv_fw = &fw_info->fw_h;
3672 /* Read the header of the firmware on the card */
3673 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3675 rc = -t4_get_fw_hdr(sc, card_fw);
3677 device_printf(sc->dev,
3678 "unable to read firmware header from card's flash: %d\n",
3683 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
3690 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
3691 if (rc < 0 || state == DEV_STATE_ERR) {
3693 device_printf(sc->dev,
3694 "failed to connect to the firmware: %d, %d. "
3695 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
3697 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
3698 "not responding properly to HELLO", &already) == ERESTART)
3703 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
3704 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */
3707 sc->flags |= MASTER_PF;
3708 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
3714 } else if (state == DEV_STATE_UNINIT) {
3716 * We didn't get to be the master so we definitely won't be
3717 * configuring the chip. It's a bug if someone else hasn't
3718 * configured it already.
3720 device_printf(sc->dev, "couldn't be master (%d), "
3721 "device not already initialized either (%d). "
3722 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
3727 * Some other PF is the master and has configured the chip.
3728 * This is allowed but untested.
3730 device_printf(sc->dev, "PF%d is master, device state %d. "
3731 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
3732 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
3737 if (rc != 0 && sc->flags & FW_OK) {
3738 t4_fw_bye(sc, sc->mbox);
3739 sc->flags &= ~FW_OK;
3741 free(card_fw, M_CXGBE);
3746 copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
3747 uint32_t mtype, uint32_t moff)
3749 struct fw_info *fw_info;
3750 const struct firmware *dcfg, *rcfg = NULL;
3751 const uint32_t *cfdata;
3752 uint32_t cflen, addr;
3755 load_fw_module(sc, &dcfg, NULL);
3757 /* Card-specific interpretation of "default". */
3758 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3759 if (pci_get_device(sc->dev) == 0x440a)
3760 snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
3762 snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
3765 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
3767 device_printf(sc->dev,
3768 "KLD with default config is not available.\n");
3772 cfdata = dcfg->data;
3773 cflen = dcfg->datasize & ~3;
3777 fw_info = find_fw_info(chip_id(sc));
3778 if (fw_info == NULL) {
3779 device_printf(sc->dev,
3780 "unable to look up firmware information for chip %d.\n",
3785 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
3787 rcfg = firmware_get(s);
3789 device_printf(sc->dev,
3790 "unable to load module \"%s\" for configuration "
3791 "profile \"%s\".\n", s, cfg_file);
3795 cfdata = rcfg->data;
3796 cflen = rcfg->datasize & ~3;
3799 if (cflen > FLASH_CFG_MAX_SIZE) {
3800 device_printf(sc->dev,
3801 "config file too long (%d, max allowed is %d).\n",
3802 cflen, FLASH_CFG_MAX_SIZE);
3807 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
3809 device_printf(sc->dev,
3810 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
3811 __func__, mtype, moff, cflen, rc);
3815 write_via_memwin(sc, 2, addr, cfdata, cflen);
3818 firmware_put(rcfg, FIRMWARE_UNLOAD);
3819 unload_fw_module(sc, dcfg, NULL);
3823 struct caps_allowed {
3826 uint16_t switchcaps;
3830 uint16_t cryptocaps;
3835 #define FW_PARAM_DEV(param) \
3836 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3837 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3838 #define FW_PARAM_PFVF(param) \
3839 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3840 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
3843 * Provide a configuration profile to the firmware and have it initialize the
3844 * chip accordingly. This may involve uploading a configuration file to the
3848 apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
3849 const struct caps_allowed *caps_allowed)
3852 struct fw_caps_config_cmd caps;
3853 uint32_t mtype, moff, finicsum, cfcsum, param, val;
3855 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
3857 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3861 bzero(&caps, sizeof(caps));
3862 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3863 F_FW_CMD_REQUEST | F_FW_CMD_READ);
3864 if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
3867 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3868 } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
3869 mtype = FW_MEMTYPE_FLASH;
3870 moff = t4_flash_cfg_addr(sc);
3871 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3872 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3873 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
3874 FW_LEN16(caps));
3877 * Ask the firmware where it wants us to upload the config file.
3879 param = FW_PARAM_DEV(CF);
3880 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3882 /* No support for config file? Shouldn't happen. */
3883 device_printf(sc->dev,
3884 "failed to query config file location: %d.\n", rc);
3887 mtype = G_FW_PARAMS_PARAM_Y(val);
3888 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
3889 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
3890 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3891 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
3892 FW_LEN16(caps));
3894 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
3896 device_printf(sc->dev,
3897 "failed to upload config file to card: %d.\n", rc);
3901 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
3903 device_printf(sc->dev, "failed to pre-process config file: %d "
3904 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
3908 finicsum = be32toh(caps.finicsum);
3909 cfcsum = be32toh(caps.cfcsum); /* actual */
3910 if (finicsum != cfcsum) {
3911 device_printf(sc->dev,
3912 "WARNING: config file checksum mismatch: %08x %08x\n",
3915 sc->cfcsum = cfcsum;
3916 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);
3919 * Let the firmware know what features will (not) be used so it can tune
3920 * things accordingly.
3922 #define LIMIT_CAPS(x) do { \
3923 caps.x##caps &= htobe16(caps_allowed->x##caps); \
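/*
 * E.g. LIMIT_CAPS(nic) expands to
 *	caps.niccaps &= htobe16(caps_allowed->niccaps);
 * masking the (big-endian) capabilities in the mailbox reply down to
 * what the driver's tunables allow.
 */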
3935 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
3937 * TOE and hashfilters are mutually exclusive. It is a config
3938 * file or firmware bug if both are reported as available. Try
3939 * to cope with the situation in non-debug builds by disabling
3942 MPASS(caps.toecaps == 0);
3949 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3950 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
3951 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
3952 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
3954 device_printf(sc->dev,
3955 "failed to process config file: %d.\n", rc);
3959 t4_tweak_chip_settings(sc);
3960 set_params__pre_init(sc);
3962 /* get basic stuff going */
3963 rc = -t4_fw_initialize(sc, sc->mbox);
3965 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
3973 * Partition chip resources for use between various PFs, VFs, etc.
3976 partition_resources(struct adapter *sc)
3978 char cfg_file[sizeof(t4_cfg_file)];
3979 struct caps_allowed caps_allowed;
3983 /* Only the master driver gets to configure the chip resources. */
3984 MPASS(sc->flags & MASTER_PF);
3986 #define COPY_CAPS(x) do { \
3987 caps_allowed.x##caps = t4_##x##caps_allowed; \
3989 bzero(&caps_allowed, sizeof(caps_allowed));
3999 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
4000 snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
4002 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
4003 if (rc != 0 && fallback) {
4004 device_printf(sc->dev,
4005 "failed (%d) to configure card with \"%s\" profile, "
4006 "will fall back to a basic configuration and retry.\n",
4008 snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
4009 bzero(&caps_allowed, sizeof(caps_allowed));
4022 * Retrieve parameters that are needed (or nice to have) very early.
4025 get_params__pre_init(struct adapter *sc)
4028 uint32_t param[2], val[2];
4030 t4_get_version_info(sc);
4032 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
4033 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
4034 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
4035 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
4036 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
4038 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
4039 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
4040 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
4041 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
4042 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
4044 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
4045 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
4046 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
4047 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
4048 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
4050 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
4051 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
4052 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
4053 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
4054 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
4056 param[0] = FW_PARAM_DEV(PORTVEC);
4057 param[1] = FW_PARAM_DEV(CCLK);
4058 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4060 device_printf(sc->dev,
4061 "failed to query parameters (pre_init): %d.\n", rc);
4065 sc->params.portvec = val[0];
4066 sc->params.nports = bitcount32(val[0]);
4067 sc->params.vpd.cclk = val[1];
4069 /* Read device log parameters. */
4070 rc = -t4_init_devlog_params(sc, 1);
4072 fixup_devlog_params(sc);
4074 device_printf(sc->dev,
4075 "failed to get devlog parameters: %d.\n", rc);
4076 rc = 0; /* devlog isn't critical for device operation */
4083 * Any params that need to be set before FW_INITIALIZE.
4086 set_params__pre_init(struct adapter *sc)
4089 uint32_t param, val;
4091 if (chip_id(sc) >= CHELSIO_T6) {
4092 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
4094 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4095 /* firmwares < 1.20.1.0 do not have this param. */
4096 if (rc == FW_EINVAL && sc->params.fw_vers <
4097 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
4098 V_FW_HDR_FW_VER_MICRO(1) | V_FW_HDR_FW_VER_BUILD(0))) {
4102 device_printf(sc->dev,
4103 "failed to enable high priority filters :%d.\n",
4108 /* Enable opaque VIIDs with firmwares that support it. */
4109 param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
4111 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4112 if (rc == 0 && val == 1)
4113 sc->params.viid_smt_extn_support = true;
4115 sc->params.viid_smt_extn_support = false;
4121 * Retrieve various parameters that are of interest to the driver. The device
4122 * has been initialized by the firmware at this point.
4125 get_params__post_init(struct adapter *sc)
4128 uint32_t param[7], val[7];
4129 struct fw_caps_config_cmd caps;
4131 param[0] = FW_PARAM_PFVF(IQFLINT_START);
4132 param[1] = FW_PARAM_PFVF(EQ_START);
4133 param[2] = FW_PARAM_PFVF(FILTER_START);
4134 param[3] = FW_PARAM_PFVF(FILTER_END);
4135 param[4] = FW_PARAM_PFVF(L2T_START);
4136 param[5] = FW_PARAM_PFVF(L2T_END);
4137 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4138 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4139 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
4140 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
4142 device_printf(sc->dev,
4143 "failed to query parameters (post_init): %d.\n", rc);
4147 sc->sge.iq_start = val[0];
4148 sc->sge.eq_start = val[1];
4149 if ((int)val[3] > (int)val[2]) {
4150 sc->tids.ftid_base = val[2];
4151 sc->tids.ftid_end = val[3];
4152 sc->tids.nftids = val[3] - val[2] + 1;
4154 sc->vres.l2t.start = val[4];
4155 sc->vres.l2t.size = val[5] - val[4] + 1;
4156 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
4157 ("%s: L2 table size (%u) larger than expected (%u)",
4158 __func__, sc->vres.l2t.size, L2T_SIZE));
4159 sc->params.core_vdd = val[6];
4161 if (chip_id(sc) >= CHELSIO_T6) {
4163 sc->tids.tid_base = t4_read_reg(sc,
4164 A_LE_DB_ACTIVE_TABLE_START_INDEX);
4166 param[0] = FW_PARAM_PFVF(HPFILTER_START);
4167 param[1] = FW_PARAM_PFVF(HPFILTER_END);
4168 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4170 device_printf(sc->dev,
4171 "failed to query hpfilter parameters: %d.\n", rc);
4174 if ((int)val[1] > (int)val[0]) {
4175 sc->tids.hpftid_base = val[0];
4176 sc->tids.hpftid_end = val[1];
4177 sc->tids.nhpftids = val[1] - val[0] + 1;
4180 * These should go off if the layout changes and the
4181 * driver needs to catch up.
4183 MPASS(sc->tids.hpftid_base == 0);
4184 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
4189 * MPSBGMAP is queried separately because only recent firmwares support
4190 * it as a parameter and we don't want the compound query above to fail
4191 * on older firmwares.
4193 param[0] = FW_PARAM_DEV(MPSBGMAP);
4195 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4197 sc->params.mps_bg_map = val[0];
4199 sc->params.mps_bg_map = 0;
4202 * Determine whether the firmware supports the filter2 work request.
4203 * This is queried separately for the same reason as MPSBGMAP above.
4205 param[0] = FW_PARAM_DEV(FILTER2_WR);
4207 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4209 sc->params.filter2_wr_support = val[0] != 0;
4211 sc->params.filter2_wr_support = 0;
4214 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
4215 * This is queried separately for the same reason as other params above.
4217 param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4219 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4221 sc->params.ulptx_memwrite_dsgl = val[0] != 0;
4223 sc->params.ulptx_memwrite_dsgl = false;
4225 /* get capabilities */
4226 bzero(&caps, sizeof(caps));
4227 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4228 F_FW_CMD_REQUEST | F_FW_CMD_READ);
4229 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4230 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4232 device_printf(sc->dev,
4233 "failed to get card capabilities: %d.\n", rc);
4237 #define READ_CAPS(x) do { \
4238 sc->x = be16toh(caps.x); \
4241 READ_CAPS(linkcaps);
4242 READ_CAPS(switchcaps);
4245 READ_CAPS(rdmacaps);
4246 READ_CAPS(cryptocaps);
4247 READ_CAPS(iscsicaps);
4248 READ_CAPS(fcoecaps);
4250 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
4251 MPASS(chip_id(sc) > CHELSIO_T4);
4252 MPASS(sc->toecaps == 0);
4255 param[0] = FW_PARAM_DEV(NTID);
4256 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4258 device_printf(sc->dev,
4259 "failed to query HASHFILTER parameters: %d.\n", rc);
4262 sc->tids.ntids = val[0];
4263 if (sc->params.fw_vers <
4264 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
4265 V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) {
4266 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4267 sc->tids.ntids -= sc->tids.nhpftids;
4269 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4270 sc->params.hash_filter = 1;
4272 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
4273 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
4274 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
4275 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4276 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
4278 device_printf(sc->dev,
4279 "failed to query NIC parameters: %d.\n", rc);
4282 if ((int)val[1] > (int)val[0]) {
4283 sc->tids.etid_base = val[0];
4284 sc->tids.etid_end = val[1];
4285 sc->tids.netids = val[1] - val[0] + 1;
4286 sc->params.eo_wr_cred = val[2];
4287 sc->params.ethoffload = 1;
4291 /* query offload-related parameters */
4292 param[0] = FW_PARAM_DEV(NTID);
4293 param[1] = FW_PARAM_PFVF(SERVER_START);
4294 param[2] = FW_PARAM_PFVF(SERVER_END);
4295 param[3] = FW_PARAM_PFVF(TDDP_START);
4296 param[4] = FW_PARAM_PFVF(TDDP_END);
4297 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4298 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4300 device_printf(sc->dev,
4301 "failed to query TOE parameters: %d.\n", rc);
4304 sc->tids.ntids = val[0];
4305 if (sc->params.fw_vers <
4306 (V_FW_HDR_FW_VER_MAJOR(1) | V_FW_HDR_FW_VER_MINOR(20) |
4307 V_FW_HDR_FW_VER_MICRO(5) | V_FW_HDR_FW_VER_BUILD(0))) {
4308 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4309 sc->tids.ntids -= sc->tids.nhpftids;
4311 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4312 if ((int)val[2] > (int)val[1]) {
4313 sc->tids.stid_base = val[1];
4314 sc->tids.nstids = val[2] - val[1] + 1;
4316 sc->vres.ddp.start = val[3];
4317 sc->vres.ddp.size = val[4] - val[3] + 1;
4318 sc->params.ofldq_wr_cred = val[5];
4319 sc->params.offload = 1;
4322 * The firmware attempts memfree TOE configuration for -SO cards
4323 * and will report toecaps=0 if it runs out of resources (this
4324 * depends on the config file). It may not report 0 for other
4325 * capabilities dependent on the TOE in this case. Set them to
4326 * 0 here so that the driver doesn't bother tracking resources
4327 * that will never be used.
4333 param[0] = FW_PARAM_PFVF(STAG_START);
4334 param[1] = FW_PARAM_PFVF(STAG_END);
4335 param[2] = FW_PARAM_PFVF(RQ_START);
4336 param[3] = FW_PARAM_PFVF(RQ_END);
4337 param[4] = FW_PARAM_PFVF(PBL_START);
4338 param[5] = FW_PARAM_PFVF(PBL_END);
4339 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4341 device_printf(sc->dev,
4342 "failed to query RDMA parameters(1): %d.\n", rc);
4345 sc->vres.stag.start = val[0];
4346 sc->vres.stag.size = val[1] - val[0] + 1;
4347 sc->vres.rq.start = val[2];
4348 sc->vres.rq.size = val[3] - val[2] + 1;
4349 sc->vres.pbl.start = val[4];
4350 sc->vres.pbl.size = val[5] - val[4] + 1;
4352 param[0] = FW_PARAM_PFVF(SQRQ_START);
4353 param[1] = FW_PARAM_PFVF(SQRQ_END);
4354 param[2] = FW_PARAM_PFVF(CQ_START);
4355 param[3] = FW_PARAM_PFVF(CQ_END);
4356 param[4] = FW_PARAM_PFVF(OCQ_START);
4357 param[5] = FW_PARAM_PFVF(OCQ_END);
4358 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4360 device_printf(sc->dev,
4361 "failed to query RDMA parameters(2): %d.\n", rc);
4364 sc->vres.qp.start = val[0];
4365 sc->vres.qp.size = val[1] - val[0] + 1;
4366 sc->vres.cq.start = val[2];
4367 sc->vres.cq.size = val[3] - val[2] + 1;
4368 sc->vres.ocq.start = val[4];
4369 sc->vres.ocq.size = val[5] - val[4] + 1;
4371 param[0] = FW_PARAM_PFVF(SRQ_START);
4372 param[1] = FW_PARAM_PFVF(SRQ_END);
4373 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4374 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4375 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4377 device_printf(sc->dev,
4378 "failed to query RDMA parameters(3): %d.\n", rc);
4381 sc->vres.srq.start = val[0];
4382 sc->vres.srq.size = val[1] - val[0] + 1;
4383 sc->params.max_ordird_qp = val[2];
4384 sc->params.max_ird_adapter = val[3];
4386 if (sc->iscsicaps) {
4387 param[0] = FW_PARAM_PFVF(ISCSI_START);
4388 param[1] = FW_PARAM_PFVF(ISCSI_END);
4389 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4391 device_printf(sc->dev,
4392 "failed to query iSCSI parameters: %d.\n", rc);
4395 sc->vres.iscsi.start = val[0];
4396 sc->vres.iscsi.size = val[1] - val[0] + 1;
4398 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4399 param[0] = FW_PARAM_PFVF(TLS_START);
4400 param[1] = FW_PARAM_PFVF(TLS_END);
4401 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4403 device_printf(sc->dev,
4404 "failed to query TLS parameters: %d.\n", rc);
4407 sc->vres.key.start = val[0];
4408 sc->vres.key.size = val[1] - val[0] + 1;
4411 t4_init_sge_params(sc);
4414 * We've got the params we wanted to query via the firmware. Now grab
4415 * some others directly from the chip.
4417 rc = t4_read_chip_settings(sc);
4423 set_params__post_init(struct adapter *sc)
4425 uint32_t param, val;
4430 /* ask for encapsulated CPLs */
4431 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4433 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4435 /* Enable 32b port caps if the firmware supports it. */
4436 param = FW_PARAM_PFVF(PORT_CAPS32);
4438 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
4439 sc->params.port_caps32 = 1;
4441 /* Let filter + maskhash steer to a part of the VI's RSS region. */
4442 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
4443 t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
4444 V_MASKFILTER(val - 1));
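/*
 * Worked example: if G_MASKSIZE() reads back 7 then val = 1 << 6 = 64
 * and the MASKFILTER field is programmed with 63.
 */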
4448 * Override the TOE timers with user-provided tunables. This is not the
4449 * recommended way to change the timers (the firmware config file is) so
4450 * these tunables are not documented.
4452 * All the timer tunables are in microseconds.
4454 if (t4_toe_keepalive_idle != 0) {
4455 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4456 v &= M_KEEPALIVEIDLE;
4457 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4458 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4460 if (t4_toe_keepalive_interval != 0) {
4461 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4462 v &= M_KEEPALIVEINTVL;
4463 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4464 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4466 if (t4_toe_keepalive_count != 0) {
4467 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4468 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4469 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4470 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4471 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4473 if (t4_toe_rexmt_min != 0) {
4474 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4476 t4_set_reg_field(sc, A_TP_RXT_MIN,
4477 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4479 if (t4_toe_rexmt_max != 0) {
4480 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4482 t4_set_reg_field(sc, A_TP_RXT_MAX,
4483 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4485 if (t4_toe_rexmt_count != 0) {
4486 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4487 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4488 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4489 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4490 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
4492 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4493 if (t4_toe_rexmt_backoff[i] != -1) {
4494 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4495 shift = (i & 3) << 3;
4496 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4497 M_TIMERBACKOFFINDEX0 << shift, v << shift);
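/*
 * Each 32-bit backoff register packs four 8-bit indices, e.g. i = 5
 * selects byte 1 (shift = 8) of A_TP_TCP_BACKOFF_REG0 + 4.
 */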
4504 #undef FW_PARAM_PFVF
4508 t4_set_desc(struct adapter *sc)
4511 struct adapter_params *p = &sc->params;
4513 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4515 device_set_desc_copy(sc->dev, buf);
4519 ifmedia_add4(struct ifmedia *ifm, int m)
4522 ifmedia_add(ifm, m, 0, NULL);
4523 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4524 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4525 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
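/*
 * E.g. ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_100G_CR4) registers
 * the media word with all four TX/RX pause combinations, so every
 * "mediaopt txpause,rxpause" variant from ifconfig(8) resolves.
 */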
4529 * This is the selected media, which is not quite the same as the active media.
4530 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
4531 * and active are not the same, and "media: Ethernet selected" otherwise.
4534 set_current_media(struct port_info *pi)
4536 struct link_config *lc;
4537 struct ifmedia *ifm;
4541 PORT_LOCK_ASSERT_OWNED(pi);
4543 /* Leave current media alone if it's already set to IFM_NONE. */
4545 if (ifm->ifm_cur != NULL &&
4546 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4550 if (lc->requested_aneg != AUTONEG_DISABLE &&
4551 lc->supported & FW_PORT_CAP32_ANEG) {
4552 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4555 mword = IFM_ETHER | IFM_FDX;
4556 if (lc->requested_fc & PAUSE_TX)
4557 mword |= IFM_ETH_TXPAUSE;
4558 if (lc->requested_fc & PAUSE_RX)
4559 mword |= IFM_ETH_RXPAUSE;
4560 if (lc->requested_speed == 0)
4561 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
4563 speed = lc->requested_speed;
4564 mword |= port_mword(pi, speed_to_fwcap(speed));
4565 ifmedia_set(ifm, mword);
4569 * Returns true if the ifmedia list for the port cannot change.
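4570 * (All of these are backplane or BASE-T ports with no pluggable module.)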
4572 fixed_ifmedia(struct port_info *pi)
4575 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
4576 pi->port_type == FW_PORT_TYPE_BT_XFI ||
4577 pi->port_type == FW_PORT_TYPE_BT_XAUI ||
4578 pi->port_type == FW_PORT_TYPE_KX4 ||
4579 pi->port_type == FW_PORT_TYPE_KX ||
4580 pi->port_type == FW_PORT_TYPE_KR ||
4581 pi->port_type == FW_PORT_TYPE_BP_AP ||
4582 pi->port_type == FW_PORT_TYPE_BP4_AP ||
4583 pi->port_type == FW_PORT_TYPE_BP40_BA ||
4584 pi->port_type == FW_PORT_TYPE_KR4_100G ||
4585 pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
4586 pi->port_type == FW_PORT_TYPE_KR_XLAUI);
4590 build_medialist(struct port_info *pi)
4593 int unknown, mword, bit;
4594 struct link_config *lc;
4595 struct ifmedia *ifm;
4597 PORT_LOCK_ASSERT_OWNED(pi);
4599 if (pi->flags & FIXED_IFMEDIA)
4603 * Rebuild the ifmedia list.
4606 ifmedia_removeall(ifm);
4608 ss = G_FW_PORT_CAP32_SPEED(lc->supported); /* Supported Speeds */
4609 if (__predict_false(ss == 0)) { /* not supposed to happen. */
4612 MPASS(LIST_EMPTY(&ifm->ifm_list));
4613 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
4614 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
4619 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
4621 MPASS(speed & M_FW_PORT_CAP32_SPEED);
4623 mword = port_mword(pi, speed);
4624 if (mword == IFM_NONE) {
4626 } else if (mword == IFM_UNKNOWN)
4629 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
4632 if (unknown > 0) /* Add one unknown for all unknown media types. */
4633 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
4634 if (lc->supported & FW_PORT_CAP32_ANEG)
4635 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
4637 set_current_media(pi);
4641 * Initialize the requested fields in the link config based on driver tunables.
4644 init_link_config(struct port_info *pi)
4646 struct link_config *lc = &pi->link_cfg;
4648 PORT_LOCK_ASSERT_OWNED(pi);
4650 lc->requested_speed = 0;
4652 if (t4_autoneg == 0)
4653 lc->requested_aneg = AUTONEG_DISABLE;
4654 else if (t4_autoneg == 1)
4655 lc->requested_aneg = AUTONEG_ENABLE;
4657 lc->requested_aneg = AUTONEG_AUTO;
4659 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
4662 if (t4_fec == -1 || t4_fec & FEC_AUTO)
4663 lc->requested_fec = FEC_AUTO;
4665 lc->requested_fec = FEC_NONE;
4666 if (t4_fec & FEC_RS)
4667 lc->requested_fec |= FEC_RS;
4668 if (t4_fec & FEC_BASER_RS)
4669 lc->requested_fec |= FEC_BASER_RS;
4674 * Makes sure that all requested settings comply with what's supported by the
4675 * port. Returns the number of settings that were invalid and had to be fixed.
4678 fixup_link_config(struct port_info *pi)
4681 struct link_config *lc = &pi->link_cfg;
4684 PORT_LOCK_ASSERT_OWNED(pi);
4686 /* Speed (when not autonegotiating) */
4687 if (lc->requested_speed != 0) {
4688 fwspeed = speed_to_fwcap(lc->requested_speed);
4689 if ((fwspeed & lc->supported) == 0) {
4691 lc->requested_speed = 0;
4695 /* Link autonegotiation */
4696 MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
4697 lc->requested_aneg == AUTONEG_DISABLE ||
4698 lc->requested_aneg == AUTONEG_AUTO);
4699 if (lc->requested_aneg == AUTONEG_ENABLE &&
4700 !(lc->supported & FW_PORT_CAP32_ANEG)) {
4702 lc->requested_aneg = AUTONEG_AUTO;
4706 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
4707 if (lc->requested_fc & PAUSE_TX &&
4708 !(lc->supported & FW_PORT_CAP32_FC_TX)) {
4710 lc->requested_fc &= ~PAUSE_TX;
4712 if (lc->requested_fc & PAUSE_RX &&
4713 !(lc->supported & FW_PORT_CAP32_FC_RX)) {
4715 lc->requested_fc &= ~PAUSE_RX;
4717 if (!(lc->requested_fc & PAUSE_AUTONEG) &&
4718 !(lc->supported & FW_PORT_CAP32_FORCE_PAUSE)) {
4720 lc->requested_fc |= PAUSE_AUTONEG;
4724 if ((lc->requested_fec & FEC_RS &&
4725 !(lc->supported & FW_PORT_CAP32_FEC_RS)) ||
4726 (lc->requested_fec & FEC_BASER_RS &&
4727 !(lc->supported & FW_PORT_CAP32_FEC_BASER_RS))) {
4729 lc->requested_fec = FEC_AUTO;
4736 * Apply the requested L1 settings, which are expected to be valid, to the
4737 * hardware.
4740 apply_link_config(struct port_info *pi)
4742 struct adapter *sc = pi->adapter;
4743 struct link_config *lc = &pi->link_cfg;
4747 ASSERT_SYNCHRONIZED_OP(sc);
4748 PORT_LOCK_ASSERT_OWNED(pi);
4750 if (lc->requested_aneg == AUTONEG_ENABLE)
4751 MPASS(lc->supported & FW_PORT_CAP32_ANEG);
4752 if (!(lc->requested_fc & PAUSE_AUTONEG))
4753 MPASS(lc->supported & FW_PORT_CAP32_FORCE_PAUSE);
4754 if (lc->requested_fc & PAUSE_TX)
4755 MPASS(lc->supported & FW_PORT_CAP32_FC_TX);
4756 if (lc->requested_fc & PAUSE_RX)
4757 MPASS(lc->supported & FW_PORT_CAP32_FC_RX);
4758 if (lc->requested_fec & FEC_RS)
4759 MPASS(lc->supported & FW_PORT_CAP32_FEC_RS);
4760 if (lc->requested_fec & FEC_BASER_RS)
4761 MPASS(lc->supported & FW_PORT_CAP32_FEC_BASER_RS);
4763 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
4765 /* Don't complain if the VF driver gets back an EPERM. */
4766 if (!(sc->flags & IS_VF) || rc != FW_EPERM)
4767 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
4770 * An L1_CFG will almost always result in a link-change event if
4771 * the link is up, and the driver will refresh the actual
4772 * fec/fc/etc. when the notification is processed. If the link
4773 * is down then the actual settings are meaningless.
4775 * This takes care of the case where a change in the L1 settings
4776 * may not result in a notification.
4778 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
4779 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
4784 #define FW_MAC_EXACT_CHUNK 7
4787 * Program the port's XGMAC based on parameters in ifnet. The caller also
4788 * indicates which parameters should be programmed (the rest are left alone).
4791 update_mac_settings(struct ifnet *ifp, int flags)
4794 struct vi_info *vi = ifp->if_softc;
4795 struct port_info *pi = vi->pi;
4796 struct adapter *sc = pi->adapter;
4797 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
4799 ASSERT_SYNCHRONIZED_OP(sc);
4800 KASSERT(flags, ("%s: not told what to update.", __func__));
4802 if (flags & XGMAC_MTU)
4805 if (flags & XGMAC_PROMISC)
4806 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
4808 if (flags & XGMAC_ALLMULTI)
4809 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
4811 if (flags & XGMAC_VLANEX)
4812 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
4814 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
4815 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
4816 allmulti, 1, vlanex, false);
4818 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
4824 if (flags & XGMAC_UCADDR) {
4825 uint8_t ucaddr[ETHER_ADDR_LEN];
4827 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
4828 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
4829 ucaddr, true, &vi->smt_idx);
4832 if_printf(ifp, "change_mac failed: %d\n", rc);
4835 vi->xact_addr_filt = rc;
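4836 /*
4837 * The firmware accepts at most FW_MAC_EXACT_CHUNK exact-match addresses
4838 * per request, so the multicast list is programmed in chunks.
4839 */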
4840 if (flags & XGMAC_MCADDRS) {
4841 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
4844 struct ifmultiaddr *ifma;
4847 if_maddr_rlock(ifp);
4848 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
4849 if (ifma->ifma_addr->sa_family != AF_LINK)
4852 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
4853 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
4856 if (i == FW_MAC_EXACT_CHUNK) {
4857 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
4858 del, i, mcaddr, NULL, &hash, 0);
4861 for (j = 0; j < i; j++) {
4863 "failed to add mc address"
4865 "%02x:%02x:%02x rc=%d\n",
4866 mcaddr[j][0], mcaddr[j][1],
4867 mcaddr[j][2], mcaddr[j][3],
4868 mcaddr[j][4], mcaddr[j][5],
4878 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
4879 mcaddr, NULL, &hash, 0);
4882 for (j = 0; j < i; j++) {
4884 "failed to add mc address"
4886 "%02x:%02x:%02x rc=%d\n",
4887 mcaddr[j][0], mcaddr[j][1],
4888 mcaddr[j][2], mcaddr[j][3],
4889 mcaddr[j][4], mcaddr[j][5],
4896 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
4898 if_printf(ifp, "failed to set mc address hash: %d\n", rc);
4900 if_maddr_runlock(ifp);
4907 * {begin|end}_synchronized_op must be called from the same thread.
4910 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
4916 /* the caller thinks it's ok to sleep, but is it really? */
4917 if (flags & SLEEP_OK)
4918 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
4919 "begin_synchronized_op");
4930 if (vi && IS_DOOMED(vi)) {
4940 if (!(flags & SLEEP_OK)) {
4945 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
4951 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
4954 sc->last_op = wmesg;
4955 sc->last_op_thr = curthread;
4956 sc->last_op_flags = flags;
4960 if (!(flags & HOLD_LOCK) || rc)
4967 * Tell if_ioctl and if_init that the VI is going away. This is a
4968 * special variant of begin_synchronized_op and must be paired with a
4969 * call to end_synchronized_op.
4972 doom_vi(struct adapter *sc, struct vi_info *vi)
4979 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
4982 sc->last_op = "t4detach";
4983 sc->last_op_thr = curthread;
4984 sc->last_op_flags = 0;
4990 * {begin|end}_synchronized_op must be called from the same thread.
4993 end_synchronized_op(struct adapter *sc, int flags)
4996 if (flags & LOCK_HELD)
4997 ADAPTER_LOCK_ASSERT_OWNED(sc);
5001 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
5008 cxgbe_init_synchronized(struct vi_info *vi)
5010 struct port_info *pi = vi->pi;
5011 struct adapter *sc = pi->adapter;
5012 struct ifnet *ifp = vi->ifp;
5014 struct sge_txq *txq;
5016 ASSERT_SYNCHRONIZED_OP(sc);
5018 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5019 return (0); /* already running */
5021 if (!(sc->flags & FULL_INIT_DONE) &&
5022 ((rc = adapter_full_init(sc)) != 0))
5023 return (rc); /* error message displayed already */
5025 if (!(vi->flags & VI_INIT_DONE) &&
5026 ((rc = vi_full_init(vi)) != 0))
5027 return (rc); /* error message displayed already */
5029 rc = update_mac_settings(ifp, XGMAC_ALL);
5031 goto done; /* error message displayed already */
5034 if (pi->up_vis == 0) {
5035 t4_update_port_info(pi);
5036 fixup_link_config(pi);
5037 build_medialist(pi);
5038 apply_link_config(pi);
5041 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
5043 if_printf(ifp, "enable_vi failed: %d\n", rc);
5049 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
5050 * if this changes.
5053 for_each_txq(vi, i, txq) {
5055 txq->eq.flags |= EQ_ENABLED;
5060 * The first iq of the first port to come up is used for tracing.
5062 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
5063 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
5064 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
5065 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
5066 V_QUEUENUMBER(sc->traceq));
5067 pi->flags |= HAS_TRACEQ;
5072 ifp->if_drv_flags |= IFF_DRV_RUNNING;
5074 if (pi->nvi > 1 || sc->flags & IS_VF)
5075 callout_reset(&vi->tick, hz, vi_tick, vi);
5077 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
5078 if (pi->link_cfg.link_ok)
5079 t4_os_link_changed(pi);
5083 cxgbe_uninit_synchronized(vi);
5092 cxgbe_uninit_synchronized(struct vi_info *vi)
5094 struct port_info *pi = vi->pi;
5095 struct adapter *sc = pi->adapter;
5096 struct ifnet *ifp = vi->ifp;
5098 struct sge_txq *txq;
5100 ASSERT_SYNCHRONIZED_OP(sc);
5102 if (!(vi->flags & VI_INIT_DONE)) {
5103 if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5104 KASSERT(0, ("uninited VI is running"));
5105 if_printf(ifp, "uninited VI with running ifnet. "
5106 "vi->flags 0x%016lx, if_flags 0x%08x, "
5107 "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
5114 * Disable the VI so that all its data in either direction is discarded
5115 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
5116 * tick) intact as the TP can deliver negative advice or data that it's
5117 * holding in its RAM (for an offloaded connection) even after the VI is
5118 * disabled.
5120 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
5122 if_printf(ifp, "disable_vi failed: %d\n", rc);
5126 for_each_txq(vi, i, txq) {
5128 txq->eq.flags &= ~EQ_ENABLED;
5133 if (pi->nvi > 1 || sc->flags & IS_VF)
5134 callout_stop(&vi->tick);
5136 callout_stop(&pi->tick);
5137 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5141 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5143 if (pi->up_vis > 0) {
5148 pi->link_cfg.link_ok = false;
5149 pi->link_cfg.speed = 0;
5150 pi->link_cfg.link_down_rc = 255;
5151 t4_os_link_changed(pi);
5158 * It is ok for this function to fail midway and return right away. t4_detach
5159 * will walk the entire sc->irq list and clean up whatever is valid.
5162 t4_setup_intr_handlers(struct adapter *sc)
5164 int rc, rid, p, q, v;
5167 struct port_info *pi;
5169 struct sge *sge = &sc->sge;
5170 struct sge_rxq *rxq;
5172 struct sge_ofld_rxq *ofld_rxq;
5175 struct sge_nm_rxq *nm_rxq;
5178 int nbuckets = rss_getnumbuckets();
5185 rid = sc->intr_type == INTR_INTX ? 0 : 1;
5186 if (forwarding_intr_to_fwq(sc))
5187 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
5189 /* Multiple interrupts. */
5190 if (sc->flags & IS_VF)
5191 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
5192 ("%s: too few intr.", __func__));
5194 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
5195 ("%s: too few intr.", __func__));
5197 /* The first one is always the error interrupt on PFs. */
5198 if (!(sc->flags & IS_VF)) {
5199 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
5206 /* The second one is always the firmware event queue (first on VFs) */
5207 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
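5208 /*
5209 * The remaining vectors are per-queue. Each name below is "%x%c%x":
5210 * the port in hex, a letter identifying the VI, and the queue in hex.
5211 */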
5213 for_each_port(sc, p) {
5215 for_each_vi(pi, v, vi) {
5216 vi->first_intr = rid - 1;
5218 if (vi->nnmrxq > 0) {
5219 int n = max(vi->nrxq, vi->nnmrxq);
5221 rxq = &sge->rxq[vi->first_rxq];
5223 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
5225 for (q = 0; q < n; q++) {
5226 snprintf(s, sizeof(s), "%x%c%x", p,
5232 irq->nm_rxq = nm_rxq++;
5234 if (irq->nm_rxq != NULL &&
5236 /* Netmap rx only */
5237 rc = t4_alloc_irq(sc, irq, rid,
5238 t4_nm_intr, irq->nm_rxq, s);
5240 if (irq->nm_rxq != NULL &&
5242 /* NIC and Netmap rx */
5243 rc = t4_alloc_irq(sc, irq, rid,
5244 t4_vi_intr, irq, s);
5247 if (irq->rxq != NULL &&
5248 irq->nm_rxq == NULL) {
5250 rc = t4_alloc_irq(sc, irq, rid,
5251 t4_intr, irq->rxq, s);
5257 bus_bind_intr(sc->dev, irq->res,
5258 rss_getcpu(q % nbuckets));
5266 for_each_rxq(vi, q, rxq) {
5267 snprintf(s, sizeof(s), "%x%c%x", p,
5269 rc = t4_alloc_irq(sc, irq, rid,
5274 bus_bind_intr(sc->dev, irq->res,
5275 rss_getcpu(q % nbuckets));
5283 for_each_ofld_rxq(vi, q, ofld_rxq) {
5284 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
5285 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
5296 MPASS(irq == &sc->irq[sc->intr_count]);
5302 adapter_full_init(struct adapter *sc)
5306 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5307 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5310 ASSERT_SYNCHRONIZED_OP(sc);
5311 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5312 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
5313 ("%s: FULL_INIT_DONE already", __func__));
5316 * Set up the queues that belong to the adapter (not to any particular port).
5318 rc = t4_setup_adapter_queues(sc);
5322 for (i = 0; i < nitems(sc->tq); i++) {
5323 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
5324 taskqueue_thread_enqueue, &sc->tq[i]);
5325 if (sc->tq[i] == NULL) {
5326 device_printf(sc->dev,
5327 "failed to allocate task queue %d\n", i);
5331 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
5332 device_get_nameunit(sc->dev), i);
5335 MPASS(RSS_KEYSIZE == 40);
5336 rss_getkey((void *)&raw_rss_key[0]);
5337 for (i = 0; i < nitems(rss_key); i++) {
5338 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
5340 t4_write_rss_key(sc, &rss_key[0], -1, 1);
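5341 /* The loop above reversed the word order and byte-swapped each word to
5342 * big-endian, evidently the layout the hardware expects for its RSS key. */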
5343 if (!(sc->flags & IS_VF))
5345 sc->flags |= FULL_INIT_DONE;
5348 adapter_full_uninit(sc);
5354 adapter_full_uninit(struct adapter *sc)
5358 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5360 t4_teardown_adapter_queues(sc);
5362 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
5363 taskqueue_free(sc->tq[i]);
5367 sc->flags &= ~FULL_INIT_DONE;
5373 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
5374 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
5375 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
5376 RSS_HASHTYPE_RSS_UDP_IPV6)
5378 /* Translates kernel hash types to hardware. */
5380 hashconfig_to_hashen(int hashconfig)
5384 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
5385 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
5386 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
5387 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
5388 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
5389 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5390 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5392 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
5393 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5394 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5396 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
5397 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5398 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
5399 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5404 /* Translates hardware hash types to kernel. */
5406 hashen_to_hashconfig(int hashen)
5410 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
5412 * If UDP hashing was enabled it must have been enabled for
5413 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
5414 * enabling any 4-tuple hash is a nonsensical configuration.
5416 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5417 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
5419 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5420 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
5421 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5422 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
5424 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5425 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
5426 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5427 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
5428 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
5429 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
5430 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
5431 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
5433 return (hashconfig);
5438 vi_full_init(struct vi_info *vi)
5440 struct adapter *sc = vi->pi->adapter;
5441 struct ifnet *ifp = vi->ifp;
5443 struct sge_rxq *rxq;
5446 int nbuckets = rss_getnumbuckets();
5447 int hashconfig = rss_gethashconfig();
5451 ASSERT_SYNCHRONIZED_OP(sc);
5452 KASSERT((vi->flags & VI_INIT_DONE) == 0,
5453 ("%s: VI_INIT_DONE already", __func__));
5455 sysctl_ctx_init(&vi->ctx);
5456 vi->flags |= VI_SYSCTL_CTX;
5459 * Allocate tx/rx/fl queues for this VI.
5461 rc = t4_setup_vi_queues(vi);
5463 goto done; /* error message displayed already */
5466 * Set up RSS for this VI. Save a copy of the RSS table for later use.
5468 if (vi->nrxq > vi->rss_size) {
5469 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
5470 "some queues will never receive traffic.\n", vi->nrxq,
5472 } else if (vi->rss_size % vi->nrxq) {
5473 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
5474 "expect uneven traffic distribution.\n", vi->nrxq,
5478 if (vi->nrxq != nbuckets) {
5479 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
5480 "performance will be impacted.\n", vi->nrxq, nbuckets);
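5481 /* Fill the indirection table. With kernel RSS the bucket-to-queue map
5482 * is honored; otherwise the rx queues are assigned round-robin. */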
5483 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
5484 for (i = 0; i < vi->rss_size;) {
5486 j = rss_get_indirection_to_bucket(i);
5488 rxq = &sc->sge.rxq[vi->first_rxq + j];
5489 rss[i++] = rxq->iq.abs_id;
5491 for_each_rxq(vi, j, rxq) {
5492 rss[i++] = rxq->iq.abs_id;
5493 if (i == vi->rss_size)
5499 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
5503 if_printf(ifp, "rss_config failed: %d\n", rc);
5508 vi->hashen = hashconfig_to_hashen(hashconfig);
5511 * We may have had to enable some hashes even though the global config
5512 * wants them disabled. This is a potential problem that must be
5513 * reported to the user.
5515 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
5518 * If we consider only the supported hash types, then the enabled hashes
5519 * are a superset of the requested hashes. In other words, there cannot
5520 * be any supported hash that was requested but not enabled, but there
5521 * can be hashes that were not requested but had to be enabled.
5523 extra &= SUPPORTED_RSS_HASHTYPES;
5524 MPASS((extra & hashconfig) == 0);
5528 "global RSS config (0x%x) cannot be accommodated.\n",
5531 if (extra & RSS_HASHTYPE_RSS_IPV4)
5532 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
5533 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
5534 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
5535 if (extra & RSS_HASHTYPE_RSS_IPV6)
5536 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
5537 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
5538 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
5539 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
5540 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
5541 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
5542 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
5544 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
5545 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
5546 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5547 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
5549 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, rss[0], 0, 0);
5552 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
5557 vi->flags |= VI_INIT_DONE;
5569 vi_full_uninit(struct vi_info *vi)
5571 struct port_info *pi = vi->pi;
5572 struct adapter *sc = pi->adapter;
5574 struct sge_rxq *rxq;
5575 struct sge_txq *txq;
5577 struct sge_ofld_rxq *ofld_rxq;
5579 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
5580 struct sge_wrq *ofld_txq;
5583 if (vi->flags & VI_INIT_DONE) {
5585 /* Need to quiesce queues. */
5587 /* XXX: Only for the first VI? */
5588 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
5589 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
5591 for_each_txq(vi, i, txq) {
5592 quiesce_txq(sc, txq);
5595 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
5596 for_each_ofld_txq(vi, i, ofld_txq) {
5597 quiesce_wrq(sc, ofld_txq);
5601 for_each_rxq(vi, i, rxq) {
5602 quiesce_iq(sc, &rxq->iq);
5603 quiesce_fl(sc, &rxq->fl);
5607 for_each_ofld_rxq(vi, i, ofld_rxq) {
5608 quiesce_iq(sc, &ofld_rxq->iq);
5609 quiesce_fl(sc, &ofld_rxq->fl);
5612 free(vi->rss, M_CXGBE);
5613 free(vi->nm_rss, M_CXGBE);
5616 t4_teardown_vi_queues(vi);
5617 vi->flags &= ~VI_INIT_DONE;
5623 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
5625 struct sge_eq *eq = &txq->eq;
5626 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
5628 (void) sc; /* unused */
5632 MPASS((eq->flags & EQ_ENABLED) == 0);
5636 /* Wait for the mp_ring to empty. */
5637 while (!mp_ring_is_idle(txq->r)) {
5638 mp_ring_check_drainage(txq->r, 0);
5639 pause("rquiesce", 1);
5642 /* Then wait for the hardware to finish. */
5643 while (spg->cidx != htobe16(eq->pidx))
5644 pause("equiesce", 1);
5646 /* Finally, wait for the driver to reclaim all descriptors. */
5647 while (eq->cidx != eq->pidx)
5648 pause("dquiesce", 1);
5652 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
5659 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
5661 (void) sc; /* unused */
5663 /* Synchronize with the interrupt handler */
5664 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
5669 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
5671 mtx_lock(&sc->sfl_lock);
5673 fl->flags |= FL_DOOMED;
5675 callout_stop(&sc->sfl_callout);
5676 mtx_unlock(&sc->sfl_lock);
5678 KASSERT((fl->flags & FL_STARVING) == 0,
5679 ("%s: still starving", __func__));
5683 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
5684 driver_intr_t *handler, void *arg, char *name)
5689 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
5690 RF_SHAREABLE | RF_ACTIVE);
5691 if (irq->res == NULL) {
5692 device_printf(sc->dev,
5693 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
5697 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
5698 NULL, handler, arg, &irq->tag);
5700 device_printf(sc->dev,
5701 "failed to setup interrupt for rid %d, name %s: %d\n",
5704 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
5710 t4_free_irq(struct adapter *sc, struct irq *irq)
5713 bus_teardown_intr(sc->dev, irq->res, irq->tag);
5715 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
5717 bzero(irq, sizeof(*irq));
5723 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
5726 regs->version = chip_id(sc) | chip_rev(sc) << 10;
5727 t4_get_regs(sc, buf, regs->len);
5730 #define A_PL_INDIR_CMD 0x1f8
5732 #define S_PL_AUTOINC 31
5733 #define M_PL_AUTOINC 0x1U
5734 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
5735 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
5737 #define S_PL_VFID 20
5738 #define M_PL_VFID 0xffU
5739 #define V_PL_VFID(x) ((x) << S_PL_VFID)
5740 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
5743 #define M_PL_ADDR 0xfffffU
5744 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
5745 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
5747 #define A_PL_INDIR_DATA 0x1fc
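5748 /* Read a 64-bit VF MPS statistic: directly on a VF, or through the PL
5749 * indirect window (auto-increment yields the low, then high word) on a PF. */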
5750 read_vf_stat(struct adapter *sc, u_int vin, int reg)
5754 mtx_assert(&sc->reg_lock, MA_OWNED);
5755 if (sc->flags & IS_VF) {
5756 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
5757 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
5759 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
5760 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
5761 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
5762 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
5764 return (((uint64_t)stats[1]) << 32 | stats[0]);
5768 t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
5771 #define GET_STAT(name) \
5772 read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
5774 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
5775 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
5776 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
5777 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
5778 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
5779 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
5780 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
5781 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
5782 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
5783 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
5784 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
5785 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
5786 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
5787 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
5788 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
5789 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
5795 t4_clr_vi_stats(struct adapter *sc, u_int vin)
5799 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
5800 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
5801 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
5802 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
5803 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
5807 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
5810 const struct timeval interval = {0, 250000}; /* 250ms */
5812 if (!(vi->flags & VI_INIT_DONE))
5816 timevalsub(&tv, &interval);
5817 if (timevalcmp(&tv, &vi->last_refreshed, <))
5820 mtx_lock(&sc->reg_lock);
5821 t4_get_vi_stats(sc, vi->vin, &vi->stats);
5822 getmicrotime(&vi->last_refreshed);
5823 mtx_unlock(&sc->reg_lock);
5827 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
5829 u_int i, v, tnl_cong_drops, bg_map;
5831 const struct timeval interval = {0, 250000}; /* 250ms */
5834 timevalsub(&tv, &interval);
5835 if (timevalcmp(&tv, &pi->last_refreshed, <))
5839 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
5840 bg_map = pi->mps_bg_map;
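5841 /* Sum tunnel congestion drops across each MPS buffer group in the map. */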
5842 i = ffs(bg_map) - 1;
5843 mtx_lock(&sc->reg_lock);
5844 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
5845 A_TP_MIB_TNL_CNG_DROP_0 + i);
5846 mtx_unlock(&sc->reg_lock);
5847 tnl_cong_drops += v;
5848 bg_map &= ~(1 << i);
5850 pi->tnl_cong_drops = tnl_cong_drops;
5851 getmicrotime(&pi->last_refreshed);
5855 cxgbe_tick(void *arg)
5857 struct port_info *pi = arg;
5858 struct adapter *sc = pi->adapter;
5860 PORT_LOCK_ASSERT_OWNED(pi);
5861 cxgbe_refresh_stats(sc, pi);
5863 callout_schedule(&pi->tick, hz);
5869 struct vi_info *vi = arg;
5870 struct adapter *sc = vi->pi->adapter;
5872 vi_refresh_stats(sc, vi);
5874 callout_schedule(&vi->tick, hz);
5878 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
5880 static char *caps_decoder[] = {
5881 "\20\001IPMI\002NCSI", /* 0: NBM */
5882 "\20\001PPP\002QFC\003DCBX", /* 1: link */
5883 "\20\001INGRESS\002EGRESS", /* 2: switch */
5884 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
5885 "\006HASHFILTER\007ETHOFLD",
5886 "\20\001TOE", /* 4: TOE */
5887 "\20\001RDDP\002RDMAC", /* 5: RDMA */
5888 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
5889 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
5890 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
5892 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
5893 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */
5894 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
5895 "\004PO_INITIATOR\005PO_TARGET",
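5896 /*
5897 * These are printf(9) %b-style decoders: the leading \20 selects hex
5898 * output; each entry is a 1-based bit number (octal) then its name. */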
5899 t4_sysctls(struct adapter *sc)
5901 struct sysctl_ctx_list *ctx;
5902 struct sysctl_oid *oid;
5903 struct sysctl_oid_list *children, *c0;
5904 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
5906 ctx = device_get_sysctl_ctx(sc->dev);
5911 oid = device_get_sysctl_tree(sc->dev);
5912 c0 = children = SYSCTL_CHILDREN(oid);
5914 sc->sc_do_rxcopy = 1;
5915 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
5916 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
5918 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
5919 sc->params.nports, "# of ports");
5921 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
5922 CTLTYPE_STRING | CTLFLAG_RD, doorbells, (uintptr_t)&sc->doorbells,
5923 sysctl_bitfield_8b, "A", "available doorbells");
5925 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
5926 sc->params.vpd.cclk, "core clock frequency (in kHz)");
5928 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
5929 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
5930 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
5931 "interrupt holdoff timer values (us)");
5933 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
5934 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
5935 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
5936 "interrupt holdoff packet counter values");
5938 t4_sge_sysctls(sc, ctx, children);
5940 sc->lro_timeout = 100;
5941 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
5942 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
5944 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
5945 &sc->debug_flags, 0, "flags to enable runtime debugging");
5947 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
5948 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
5950 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
5951 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
5953 if (sc->flags & IS_VF)
5956 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
5957 NULL, chip_rev(sc), "chip hardware revision");
5959 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
5960 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
5962 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
5963 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
5965 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
5966 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
5968 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
5969 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
5971 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
5972 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
5974 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
5975 sc->er_version, 0, "expansion ROM version");
5977 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
5978 sc->bs_version, 0, "bootstrap firmware version");
5980 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
5981 NULL, sc->params.scfg_vers, "serial config version");
5983 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
5984 NULL, sc->params.vpd_vers, "VPD version");
5986 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
5987 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
5989 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
5990 sc->cfcsum, "config file checksum");
5992 #define SYSCTL_CAP(name, n, text) \
5993 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
5994 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], (uintptr_t)&sc->name, \
5995 sysctl_bitfield_16b, "A", "available " text " capabilities")
5997 SYSCTL_CAP(nbmcaps, 0, "NBM");
5998 SYSCTL_CAP(linkcaps, 1, "link");
5999 SYSCTL_CAP(switchcaps, 2, "switch");
6000 SYSCTL_CAP(niccaps, 3, "NIC");
6001 SYSCTL_CAP(toecaps, 4, "TCP offload");
6002 SYSCTL_CAP(rdmacaps, 5, "RDMA");
6003 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
6004 SYSCTL_CAP(cryptocaps, 7, "crypto");
6005 SYSCTL_CAP(fcoecaps, 8, "FCoE");
6008 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
6009 NULL, sc->tids.nftids, "number of filters");
6011 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
6012 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
6013 "chip temperature (in Celsius)");
6015 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING |
6016 CTLFLAG_RD, sc, 0, sysctl_loadavg, "A",
6017 "microprocessor load averages (debug firmwares only)");
6019 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD,
6020 &sc->params.core_vdd, 0, "core Vdd (in mV)");
6022 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
6023 CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS,
6024 sysctl_cpus, "A", "local CPUs");
6026 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
6027 CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS,
6028 sysctl_cpus, "A", "preferred CPUs for interrupts");
6030 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
6031 &sc->swintr, 0, "software triggered interrupts");
6034 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
6036 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
6037 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
6038 "logs and miscellaneous information");
6039 children = SYSCTL_CHILDREN(oid);
6041 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
6042 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6043 sysctl_cctrl, "A", "congestion control");
6045 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
6046 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6047 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
6049 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
6050 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
6051 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
6053 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
6054 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
6055 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
6057 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
6058 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
6059 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
6061 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
6062 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
6063 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
6065 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
6066 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
6067 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
6069 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
6070 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_la,
6071 "A", "CIM logic analyzer");
6073 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
6074 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6075 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
6077 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
6078 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
6079 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
6081 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
6082 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
6083 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
6085 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
6086 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
6087 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
6089 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
6090 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
6091 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
6093 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
6094 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
6095 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
6097 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
6098 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
6099 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
6101 if (chip_id(sc) > CHELSIO_T4) {
6102 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
6103 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
6104 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
6106 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
6107 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
6108 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
6111 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
6112 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6113 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
6115 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
6116 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6117 sysctl_cim_qcfg, "A", "CIM queue configuration");
6119 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
6120 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6121 sysctl_cpl_stats, "A", "CPL statistics");
6123 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
6124 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6125 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
6127 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
6128 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6129 sysctl_devlog, "A", "firmware's device log");
6131 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
6132 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6133 sysctl_fcoe_stats, "A", "FCoE statistics");
6135 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
6136 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6137 sysctl_hw_sched, "A", "hardware scheduler");
6139 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
6140 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6141 sysctl_l2t, "A", "hardware L2 table");
6143 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
6144 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6145 sysctl_smt, "A", "hardware source MAC table");
6148 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
6149 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6150 sysctl_clip, "A", "active CLIP table entries");
6153 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
6154 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6155 sysctl_lb_stats, "A", "loopback statistics");
6157 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
6158 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6159 sysctl_meminfo, "A", "memory regions");
6161 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
6162 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6163 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
6164 "A", "MPS TCAM entries");
6166 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
6167 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6168 sysctl_path_mtus, "A", "path MTUs");
6170 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
6171 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6172 sysctl_pm_stats, "A", "PM statistics");
6174 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
6175 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6176 sysctl_rdma_stats, "A", "RDMA statistics");
6178 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
6179 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6180 sysctl_tcp_stats, "A", "TCP statistics");
6182 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
6183 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6184 sysctl_tids, "A", "TID information");
6186 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
6187 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6188 sysctl_tp_err_stats, "A", "TP error statistics");
6190 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
6191 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
6192 "TP logic analyzer event capture mask");
6194 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
6195 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6196 sysctl_tp_la, "A", "TP logic analyzer");
6198 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
6199 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6200 sysctl_tx_rate, "A", "Tx rate");
6202 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
6203 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6204 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
6206 if (chip_id(sc) >= CHELSIO_T5) {
6207 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
6208 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
6209 sysctl_wcwr_stats, "A", "write combined work requests");
6213 if (is_offload(sc)) {
6220 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
6221 NULL, "TOE parameters");
6222 children = SYSCTL_CHILDREN(oid);
6224 sc->tt.cong_algorithm = -1;
6225 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
6226 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
6227 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
6230 sc->tt.sndbuf = 256 * 1024;
6231 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
6232 &sc->tt.sndbuf, 0, "max hardware send buffer size");
6235 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
6236 &sc->tt.ddp, 0, "DDP allowed");
6238 sc->tt.rx_coalesce = 1;
6239 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
6240 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
6243 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW,
6244 &sc->tt.tls, 0, "Inline TLS allowed");
6246 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
6247 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports,
6248 "I", "TCP ports that use inline TLS+TOE RX");
6250 sc->tt.tx_align = 1;
6251 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
6252 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
6254 sc->tt.tx_zcopy = 0;
6255 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
6256 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
6257 "Enable zero-copy aio_write(2)");
6259 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
6260 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6261 "cop_managed_offloading", CTLFLAG_RW,
6262 &sc->tt.cop_managed_offloading, 0,
6263 "COP (Connection Offload Policy) controls all TOE offload");
6265 sc->tt.autorcvbuf_inc = 16 * 1024;
6266 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
6267 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
6268 "autorcvbuf increment");
6270 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
6271 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
6272 "TP timer tick (us)");
6274 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
6275 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
6276 "TCP timestamp tick (us)");
6278 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
6279 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
6282 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
6283 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
6284 "IU", "DACK timer (us)");
6286 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
6287 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
6288 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)");
6290 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
6291 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
6292 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)");
6294 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
6295 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
6296 sysctl_tp_timer, "LU", "Persist timer min (us)");
6298 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
6299 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
6300 sysctl_tp_timer, "LU", "Persist timer max (us)");
6302 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
6303 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
6304 sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
6306 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
6307 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
6308 sysctl_tp_timer, "LU", "Keepalive interval timer (us)");
6310 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
6311 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
6312 sysctl_tp_timer, "LU", "Initial SRTT (us)");
6314 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
6315 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
6316 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
6318 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
6319 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
6320 sysctl_tp_shift_cnt, "IU",
6321 "Number of SYN retransmissions before abort");
6323 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
6324 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
6325 sysctl_tp_shift_cnt, "IU",
6326 "Number of retransmissions before abort");
6328 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
6329 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
6330 sysctl_tp_shift_cnt, "IU",
6331 "Number of keepalive probes before abort");
6333 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
6334 CTLFLAG_RD, NULL, "TOE retransmit backoffs");
6335 children = SYSCTL_CHILDREN(oid);
6336 for (i = 0; i < 16; i++) {
6337 snprintf(s, sizeof(s), "%u", i);
6338 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
6339 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
6340 "IU", "TOE retransmit backoff");
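6341 /*
6342 * Example (device unit hypothetical): the entries appear in sysctl(8)
6343 * as dev.t4nex.0.toe.rexmt_backoff.0 through .15.
6344 */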
6347 vi_sysctls(struct vi_info *vi)
6349 struct sysctl_ctx_list *ctx;
6350 struct sysctl_oid *oid;
6351 struct sysctl_oid_list *children;
6353 ctx = device_get_sysctl_ctx(vi->dev);
6356 * dev.v?(cxgbe|cxl).X.
6358 oid = device_get_sysctl_tree(vi->dev);
6359 children = SYSCTL_CHILDREN(oid);
6361 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
6362 vi->viid, "VI identifier");
6363 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
6364 &vi->nrxq, 0, "# of rx queues");
6365 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
6366 &vi->ntxq, 0, "# of tx queues");
6367 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
6368 &vi->first_rxq, 0, "index of first rx queue");
6369 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
6370 &vi->first_txq, 0, "index of first tx queue");
6371 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
6372 vi->rss_base, "start of RSS indirection table");
6373 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
6374 vi->rss_size, "size of RSS indirection table");
6376 if (IS_MAIN_VI(vi)) {
6377 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
6378 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
6379 "Reserve queue 0 for non-flowid packets");
6383 if (vi->nofldrxq != 0) {
6384 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
6386 "# of rx queues for offloaded TCP connections");
6387 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
6388 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
6389 "index of first TOE rx queue");
6390 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
6391 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
6392 sysctl_holdoff_tmr_idx_ofld, "I",
6393 "holdoff timer index for TOE queues");
6394 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
6395 CTLTYPE_INT | CTLFLAG_RW, vi, 0,
6396 sysctl_holdoff_pktc_idx_ofld, "I",
6397 "holdoff packet counter index for TOE queues");
6400 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6401 if (vi->nofldtxq != 0) {
6402 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
6404 "# of tx queues for TOE/ETHOFLD");
6405 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
6406 CTLFLAG_RD, &vi->first_ofld_txq, 0,
6407 "index of first TOE/ETHOFLD tx queue");
6411 if (vi->nnmrxq != 0) {
6412 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
6413 &vi->nnmrxq, 0, "# of netmap rx queues");
6414 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
6415 &vi->nnmtxq, 0, "# of netmap tx queues");
6416 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
6417 CTLFLAG_RD, &vi->first_nm_rxq, 0,
6418 "index of first netmap rx queue");
6419 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
6420 CTLFLAG_RD, &vi->first_nm_txq, 0,
6421 "index of first netmap tx queue");
6425 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
6426 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
6427 "holdoff timer index");
6428 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
6429 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
6430 "holdoff packet counter index");
6432 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
6433 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
6435 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
6436 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
6441 cxgbe_sysctls(struct port_info *pi)
6443 struct sysctl_ctx_list *ctx;
6444 struct sysctl_oid *oid;
6445 struct sysctl_oid_list *children, *children2;
6446 struct adapter *sc = pi->adapter;
6449 static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"};
6451 ctx = device_get_sysctl_ctx(pi->dev);
6456 oid = device_get_sysctl_tree(pi->dev);
6457 children = SYSCTL_CHILDREN(oid);
6459 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
6460 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
6461 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
6462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
6463 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
6464 "PHY temperature (in Celsius)");
6465 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
6466 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
6467 "PHY firmware version");
6470 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
6471 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
6472 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause, bit 2 = pause_autoneg)");
6473 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
6474 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
6475 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
6476 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
6477 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
6478 "autonegotiation (-1 = not supported)");
6480 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
6481 port_top_speed(pi), "max speed (in Gbps)");
6482 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
6483 pi->mps_bg_map, "MPS buffer group map");
6484 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
6485 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
6487 if (sc->flags & IS_VF)
6491 * dev.(cxgbe|cxl).X.tc.
6493 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
6494 "Tx scheduler traffic classes (cl_rl)");
6495 children2 = SYSCTL_CHILDREN(oid);
6496 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
6497 CTLFLAG_RW, &pi->sched_params->pktsize, 0,
6498 "pktsize for per-flow cl-rl (0 means up to the driver)");
6499 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
6500 CTLFLAG_RW, &pi->sched_params->burstsize, 0,
6501 "burstsize for per-flow cl-rl (0 means up to the driver)");
6502 for (i = 0; i < sc->chip_params->nsched_cls; i++) {
6503 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
6505 snprintf(name, sizeof(name), "%d", i);
6506 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
6507 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
6509 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
6510 CTLTYPE_STRING | CTLFLAG_RD, tc_flags, (uintptr_t)&tc->flags,
6511 sysctl_bitfield_8b, "A", "flags");
6512 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
6513 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
6514 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
6515 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
6516 sysctl_tc_params, "A", "traffic class parameters");
6520 * dev.(cxgbe|cxl).X.stats.
6522 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
6523 NULL, "port statistics");
6524 children = SYSCTL_CHILDREN(oid);
6525 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
6526 &pi->tx_parse_error, 0,
6527 "# of tx packets with invalid length or # of segments");
6529 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
6530 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
6531 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
6532 sysctl_handle_t4_reg64, "QU", desc)
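6533 /* reg names the low word; the handler reads the full 64-bit pair. */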
6534 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
6535 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
6536 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
6537 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
6538 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
6539 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
6540 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
6541 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
6542 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
6543 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
6544 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
6545 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
6546 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
6547 "# of tx frames in this range",
6548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
6549 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
6550 "# of tx frames in this range",
6551 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
6552 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
6553 "# of tx frames in this range",
6554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
6555 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
6556 "# of tx frames in this range",
6557 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
6558 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
6559 "# of tx frames in this range",
6560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
6561 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
6562 "# of tx frames in this range",
6563 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
6564 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
6565 "# of tx frames in this range",
6566 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
6567 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
6568 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
6569 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
6570 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
6571 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
6572 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
6573 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
6574 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
6575 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
6576 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
6577 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
6578 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
6579 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
6580 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
6581 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
6582 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
6583 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
6584 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
6585 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
6586 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
6588 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
6589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
6590 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
6591 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
6592 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
6593 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
6594 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
6595 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
6596 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
6597 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
6598 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
6599 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
6600 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
6601 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
6602 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
6603 "# of frames received with bad FCS",
6604 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
6605 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
6606 "# of frames received with length error",
6607 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
6608 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
6609 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
6610 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
6611 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
6612 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
6613 "# of rx frames in this range",
6614 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
6615 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
6616 "# of rx frames in this range",
6617 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
6618 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
6619 "# of rx frames in this range",
6620 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
6621 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
6622 "# of rx frames in this range",
6623 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
6624 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
6625 "# of rx frames in this range",
6626 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
6627 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
6628 "# of rx frames in this range",
6629 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
6630 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
6631 "# of rx frames in this range",
6632 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
6633 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
6634 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
6635 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
6636 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
6637 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
6638 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
6639 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
6640 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
6641 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
6642 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
6643 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
6644 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
6645 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
6646 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
6647 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
6648 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
6649 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
6650 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
6652 #undef SYSCTL_ADD_T4_REG64
6654 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
6655 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
6656 &pi->stats.name, desc)
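/*
 * Each use below creates a read-only UQUAD sysctl backed directly by the
 * matching pi->stats field (e.g. rx_ovflow0 reads &pi->stats.rx_ovflow0).
 */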
6658 /* We get these from port_stats and they may be stale by up to 1s */
6659 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
6660 "# drops due to buffer-group 0 overflows");
6661 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
6662 "# drops due to buffer-group 1 overflows");
6663 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
6664 "# drops due to buffer-group 2 overflows");
6665 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
6666 "# drops due to buffer-group 3 overflows");
6667 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
6668 "# of buffer-group 0 truncated packets");
6669 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
6670 "# of buffer-group 1 truncated packets");
6671 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
6672 "# of buffer-group 2 truncated packets");
6673 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
6674 "# of buffer-group 3 truncated packets");
6676 #undef SYSCTL_ADD_T4_PORTSTAT
6678 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records",
6679 CTLFLAG_RD, &pi->tx_tls_records,
6680 "# of TLS records transmitted");
6681 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets",
6682 CTLFLAG_RD, &pi->tx_tls_octets,
6683 "# of payload octets in transmitted TLS records");
6684 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records",
6685 CTLFLAG_RD, &pi->rx_tls_records,
6686 "# of TLS records received");
6687 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets",
6688 CTLFLAG_RD, &pi->rx_tls_octets,
6689 "# of payload octets in received TLS records");
6693 sysctl_int_array(SYSCTL_HANDLER_ARGS)
6695 int rc, *i, space = 0;
6698 sbuf_new_for_sysctl(&sb, NULL, 64, req);
6699 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
6701 sbuf_printf(&sb, " ");
6702 sbuf_printf(&sb, "%d", *i);
6705 rc = sbuf_finish(&sb);
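/* Display a value via %b: arg1 is the bit-name string, arg2 points to the data. */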
6711 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
6716 rc = sysctl_wire_old_buffer(req, 0);
6720 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6724 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
6725 rc = sbuf_finish(sb);
6732 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
6737 rc = sysctl_wire_old_buffer(req, 0);
6741 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6745 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
6746 rc = sbuf_finish(sb);
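/*
 * Read a 10GBASE-T PHY register over MDIO; arg2 (op) appears to select
 * between the two registers polled here (see the magic numbers below).
 */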
6753 sysctl_btphy(SYSCTL_HANDLER_ARGS)
6755 struct port_info *pi = arg1;
6757 struct adapter *sc = pi->adapter;
6761 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
6764 /* XXX: magic numbers */
6765 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
6767 end_synchronized_op(sc, 0);
6773 rc = sysctl_handle_int(oidp, &v, 0, req);
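/* Reserve txq 0 for frames without a flowid; requires more than one txq. */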
6778 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
6780 struct vi_info *vi = arg1;
6783 val = vi->rsrv_noflowq;
6784 rc = sysctl_handle_int(oidp, &val, 0, req);
6785 if (rc != 0 || req->newptr == NULL)
6788 if ((val >= 1) && (vi->ntxq > 1))
6789 vi->rsrv_noflowq = 1;
6791 vi->rsrv_noflowq = 0;
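/*
 * Set the holdoff timer index (0..SGE_NTIMERS-1) and propagate it to the
 * interrupt parameters of every rx queue on the VI.
 */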
6797 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
6799 struct vi_info *vi = arg1;
6800 struct adapter *sc = vi->pi->adapter;
6802 struct sge_rxq *rxq;
6807 rc = sysctl_handle_int(oidp, &idx, 0, req);
6808 if (rc != 0 || req->newptr == NULL)
6811 if (idx < 0 || idx >= SGE_NTIMERS)
6814 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6819 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
6820 for_each_rxq(vi, i, rxq) {
6821 #ifdef atomic_store_rel_8
6822 atomic_store_rel_8(&rxq->iq.intr_params, v);
6824 rxq->iq.intr_params = v;
6829 end_synchronized_op(sc, LOCK_HELD);
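/* Holdoff packet-count index; -1 disables packet counting. */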
6834 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
6836 struct vi_info *vi = arg1;
6837 struct adapter *sc = vi->pi->adapter;
6842 rc = sysctl_handle_int(oidp, &idx, 0, req);
6843 if (rc != 0 || req->newptr == NULL)
6846 if (idx < -1 || idx >= SGE_NCOUNTERS)
6849 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6854 if (vi->flags & VI_INIT_DONE)
6855 rc = EBUSY; /* cannot be changed once the queues are created */
6859 end_synchronized_op(sc, LOCK_HELD);
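/*
 * rx queue size: at least 128 and a multiple of 8.  Like the tx variant
 * below, it cannot be changed after the queues are created.
 */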
6864 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
6866 struct vi_info *vi = arg1;
6867 struct adapter *sc = vi->pi->adapter;
6870 qsize = vi->qsize_rxq;
6872 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6873 if (rc != 0 || req->newptr == NULL)
6876 if (qsize < 128 || (qsize & 7))
6879 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6884 if (vi->flags & VI_INIT_DONE)
6885 rc = EBUSY; /* cannot be changed once the queues are created */
6887 vi->qsize_rxq = qsize;
6889 end_synchronized_op(sc, LOCK_HELD);
6894 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
6896 struct vi_info *vi = arg1;
6897 struct adapter *sc = vi->pi->adapter;
6900 qsize = vi->qsize_txq;
6902 rc = sysctl_handle_int(oidp, &qsize, 0, req);
6903 if (rc != 0 || req->newptr == NULL)
6906 if (qsize < 128 || qsize > 65536)
6909 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
6914 if (vi->flags & VI_INIT_DONE)
6915 rc = EBUSY; /* cannot be changed once the queues are created */
6917 vi->qsize_txq = qsize;
6919 end_synchronized_op(sc, LOCK_HELD);
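/*
 * PAUSE settings as a 3-bit field: 1 = RX, 2 = TX, 4 = AUTO.  Reads show
 * the negotiated state when the link is up; writes expect a single digit.
 */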
6924 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
6926 struct port_info *pi = arg1;
6927 struct adapter *sc = pi->adapter;
6928 struct link_config *lc = &pi->link_cfg;
6931 if (req->newptr == NULL) {
6933 static char *bits = "\20\1RX\2TX\3AUTO";
6935 rc = sysctl_wire_old_buffer(req, 0);
6939 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
6944 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
6945 (lc->requested_fc & PAUSE_AUTONEG), bits);
6947 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
6948 PAUSE_RX | PAUSE_AUTONEG), bits);
6950 rc = sbuf_finish(sb);
6956 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
6960 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
6966 if (s[0] < '0' || s[0] > '9')
6967 return (EINVAL); /* not a number */
6969 if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
6970 return (EINVAL); /* some other bit is set too */
6972 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
6977 lc->requested_fc = n;
6978 fixup_link_config(pi);
6980 rc = apply_link_config(pi);
6981 set_current_media(pi);
6983 end_synchronized_op(sc, 0);
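/*
 * Requested FEC as a bitfield (see "bits" below); writing -1, or any value
 * with the AUTO bit set, selects FEC_AUTO.
 */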
6990 sysctl_fec(SYSCTL_HANDLER_ARGS)
6992 struct port_info *pi = arg1;
6993 struct adapter *sc = pi->adapter;
6994 struct link_config *lc = &pi->link_cfg;
6998 if (req->newptr == NULL) {
7000 static char *bits = "\20\1RS\2BASE-R\3RSVD1\4RSVD2\5RSVD3\6AUTO";
7002 rc = sysctl_wire_old_buffer(req, 0);
7006 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7011 * Display the requested_fec when the link is down -- the actual
7012 * FEC makes sense only when the link is up.
7015 sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) |
7016 (lc->requested_fec & FEC_AUTO), bits);
7018 sbuf_printf(sb, "%b", lc->requested_fec, bits);
7020 rc = sbuf_finish(sb);
7026 snprintf(s, sizeof(s), "%d",
7027 lc->requested_fec == FEC_AUTO ? -1 :
7028 lc->requested_fec & M_FW_PORT_CAP32_FEC);
7030 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
7034 n = strtol(&s[0], NULL, 0);
7035 if (n < 0 || n & FEC_AUTO)
7038 if (n & ~M_FW_PORT_CAP32_FEC)
7039 return (EINVAL);/* some other bit is set too */
7041 return (EINVAL);/* one bit can be set at most */
7044 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7049 old = lc->requested_fec;
7051 lc->requested_fec = FEC_AUTO;
7053 lc->requested_fec = FEC_NONE;
7055 if ((lc->supported | V_FW_PORT_CAP32_FEC(n)) !=
7060 lc->requested_fec = n;
7062 fixup_link_config(pi);
7063 if (pi->up_vis > 0) {
7064 rc = apply_link_config(pi);
7066 lc->requested_fec = old;
7067 if (rc == FW_EPROTO)
7073 end_synchronized_op(sc, 0);
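/* Autonegotiation on/off; rejected if the port doesn't support it. */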
7080 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
7082 struct port_info *pi = arg1;
7083 struct adapter *sc = pi->adapter;
7084 struct link_config *lc = &pi->link_cfg;
7087 if (lc->supported & FW_PORT_CAP32_ANEG)
7088 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
7091 rc = sysctl_handle_int(oidp, &val, 0, req);
7092 if (rc != 0 || req->newptr == NULL)
7095 val = AUTONEG_DISABLE;
7097 val = AUTONEG_ENABLE;
7101 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7106 if (val == AUTONEG_ENABLE && !(lc->supported & FW_PORT_CAP32_ANEG)) {
7110 lc->requested_aneg = val;
7111 fixup_link_config(pi);
7113 rc = apply_link_config(pi);
7114 set_current_media(pi);
7117 end_synchronized_op(sc, 0);
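/* Back end for the 64-bit register sysctls; the register offset is in arg2. */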
7122 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
7124 struct adapter *sc = arg1;
7128 val = t4_read_reg64(sc, reg);
7130 return (sysctl_handle_64(oidp, &val, 0, req));
7134 sysctl_temperature(SYSCTL_HANDLER_ARGS)
7136 struct adapter *sc = arg1;
7138 uint32_t param, val;
7140 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
7143 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7144 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7145 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
7146 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7147 end_synchronized_op(sc, 0);
7151 /* unknown is returned as 0 but we display -1 in that case */
7152 t = val == 0 ? -1 : val;
7154 rc = sysctl_handle_int(oidp, &t, 0, req);
7159 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
7161 struct adapter *sc = arg1;
7164 uint32_t param, val;
7166 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
7169 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7170 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
7171 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7172 end_synchronized_op(sc, 0);
7176 rc = sysctl_wire_old_buffer(req, 0);
7180 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7184 if (val == 0xffffffff) {
7185 /* Only debug and custom firmwares report load averages. */
7186 sbuf_printf(sb, "not available");
7188 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
7189 (val >> 16) & 0xff);
7191 rc = sbuf_finish(sb);
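/*
 * TP congestion control table: 16 additive-increment values per window plus
 * the window size and decrement factor.
 */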
7198 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
7200 struct adapter *sc = arg1;
7203 uint16_t incr[NMTUS][NCCTRL_WIN];
7204 static const char *dec_fac[] = {
7205 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
7209 rc = sysctl_wire_old_buffer(req, 0);
7213 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7217 t4_read_cong_tbl(sc, incr);
7219 for (i = 0; i < NCCTRL_WIN; ++i) {
7220 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
7221 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
7222 incr[5][i], incr[6][i], incr[7][i]);
7223 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
7224 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
7225 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
7226 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
7229 rc = sbuf_finish(sb);
7235 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
7236 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
7237 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
7238 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
7242 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
7244 struct adapter *sc = arg1;
7246 int rc, i, n, qid = arg2;
7249 u_int cim_num_obq = sc->chip_params->cim_num_obq;
7251 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
7252 ("%s: bad qid %d\n", __func__, qid));
7254 if (qid < CIM_NUM_IBQ) {
7257 n = 4 * CIM_IBQ_SIZE;
7258 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7259 rc = t4_read_cim_ibq(sc, qid, buf, n);
7261 /* outbound queue */
7264 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
7265 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7266 rc = t4_read_cim_obq(sc, qid, buf, n);
7273 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
7275 rc = sysctl_wire_old_buffer(req, 0);
7279 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7285 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
7286 for (i = 0, p = buf; i < n; i += 16, p += 4)
7287 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
7290 rc = sbuf_finish(sb);
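/*
 * CIM logic-analyzer decoders: T4/T5 entries are 8 words each (this
 * function), T6 entries are 10 words (sbuf_cim_la6 below).
 */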
7298 sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
7302 sbuf_printf(sb, "Status Data PC%s",
7303 cfg & F_UPDBGLACAPTPCONLY ? "" :
7304 " LS0Stat LS0Addr LS0Data");
7306 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
7307 if (cfg & F_UPDBGLACAPTPCONLY) {
7308 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
7310 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
7311 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
7312 p[4] & 0xff, p[5] >> 8);
7313 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
7314 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
7315 p[1] & 0xf, p[2] >> 4);
7318 "\n %02x %x%07x %x%07x %08x %08x "
7320 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
7321 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
7328 sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
7332 sbuf_printf(sb, "Status Inst Data PC%s",
7333 cfg & F_UPDBGLACAPTPCONLY ? "" :
7334 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
7336 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
7337 if (cfg & F_UPDBGLACAPTPCONLY) {
7338 sbuf_printf(sb, "\n %02x %08x %08x %08x",
7339 p[3] & 0xff, p[2], p[1], p[0]);
7340 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
7341 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
7342 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
7343 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
7344 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
7345 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
7348 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
7349 "%08x %08x %08x %08x %08x %08x",
7350 (p[9] >> 16) & 0xff,
7351 p[9] & 0xffff, p[8] >> 16,
7352 p[8] & 0xffff, p[7] >> 16,
7353 p[7] & 0xffff, p[6] >> 16,
7354 p[2], p[1], p[0], p[5], p[4], p[3]);
7360 sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
7365 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
7369 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
7370 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
7375 rc = -t4_cim_read_la(sc, buf, NULL);
7378 if (chip_id(sc) < CHELSIO_T6)
7379 sbuf_cim_la4(sc, sb, buf, cfg);
7381 sbuf_cim_la6(sc, sb, buf, cfg);
7389 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
7391 struct adapter *sc = arg1;
7395 rc = sysctl_wire_old_buffer(req, 0);
7398 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7402 rc = sbuf_cim_la(sc, sb, M_WAITOK);
7404 rc = sbuf_finish(sb);
7410 t4_os_dump_cimla(struct adapter *sc, int arg, bool verbose)
7415 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb)
7417 rc = sbuf_cim_la(sc, &sb, M_NOWAIT);
7419 rc = sbuf_finish(&sb);
7421 log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s",
7422 device_get_nameunit(sc->dev), sbuf_data(&sb));
7430 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
7432 struct adapter *sc = arg1;
7438 rc = sysctl_wire_old_buffer(req, 0);
7442 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7446 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
7449 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
7452 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7453 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
7457 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
7458 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
7459 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
7460 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
7461 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
7462 (p[1] >> 2) | ((p[2] & 3) << 30),
7463 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
7467 rc = sbuf_finish(sb);
7474 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
7476 struct adapter *sc = arg1;
7482 rc = sysctl_wire_old_buffer(req, 0);
7486 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7490 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
7493 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
7496 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
7497 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7498 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
7499 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
7500 p[4], p[3], p[2], p[1], p[0]);
7503 sbuf_printf(sb, "\n\nCntl ID Data");
7504 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
7505 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
7506 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
7509 rc = sbuf_finish(sb);
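/* CIM queue configuration and pointer state for all IBQs and OBQs. */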
7516 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
7518 struct adapter *sc = arg1;
7521 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7522 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
7523 uint16_t thres[CIM_NUM_IBQ];
7524 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
7525 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
7526 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
7528 cim_num_obq = sc->chip_params->cim_num_obq;
7530 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
7531 obq_rdaddr = A_UP_OBQ_0_REALADDR;
7533 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
7534 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
7536 nq = CIM_NUM_IBQ + cim_num_obq;
7538 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
7540 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
7544 t4_read_cimq_cfg(sc, base, size, thres);
7546 rc = sysctl_wire_old_buffer(req, 0);
7550 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7555 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
7557 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
7558 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
7559 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
7560 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7561 G_QUEREMFLITS(p[2]) * 16);
7562 for ( ; i < nq; i++, p += 4, wr += 2)
7563 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
7564 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
7565 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
7566 G_QUEREMFLITS(p[2]) * 16);
7568 rc = sbuf_finish(sb);
7575 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
7577 struct adapter *sc = arg1;
7580 struct tp_cpl_stats stats;
7582 rc = sysctl_wire_old_buffer(req, 0);
7586 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7590 mtx_lock(&sc->reg_lock);
7591 t4_tp_get_cpl_stats(sc, &stats, 0);
7592 mtx_unlock(&sc->reg_lock);
7594 if (sc->chip_params->nchan > 2) {
7595 sbuf_printf(sb, " channel 0 channel 1"
7596 " channel 2 channel 3");
7597 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
7598 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
7599 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
7600 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
7602 sbuf_printf(sb, " channel 0 channel 1");
7603 sbuf_printf(sb, "\nCPL requests: %10u %10u",
7604 stats.req[0], stats.req[1]);
7605 sbuf_printf(sb, "\nCPL responses: %10u %10u",
7606 stats.rsp[0], stats.rsp[1]);
7609 rc = sbuf_finish(sb);
7616 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
7618 struct adapter *sc = arg1;
7621 struct tp_usm_stats stats;
7623 rc = sysctl_wire_old_buffer(req, 0);
7627 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7631 t4_get_usm_stats(sc, &stats, 1);
7633 sbuf_printf(sb, "Frames: %u\n", stats.frames);
7634 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
7635 sbuf_printf(sb, "Drops: %u", stats.drops);
7637 rc = sbuf_finish(sb);
7643 static const char * const devlog_level_strings[] = {
7644 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
7645 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
7646 [FW_DEVLOG_LEVEL_ERR] = "ERR",
7647 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
7648 [FW_DEVLOG_LEVEL_INFO] = "INFO",
7649 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
7652 static const char * const devlog_facility_strings[] = {
7653 [FW_DEVLOG_FACILITY_CORE] = "CORE",
7654 [FW_DEVLOG_FACILITY_CF] = "CF",
7655 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
7656 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
7657 [FW_DEVLOG_FACILITY_RES] = "RES",
7658 [FW_DEVLOG_FACILITY_HW] = "HW",
7659 [FW_DEVLOG_FACILITY_FLR] = "FLR",
7660 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
7661 [FW_DEVLOG_FACILITY_PHY] = "PHY",
7662 [FW_DEVLOG_FACILITY_MAC] = "MAC",
7663 [FW_DEVLOG_FACILITY_PORT] = "PORT",
7664 [FW_DEVLOG_FACILITY_VI] = "VI",
7665 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
7666 [FW_DEVLOG_FACILITY_ACL] = "ACL",
7667 [FW_DEVLOG_FACILITY_TM] = "TM",
7668 [FW_DEVLOG_FACILITY_QFC] = "QFC",
7669 [FW_DEVLOG_FACILITY_DCB] = "DCB",
7670 [FW_DEVLOG_FACILITY_ETH] = "ETH",
7671 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
7672 [FW_DEVLOG_FACILITY_RI] = "RI",
7673 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
7674 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
7675 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
7676 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
7677 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
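/*
 * Read the firmware's device log out of card memory, convert it to host
 * byte order, and print it starting from the oldest (lowest timestamp) entry.
 */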
7681 sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
7683 int i, j, rc, nentries, first = 0;
7684 struct devlog_params *dparams = &sc->params.devlog;
7685 struct fw_devlog_e *buf, *e;
7686 uint64_t ftstamp = UINT64_MAX;
7688 if (dparams->addr == 0)
7691 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
7692 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
7696 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
7700 nentries = dparams->size / sizeof(struct fw_devlog_e);
7701 for (i = 0; i < nentries; i++) {
7704 if (e->timestamp == 0)
7707 e->timestamp = be64toh(e->timestamp);
7708 e->seqno = be32toh(e->seqno);
7709 for (j = 0; j < 8; j++)
7710 e->params[j] = be32toh(e->params[j]);
7712 if (e->timestamp < ftstamp) {
7713 ftstamp = e->timestamp;
7718 if (buf[first].timestamp == 0)
7719 goto done; /* nothing in the log */
7721 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
7722 "Seq#", "Tstamp", "Level", "Facility", "Message");
7727 if (e->timestamp == 0)
7730 sbuf_printf(sb, "%10d %15ju %8s %8s ",
7731 e->seqno, e->timestamp,
7732 (e->level < nitems(devlog_level_strings) ?
7733 devlog_level_strings[e->level] : "UNKNOWN"),
7734 (e->facility < nitems(devlog_facility_strings) ?
7735 devlog_facility_strings[e->facility] : "UNKNOWN"));
7736 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
7737 e->params[2], e->params[3], e->params[4],
7738 e->params[5], e->params[6], e->params[7]);
7740 if (++i == nentries)
7742 } while (i != first);
7749 sysctl_devlog(SYSCTL_HANDLER_ARGS)
7751 struct adapter *sc = arg1;
7755 rc = sysctl_wire_old_buffer(req, 0);
7758 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7762 rc = sbuf_devlog(sc, sb, M_WAITOK);
7764 rc = sbuf_finish(sb);
7770 t4_os_dump_devlog(struct adapter *sc)
7775 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb)
7777 rc = sbuf_devlog(sc, &sb, M_NOWAIT);
7779 rc = sbuf_finish(&sb);
7781 log(LOG_DEBUG, "%s: device log follows.\n%s",
7782 device_get_nameunit(sc->dev), sbuf_data(&sb));
7789 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
7791 struct adapter *sc = arg1;
7794 struct tp_fcoe_stats stats[MAX_NCHAN];
7795 int i, nchan = sc->chip_params->nchan;
7797 rc = sysctl_wire_old_buffer(req, 0);
7801 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7805 for (i = 0; i < nchan; i++)
7806 t4_get_fcoe_stats(sc, i, &stats[i], 1);
7809 sbuf_printf(sb, " channel 0 channel 1"
7810 " channel 2 channel 3");
7811 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
7812 stats[0].octets_ddp, stats[1].octets_ddp,
7813 stats[2].octets_ddp, stats[3].octets_ddp);
7814 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
7815 stats[0].frames_ddp, stats[1].frames_ddp,
7816 stats[2].frames_ddp, stats[3].frames_ddp);
7817 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
7818 stats[0].frames_drop, stats[1].frames_drop,
7819 stats[2].frames_drop, stats[3].frames_drop);
7821 sbuf_printf(sb, " channel 0 channel 1");
7822 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
7823 stats[0].octets_ddp, stats[1].octets_ddp);
7824 sbuf_printf(sb, "\nframesDDP: %16u %16u",
7825 stats[0].frames_ddp, stats[1].frames_ddp);
7826 sbuf_printf(sb, "\nframesDrop: %16u %16u",
7827 stats[0].frames_drop, stats[1].frames_drop);
7830 rc = sbuf_finish(sb);
7837 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
7839 struct adapter *sc = arg1;
7842 unsigned int map, kbps, ipg, mode;
7843 unsigned int pace_tab[NTX_SCHED];
7845 rc = sysctl_wire_old_buffer(req, 0);
7849 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7853 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
7854 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
7855 t4_read_pace_tbl(sc, pace_tab);
7857 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
7858 "Class IPG (0.1 ns) Flow IPG (us)");
7860 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
7861 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
7862 sbuf_printf(sb, "\n %u %-5s %u ", i,
7863 (mode & (1 << i)) ? "flow" : "class", map & 3);
7865 sbuf_printf(sb, "%9u ", kbps);
7867 sbuf_printf(sb, " disabled ");
7870 sbuf_printf(sb, "%13u ", ipg);
7872 sbuf_printf(sb, " disabled ");
7875 sbuf_printf(sb, "%10u", pace_tab[i]);
7877 sbuf_printf(sb, " disabled");
7880 rc = sbuf_finish(sb);
7887 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
7889 struct adapter *sc = arg1;
7893 struct lb_port_stats s[2];
7894 static const char *stat_name[] = {
7895 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
7896 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
7897 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
7898 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
7899 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
7900 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
7901 "BG2FramesTrunc:", "BG3FramesTrunc:"
7904 rc = sysctl_wire_old_buffer(req, 0);
7908 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7912 memset(s, 0, sizeof(s));
7914 for (i = 0; i < sc->chip_params->nchan; i += 2) {
7915 t4_get_lb_stats(sc, i, &s[0]);
7916 t4_get_lb_stats(sc, i + 1, &s[1]);
7920 sbuf_printf(sb, "%s Loopback %u"
7921 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
7923 for (j = 0; j < nitems(stat_name); j++)
7924 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
7928 rc = sbuf_finish(sb);
7935 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
7938 struct port_info *pi = arg1;
7939 struct link_config *lc = &pi->link_cfg;
7942 rc = sysctl_wire_old_buffer(req, 0);
7945 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
7949 if (lc->link_ok || lc->link_down_rc == 255)
7950 sbuf_printf(sb, "n/a");
7952 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
7954 rc = sbuf_finish(sb);
7967 mem_desc_cmp(const void *a, const void *b)
7969 return ((const struct mem_desc *)a)->base -
7970 ((const struct mem_desc *)b)->base;
7974 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
7982 size = to - from + 1;
7986 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
7987 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
7991 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
7993 struct adapter *sc = arg1;
7996 uint32_t lo, hi, used, alloc;
7997 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
7998 static const char *region[] = {
7999 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
8000 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
8001 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
8002 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
8003 "RQUDP region:", "PBL region:", "TXPBL region:",
8004 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
8005 "On-chip queues:", "TLS keys:",
8007 struct mem_desc avail[4];
8008 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
8009 struct mem_desc *md = mem;
8011 rc = sysctl_wire_old_buffer(req, 0);
8015 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8019 for (i = 0; i < nitems(mem); i++) {
8024 /* Find and sort the populated memory ranges */
8026 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
8027 if (lo & F_EDRAM0_ENABLE) {
8028 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
8029 avail[i].base = G_EDRAM0_BASE(hi) << 20;
8030 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
8034 if (lo & F_EDRAM1_ENABLE) {
8035 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
8036 avail[i].base = G_EDRAM1_BASE(hi) << 20;
8037 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
8041 if (lo & F_EXT_MEM_ENABLE) {
8042 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
8043 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
8044 avail[i].limit = avail[i].base +
8045 (G_EXT_MEM_SIZE(hi) << 20);
8046 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
8049 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
8050 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
8051 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
8052 avail[i].limit = avail[i].base +
8053 (G_EXT_MEM1_SIZE(hi) << 20);
8057 if (!i) /* no memory available */
8059 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
8061 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
8062 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
8063 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
8064 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
8065 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
8066 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
8067 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
8068 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
8069 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
8071 /* the next few have explicit upper bounds */
8072 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
8073 md->limit = md->base - 1 +
8074 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
8075 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
8078 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
8079 md->limit = md->base - 1 +
8080 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
8081 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
8084 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8085 if (chip_id(sc) <= CHELSIO_T5)
8086 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
8088 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
8092 md->idx = nitems(region); /* hide it */
8096 #define ulp_region(reg) \
8097 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
8098 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
8100 ulp_region(RX_ISCSI);
8101 ulp_region(RX_TDDP);
8103 ulp_region(RX_STAG);
8105 ulp_region(RX_RQUDP);
8111 md->idx = nitems(region);
8114 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
8115 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
8118 if (sge_ctrl & F_VFIFO_ENABLE)
8119 size = G_DBVFIFO_SIZE(fifo_size);
8121 size = G_T6_DBVFIFO_SIZE(fifo_size);
8124 md->base = G_BASEADDR(t4_read_reg(sc,
8125 A_SGE_DBVFIFO_BADDR));
8126 md->limit = md->base + (size << 2) - 1;
8131 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
8134 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
8138 md->base = sc->vres.ocq.start;
8139 if (sc->vres.ocq.size)
8140 md->limit = md->base + sc->vres.ocq.size - 1;
8142 md->idx = nitems(region); /* hide it */
8145 md->base = sc->vres.key.start;
8146 if (sc->vres.key.size)
8147 md->limit = md->base + sc->vres.key.size - 1;
8149 md->idx = nitems(region); /* hide it */
8152 /* add any address-space holes, there can be up to 3 */
8153 for (n = 0; n < i - 1; n++)
8154 if (avail[n].limit < avail[n + 1].base)
8155 (md++)->base = avail[n].limit;
8157 (md++)->base = avail[n].limit;
8160 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
8162 for (lo = 0; lo < i; lo++)
8163 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
8164 avail[lo].limit - 1);
8166 sbuf_printf(sb, "\n");
8167 for (i = 0; i < n; i++) {
8168 if (mem[i].idx >= nitems(region))
8169 continue; /* skip holes */
8171 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
8172 mem_region_show(sb, region[mem[i].idx], mem[i].base,
8176 sbuf_printf(sb, "\n");
8177 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
8178 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
8179 mem_region_show(sb, "uP RAM:", lo, hi);
8181 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
8182 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
8183 mem_region_show(sb, "uP Extmem2:", lo, hi);
8185 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
8186 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
8188 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
8189 (lo & F_PMRXNUMCHN) ? 2 : 1);
8191 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
8192 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
8193 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
8195 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
8196 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
8197 sbuf_printf(sb, "%u p-structs\n",
8198 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
8200 for (i = 0; i < 4; i++) {
8201 if (chip_id(sc) > CHELSIO_T5)
8202 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
8204 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
8206 used = G_T5_USED(lo);
8207 alloc = G_T5_ALLOC(lo);
8210 alloc = G_ALLOC(lo);
8212 /* For T6 these are MAC buffer groups */
8213 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
8216 for (i = 0; i < sc->chip_params->nchan; i++) {
8217 if (chip_id(sc) > CHELSIO_T5)
8218 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
8220 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
8222 used = G_T5_USED(lo);
8223 alloc = G_T5_ALLOC(lo);
8226 alloc = G_ALLOC(lo);
8228 /* For T6 these are MAC buffer groups */
8230 "\nLoopback %d using %u pages out of %u allocated",
8234 rc = sbuf_finish(sb);
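/* Convert a TCAM (x, y) entry into an Ethernet address and match mask. */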
8241 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
8245 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
8249 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
8251 struct adapter *sc = arg1;
8255 MPASS(chip_id(sc) <= CHELSIO_T5);
8257 rc = sysctl_wire_old_buffer(req, 0);
8261 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8266 "Idx Ethernet address Mask Vld Ports PF"
8267 " VF Replication P0 P1 P2 P3 ML");
8268 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
8269 uint64_t tcamx, tcamy, mask;
8270 uint32_t cls_lo, cls_hi;
8271 uint8_t addr[ETHER_ADDR_LEN];
8273 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
8274 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
8277 tcamxy2valmask(tcamx, tcamy, addr, &mask);
8278 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
8279 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
8280 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
8281 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
8282 addr[3], addr[4], addr[5], (uintmax_t)mask,
8283 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
8284 G_PORTMAP(cls_hi), G_PF(cls_lo),
8285 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
8287 if (cls_lo & F_REPLICATE) {
8288 struct fw_ldst_cmd ldst_cmd;
8290 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
8291 ldst_cmd.op_to_addrspace =
8292 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
8293 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8294 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
8295 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
8296 ldst_cmd.u.mps.rplc.fid_idx =
8297 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
8298 V_FW_LDST_CMD_IDX(i));
8300 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8304 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
8305 sizeof(ldst_cmd), &ldst_cmd);
8306 end_synchronized_op(sc, 0);
8309 sbuf_printf(sb, "%36d", rc);
8312 sbuf_printf(sb, " %08x %08x %08x %08x",
8313 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
8314 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
8315 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
8316 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
8319 sbuf_printf(sb, "%36s", "");
8321 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
8322 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
8323 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
8327 (void) sbuf_finish(sb);
8329 rc = sbuf_finish(sb);
8336 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
8338 struct adapter *sc = arg1;
8342 MPASS(chip_id(sc) > CHELSIO_T5);
8344 rc = sysctl_wire_old_buffer(req, 0);
8348 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8352 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
8353 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
8355 " P0 P1 P2 P3 ML\n");
8357 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
8358 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
8360 uint64_t tcamx, tcamy, val, mask;
8361 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
8362 uint8_t addr[ETHER_ADDR_LEN];
8364 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
8366 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
8368 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
8369 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
8370 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
8371 tcamy = G_DMACH(val) << 32;
8372 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
8373 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
8374 lookup_type = G_DATALKPTYPE(data2);
8375 port_num = G_DATAPORTNUM(data2);
8376 if (lookup_type && lookup_type != M_DATALKPTYPE) {
8377 /* Inner header VNI */
8378 vniy = ((data2 & F_DATAVIDH2) << 23) |
8379 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
8380 dip_hit = data2 & F_DATADIPHIT;
8385 vlan_vld = data2 & F_DATAVIDH2;
8386 ivlan = G_VIDL(val);
8389 ctl |= V_CTLXYBITSEL(1);
8390 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
8391 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
8392 tcamx = G_DMACH(val) << 32;
8393 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
8394 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
8395 if (lookup_type && lookup_type != M_DATALKPTYPE) {
8396 /* Inner header VNI mask */
8397 vnix = ((data2 & F_DATAVIDH2) << 23) |
8398 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
8404 tcamxy2valmask(tcamx, tcamy, addr, &mask);
8406 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
8407 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
8409 if (lookup_type && lookup_type != M_DATALKPTYPE) {
8410 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
8411 "%012jx %06x %06x - - %3c"
8412 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
8413 addr[1], addr[2], addr[3], addr[4], addr[5],
8414 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
8415 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
8416 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
8417 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
8419 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
8420 "%012jx - - ", i, addr[0], addr[1],
8421 addr[2], addr[3], addr[4], addr[5],
8425 sbuf_printf(sb, "%4u Y ", ivlan);
8427 sbuf_printf(sb, " - N ");
8429 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
8430 lookup_type ? 'I' : 'O', port_num,
8431 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
8432 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
8433 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
8437 if (cls_lo & F_T6_REPLICATE) {
8438 struct fw_ldst_cmd ldst_cmd;
8440 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
8441 ldst_cmd.op_to_addrspace =
8442 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
8443 F_FW_CMD_REQUEST | F_FW_CMD_READ |
8444 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
8445 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
8446 ldst_cmd.u.mps.rplc.fid_idx =
8447 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
8448 V_FW_LDST_CMD_IDX(i));
8450 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8454 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
8455 sizeof(ldst_cmd), &ldst_cmd);
8456 end_synchronized_op(sc, 0);
8459 sbuf_printf(sb, "%72d", rc);
8462 sbuf_printf(sb, " %08x %08x %08x %08x"
8463 " %08x %08x %08x %08x",
8464 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
8465 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
8466 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
8467 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
8468 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
8469 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
8470 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
8471 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
8474 sbuf_printf(sb, "%72s", "");
8476 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
8477 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
8478 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
8479 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
8483 (void) sbuf_finish(sb);
8485 rc = sbuf_finish(sb);
8492 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
8494 struct adapter *sc = arg1;
8497 uint16_t mtus[NMTUS];
8499 rc = sysctl_wire_old_buffer(req, 0);
8503 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8507 t4_read_mtu_tbl(sc, mtus, NULL);
8509 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
8510 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
8511 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
8512 mtus[14], mtus[15]);
8514 rc = sbuf_finish(sb);
8521 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
8523 struct adapter *sc = arg1;
8526 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
8527 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
8528 static const char *tx_stats[MAX_PM_NSTATS] = {
8529 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
8530 "Tx FIFO wait", NULL, "Tx latency"
8532 static const char *rx_stats[MAX_PM_NSTATS] = {
8533 "Read:", "Write bypass:", "Write mem:", "Flush:",
8534 "Rx FIFO wait", NULL, "Rx latency"
8537 rc = sysctl_wire_old_buffer(req, 0);
8541 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8545 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
8546 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
8548 sbuf_printf(sb, " Tx pcmds Tx bytes");
8549 for (i = 0; i < 4; i++) {
8550 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8554 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
8555 for (i = 0; i < 4; i++) {
8556 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8560 if (chip_id(sc) > CHELSIO_T5) {
8562 "\n Total wait Total occupancy");
8563 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8565 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8569 MPASS(i < nitems(tx_stats));
8572 "\n Reads Total wait");
8573 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
8575 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
8579 rc = sbuf_finish(sb);
8586 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
8588 struct adapter *sc = arg1;
8591 struct tp_rdma_stats stats;
8593 rc = sysctl_wire_old_buffer(req, 0);
8597 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8601 mtx_lock(&sc->reg_lock);
8602 t4_tp_get_rdma_stats(sc, &stats, 0);
8603 mtx_unlock(&sc->reg_lock);
8605 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
8606 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
8608 rc = sbuf_finish(sb);
8615 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
8617 struct adapter *sc = arg1;
8620 struct tp_tcp_stats v4, v6;
8622 rc = sysctl_wire_old_buffer(req, 0);
8626 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8630 mtx_lock(&sc->reg_lock);
8631 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
8632 mtx_unlock(&sc->reg_lock);
8636 sbuf_printf(sb, "OutRsts: %20u %20u\n",
8637 v4.tcp_out_rsts, v6.tcp_out_rsts);
8638 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
8639 v4.tcp_in_segs, v6.tcp_in_segs);
8640 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
8641 v4.tcp_out_segs, v6.tcp_out_segs);
8642 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
8643 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
8645 rc = sbuf_finish(sb);
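/*
 * TID usage summary: ATIDs, hi-priority filter TIDs, the (possibly split)
 * TID range, STIDs, FTIDs, ETIDs, and the hardware IPv4/IPv6 counts.
 */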
8652 sysctl_tids(SYSCTL_HANDLER_ARGS)
8654 struct adapter *sc = arg1;
8657 struct tid_info *t = &sc->tids;
8659 rc = sysctl_wire_old_buffer(req, 0);
8663 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8668 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
8673 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
8674 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
8678 sbuf_printf(sb, "TID range: ");
8679 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8682 if (chip_id(sc) <= CHELSIO_T5) {
8683 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
8684 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
8686 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
8687 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
8691 sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1);
8692 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
8694 sbuf_printf(sb, "%u-%u", t->tid_base, t->ntids - 1);
8695 sbuf_printf(sb, ", in use: %u\n",
8696 atomic_load_acq_int(&t->tids_in_use));
8700 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
8701 t->stid_base + t->nstids - 1, t->stids_in_use);
8705 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
8706 t->ftid_end, t->ftids_in_use);
8710 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
8711 t->etid_base + t->netids - 1, t->etids_in_use);
8714 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
8715 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
8716 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
8718 rc = sbuf_finish(sb);
8725 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
8727 struct adapter *sc = arg1;
8730 struct tp_err_stats stats;
8732 rc = sysctl_wire_old_buffer(req, 0);
8736 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8740 mtx_lock(&sc->reg_lock);
8741 t4_tp_get_err_stats(sc, &stats, 0);
8742 mtx_unlock(&sc->reg_lock);
8744 if (sc->chip_params->nchan > 2) {
8745 sbuf_printf(sb, " channel 0 channel 1"
8746 " channel 2 channel 3\n");
8747 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
8748 stats.mac_in_errs[0], stats.mac_in_errs[1],
8749 stats.mac_in_errs[2], stats.mac_in_errs[3]);
8750 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
8751 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
8752 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
8753 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
8754 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
8755 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
8756 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
8757 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
8758 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
8759 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
8760 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
8761 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
8762 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
8763 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
8764 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
8765 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
8766 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
8767 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
8768 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
8769 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
8770 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
8772 sbuf_printf(sb, " channel 0 channel 1\n");
8773 sbuf_printf(sb, "macInErrs: %10u %10u\n",
8774 stats.mac_in_errs[0], stats.mac_in_errs[1]);
8775 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
8776 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
8777 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
8778 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
8779 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
8780 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
8781 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
8782 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
8783 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
8784 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
8785 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
8786 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
8787 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
8788 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
8791 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
8792 stats.ofld_no_neigh, stats.ofld_cong_defer);
8794 rc = sbuf_finish(sb);
8801 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
8803 struct adapter *sc = arg1;
8804 struct tp_params *tpp = &sc->params.tp;
8808 mask = tpp->la_mask >> 16;
8809 rc = sysctl_handle_int(oidp, &mask, 0, req);
8810 if (rc != 0 || req->newptr == NULL)
8814 tpp->la_mask = mask << 16;
8815 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
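/* Print one "name: value" token per descriptor, wrapping before column 79. */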
8827 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
8833 uint64_t mask = (1ULL << f->width) - 1;
8834 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
8835 ((uintmax_t)v >> f->start) & mask);
8837 if (line_size + len >= 79) {
8839 sbuf_printf(sb, "\n ");
8841 sbuf_printf(sb, "%s ", buf);
8842 line_size += len + 1;
8845 sbuf_printf(sb, "\n");
8848 static const struct field_desc tp_la0[] = {
8849 { "RcfOpCodeOut", 60, 4 },
8851 { "WcfState", 52, 4 },
8852 { "RcfOpcSrcOut", 50, 2 },
8853 { "CRxError", 49, 1 },
8854 { "ERxError", 48, 1 },
8855 { "SanityFailed", 47, 1 },
8856 { "SpuriousMsg", 46, 1 },
8857 { "FlushInputMsg", 45, 1 },
8858 { "FlushInputCpl", 44, 1 },
8859 { "RssUpBit", 43, 1 },
8860 { "RssFilterHit", 42, 1 },
8862 { "InitTcb", 31, 1 },
8863 { "LineNumber", 24, 7 },
8865 { "EdataOut", 22, 1 },
8867 { "CdataOut", 20, 1 },
8868 { "EreadPdu", 19, 1 },
8869 { "CreadPdu", 18, 1 },
8870 { "TunnelPkt", 17, 1 },
8871 { "RcfPeerFin", 16, 1 },
8872 { "RcfReasonOut", 12, 4 },
8873 { "TxCchannel", 10, 2 },
8874 { "RcfTxChannel", 8, 2 },
8875 { "RxEchannel", 6, 2 },
8876 { "RcfRxChannel", 5, 1 },
8877 { "RcfDataOutSrdy", 4, 1 },
8879 { "RxOoDvld", 2, 1 },
8880 { "RxCongestion", 1, 1 },
8881 { "TxCongestion", 0, 1 },
8885 static const struct field_desc tp_la1[] = {
8886 { "CplCmdIn", 56, 8 },
8887 { "CplCmdOut", 48, 8 },
8888 { "ESynOut", 47, 1 },
8889 { "EAckOut", 46, 1 },
8890 { "EFinOut", 45, 1 },
8891 { "ERstOut", 44, 1 },
8896 { "DataIn", 39, 1 },
8897 { "DataInVld", 38, 1 },
8899 { "RxBufEmpty", 36, 1 },
8901 { "RxFbCongestion", 34, 1 },
8902 { "TxFbCongestion", 33, 1 },
8903 { "TxPktSumSrdy", 32, 1 },
8904 { "RcfUlpType", 28, 4 },
8906 { "Ebypass", 26, 1 },
8908 { "Static0", 24, 1 },
8910 { "Cbypass", 22, 1 },
8912 { "CPktOut", 20, 1 },
8913 { "RxPagePoolFull", 18, 2 },
8914 { "RxLpbkPkt", 17, 1 },
8915 { "TxLpbkPkt", 16, 1 },
8916 { "RxVfValid", 15, 1 },
8917 { "SynLearned", 14, 1 },
8918 { "SetDelEntry", 13, 1 },
8919 { "SetInvEntry", 12, 1 },
8920 { "CpcmdDvld", 11, 1 },
8921 { "CpcmdSave", 10, 1 },
8922 { "RxPstructsFull", 8, 2 },
8923 { "EpcmdDvld", 7, 1 },
8924 { "EpcmdFlush", 6, 1 },
8925 { "EpcmdTrimPrefix", 5, 1 },
8926 { "EpcmdTrimPostfix", 4, 1 },
8927 { "ERssIp4Pkt", 3, 1 },
8928 { "ERssIp6Pkt", 2, 1 },
8929 { "ERssTcpUdpPkt", 1, 1 },
8930 { "ERssFceFipPkt", 0, 1 },
8934 static const struct field_desc tp_la2[] = {
8935 { "CplCmdIn", 56, 8 },
8936 { "MpsVfVld", 55, 1 },
8943 { "DataIn", 39, 1 },
8944 { "DataInVld", 38, 1 },
8946 { "RxBufEmpty", 36, 1 },
8948 { "RxFbCongestion", 34, 1 },
8949 { "TxFbCongestion", 33, 1 },
8950 { "TxPktSumSrdy", 32, 1 },
8951 { "RcfUlpType", 28, 4 },
8953 { "Ebypass", 26, 1 },
8955 { "Static0", 24, 1 },
8957 { "Cbypass", 22, 1 },
8959 { "CPktOut", 20, 1 },
8960 { "RxPagePoolFull", 18, 2 },
8961 { "RxLpbkPkt", 17, 1 },
8962 { "TxLpbkPkt", 16, 1 },
8963 { "RxVfValid", 15, 1 },
8964 { "SynLearned", 14, 1 },
8965 { "SetDelEntry", 13, 1 },
8966 { "SetInvEntry", 12, 1 },
8967 { "CpcmdDvld", 11, 1 },
8968 { "CpcmdSave", 10, 1 },
8969 { "RxPstructsFull", 8, 2 },
8970 { "EpcmdDvld", 7, 1 },
8971 { "EpcmdFlush", 6, 1 },
8972 { "EpcmdTrimPrefix", 5, 1 },
8973 { "EpcmdTrimPostfix", 4, 1 },
8974 { "ERssIp4Pkt", 3, 1 },
8975 { "ERssIp6Pkt", 2, 1 },
8976 { "ERssTcpUdpPkt", 1, 1 },
8977 { "ERssFceFipPkt", 0, 1 },
8982 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
8985 field_desc_show(sb, *p, tp_la0);
8989 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
8993 sbuf_printf(sb, "\n");
8994 field_desc_show(sb, p[0], tp_la0);
8995 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
8996 field_desc_show(sb, p[1], tp_la0);
9000 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
9004 sbuf_printf(sb, "\n");
9005 field_desc_show(sb, p[0], tp_la0);
9006 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
9007 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
9011 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
9013 struct adapter *sc = arg1;
9018 void (*show_func)(struct sbuf *, uint64_t *, int);
9020 rc = sysctl_wire_old_buffer(req, 0);
9024 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9028 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
9030 t4_tp_read_la(sc, buf, NULL);
9033 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
9036 show_func = tp_la_show2;
9040 show_func = tp_la_show3;
9044 show_func = tp_la_show;
9047 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
9048 (*show_func)(sb, p, i);
9050 rc = sbuf_finish(sb);
9057 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
9059 struct adapter *sc = arg1;
9062 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
9064 rc = sysctl_wire_old_buffer(req, 0);
9068 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9072 t4_get_chan_txrate(sc, nrate, orate);
9074 if (sc->chip_params->nchan > 2) {
9075 sbuf_printf(sb, " channel 0 channel 1"
9076 " channel 2 channel 3\n");
9077 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
9078 nrate[0], nrate[1], nrate[2], nrate[3]);
9079 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
9080 orate[0], orate[1], orate[2], orate[3]);
9082 sbuf_printf(sb, " channel 0 channel 1\n");
9083 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
9084 nrate[0], nrate[1]);
9085 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
9086 orate[0], orate[1]);
9089 rc = sbuf_finish(sb);
9096 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
9098 struct adapter *sc = arg1;
9103 rc = sysctl_wire_old_buffer(req, 0);
9107 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9111 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
9114 t4_ulprx_read_la(sc, buf);
9117 sbuf_printf(sb, " Pcmd Type Message"
9119 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
9120 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
9121 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
9124 rc = sbuf_finish(sb);
9131 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
9133 struct adapter *sc = arg1;
9137 MPASS(chip_id(sc) >= CHELSIO_T5);
9139 rc = sysctl_wire_old_buffer(req, 0);
9143 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9147 v = t4_read_reg(sc, A_SGE_STAT_CFG);
9148 if (G_STATSOURCE_T5(v) == 7) {
9151 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
9153 sbuf_printf(sb, "total %d, incomplete %d",
9154 t4_read_reg(sc, A_SGE_STAT_TOTAL),
9155 t4_read_reg(sc, A_SGE_STAT_MATCH));
9156 } else if (mode == 1) {
9157 sbuf_printf(sb, "total %d, data overflow %d",
9158 t4_read_reg(sc, A_SGE_STAT_TOTAL),
9159 t4_read_reg(sc, A_SGE_STAT_MATCH));
9161 sbuf_printf(sb, "unknown mode %d", mode);
9164 rc = sbuf_finish(sb);
9171 sysctl_cpus(SYSCTL_HANDLER_ARGS)
9173 struct adapter *sc = arg1;
9174 enum cpu_sets op = arg2;
9179 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
9182 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
9186 rc = sysctl_wire_old_buffer(req, 0);
9190 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9195 sbuf_printf(sb, "%d ", i);
9196 rc = sbuf_finish(sb);
9204 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
9206 struct adapter *sc = arg1;
9207 int *old_ports, *new_ports;
9208 int i, new_count, rc;
9210 if (req->newptr == NULL && req->oldptr == NULL)
9211 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
9212 sizeof(sc->tt.tls_rx_ports[0])));
9214 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
9218 if (sc->tt.num_tls_rx_ports == 0) {
9220 rc = SYSCTL_OUT(req, &i, sizeof(i));
9222 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
9223 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
9224 if (rc == 0 && req->newptr != NULL) {
9225 new_count = req->newlen / sizeof(new_ports[0]);
9226 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
9228 rc = SYSCTL_IN(req, new_ports, new_count *
9229 sizeof(new_ports[0]));
9233 /* Allow setting to a single '-1' to clear the list. */
9234 if (new_count == 1 && new_ports[0] == -1) {
9236 old_ports = sc->tt.tls_rx_ports;
9237 sc->tt.tls_rx_ports = NULL;
9238 sc->tt.num_tls_rx_ports = 0;
9240 free(old_ports, M_CXGBE);
9242 for (i = 0; i < new_count; i++) {
9243 if (new_ports[i] < 1 ||
9244 new_ports[i] > IPPORT_MAX) {
9251 old_ports = sc->tt.tls_rx_ports;
9252 sc->tt.tls_rx_ports = new_ports;
9253 sc->tt.num_tls_rx_ports = new_count;
9255 free(old_ports, M_CXGBE);
9259 free(new_ports, M_CXGBE);
9261 end_synchronized_op(sc, 0);
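/*
 * Illustrative usage, assuming the OID lives under the adapter's toe node:
 *
 *   # sysctl dev.t4nex.0.toe.tls_rx_ports=443   # offload TLS RX on port 443
 *   # sysctl dev.t4nex.0.toe.tls_rx_ports=-1    # a single -1 clears the list
 */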
9266 unit_conv(char *buf, size_t len, u_int val, u_int factor)
9268 u_int rem = val % factor;
9271 snprintf(buf, len, "%u", val / factor);
9273 while (rem % 10 == 0)
9275 snprintf(buf, len, "%u.%u", val / factor, rem);
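/*
 * Worked example: unit_conv(buf, len, 2500000, 1000000) produces "2.5" --
 * val / factor == 2, rem == 500000, and the loop above strips the
 * remainder's trailing zeros before it is printed as the fraction.
 */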
9280 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
9282 struct adapter *sc = arg1;
9285 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
9287 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
9291 re = G_TIMERRESOLUTION(res);
9294 /* TCP timestamp tick */
9295 re = G_TIMESTAMPRESOLUTION(res);
9299 re = G_DELAYEDACKRESOLUTION(res);
9305 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
9307 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
9311 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
9313 struct adapter *sc = arg1;
9314 u_int res, dack_re, v;
9315 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
9317 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
9318 dack_re = G_DELAYEDACKRESOLUTION(res);
9319 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
9321 return (sysctl_handle_int(oidp, &v, 0, req));
9325 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
9327 struct adapter *sc = arg1;
9330 u_long tp_tick_us, v;
9331 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
9333 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
9334 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
9335 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
9336 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
9338 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
9339 tp_tick_us = (cclk_ps << tre) / 1000000;
9341 if (reg == A_TP_INIT_SRTT)
9342 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
9344 v = tp_tick_us * t4_read_reg(sc, reg);
9346 return (sysctl_handle_long(oidp, &v, 0, req));
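/*
 * Worked example of the conversion above (illustrative numbers): a core
 * clock of 250000 kHz gives cclk_ps = 1000000000 / 250000 = 4000 ps per
 * cycle.  With a timer resolution (tre) of 8, tp_tick_us = (4000 << 8) /
 * 1000000 == 1 by integer division, so a register holding 1000 ticks is
 * reported as roughly 1000 us.
 */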
9350 * All fields in TP_SHIFT_CNT are 4 bits wide and the starting bit position
9351 * of the field is passed to this function.
9354 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
9356 struct adapter *sc = arg1;
9360 MPASS(idx >= 0 && idx <= 24);
9362 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
9364 return (sysctl_handle_int(oidp, &v, 0, req));
9368 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
9370 struct adapter *sc = arg1;
9374 MPASS(idx >= 0 && idx < 16);
9376 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
9377 shift = (idx & 3) << 3;
9378 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
9380 return (sysctl_handle_int(oidp, &v, 0, req));
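/*
 * Index math above, worked for idx = 5: (5 & ~3) == 4 selects the second
 * backoff register (A_TP_TCP_BACKOFF_REG0 + 4) and shift == (5 & 3) << 3
 * == 8, so the value is read from the second byte of that register -- each
 * register packs four backoff indices, one per byte.
 */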
9384 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
9386 struct vi_info *vi = arg1;
9387 struct adapter *sc = vi->pi->adapter;
9389 struct sge_ofld_rxq *ofld_rxq;
9392 idx = vi->ofld_tmr_idx;
9394 rc = sysctl_handle_int(oidp, &idx, 0, req);
9395 if (rc != 0 || req->newptr == NULL)
9398 if (idx < 0 || idx >= SGE_NTIMERS)
9401 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
9406 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
9407 for_each_ofld_rxq(vi, i, ofld_rxq) {
9408 #ifdef atomic_store_rel_8
9409 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
9411 ofld_rxq->iq.intr_params = v;
9414 vi->ofld_tmr_idx = idx;
9416 end_synchronized_op(sc, LOCK_HELD);
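/*
 * Illustrative usage (OID name assumed from the per-VI sysctl tree):
 *
 *   # sysctl dev.cxgbe.0.holdoff_tmr_idx_ofld=2
 *
 * The new index takes effect immediately: intr_params is rewritten on
 * every offload rx queue, atomically where atomic_store_rel_8 exists.
 */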
9421 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
9423 struct vi_info *vi = arg1;
9424 struct adapter *sc = vi->pi->adapter;
9427 idx = vi->ofld_pktc_idx;
9429 rc = sysctl_handle_int(oidp, &idx, 0, req);
9430 if (rc != 0 || req->newptr == NULL)
9433 if (idx < -1 || idx >= SGE_NCOUNTERS)
9436 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
9441 if (vi->flags & VI_INIT_DONE)
9442 rc = EBUSY; /* cannot be changed once the queues are created */
9444 vi->ofld_pktc_idx = idx;
9446 end_synchronized_op(sc, LOCK_HELD);
9452 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
9456 if (cntxt->cid > M_CTXTQID)
9459 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
9460 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
9463 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
9467 if (sc->flags & FW_OK) {
9468 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
9475 * Read via firmware failed or wasn't even attempted. Read directly via the backdoor.
9478 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
9480 end_synchronized_op(sc, 0);
9485 load_fw(struct adapter *sc, struct t4_data *fw)
9490 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
9495 * The firmware, with the sole exception of the memory parity error
9496 * handler, runs from memory and not flash. It is almost always safe to
9497 * install new firmware on a running system. Just set bit 1 in
9498 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
9500 if (sc->flags & FULL_INIT_DONE &&
9501 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
9506 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
9507 if (fw_data == NULL) {
9512 rc = copyin(fw->data, fw_data, fw->len);
9514 rc = -t4_load_fw(sc, fw_data, fw->len);
9516 free(fw_data, M_CXGBE);
9518 end_synchronized_op(sc, 0);
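/*
 * Reached through the CHELSIO_T4_LOAD_FW ioctl; illustratively, with the
 * cxgbetool utility:
 *
 *   # cxgbetool t4nex0 loadfw t4fw.bin
 *
 * Per the comment above, the load is refused on a fully initialized
 * adapter unless the DF_LOAD_FW_ANYTIME debug flag is set.
 */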
9523 load_cfg(struct adapter *sc, struct t4_data *cfg)
9526 uint8_t *cfg_data = NULL;
9528 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9532 if (cfg->len == 0) {
9534 rc = -t4_load_cfg(sc, NULL, 0);
9538 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
9539 if (cfg_data == NULL) {
9544 rc = copyin(cfg->data, cfg_data, cfg->len);
9546 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
9548 free(cfg_data, M_CXGBE);
9550 end_synchronized_op(sc, 0);
9555 load_boot(struct adapter *sc, struct t4_bootrom *br)
9558 uint8_t *br_data = NULL;
9561 if (br->len > 1024 * 1024)
9564 if (br->pf_offset == 0) {
9566 if (br->pfidx_addr > 7)
9568 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
9569 A_PCIE_PF_EXPROM_OFST)));
9570 } else if (br->pf_offset == 1) {
9572 offset = G_OFFSET(br->pfidx_addr);
9577 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
9583 rc = -t4_load_boot(sc, NULL, offset, 0);
9587 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
9588 if (br_data == NULL) {
9593 rc = copyin(br->data, br_data, br->len);
9595 rc = -t4_load_boot(sc, br_data, offset, br->len);
9597 free(br_data, M_CXGBE);
9599 end_synchronized_op(sc, 0);
9604 load_bootcfg(struct adapter *sc, struct t4_data *bc)
9607 uint8_t *bc_data = NULL;
9609 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
9615 rc = -t4_load_bootcfg(sc, NULL, 0);
9619 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
9620 if (bc_data == NULL) {
9625 rc = copyin(bc->data, bc_data, bc->len);
9627 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
9629 free(bc_data, M_CXGBE);
9631 end_synchronized_op(sc, 0);
9636 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
9639 struct cudbg_init *cudbg;
9642 /* buf is large, don't block if no memory is available */
9643 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
9647 handle = cudbg_alloc_handle();
9648 if (handle == NULL) {
9653 cudbg = cudbg_get_init(handle);
9655 cudbg->print = (cudbg_print_cb)printf;
9658 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
9659 __func__, dump->wr_flash, dump->len, dump->data);
9663 cudbg->use_flash = 1;
9664 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
9665 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
9667 rc = cudbg_collect(handle, buf, &dump->len);
9671 rc = copyout(buf, dump->data, dump->len);
9673 cudbg_free_handle(handle);
9679 free_offload_policy(struct t4_offload_policy *op)
9681 struct offload_rule *r;
9688 for (i = 0; i < op->nrules; i++, r++) {
9689 free(r->bpf_prog.bf_insns, M_CXGBE);
9691 free(op->rule, M_CXGBE);
9696 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
9699 struct t4_offload_policy *op, *old;
9700 struct bpf_program *bf;
9701 const struct offload_settings *s;
9702 struct offload_rule *r;
9705 if (!is_offload(sc))
9708 if (uop->nrules == 0) {
9709 /* Delete installed policies. */
9712 } else if (uop->nrules > 256) { /* arbitrary limit */
9716 /* Copy userspace offload policy to kernel */
9717 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
9718 op->nrules = uop->nrules;
9719 len = op->nrules * sizeof(struct offload_rule);
9720 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9721 rc = copyin(uop->rule, op->rule, len);
9723 free(op->rule, M_CXGBE);
9729 for (i = 0; i < op->nrules; i++, r++) {
9731 /* Validate open_type */
9732 if (r->open_type != OPEN_TYPE_LISTEN &&
9733 r->open_type != OPEN_TYPE_ACTIVE &&
9734 r->open_type != OPEN_TYPE_PASSIVE &&
9735 r->open_type != OPEN_TYPE_DONTCARE) {
9738 * Rules 0 to i have malloc'd filters that need to be
9739 * freed. Rules i+1 to nrules have userspace pointers
9740 * and should be left alone.
9743 free_offload_policy(op);
9747 /* Validate settings */
9749 if ((s->offload != 0 && s->offload != 1) ||
9750 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
9751 s->sched_class < -1 ||
9752 s->sched_class >= sc->chip_params->nsched_cls) {
9758 u = bf->bf_insns; /* userspace ptr */
9759 bf->bf_insns = NULL;
9760 if (bf->bf_len == 0) {
9761 /* legal, matches everything */
9764 len = bf->bf_len * sizeof(*bf->bf_insns);
9765 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
9766 rc = copyin(u, bf->bf_insns, len);
9770 if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
9776 rw_wlock(&sc->policy_lock);
9779 rw_wunlock(&sc->policy_lock);
9780 free_offload_policy(old);
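/*
 * An offload policy normally arrives via the CHELSIO_T4_SET_OFLD_POLICY
 * ioctl; illustratively, with cxgbetool and a COP rules file:
 *
 *   # cxgbetool t4nex0 policy cop.txt
 *
 * A policy with nrules == 0 deletes whatever is installed, per the check
 * at the top of this function.
 */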
9785 #define MAX_READ_BUF_SIZE (128 * 1024)
9787 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
9789 uint32_t addr, remaining, n;
9794 rc = validate_mem_range(sc, mr->addr, mr->len);
9798 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
9800 remaining = mr->len;
9801 dst = (void *)mr->data;
9804 n = min(remaining, MAX_READ_BUF_SIZE);
9805 read_via_memwin(sc, 2, addr, buf, n);
9807 rc = copyout(buf, dst, n);
9819 #undef MAX_READ_BUF_SIZE
9822 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
9826 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
9829 if (i2cd->len > sizeof(i2cd->data))
9832 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
9835 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
9836 i2cd->offset, i2cd->len, &i2cd->data[0]);
9837 end_synchronized_op(sc, 0);
9843 t4_os_find_pci_capability(struct adapter *sc, int cap)
9847 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
9851 t4_os_pci_save_state(struct adapter *sc)
9854 struct pci_devinfo *dinfo;
9857 dinfo = device_get_ivars(dev);
9859 pci_cfg_save(dev, dinfo, 0);
9864 t4_os_pci_restore_state(struct adapter *sc)
9867 struct pci_devinfo *dinfo;
9870 dinfo = device_get_ivars(dev);
9872 pci_cfg_restore(dev, dinfo);
9877 t4_os_portmod_changed(struct port_info *pi)
9879 struct adapter *sc = pi->adapter;
9882 static const char *mod_str[] = {
9883 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
9886 KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
9887 ("%s: port_type %u", __func__, pi->port_type));
9890 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
9892 build_medialist(pi);
9893 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
9894 fixup_link_config(pi);
9895 apply_link_config(pi);
9898 end_synchronized_op(sc, LOCK_HELD);
9902 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
9903 if_printf(ifp, "transceiver unplugged.\n");
9904 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
9905 if_printf(ifp, "unknown transceiver inserted.\n");
9906 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
9907 if_printf(ifp, "unsupported transceiver inserted.\n");
9908 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
9909 if_printf(ifp, "%dGbps %s transceiver inserted.\n",
9910 port_top_speed(pi), mod_str[pi->mod_type]);
9912 if_printf(ifp, "transceiver (type %d) inserted.\n",
9918 t4_os_link_changed(struct port_info *pi)
9922 struct link_config *lc;
9925 PORT_LOCK_ASSERT_OWNED(pi);
9927 for_each_vi(pi, v, vi) {
9934 ifp->if_baudrate = IF_Mbps(lc->speed);
9935 if_link_state_change(ifp, LINK_STATE_UP);
9937 if_link_state_change(ifp, LINK_STATE_DOWN);
9943 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
9947 sx_slock(&t4_list_lock);
9948 SLIST_FOREACH(sc, &t4_list, link) {
9950 * func should not make any assumptions about what state sc is
9951 * in - the only guarantee is that sc->sc_lock is a valid lock.
9955 sx_sunlock(&t4_list_lock);
9959 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
9963 struct adapter *sc = dev->si_drv1;
9965 rc = priv_check(td, PRIV_DRIVER);
9970 case CHELSIO_T4_GETREG: {
9971 struct t4_reg *edata = (struct t4_reg *)data;
9973 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9976 if (edata->size == 4)
9977 edata->val = t4_read_reg(sc, edata->addr);
9978 else if (edata->size == 8)
9979 edata->val = t4_read_reg64(sc, edata->addr);
9985 case CHELSIO_T4_SETREG: {
9986 struct t4_reg *edata = (struct t4_reg *)data;
9988 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
9991 if (edata->size == 4) {
9992 if (edata->val & 0xffffffff00000000)
9994 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
9995 } else if (edata->size == 8)
9996 t4_write_reg64(sc, edata->addr, edata->val);
10001 case CHELSIO_T4_REGDUMP: {
10002 struct t4_regdump *regs = (struct t4_regdump *)data;
10003 int reglen = t4_get_regs_len(sc);
10006 if (regs->len < reglen) {
10007 regs->len = reglen; /* hint to the caller */
10011 regs->len = reglen;
10012 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
10013 get_regs(sc, regs, buf);
10014 rc = copyout(buf, regs->data, reglen);
10015 free(buf, M_CXGBE);
10018 case CHELSIO_T4_GET_FILTER_MODE:
10019 rc = get_filter_mode(sc, (uint32_t *)data);
10021 case CHELSIO_T4_SET_FILTER_MODE:
10022 rc = set_filter_mode(sc, *(uint32_t *)data);
10024 case CHELSIO_T4_GET_FILTER:
10025 rc = get_filter(sc, (struct t4_filter *)data);
10027 case CHELSIO_T4_SET_FILTER:
10028 rc = set_filter(sc, (struct t4_filter *)data);
10030 case CHELSIO_T4_DEL_FILTER:
10031 rc = del_filter(sc, (struct t4_filter *)data);
10033 case CHELSIO_T4_GET_SGE_CONTEXT:
10034 rc = get_sge_context(sc, (struct t4_sge_context *)data);
10036 case CHELSIO_T4_LOAD_FW:
10037 rc = load_fw(sc, (struct t4_data *)data);
10039 case CHELSIO_T4_GET_MEM:
10040 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
10042 case CHELSIO_T4_GET_I2C:
10043 rc = read_i2c(sc, (struct t4_i2c_data *)data);
10045 case CHELSIO_T4_CLEAR_STATS: {
10047 u_int port_id = *(uint32_t *)data;
10048 struct port_info *pi;
10049 struct vi_info *vi;
10051 if (port_id >= sc->params.nports)
10053 pi = sc->port[port_id];
10058 t4_clr_port_stats(sc, pi->tx_chan);
10059 pi->tx_parse_error = 0;
10060 pi->tnl_cong_drops = 0;
10061 mtx_lock(&sc->reg_lock);
10062 for_each_vi(pi, v, vi) {
10063 if (vi->flags & VI_INIT_DONE)
10064 t4_clr_vi_stats(sc, vi->vin);
10066 bg_map = pi->mps_bg_map;
10069 i = ffs(bg_map) - 1;
10070 t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
10071 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
10072 bg_map &= ~(1 << i);
10074 mtx_unlock(&sc->reg_lock);
10077 * Since this command accepts a port, clear stats for
10078 * all VIs on this port.
10080 for_each_vi(pi, v, vi) {
10081 if (vi->flags & VI_INIT_DONE) {
10082 struct sge_rxq *rxq;
10083 struct sge_txq *txq;
10084 struct sge_wrq *wrq;
10086 for_each_rxq(vi, i, rxq) {
10087 #if defined(INET) || defined(INET6)
10088 rxq->lro.lro_queued = 0;
10089 rxq->lro.lro_flushed = 0;
10092 rxq->vlan_extraction = 0;
10095 for_each_txq(vi, i, txq) {
10098 txq->vlan_insertion = 0;
10101 txq->txpkt_wrs = 0;
10102 txq->txpkts0_wrs = 0;
10103 txq->txpkts1_wrs = 0;
10104 txq->txpkts0_pkts = 0;
10105 txq->txpkts1_pkts = 0;
10107 mp_ring_reset_stats(txq->r);
10110 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
10111 /* nothing to clear for each ofld_rxq */
10113 for_each_ofld_txq(vi, i, wrq) {
10114 wrq->tx_wrs_direct = 0;
10115 wrq->tx_wrs_copied = 0;
10119 if (IS_MAIN_VI(vi)) {
10120 wrq = &sc->sge.ctrlq[pi->port_id];
10121 wrq->tx_wrs_direct = 0;
10122 wrq->tx_wrs_copied = 0;
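/*
 * This CHELSIO_T4_CLEAR_STATS case clears MAC, VI, and software queue
 * statistics for one port; illustratively:
 *
 *   # cxgbetool t4nex0 clearstats 0
 */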
10128 case CHELSIO_T4_SCHED_CLASS:
10129 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
10131 case CHELSIO_T4_SCHED_QUEUE:
10132 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
10134 case CHELSIO_T4_GET_TRACER:
10135 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
10137 case CHELSIO_T4_SET_TRACER:
10138 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
10140 case CHELSIO_T4_LOAD_CFG:
10141 rc = load_cfg(sc, (struct t4_data *)data);
10143 case CHELSIO_T4_LOAD_BOOT:
10144 rc = load_boot(sc, (struct t4_bootrom *)data);
10146 case CHELSIO_T4_LOAD_BOOTCFG:
10147 rc = load_bootcfg(sc, (struct t4_data *)data);
10149 case CHELSIO_T4_CUDBG_DUMP:
10150 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
10152 case CHELSIO_T4_SET_OFLD_POLICY:
10153 rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
10164 toe_capability(struct vi_info *vi, int enable)
10167 struct port_info *pi = vi->pi;
10168 struct adapter *sc = pi->adapter;
10170 ASSERT_SYNCHRONIZED_OP(sc);
10172 if (!is_offload(sc))
10176 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
10177 /* TOE is already enabled. */
10182 * We need the port's queues around so that we're able to send
10183 * and receive CPLs to/from the TOE even if the ifnet for this
10184 * port has never been UP'd administratively.
10186 if (!(vi->flags & VI_INIT_DONE)) {
10187 rc = vi_full_init(vi);
10191 if (!(pi->vi[0].flags & VI_INIT_DONE)) {
10192 rc = vi_full_init(&pi->vi[0]);
10197 if (isset(&sc->offload_map, pi->port_id)) {
10198 /* TOE is enabled on another VI of this port. */
10203 if (!uld_active(sc, ULD_TOM)) {
10204 rc = t4_activate_uld(sc, ULD_TOM);
10205 if (rc == EAGAIN) {
10207 "You must kldload t4_tom.ko before trying "
10208 "to enable TOE on a cxgbe interface.\n");
10212 KASSERT(sc->tom_softc != NULL,
10213 ("%s: TOM activated but softc NULL", __func__));
10214 KASSERT(uld_active(sc, ULD_TOM),
10215 ("%s: TOM activated but flag not set", __func__));
10218 /* Activate iWARP and iSCSI too, if the modules are loaded. */
10219 if (!uld_active(sc, ULD_IWARP))
10220 (void) t4_activate_uld(sc, ULD_IWARP);
10221 if (!uld_active(sc, ULD_ISCSI))
10222 (void) t4_activate_uld(sc, ULD_ISCSI);
10225 setbit(&sc->offload_map, pi->port_id);
10229 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
10232 KASSERT(uld_active(sc, ULD_TOM),
10233 ("%s: TOM never initialized?", __func__));
10234 clrbit(&sc->offload_map, pi->port_id);
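/*
 * Illustrative sequence for enabling TOE on an interface (module name per
 * the EAGAIN message above):
 *
 *   # kldload t4_tom
 *   # ifconfig cxgbe0 toe
 *
 * Once TOM is active, the iWARP and iSCSI ULDs are activated
 * opportunistically if their modules are loaded.
 */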
10241 * Add an upper layer driver to the global list.
10244 t4_register_uld(struct uld_info *ui)
10247 struct uld_info *u;
10249 sx_xlock(&t4_uld_list_lock);
10250 SLIST_FOREACH(u, &t4_uld_list, link) {
10251 if (u->uld_id == ui->uld_id) {
10257 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
10260 sx_xunlock(&t4_uld_list_lock);
10265 t4_unregister_uld(struct uld_info *ui)
10268 struct uld_info *u;
10270 sx_xlock(&t4_uld_list_lock);
10272 SLIST_FOREACH(u, &t4_uld_list, link) {
10274 if (ui->refcount > 0) {
10279 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
10285 sx_xunlock(&t4_uld_list_lock);
10290 t4_activate_uld(struct adapter *sc, int id)
10293 struct uld_info *ui;
10295 ASSERT_SYNCHRONIZED_OP(sc);
10297 if (id < 0 || id > ULD_MAX)
10299 rc = EAGAIN; /* kldload the module with this ULD and try again. */
10301 sx_slock(&t4_uld_list_lock);
10303 SLIST_FOREACH(ui, &t4_uld_list, link) {
10304 if (ui->uld_id == id) {
10305 if (!(sc->flags & FULL_INIT_DONE)) {
10306 rc = adapter_full_init(sc);
10311 rc = ui->activate(sc);
10313 setbit(&sc->active_ulds, id);
10320 sx_sunlock(&t4_uld_list_lock);
10326 t4_deactivate_uld(struct adapter *sc, int id)
10329 struct uld_info *ui;
10331 ASSERT_SYNCHRONIZED_OP(sc);
10333 if (id < 0 || id > ULD_MAX)
10337 sx_slock(&t4_uld_list_lock);
10339 SLIST_FOREACH(ui, &t4_uld_list, link) {
10340 if (ui->uld_id == id) {
10341 rc = ui->deactivate(sc);
10343 clrbit(&sc->active_ulds, id);
10350 sx_sunlock(&t4_uld_list_lock);
10356 uld_active(struct adapter *sc, int uld_id)
10359 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
10361 return (isset(&sc->active_ulds, uld_id));
10366 * t = ptr to tunable.
10367 * nc = number of CPUs.
10368 * c = compiled-in default for that tunable.
10371 calculate_nqueues(int *t, int nc, const int c)
10377 nq = *t < 0 ? -*t : c;
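/*
 * Illustrative reading of the expression above: a negative tunable
 * requests exactly -*t queues, zero selects the compiled-in default c,
 * and nc (the CPU count) serves as the natural upper bound.
 */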
10382 * Come up with reasonable defaults for some of the tunables, provided they're
10383 * not set by the user (in which case we'll use the values as is).
10386 tweak_tunables(void)
10388 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
10392 t4_ntxq = rss_getnumbuckets();
10394 calculate_nqueues(&t4_ntxq, nc, NTXQ);
10398 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
10402 t4_nrxq = rss_getnumbuckets();
10404 calculate_nqueues(&t4_nrxq, nc, NRXQ);
10408 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
10410 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
10411 calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
10412 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
10415 calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
10416 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
10418 if (t4_toecaps_allowed == -1)
10419 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
10421 if (t4_rdmacaps_allowed == -1) {
10422 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
10423 FW_CAPS_CONFIG_RDMA_RDMAC;
10426 if (t4_iscsicaps_allowed == -1) {
10427 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
10428 FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
10429 FW_CAPS_CONFIG_ISCSI_T10DIF;
10432 if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
10433 t4_tmr_idx_ofld = TMR_IDX_OFLD;
10435 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
10436 t4_pktc_idx_ofld = PKTC_IDX_OFLD;
10438 if (t4_toecaps_allowed == -1)
10439 t4_toecaps_allowed = 0;
10441 if (t4_rdmacaps_allowed == -1)
10442 t4_rdmacaps_allowed = 0;
10444 if (t4_iscsicaps_allowed == -1)
10445 t4_iscsicaps_allowed = 0;
10449 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
10450 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
10453 if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
10454 t4_tmr_idx = TMR_IDX;
10456 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
10457 t4_pktc_idx = PKTC_IDX;
10459 if (t4_qsize_txq < 128)
10460 t4_qsize_txq = 128;
10462 if (t4_qsize_rxq < 128)
10463 t4_qsize_rxq = 128;
10464 while (t4_qsize_rxq & 7)
10467 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
10470 * Number of VIs to create per-port. The first VI is the "main" regular
10471 * VI for the port. The rest are additional virtual interfaces on the
10472 * same physical port. Note that the main VI does not have native
10473 * netmap support but the extra VIs do.
10475 * Limit the number of VIs per port to the number of available
10476 * MAC addresses per port.
10478 if (t4_num_vis < 1)
10480 if (t4_num_vis > nitems(vi_mac_funcs)) {
10481 t4_num_vis = nitems(vi_mac_funcs);
10482 printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
10485 if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
10486 pcie_relaxed_ordering = 1;
10487 #if defined(__i386__) || defined(__amd64__)
10488 if (cpu_vendor_id == CPU_VENDOR_INTEL)
10489 pcie_relaxed_ordering = 0;
10496 t4_dump_tcb(struct adapter *sc, int tid)
10498 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
10500 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
10501 save = t4_read_reg(sc, reg);
10502 base = sc->memwin[2].mw_base;
10504 /* Dump TCB for the tid */
10505 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
10506 tcb_addr += tid * TCB_SIZE;
10510 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
10512 pf = V_PFNUM(sc->pf);
10513 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
10515 t4_write_reg(sc, reg, win_pos | pf);
10516 t4_read_reg(sc, reg);
10518 off = tcb_addr - win_pos;
10519 for (i = 0; i < 4; i++) {
10521 for (j = 0; j < 8; j++, off += 4)
10522 buf[j] = htonl(t4_read_reg(sc, base + off));
10524 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
10525 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
10529 t4_write_reg(sc, reg, save);
10530 t4_read_reg(sc, reg);
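/*
 * Invoked from ddb via the command registered below, e.g.:
 *
 *   db> show t4 tcb t4nex0 42
 *
 * which dumps the 128-byte TCB of tid 42 as four rows of eight 32-bit
 * words.
 */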
10534 t4_dump_devlog(struct adapter *sc)
10536 struct devlog_params *dparams = &sc->params.devlog;
10537 struct fw_devlog_e e;
10538 int i, first, j, m, nentries, rc;
10539 uint64_t ftstamp = UINT64_MAX;
10541 if (dparams->start == 0) {
10542 db_printf("devlog params not valid\n");
10546 nentries = dparams->size / sizeof(struct fw_devlog_e);
10547 m = fwmtype_to_hwmtype(dparams->memtype);
10549 /* Find the first entry. */
10551 for (i = 0; i < nentries && !db_pager_quit; i++) {
10552 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
10553 sizeof(e), (void *)&e);
10557 if (e.timestamp == 0)
10560 e.timestamp = be64toh(e.timestamp);
10561 if (e.timestamp < ftstamp) {
10562 ftstamp = e.timestamp;
10572 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
10573 sizeof(e), (void *)&e);
10577 if (e.timestamp == 0)
10580 e.timestamp = be64toh(e.timestamp);
10581 e.seqno = be32toh(e.seqno);
10582 for (j = 0; j < 8; j++)
10583 e.params[j] = be32toh(e.params[j]);
10585 db_printf("%10d %15ju %8s %8s ",
10586 e.seqno, e.timestamp,
10587 (e.level < nitems(devlog_level_strings) ?
10588 devlog_level_strings[e.level] : "UNKNOWN"),
10589 (e.facility < nitems(devlog_facility_strings) ?
10590 devlog_facility_strings[e.facility] : "UNKNOWN"));
10591 db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
10592 e.params[3], e.params[4], e.params[5], e.params[6],
10595 if (++i == nentries)
10597 } while (i != first && !db_pager_quit);
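/*
 * Also available from ddb:
 *
 *   db> show t4 devlog t4nex0
 *
 * Entries print oldest-first: the scan above locates the entry with the
 * lowest timestamp and the do/while walks the circular log from there.
 */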
10600 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
10601 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
10603 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
10610 t = db_read_token();
10612 dev = device_lookup_by_name(db_tok_string);
10617 db_printf("usage: show t4 devlog <nexus>\n");
10622 db_printf("device not found\n");
10626 t4_dump_devlog(device_get_softc(dev));
10629 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
10638 t = db_read_token();
10640 dev = device_lookup_by_name(db_tok_string);
10641 t = db_read_token();
10642 if (t == tNUMBER) {
10643 tid = db_tok_number;
10650 db_printf("usage: show t4 tcb <nexus> <tid>\n");
10655 db_printf("device not found\n");
10659 db_printf("invalid tid\n");
10663 t4_dump_tcb(device_get_softc(dev), tid);
10668 * Borrowed from cesa_prep_aes_key().
10670 * NB: The crypto engine wants the words in the decryption key in reverse order.
10674 t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
10676 uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
10680 rijndaelKeySetupEnc(ek, enc_key, kbits);
10682 dkey += (kbits / 8) / 4;
10686 for (i = 0; i < 4; i++)
10687 *--dkey = htobe32(ek[4 * 10 + i]);
10690 for (i = 0; i < 2; i++)
10691 *--dkey = htobe32(ek[4 * 11 + 2 + i]);
10692 for (i = 0; i < 4; i++)
10693 *--dkey = htobe32(ek[4 * 12 + i]);
10696 for (i = 0; i < 4; i++)
10697 *--dkey = htobe32(ek[4 * 13 + i]);
10698 for (i = 0; i < 4; i++)
10699 *--dkey = htobe32(ek[4 * 14 + i]);
10702 MPASS(dkey == dec_key);
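/*
 * Worked example (AES-128, kbits == 128): dkey starts 128 / 8 / 4 == 4
 * words past dec_key, and the kbits == 128 arm copies ek[40..43] -- the
 * last round key of the 10-round schedule -- backwards into it, byte
 * swapped to big endian, leaving dkey back at dec_key for the MPASS above.
 */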
10705 static struct sx mlu; /* mod load unload */
10706 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
10709 mod_event(module_t mod, int cmd, void *arg)
10712 static int loaded = 0;
10717 if (loaded++ == 0) {
10719 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
10720 t4_filter_rpl, CPL_COOKIE_FILTER);
10721 t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
10722 do_l2t_write_rpl, CPL_COOKIE_FILTER);
10723 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
10724 t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
10725 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
10726 t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
10727 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
10728 t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
10729 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
10730 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
10731 t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
10733 sx_init(&t4_list_lock, "T4/T5 adapters");
10734 SLIST_INIT(&t4_list);
10735 callout_init(&fatal_callout, 1);
10737 sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
10738 SLIST_INIT(&t4_uld_list);
10743 t4_tracer_modload();
10751 if (--loaded == 0) {
10754 sx_slock(&t4_list_lock);
10755 if (!SLIST_EMPTY(&t4_list)) {
10757 sx_sunlock(&t4_list_lock);
10761 sx_slock(&t4_uld_list_lock);
10762 if (!SLIST_EMPTY(&t4_uld_list)) {
10764 sx_sunlock(&t4_uld_list_lock);
10765 sx_sunlock(&t4_list_lock);
10770 while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
10771 uprintf("%ju clusters with custom free routine "
10772 "still is use.\n", t4_sge_extfree_refs());
10773 pause("t4unload", 2 * hz);
10776 sx_sunlock(&t4_uld_list_lock);
10778 sx_sunlock(&t4_list_lock);
10780 if (t4_sge_extfree_refs() == 0) {
10781 t4_tracer_modunload();
10783 t4_clip_modunload();
10786 sx_destroy(&t4_uld_list_lock);
10788 sx_destroy(&t4_list_lock);
10789 t4_sge_modunload();
10793 loaded++; /* undo earlier decrement */
10804 static devclass_t t4_devclass, t5_devclass, t6_devclass;
10805 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
10806 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
10808 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
10809 MODULE_VERSION(t4nex, 1);
10810 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
10812 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
10813 #endif /* DEV_NETMAP */
10815 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
10816 MODULE_VERSION(t5nex, 1);
10817 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
10819 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
10820 #endif /* DEV_NETMAP */
10822 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
10823 MODULE_VERSION(t6nex, 1);
10824 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
10826 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
10827 #endif /* DEV_NETMAP */
10829 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
10830 MODULE_VERSION(cxgbe, 1);
10832 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
10833 MODULE_VERSION(cxl, 1);
10835 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
10836 MODULE_VERSION(cc, 1);
10838 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
10839 MODULE_VERSION(vcxgbe, 1);
10841 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
10842 MODULE_VERSION(vcxl, 1);
10844 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
10845 MODULE_VERSION(vcc, 1);