/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>

#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#include <net/rss_config.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp_seq.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#endif
#ifdef DDB
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_child_location(device_t, device_t, struct sbuf *);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static int t4_suspend(device_t);
static int t4_resume(device_t);
static int t4_reset_prepare(device_t, device_t);
static int t4_reset_post(device_t, device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),
	DEVMETHOD(device_suspend, t4_suspend),
	DEVMETHOD(device_resume, t4_resume),

	DEVMETHOD(bus_child_location, t4_child_location),
	DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
	DEVMETHOD(bus_reset_post, t4_reset_post),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};

/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe, vcxgbe_probe),
	DEVMETHOD(device_attach, vcxgbe_attach),
	DEVMETHOD(device_detach, vcxgbe_detach),
	DEVMETHOD_END
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),
	DEVMETHOD(device_suspend, t4_suspend),
	DEVMETHOD(device_resume, t4_resume),

	DEVMETHOD(bus_child_location, t4_child_location),
	DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
	DEVMETHOD(bus_reset_post, t4_reset_post),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};

/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe, t6_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),
	DEVMETHOD(device_suspend, t4_suspend),
	DEVMETHOD(device_resume, t4_resume),

	DEVMETHOD(bus_child_location, t4_child_location),
	DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
	DEVMETHOD(bus_reset_post, t4_reset_post),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
static int cxgbe_transmit(if_t, struct mbuf *);
static void cxgbe_qflush(if_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int cxgbe_snd_tag_alloc(if_t, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
#endif

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;

static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
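
/*
 * Illustrative sketch only (not part of the driver): a hypothetical helper
 * that honors the lock order documented above.  It takes t4_list_lock first
 * and only then ADAPTER_LOCK while walking the adapter list.
 */
#if 0
static struct adapter *
find_adapter_example(device_t dev)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);		/* 1st: the list lock */
	SLIST_FOREACH(sc, &t4_list, link) {
		ADAPTER_LOCK(sc);		/* 2nd: the adapter lock */
		if (sc->dev == dev) {
			sx_sunlock(&t4_list_lock);
			return (sc);	/* returned with ADAPTER_LOCK held */
		}
		ADAPTER_UNLOCK(sc);
	}
	sx_sunlock(&t4_list_lock);
	return (NULL);
}
#endif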

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
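
/*
 * Hedged sketch (this is not the driver's actual tweak_tunables()): how a
 * "-n" compile-time default could be resolved when the driver loads.  The
 * helper name and the limit argument are hypothetical.
 */
#if 0
static void
resolve_tunable_example(int *t, int limit)
{
	if (*t < 0)
		*t = imin(-(*t), limit);	/* "-n" means "up to n" */
}
#endif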

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) parameters");
SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) T5+ parameters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) TOE parameters");

/*
 * Number of queues for tx and rx, NIC and offload.
 */
#define NTXQ 16
static int t4_ntxq = -NTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
    "Number of TX queues per port");
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */

#define NRXQ 8
static int t4_nrxq = -NRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
    "Number of RX queues per port");
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
    "Number of TX queues per VI");

#define NRXQ_VI 2
static int t4_nrxq_vi = -NRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
    "Number of RX queues per VI");

static int t4_rsrv_noflowq = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
    0, "Reserve TX queue 0 of each VI for non-flowid packets");

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
    "Number of offload TX queues per port");

#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
    "Number of offload RX queues per port");

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
    "Number of offload TX queues per VI");

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
    "Number of offload RX queues per VI");

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
    &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
    &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
    &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
    &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
    &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
    &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
    &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
    &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) TOE retransmit backoff values");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[0], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[1], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[2], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[3], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[4], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[5], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[6], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[7], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[8], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[9], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[10], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[11], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[12], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[13], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[14], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[15], 0, "");
#endif
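
/*
 * Hedged sketch (not in the driver): the 16 static declarations above could
 * also be registered in a loop by code that owns a sysctl context.  The
 * static SYSCTL_INT() form is used instead so the OIDs exist as soon as the
 * module is linked, with no context to manage.
 */
#if 0
static void
register_backoff_sysctls_example(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	char name[8];
	int i;

	for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
		snprintf(name, sizeof(name), "%d", i);
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, name, CTLFLAG_RDTUN,
		    &t4_toe_rexmt_backoff[i], 0, "");
	}
}
#endif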

#ifdef DEV_NETMAP
#define NN_MAIN_VI (1 << 0)	/* Native netmap on the main VI */
#define NN_EXTRA_VI (1 << 1)	/* Native netmap on the extra VI(s) */
static int t4_native_netmap = NN_EXTRA_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
    0, "Native netmap support.  bit 0 = main VI, bit 1 = extra VIs");

#define NNMTXQ 8
static int t4_nnmtxq = -NNMTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
    "Number of netmap TX queues");

#define NNMRXQ 8
static int t4_nnmrxq = -NNMRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
    "Number of netmap RX queues");

#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
    "Number of netmap TX queues per VI");

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
    "Number of netmap RX queues per VI");
#endif

/*
 * Holdoff parameters for ports.
 */
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
    0, "Holdoff timer index");
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
    0, "Holdoff packet counter index");
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
    "Number of descriptors in each TX queue");

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
    "Number of descriptors in each RX queue");

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
    0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");

/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF "default"
#define BUILTIN_CF "built-in"
#define FLASH_CF "flash"
#define UWIRE_CF "uwire"
#define FPGA_CF "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
    sizeof(t4_cfg_file), "Firmware configuration file");

/*
 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
 *                 negotiated settings will override rx_pause/tx_pause.
 *                 Otherwise rx_pause/tx_pause are applied forcibly.
 */
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
    &t4_pause_settings, 0,
    "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");

/*
 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
 * -1 to run with the firmware default.  Same as FEC_AUTO (bit 5).
 * 0 to disable FEC.
 */
static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");

/*
 * Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
 * issues to the firmware.  If the firmware doesn't support FORCE_FEC then the
 * driver runs as if this is set to 0.
 * -1 to set FORCE_FEC iff requested_fec != AUTO.  Multiple FEC bits are okay.
 *  0 to never set FORCE_FEC.  requested_fec = AUTO means use the hint from the
 *    transceiver.  Multiple FEC bits may not be okay but will be passed on to
 *    the firmware anyway (may result in l1cfg errors with old firmwares).
 *  1 to always set FORCE_FEC.  Multiple FEC bits are okay.  requested_fec =
 *    AUTO means set all FEC bits that are valid for the speed.
 */
static int t4_force_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, force_fec, CTLFLAG_RDTUN, &t4_force_fec, 0,
    "Controls the use of FORCE_FEC bit in L1 configuration.");

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 * 0 to disable.
 * 1 to enable.
 */
static int t4_autoneg = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
    "Link autonegotiation");

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).  '-n' is the same as 'n' except the firmware
 * version used in the checks is read from the firmware bundled with the driver.
 */
static int t4_fw_install = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
    "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
    &t4_nbmcaps_allowed, 0, "Default NBM capabilities");

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
    &t4_linkcaps_allowed, 0, "Default link capabilities");

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
    &t4_switchcaps_allowed, 0, "Default switch capabilities");

#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
    &t4_niccaps_allowed, 0, "Default NIC capabilities");

static int t4_toecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
    &t4_toecaps_allowed, 0, "Default TCP offload capabilities");

static int t4_rdmacaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
    &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");

static int t4_cryptocaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
    &t4_cryptocaps_allowed, 0, "Default crypto capabilities");

static int t4_iscsicaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
    &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");

static int t4_fcoecaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
    &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");

static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
    0, "Use WC instead of UC for BAR2");

static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
    "Number of VIs per port");

/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 * 0: disable RO.
 * 1: enable RO.
 * 2: leave RO alone.
 */
static int pcie_relaxed_ordering = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
    &pcie_relaxed_ordering, 0,
    "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");

static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN,
    &t4_panic_on_fatal_err, 0, "panic on fatal errors");

static int t4_reset_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN,
    &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors");

static int t4_clock_gate_on_suspend = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, clock_gate_on_suspend, CTLFLAG_RWTUN,
    &t4_clock_gate_on_suspend, 0, "gate the clock on suspend");

static int t4_tx_vm_wr = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
    "Use VM work requests to transmit packets.");

/*
 * Set to non-zero to enable the attack filter.  A packet that matches any of
 * these conditions will get dropped on ingress:
 * 1) IP && source address == destination address.
 * 2) TCP/IP && source address is not a unicast address.
 * 3) TCP/IP && destination address is not a unicast address.
 * 4) IP && source address is loopback (127.x.y.z).
 * 5) IP && destination address is loopback (127.x.y.z).
 * 6) IPv6 && source address == destination address.
 * 7) IPv6 && source address is not a unicast address.
 * 8) IPv6 && source address is loopback (::1/128).
 * 9) IPv6 && destination address is loopback (::1/128).
 * 10) IPv6 && source address is unspecified (::/128).
 * 11) IPv6 && destination address is unspecified (::/128).
 * 12) TCP/IPv6 && source address is multicast (ff00::/8).
 * 13) TCP/IPv6 && destination address is multicast (ff00::/8).
 */
static int t4_attack_filter = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN,
    &t4_attack_filter, 0, "Drop suspicious traffic");

static int t4_drop_ip_fragments = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN,
    &t4_drop_ip_fragments, 0, "Drop IP fragments");

static int t4_drop_pkts_with_l2_errors = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l2_errors, 0,
    "Drop all frames with Layer 2 length or checksum errors");

static int t4_drop_pkts_with_l3_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l3_errors, 0,
    "Drop all frames with IP version, length, or checksum errors");

static int t4_drop_pkts_with_l4_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l4_errors, 0,
    "Drop all frames with Layer 4 length, checksum, or other errors");

static int t4_cop_managed_offloading = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
    &t4_cop_managed_offloading, 0,
    "COP (Connection Offload Policy) controls all TOE offload");

/*
 * This enables KERN_TLS for all adapters if set.
 */
static int t4_kern_tls = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0,
    "Enable KERN_TLS mode for T6 adapters");

SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "cxgbe(4) KERN_TLS parameters");

static int t4_tls_inline_keys = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
    &t4_tls_inline_keys, 0,
    "Always pass TLS keys in work requests (1) or attempt to store TLS keys "
    "in card memory.");

static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
    0, "Attempt to combine TCB field updates with TLS record work requests.");

/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t num_vis;	/* number of VIs for each port */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t ntxq;		/* # of NIC txq's for each port */
	uint16_t nrxq;		/* # of NIC rxq's for each port */
	uint16_t nofldtxq;	/* # of TOE/ETHOFLD txq's for each port */
	uint16_t nofldrxq;	/* # of TOE rxq's for each port */
	uint16_t nnmtxq;	/* # of netmap txq's */
	uint16_t nnmrxq;	/* # of netmap rxq's */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int contact_firmware(struct adapter *);
static int partition_resources(struct adapter *);
static int get_params__pre_init(struct adapter *);
static int set_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int adapter_full_init(struct adapter *);
static void adapter_full_uninit(struct adapter *);
static int vi_full_init(struct vi_info *);
static void vi_full_uninit(struct vi_info *);
static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *);
static void quiesce_txq(struct sge_txq *);
static void quiesce_wrq(struct sge_wrq *);
static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *);
static void quiesce_vi(struct vi_info *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void t4_init_atid_table(struct adapter *);
static void t4_free_atid_table(struct adapter *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct vi_info *);
static void cxgbe_refresh_stats(struct vi_info *);
static void cxgbe_tick(void *);
static void vi_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_link_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
static int sysctl_reset(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int clear_stats(struct adapter *, u_int);
static int hold_clip_addr(struct adapter *, struct t4_clip_addr *);
static int release_clip_addr(struct adapter *, struct t4_clip_addr *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, bool);
static int t4_deactivate_all_uld(struct adapter *);
static void t4_async_event(struct adapter *);
#endif
#ifdef KERN_TLS
static int ktls_capability(struct adapter *, bool);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);
static uint64_t vi_get_counter(if_t, ift_counter);
static uint64_t cxgbe_get_counter(if_t, ift_counter);
static void enable_vxlan_rx(struct adapter *);
static void reset_adapter_task(void *, int);
static void fatal_error_task(void *, int);
static void dump_devlog(struct adapter *);
static void dump_cim_regs(struct adapter *);
static void dump_cimla(struct adapter *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
	{0x5418, "Chelsio T540-BT"},		/* 4 x 10GBaseT */
	{0x5419, "Chelsio T540-LP-BT"},		/* 4 x 10GBaseT */
	{0x541a, "Chelsio T540-SO-BT"},		/* 4 x 10GBaseT, nomem */
	{0x541b, "Chelsio T540-SO-CR"},		/* 4 x 10G, nomem */

	/* Custom */
	{0x5483, "Custom T540-CR"},
	{0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Custom T6225-CR"},
	{0x6481, "Custom T62100-CR"},
	{0x6482, "Custom T6225-CR"},
	{0x6483, "Custom T62100-CR"},
	{0x6484, "Custom T64100-CR"},
	{0x6485, "Custom T6240-SO"},
	{0x6486, "Custom T6225-SO-CR"},
	{0x6487, "Custom T6225-CR"},
};

/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
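
/*
 * Illustrative only (hypothetical helper, not in the driver): the two
 * CTASSERTs above guarantee that code holding a struct sge_iq can locate the
 * accompanying freelist the same way for both NIC and offload rx queues,
 * e.g. via __containerof() on the NIC flavor of the queue.
 */
#if 0
static struct sge_fl *
iq_to_fl_example(struct sge_iq *iq)
{
	/* Layout-compatible with struct sge_ofld_rxq as well. */
	return (&__containerof(iq, struct sge_rxq, iq)->fl);
}
#endif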

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
{
	const char *parent, *name;
	long value;
	int line, unit;

	line = 0;
	parent = device_get_nameunit(sc->dev);
	name = sc->names->ifnet_name;
	while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
		if (resource_long_value(name, unit, "port", &value) == 0 &&
		    value == pi->port_id)
			return (unit);
	}
	return (-1);
}

static void
t4_calibration(void *arg)
{
	struct adapter *sc;
	struct clock_sync *cur, *nex;
	uint64_t hw;
	sbintime_t sbt;
	int next_up;

	sc = (struct adapter *)arg;

	KASSERT((hw_off_limits(sc) == 0), ("hw_off_limits at t4_calibration"));
	hw = t4_read_reg64(sc, A_SGE_TIMESTAMP_LO);
	sbt = sbinuptime();

	cur = &sc->cal_info[sc->cal_current];
	next_up = (sc->cal_current + 1) % CNT_CAL_INFO;
	nex = &sc->cal_info[next_up];
	if (__predict_false(sc->cal_count == 0)) {
		/* First time in, just get the values in */
		cur->hw_cur = hw;
		cur->sbt_cur = sbt;
		sc->cal_count++;
		goto done;
	}

	if (cur->hw_cur == hw) {
		/* The clock is not advancing? */
		sc->cal_count = 0;
		atomic_store_rel_int(&cur->gen, 0);
		goto done;
	}

	seqc_write_begin(&nex->gen);
	nex->hw_prev = cur->hw_cur;
	nex->sbt_prev = cur->sbt_cur;
	nex->hw_cur = hw;
	nex->sbt_cur = sbt;
	seqc_write_end(&nex->gen);
	sc->cal_current = next_up;
done:
	callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration,
	    sc, C_DIRECT_EXEC);
}

static void
t4_calibration_start(struct adapter *sc)
{
	int i;

	/*
	 * If we have not done a calibration yet then do one now;
	 * otherwise start the appropriate timer.
	 */
	for (i = 0; i < CNT_CAL_INFO; i++) {
		sc->cal_info[i].gen = 0;
	}
	sc->cal_current = 0;
	sc->cal_count = 0;
	t4_calibration(sc);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, rqidx, tqidx, nports;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint32_t *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	int ofld_tqidx;
#endif
#ifdef TCP_OFFLOAD
	int ofld_rqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->ctx);
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
		if (pcie_relaxed_ordering == 0 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		} else if (pcie_relaxed_ordering == 1 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		}
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	sc->policy = NULL;
	rw_init(&sc->policy_lock, "connection offload policy");

	callout_init(&sc->ktls_tick, 1);

	callout_init(&sc->cal_callout, 1);

	refcount_init(&sc->vxlan_refcount, 0);

	TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc);
	TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc);

	sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
	sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Contact the firmware and try to become the master driver. */
	rc = contact_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */
	MPASS(sc->flags & FW_OK);

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	if (sc->flags & MASTER_PF) {
		rc = partition_resources(sc);
		if (rc != 0)
			goto done; /* error message displayed already */
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		if (is_bt(pi->port_type))
			setbit(&sc->bt_map, pi->tx_chan);
		else
			MPASS(!isset(&sc->bt_map, pi->tx_chan));

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/*
		 * The MPS counter for FCS errors doesn't work correctly on the
		 * T6 so we use the MAC counter here.  Which MAC is in use
		 * depends on the link settings which will be known when the
		 * link comes up.
		 */
		if (is_t6(sc)) {
			pi->fcs_reg = -1;
		} else if (is_t4(sc)) {
			pi->fcs_reg = PORT_REG(pi->tx_chan,
			    A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
		} else {
			pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
			    A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
		}

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		PORT_LOCK(pi);
		init_link_config(pi);
		fixup_link_config(pi);
		build_medialist(pi);
		if (fixed_ifmedia(pi))
			pi->flags |= FIXED_IFMEDIA;
		PORT_UNLOCK(pi);

		pi->dev = device_add_child(dev, sc->names->ifnet_name,
		    t4_ifnet_unit(sc, pi));
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	nports = sc->params.nports;
	rc = cfg_itype_and_nqueues(sc, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	num_vis = iaq.num_vis;
	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = nports * iaq.nrxq;
	s->ntxq = nports * iaq.ntxq;
	if (num_vis > 1) {
		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += nports;		/* ctrl queues: 1 per port */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (is_offload(sc) || is_ethoffload(sc)) {
		s->nofldtxq = nports * iaq.nofldtxq;
		if (num_vis > 1)
			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
		s->neq += s->nofldtxq;

		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = nports * iaq.nofldrxq;
		if (num_vis > 1)
			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
		s->neq += s->nofldrxq;	/* free list */
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = 0;
	s->nnmtxq = 0;
	if (t4_native_netmap & NN_MAIN_VI) {
		s->nnmrxq += nports * iaq.nnmrxq;
		s->nnmtxq += nports * iaq.nnmtxq;
	}
	if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
		s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif
	MPASS(s->niq <= s->iqmap_sz);
	MPASS(s->neq <= s->eqmap_sz);

	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_smt(sc, M_WAITOK);
	t4_init_tx_sched(sc);
	t4_init_atid_table(sc);
#ifdef RATELIMIT
	t4_init_etid_table(sc);
#endif
#ifdef INET6
	t4_init_clip_table(sc);
#endif
	if (sc->vres.key.size != 0)
		sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
		    sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	ofld_tqidx = 0;
#endif
#ifdef TCP_OFFLOAD
	ofld_rqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->adapter = sc;
			vi->first_intr = -1;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			vi->tmr_idx = t4_tmr_idx;
			vi->pktc_idx = t4_pktc_idx;
			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			vi->first_ofld_txq = ofld_tqidx;
			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;

			ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
			vi->first_nm_rxq = nm_rqidx;
			vi->first_nm_txq = nm_tqidx;
			if (j == 0) {
				vi->nnmrxq = iaq.nnmrxq;
				vi->nnmtxq = iaq.nnmtxq;
			} else {
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
			}
			nm_rqidx += vi->nnmrxq;
			nm_tqidx += vi->nnmtxq;
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}
	t4_calibration_start(sc);

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_child_location(device_t bus, device_t dev, struct sbuf *sb)
{
	struct adapter *sc;
	struct port_info *pi;
	int i;

	sc = device_get_softc(bus);
	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL && pi->dev == dev) {
			sbuf_printf(sb, "port=%d", pi->port_id);
			break;
		}
	}

	return (0);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

static int
t4_detach(device_t dev)
{
	int rc;

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

#ifdef TCP_OFFLOAD
	rc = t4_deactivate_all_uld(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to detach upper layer drivers: %d\n", rc);
		return (rc);
	}
#endif

	if (sc->cdev)
		destroy_dev(sc->cdev);

	sx_xlock(&t4_list_lock);
	SLIST_REMOVE(&t4_list, sc, adapter, link);
	sx_xunlock(&t4_list_lock);

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi != NULL) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}
	callout_stop(&sc->cal_callout);
	callout_drain(&sc->cal_callout);
	device_delete_children(dev);
	sysctl_ctx_free(&sc->ctx);
	adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);
	if (sc->smt)
		t4_free_smt(sc->smt);
	t4_free_atid_table(sc);
#ifdef RATELIMIT
	t4_free_etid_table(sc);
#endif
	if (sc->key_map)
		vmem_destroy(sc->key_map);
#ifdef INET6
	t4_destroy_clip_table(sc);
#endif

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tids.hpftid_tab, M_CXGBE);
	free_hftid_hash(&sc->tids);
	free(sc->tids.tid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);

	callout_drain(&sc->ktls_tick);
	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock)) {
		mtx_destroy(&sc->tids.ftid_lock);
		cv_destroy(&sc->tids.ftid_cv);
	}
	if (mtx_initialized(&sc->tids.atid_lock))
		mtx_destroy(&sc->tids.atid_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	if (rw_initialized(&sc->policy_lock)) {
		rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
		if (sc->policy != NULL)
			free_offload_policy(sc->policy);
#endif
	}

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	mtx_destroy(&sc->sfl_lock);
	mtx_destroy(&sc->reg_lock);
	mtx_destroy(&sc->sc_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static bool
ok_to_reset(struct adapter *sc)
{
	struct tid_info *t = &sc->tids;
	struct port_info *pi;
	struct vi_info *vi;
	int i, j;
	int caps = IFCAP_TOE | IFCAP_NETMAP | IFCAP_TXRTLMT;

	if (is_t6(sc))
		caps |= IFCAP_TXTLS;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(!(sc->flags & IS_VF));

	for_each_port(sc, i) {
		pi = sc->port[i];
		for_each_vi(pi, j, vi) {
			if (if_getcapenable(vi->ifp) & caps)
				return (false);
		}
	}

	if (atomic_load_int(&t->tids_in_use) > 0)
		return (false);
	if (atomic_load_int(&t->stids_in_use) > 0)
		return (false);
	if (atomic_load_int(&t->atids_in_use) > 0)
		return (false);
	if (atomic_load_int(&t->ftids_in_use) > 0)
		return (false);
	if (atomic_load_int(&t->hpftids_in_use) > 0)
		return (false);
	if (atomic_load_int(&t->etids_in_use) > 0)
		return (false);

	return (true);
}

static inline int
stop_adapter(struct adapter *sc)
{
	if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED)))
		return (1);	/* Already stopped. */
	return (t4_shutdown_adapter(sc));
}

static int
t4_suspend(device_t dev)
{
	struct adapter *sc = device_get_softc(dev);
	struct port_info *pi;
	struct vi_info *vi;
	if_t ifp;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	struct sge_wrq *wrq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_ofld_txq *ofld_txq;
#endif
	int rc, i, j, k;

	CH_ALERT(sc, "suspend requested\n");

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus");
	if (rc != 0)
		return (ENXIO);

	/* XXX: Can the kernel call suspend repeatedly without resume? */
	MPASS(!hw_off_limits(sc));

	if (!ok_to_reset(sc)) {
		/* XXX: should list what resource is preventing suspend. */
		CH_ERR(sc, "not safe to suspend.\n");
		rc = EBUSY;
		goto done;
	}

	/* No more DMA or interrupts. */
	stop_adapter(sc);
1982 /* Quiesce all activity. */
1983 for_each_port(sc, i) {
1985 pi->vxlan_tcam_entry = false;
1988 if (pi->up_vis > 0) {
1990 * t4_shutdown_adapter has already shut down all the
1991 * PHYs but it also disables interrupts and DMA so there
1992 * won't be a link interrupt. So we update the state
1993 * manually and inform the kernel.
1995 pi->link_cfg.link_ok = false;
1996 t4_os_link_changed(pi);
2000 for_each_vi(pi, j, vi) {
2001 vi->xact_addr_filt = -1;
2002 mtx_lock(&vi->tick_mtx);
2003 vi->flags |= VI_SKIP_STATS;
2004 mtx_unlock(&vi->tick_mtx);
2005 if (!(vi->flags & VI_INIT_DONE))
2009 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2010 mtx_lock(&vi->tick_mtx);
2011 callout_stop(&vi->tick);
2012 mtx_unlock(&vi->tick_mtx);
2013 callout_drain(&vi->tick);
2017 /* Note that the HW is not available. */
2019 for_each_txq(vi, k, txq) {
2021 txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED);
2024 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2025 for_each_ofld_txq(vi, k, ofld_txq) {
2026 ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
2029 for_each_rxq(vi, k, rxq) {
2030 rxq->iq.flags &= ~IQ_HW_ALLOCATED;
2032 #if defined(TCP_OFFLOAD)
2033 for_each_ofld_rxq(vi, k, ofld_rxq) {
2034 ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED;
2041 if (sc->flags & FULL_INIT_DONE) {
2043 wrq = &sc->sge.ctrlq[i];
2044 wrq->eq.flags &= ~EQ_HW_ALLOCATED;
2048 if (sc->flags & FULL_INIT_DONE) {
2049 /* Firmware event queue */
2050 sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED;
2051 quiesce_iq_fl(sc, &sc->sge.fwq, NULL);
2054 /* Stop calibration */
2055 callout_stop(&sc->cal_callout);
2056 callout_drain(&sc->cal_callout);
2058 /* Mark the adapter totally off limits. */
2059 mtx_lock(&sc->reg_lock);
2060 atomic_set_int(&sc->error_flags, HW_OFF_LIMITS);
2061 sc->flags &= ~(FW_OK | MASTER_PF);
2062 sc->reset_thread = NULL;
2063 mtx_unlock(&sc->reg_lock);
2065 if (t4_clock_gate_on_suspend) {
2066 t4_set_reg_field(sc, A_PMU_PART_CG_PWRMODE, F_MA_PART_CGEN |
2067 F_LE_PART_CGEN | F_EDC1_PART_CGEN | F_EDC0_PART_CGEN |
2068 F_TP_PART_CGEN | F_PDP_PART_CGEN | F_SGE_PART_CGEN, 0);
2071 CH_ALERT(sc, "suspend completed.\n");
2073 end_synchronized_op(sc, 0);
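/*
 * A snapshot of the adapter's capabilities and parameters, taken before
 * a reset and compared with what the firmware reports afterwards.  The
 * resume bails out if the two don't match, because the driver's queues
 * and TID tables were sized based on the old values.
 */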
2077 struct adapter_pre_reset_state {
2081 uint16_t switchcaps;
2085 uint16_t cryptocaps;
2092 struct adapter_params params;
2093 struct t4_virt_res vres;
2094 struct tid_info tids;
2103 save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
2106 ASSERT_SYNCHRONIZED_OP(sc);
2108 o->flags = sc->flags;
2110 o->nbmcaps = sc->nbmcaps;
2111 o->linkcaps = sc->linkcaps;
2112 o->switchcaps = sc->switchcaps;
2113 o->niccaps = sc->niccaps;
2114 o->toecaps = sc->toecaps;
2115 o->rdmacaps = sc->rdmacaps;
2116 o->cryptocaps = sc->cryptocaps;
2117 o->iscsicaps = sc->iscsicaps;
2118 o->fcoecaps = sc->fcoecaps;
2120 o->cfcsum = sc->cfcsum;
2121 MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file));
2122 memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file));
2124 o->params = sc->params;
2129 o->rawf_base = sc->rawf_base;
2130 o->nrawf = sc->nrawf;
2134 compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
2138 ASSERT_SYNCHRONIZED_OP(sc);
2141 #define COMPARE_CAPS(c) do { \
2142 if (o->c##caps != sc->c##caps) { \
2143 CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \
2150 COMPARE_CAPS(switch);
2154 COMPARE_CAPS(crypto);
2155 COMPARE_CAPS(iscsi);
2159 /* Firmware config file */
2160 if (o->cfcsum != sc->cfcsum) {
2161 CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file,
2162 o->cfcsum, sc->cfg_file, sc->cfcsum);
2166 #define COMPARE_PARAM(p, name) do { \
2167 if (o->p != sc->p) { \
2168 CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \
2172 COMPARE_PARAM(sge.iq_start, iq_start);
2173 COMPARE_PARAM(sge.eq_start, eq_start);
2174 COMPARE_PARAM(tids.ftid_base, ftid_base);
2175 COMPARE_PARAM(tids.ftid_end, ftid_end);
2176 COMPARE_PARAM(tids.nftids, nftids);
2177 COMPARE_PARAM(vres.l2t.start, l2t_start);
2178 COMPARE_PARAM(vres.l2t.size, l2t_size);
2179 COMPARE_PARAM(sge.iqmap_sz, iqmap_sz);
2180 COMPARE_PARAM(sge.eqmap_sz, eqmap_sz);
2181 COMPARE_PARAM(tids.tid_base, tid_base);
2182 COMPARE_PARAM(tids.hpftid_base, hpftid_base);
2183 COMPARE_PARAM(tids.hpftid_end, hpftid_end);
2184 COMPARE_PARAM(tids.nhpftids, nhpftids);
2185 COMPARE_PARAM(rawf_base, rawf_base);
2186 COMPARE_PARAM(nrawf, nrawf);
2187 COMPARE_PARAM(params.mps_bg_map, mps_bg_map);
2188 COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support);
2189 COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl);
2190 COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support);
2191 COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr);
2192 COMPARE_PARAM(tids.ntids, ntids);
2193 COMPARE_PARAM(tids.etid_base, etid_base);
2194 COMPARE_PARAM(tids.etid_end, etid_end);
2195 COMPARE_PARAM(tids.netids, netids);
2196 COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred);
2197 COMPARE_PARAM(params.ethoffload, ethoffload);
2198 COMPARE_PARAM(tids.natids, natids);
2199 COMPARE_PARAM(tids.stid_base, stid_base);
2200 COMPARE_PARAM(vres.ddp.start, ddp_start);
2201 COMPARE_PARAM(vres.ddp.size, ddp_size);
2202 COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred);
2203 COMPARE_PARAM(vres.stag.start, stag_start);
2204 COMPARE_PARAM(vres.stag.size, stag_size);
2205 COMPARE_PARAM(vres.rq.start, rq_start);
2206 COMPARE_PARAM(vres.rq.size, rq_size);
2207 COMPARE_PARAM(vres.pbl.start, pbl_start);
2208 COMPARE_PARAM(vres.pbl.size, pbl_size);
2209 COMPARE_PARAM(vres.qp.start, qp_start);
2210 COMPARE_PARAM(vres.qp.size, qp_size);
2211 COMPARE_PARAM(vres.cq.start, cq_start);
2212 COMPARE_PARAM(vres.cq.size, cq_size);
2213 COMPARE_PARAM(vres.ocq.start, ocq_start);
2214 COMPARE_PARAM(vres.ocq.size, ocq_size);
2215 COMPARE_PARAM(vres.srq.start, srq_start);
2216 COMPARE_PARAM(vres.srq.size, srq_size);
2217 COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp);
2218 COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter);
2219 COMPARE_PARAM(vres.iscsi.start, iscsi_start);
2220 COMPARE_PARAM(vres.iscsi.size, iscsi_size);
2221 COMPARE_PARAM(vres.key.start, key_start);
2222 COMPARE_PARAM(vres.key.size, key_size);
2223 #undef COMPARE_PARAM
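/*
 * device_resume method.  Re-contacts the firmware, verifies that the
 * adapter came back with the same capabilities and parameters it had
 * before the suspend/reset, and then rebuilds the driver and hardware
 * state that t4_suspend tore down.
 */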
2229 t4_resume(device_t dev)
2231 struct adapter *sc = device_get_softc(dev);
2232 struct adapter_pre_reset_state *old_state = NULL;
2233 struct port_info *pi;
2236 struct sge_txq *txq;
2239 CH_ALERT(sc, "resume requested.\n");
2241 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res");
2244 MPASS(hw_off_limits(sc));
2245 MPASS((sc->flags & FW_OK) == 0);
2246 MPASS((sc->flags & MASTER_PF) == 0);
2247 MPASS(sc->reset_thread == NULL);
2248 sc->reset_thread = curthread;
2250 /* Register access is expected to work by the time we're here. */
2251 if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) {
2252 CH_ERR(sc, "%s: can't read device registers\n", __func__);
2257 /* Note that HW_OFF_LIMITS is cleared a bit later. */
2258 atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR | ADAP_STOPPED);
2260 /* Restore memory window. */
2263 /* Go no further if recovery mode has been requested. */
2264 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
2265 CH_ALERT(sc, "recovery mode on resume.\n");
2267 mtx_lock(&sc->reg_lock);
2268 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
2269 mtx_unlock(&sc->reg_lock);
2273 old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK);
2274 save_caps_and_params(sc, old_state);
2276 /* Reestablish contact with firmware and become the primary PF. */
2277 rc = contact_firmware(sc);
2279 goto done; /* error message displayed already */
2280 MPASS(sc->flags & FW_OK);
2282 if (sc->flags & MASTER_PF) {
2283 rc = partition_resources(sc);
2285 goto done; /* error message displayed already */
2289 rc = get_params__post_init(sc);
2291 goto done; /* error message displayed already */
2293 rc = set_params__post_init(sc);
2295 goto done; /* error message displayed already */
2297 rc = compare_caps_and_params(sc, old_state);
2299 goto done; /* error message displayed already */
2301 for_each_port(sc, i) {
2304 MPASS(pi->vi != NULL);
2305 MPASS(pi->vi[0].dev == pi->dev);
2307 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
2310 "failed to re-initialize port %d: %d\n", i, rc);
2313 MPASS(sc->chan_map[pi->tx_chan] == i);
2316 fixup_link_config(pi);
2317 build_medialist(pi);
2319 for_each_vi(pi, j, vi) {
2322 rc = alloc_extra_vi(sc, pi, vi);
2325 "failed to re-allocate extra VI: %d\n", rc);
2332 * Interrupts and queues are about to be enabled and other threads will
2333 * want to access the hardware too. It is safe to do so. Note that
2334 * this thread is still in the middle of a synchronized_op.
2336 mtx_lock(&sc->reg_lock);
2337 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
2338 mtx_unlock(&sc->reg_lock);
2340 if (sc->flags & FULL_INIT_DONE) {
2341 rc = adapter_full_init(sc);
2343 CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc);
2347 if (sc->vxlan_refcount > 0)
2348 enable_vxlan_rx(sc);
2350 for_each_port(sc, i) {
2352 for_each_vi(pi, j, vi) {
2353 mtx_lock(&vi->tick_mtx);
2354 vi->flags &= ~VI_SKIP_STATS;
2355 mtx_unlock(&vi->tick_mtx);
2356 if (!(vi->flags & VI_INIT_DONE))
2358 rc = vi_full_init(vi);
2360 CH_ERR(vi, "failed to re-initialize "
2361 "interface: %d\n", rc);
2366 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
2369 * Note that we do not setup multicast addresses
2370 * in the first pass. This ensures that the
2371 * unicast DMACs for all VIs on all ports get an
2374 rc = update_mac_settings(ifp, XGMAC_ALL & ~XGMAC_MCADDRS);
2377 CH_ERR(vi, "failed to re-configure MAC: %d\n", rc);
2380 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true,
2383 CH_ERR(vi, "failed to re-enable VI: %d\n", rc);
2386 for_each_txq(vi, k, txq) {
2388 txq->eq.flags |= EQ_ENABLED;
2391 mtx_lock(&vi->tick_mtx);
2392 callout_schedule(&vi->tick, hz);
2393 mtx_unlock(&vi->tick_mtx);
2396 if (pi->up_vis > 0) {
2397 t4_update_port_info(pi);
2398 fixup_link_config(pi);
2399 build_medialist(pi);
2400 apply_link_config(pi);
2401 if (pi->link_cfg.link_ok)
2402 t4_os_link_changed(pi);
2407 /* Now reprogram the L2 multicast addresses. */
2408 for_each_port(sc, i) {
2410 for_each_vi(pi, j, vi) {
2411 if (!(vi->flags & VI_INIT_DONE))
2414 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
2416 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2418 CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc);
2419 rc = 0; /* carry on */
2425 /* Reset all calibration */
2426 t4_calibration_start(sc);
2431 CH_ALERT(sc, "resume completed.\n");
2433 end_synchronized_op(sc, 0);
2434 free(old_state, M_CXGBE);
2439 t4_reset_prepare(device_t dev, device_t child)
2441 struct adapter *sc = device_get_softc(dev);
2443 CH_ALERT(sc, "reset_prepare.\n");
2448 t4_reset_post(device_t dev, device_t child)
2450 struct adapter *sc = device_get_softc(dev);
2452 CH_ALERT(sc, "reset_post.\n");
2457 reset_adapter(struct adapter *sc)
2459 int rc, oldinc, error_flags;
2461 CH_ALERT(sc, "reset requested.\n");
2463 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1");
2467 if (hw_off_limits(sc)) {
2468 CH_ERR(sc, "adapter is suspended, use resume (not reset).\n");
2473 if (!ok_to_reset(sc)) {
2474 /* XXX: should list what resource is preventing reset. */
2475 CH_ERR(sc, "not safe to reset.\n");
2481 oldinc = sc->incarnation;
2482 end_synchronized_op(sc, 0);
2484 return (rc); /* Error logged already. */
2486 atomic_add_int(&sc->num_resets, 1);
2488 rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0);
2491 CH_ERR(sc, "bus_reset_child failed: %d.\n", rc);
2493 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst2");
2496 error_flags = atomic_load_int(&sc->error_flags);
2497 if (sc->incarnation > oldinc && error_flags == 0) {
2498 CH_ALERT(sc, "bus_reset_child succeeded.\n");
2500 CH_ERR(sc, "adapter did not reset properly, flags "
2501 "0x%08x, error_flags 0x%08x.\n", sc->flags,
2505 end_synchronized_op(sc, 0);
2512 reset_adapter_task(void *arg, int pending)
2514 /* XXX: t4_async_event here? */
2519 cxgbe_probe(device_t dev)
2522 struct port_info *pi = device_get_softc(dev);
2524 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
2525 device_set_desc_copy(dev, buf);
2527 return (BUS_PROBE_DEFAULT);
2530 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
2531 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
2532 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
2533 IFCAP_HWRXTSTMP | IFCAP_MEXTPG)
2534 #define T4_CAP_ENABLE (T4_CAP)
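/*
 * ifnet setup common to the main VI and the extra VIs: sysctl nodes for
 * the queues, ifnet allocation and methods, capabilities, TSO limits,
 * ether_ifattach, and pfil registration.
 */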
2537 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
2541 struct sysctl_ctx_list *ctx = &vi->ctx;
2542 struct sysctl_oid_list *children;
2543 struct pfil_head_args pa;
2544 struct adapter *sc = vi->adapter;
2546 sysctl_ctx_init(ctx);
2547 children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev));
2548 vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq",
2549 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues");
2550 vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq",
2551 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues");
2553 vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq",
2554 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues");
2555 vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq",
2556 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues");
2559 vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq",
2560 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues");
2562 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2563 vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq",
2564 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues");
2567 vi->xact_addr_filt = -1;
2568 mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF);
2569 callout_init_mtx(&vi->tick, &vi->tick_mtx, 0);
2570 if (sc->flags & IS_VF || t4_tx_vm_wr != 0)
2571 vi->flags |= TX_USES_VM_WR;
2573 /* Allocate an ifnet and set it up */
2574 ifp = if_alloc_dev(IFT_ETHER, dev);
2576 device_printf(dev, "Cannot allocate ifnet\n");
2580 if_setsoftc(ifp, vi);
2582 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2583 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2585 if_setinitfn(ifp, cxgbe_init);
2586 if_setioctlfn(ifp, cxgbe_ioctl);
2587 if_settransmitfn(ifp, cxgbe_transmit);
2588 if_setqflushfn(ifp, cxgbe_qflush);
2589 if (vi->pi->nvi > 1 || sc->flags & IS_VF)
2590 if_setgetcounterfn(ifp, vi_get_counter);
2591 else
2592 if_setgetcounterfn(ifp, cxgbe_get_counter);
2593 #if defined(KERN_TLS) || defined(RATELIMIT)
2594 if_setsndtagallocfn(ifp, cxgbe_snd_tag_alloc);
2597 if_setratelimitqueryfn(ifp, cxgbe_ratelimit_query);
2600 if_setcapabilities(ifp, T4_CAP);
2601 if_setcapenable(ifp, T4_CAP_ENABLE);
2602 if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
2603 CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2604 if (chip_id(sc) >= CHELSIO_T6) {
2605 if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
2606 if_setcapenablebit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
2607 if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
2608 CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
2609 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN, 0);
2613 if (vi->nofldrxq != 0)
2614 if_setcapabilitiesbit(ifp, IFCAP_TOE, 0);
2617 if (is_ethoffload(sc) && vi->nofldtxq != 0) {
2618 if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT, 0);
2619 if_setcapenablebit(ifp, IFCAP_TXRTLMT, 0);
2623 if_sethwtsomax(ifp, IP_MAXPACKET);
2624 if (vi->flags & TX_USES_VM_WR)
2625 if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_VM_TSO);
2627 if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_TSO);
2629 if (is_ethoffload(sc) && vi->nofldtxq != 0)
2630 if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_EO_TSO);
2632 if_sethwtsomaxsegsize(ifp, 65536);
2635 if_setcapabilitiesbit(ifp, IFCAP_TXTLS, 0);
2636 if (sc->flags & KERN_TLS_ON || !is_t6(sc))
2637 if_setcapenablebit(ifp, IFCAP_TXTLS, 0);
2641 ether_ifattach(ifp, vi->hw_addr);
2643 if (vi->nnmrxq != 0)
2644 cxgbe_nm_attach(vi);
2646 sb = sbuf_new_auto();
2647 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
2648 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2649 switch (if_getcapabilities(ifp) & (IFCAP_TOE | IFCAP_TXRTLMT)) {
2651 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
2653 case IFCAP_TOE | IFCAP_TXRTLMT:
2654 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
2657 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
2662 if (if_getcapabilities(ifp) & IFCAP_TOE)
2663 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
2666 if (if_getcapabilities(ifp) & IFCAP_NETMAP)
2667 sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
2668 vi->nnmtxq, vi->nnmrxq);
2671 device_printf(dev, "%s\n", sbuf_data(sb));
2676 pa.pa_version = PFIL_VERSION;
2677 pa.pa_flags = PFIL_IN;
2678 pa.pa_type = PFIL_TYPE_ETHERNET;
2679 pa.pa_headname = if_name(ifp);
2680 vi->pfil = pfil_head_register(&pa);
2686 cxgbe_attach(device_t dev)
2688 struct port_info *pi = device_get_softc(dev);
2689 struct adapter *sc = pi->adapter;
2693 sysctl_ctx_init(&pi->ctx);
2695 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
2699 for_each_vi(pi, i, vi) {
2702 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
2703 if (vi->dev == NULL) {
2704 device_printf(dev, "failed to add VI %d\n", i);
2707 device_set_softc(vi->dev, vi);
2712 bus_generic_attach(dev);
2718 cxgbe_vi_detach(struct vi_info *vi)
2722 if (vi->pfil != NULL) {
2723 pfil_head_unregister(vi->pfil);
2727 ether_ifdetach(ifp);
2729 /* Let detach proceed even if these fail. */
2731 if (if_getcapabilities(ifp) & IFCAP_NETMAP)
2732 cxgbe_nm_detach(vi);
2734 cxgbe_uninit_synchronized(vi);
2735 callout_drain(&vi->tick);
2736 mtx_destroy(&vi->tick_mtx);
2737 sysctl_ctx_free(&vi->ctx);
2745 cxgbe_detach(device_t dev)
2747 struct port_info *pi = device_get_softc(dev);
2748 struct adapter *sc = pi->adapter;
2751 /* Detach the extra VIs first. */
2752 rc = bus_generic_detach(dev);
2755 device_delete_children(dev);
2757 sysctl_ctx_free(&pi->ctx);
2758 begin_vi_detach(sc, &pi->vi[0]);
2759 if (pi->flags & HAS_TRACEQ) {
2760 sc->traceq = -1; /* cloner should not create ifnet */
2761 t4_tracer_port_detach(sc);
2763 cxgbe_vi_detach(&pi->vi[0]);
2764 ifmedia_removeall(&pi->media);
2765 end_vi_detach(sc, &pi->vi[0]);
2771 cxgbe_init(void *arg)
2773 struct vi_info *vi = arg;
2774 struct adapter *sc = vi->adapter;
2776 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
2778 cxgbe_init_synchronized(vi);
2779 end_synchronized_op(sc, 0);
2783 cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
2785 int rc = 0, mtu, flags;
2786 struct vi_info *vi = if_getsoftc(ifp);
2787 struct port_info *pi = vi->pi;
2788 struct adapter *sc = pi->adapter;
2789 struct ifreq *ifr = (struct ifreq *)data;
2795 if (mtu < ETHERMIN || mtu > MAX_MTU)
2798 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
2801 if_setmtu(ifp, mtu);
2802 if (vi->flags & VI_INIT_DONE) {
2803 t4_update_fl_bufsize(ifp);
2804 if (!hw_off_limits(sc) &&
2805 if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2806 rc = update_mac_settings(ifp, XGMAC_MTU);
2808 end_synchronized_op(sc, 0);
2812 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
2816 if (hw_off_limits(sc)) {
2821 if (if_getflags(ifp) & IFF_UP) {
2822 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2823 flags = vi->if_flags;
2824 if ((if_getflags(ifp) ^ flags) &
2825 (IFF_PROMISC | IFF_ALLMULTI)) {
2826 rc = update_mac_settings(ifp,
2827 XGMAC_PROMISC | XGMAC_ALLMULTI);
2830 rc = cxgbe_init_synchronized(vi);
2832 vi->if_flags = if_getflags(ifp);
2833 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2834 rc = cxgbe_uninit_synchronized(vi);
2836 end_synchronized_op(sc, 0);
2841 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
2844 if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2845 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2846 end_synchronized_op(sc, 0);
2850 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
2854 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2855 if (mask & IFCAP_TXCSUM) {
2856 if_togglecapenable(ifp, IFCAP_TXCSUM);
2857 if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
2859 if (IFCAP_TSO4 & if_getcapenable(ifp) &&
2860 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
2861 mask &= ~IFCAP_TSO4;
2862 if_setcapenablebit(ifp, 0, IFCAP_TSO4);
2864 "tso4 disabled due to -txcsum.\n");
2867 if (mask & IFCAP_TXCSUM_IPV6) {
2868 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
2869 if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2871 if (IFCAP_TSO6 & if_getcapenable(ifp) &&
2872 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
2873 mask &= ~IFCAP_TSO6;
2874 if_setcapenablebit(ifp, 0, IFCAP_TSO6);
2876 "tso6 disabled due to -txcsum6.\n");
2879 if (mask & IFCAP_RXCSUM)
2880 if_togglecapenable(ifp, IFCAP_RXCSUM);
2881 if (mask & IFCAP_RXCSUM_IPV6)
2882 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
2885 * Note that we leave CSUM_TSO alone (it is always set). The
2886 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
2887 * sending a TSO request our way, so it's sufficient to toggle
2890 if (mask & IFCAP_TSO4) {
2891 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
2892 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
2893 if_printf(ifp, "enable txcsum first.\n");
2897 if_togglecapenable(ifp, IFCAP_TSO4);
2899 if (mask & IFCAP_TSO6) {
2900 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
2901 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
2902 if_printf(ifp, "enable txcsum6 first.\n");
2906 if_togglecapenable(ifp, IFCAP_TSO6);
2908 if (mask & IFCAP_LRO) {
2909 #if defined(INET) || defined(INET6)
2911 struct sge_rxq *rxq;
2913 if_togglecapenable(ifp, IFCAP_LRO);
2914 for_each_rxq(vi, i, rxq) {
2915 if (if_getcapenable(ifp) & IFCAP_LRO)
2916 rxq->iq.flags |= IQ_LRO_ENABLED;
2918 rxq->iq.flags &= ~IQ_LRO_ENABLED;
2923 if (mask & IFCAP_TOE) {
2924 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE;
2926 rc = toe_capability(vi, enable);
2930 if_togglecapenable(ifp, mask);
2933 if (mask & IFCAP_VLAN_HWTAGGING) {
2934 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2935 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2936 rc = update_mac_settings(ifp, XGMAC_VLANEX);
2938 if (mask & IFCAP_VLAN_MTU) {
2939 if_togglecapenable(ifp, IFCAP_VLAN_MTU);
2941 /* Need to find out how to disable auto-mtu-inflation */
2943 if (mask & IFCAP_VLAN_HWTSO)
2944 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2945 if (mask & IFCAP_VLAN_HWCSUM)
2946 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2948 if (mask & IFCAP_TXRTLMT)
2949 if_togglecapenable(ifp, IFCAP_TXRTLMT);
2951 if (mask & IFCAP_HWRXTSTMP) {
2953 struct sge_rxq *rxq;
2955 if_togglecapenable(ifp, IFCAP_HWRXTSTMP);
2956 for_each_rxq(vi, i, rxq) {
2957 if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP)
2958 rxq->iq.flags |= IQ_RX_TIMESTAMP;
2960 rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
2963 if (mask & IFCAP_MEXTPG)
2964 if_togglecapenable(ifp, IFCAP_MEXTPG);
2967 if (mask & IFCAP_TXTLS) {
2968 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TXTLS;
2970 rc = ktls_capability(sc, enable);
2974 if_togglecapenable(ifp, mask & IFCAP_TXTLS);
2977 if (mask & IFCAP_VXLAN_HWCSUM) {
2978 if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM);
2979 if_togglehwassist(ifp, CSUM_INNER_IP6_UDP |
2980 CSUM_INNER_IP6_TCP | CSUM_INNER_IP |
2981 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP);
2983 if (mask & IFCAP_VXLAN_HWTSO) {
2984 if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO);
2985 if_togglehwassist(ifp, CSUM_INNER_IP6_TSO |
2989 #ifdef VLAN_CAPABILITIES
2990 VLAN_CAPABILITIES(ifp);
2993 end_synchronized_op(sc, 0);
2999 rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
3003 struct ifi2creq i2c;
3005 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
3008 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
3012 if (i2c.len > sizeof(i2c.data)) {
3016 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
3019 if (hw_off_limits(sc))
3022 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
3023 i2c.offset, i2c.len, &i2c.data[0]);
3024 end_synchronized_op(sc, 0);
3026 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
3031 rc = ether_ioctl(ifp, cmd, data);
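/*
 * if_transmit method.  Parses the frame once up front, picks a tx queue
 * based on the mbuf's flow id (flowid modulo the queues eligible for
 * RSS traffic, offset by the queues reserved for non-flowid traffic),
 * and hands the packet to that queue's mp_ring.
 */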
3038 cxgbe_transmit(if_t ifp, struct mbuf *m)
3040 struct vi_info *vi = if_getsoftc(ifp);
3041 struct port_info *pi = vi->pi;
3043 struct sge_txq *txq;
3048 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
3049 #if defined(KERN_TLS) || defined(RATELIMIT)
3050 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
3051 MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
3054 if (__predict_false(pi->link_cfg.link_ok == false)) {
3059 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR);
3060 if (__predict_false(rc != 0)) {
3061 if (__predict_true(rc == EINPROGRESS)) {
3062 /* queued by parse_pkt */
3067 MPASS(m == NULL); /* was freed already */
3068 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
3074 txq = &sc->sge.txq[vi->first_txq];
3075 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
3076 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + vi->rsrv_noflowq);
3080 rc = mp_ring_enqueue(txq->r, items, 1, 256);
3081 if (__predict_false(rc != 0))
3088 cxgbe_qflush(if_t ifp)
3090 struct vi_info *vi = if_getsoftc(ifp);
3091 struct sge_txq *txq;
3094 /* queues do not exist if !VI_INIT_DONE. */
3095 if (vi->flags & VI_INIT_DONE) {
3096 for_each_txq(vi, i, txq) {
3098 txq->eq.flags |= EQ_QFLUSH;
3100 while (!mp_ring_is_idle(txq->r)) {
3101 mp_ring_check_drainage(txq->r, 4096);
3105 txq->eq.flags &= ~EQ_QFLUSH;
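/*
 * if_get_counter for ifnets that use VI stats from the firmware (extra
 * VIs and VF devices).  The main VI of each port uses the port's MPS
 * counters instead; see cxgbe_get_counter below.
 */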
3113 vi_get_counter(if_t ifp, ift_counter c)
3115 struct vi_info *vi = if_getsoftc(ifp);
3116 struct fw_vi_stats_vf *s = &vi->stats;
3118 mtx_lock(&vi->tick_mtx);
3119 vi_refresh_stats(vi);
3120 mtx_unlock(&vi->tick_mtx);
3123 case IFCOUNTER_IPACKETS:
3124 return (s->rx_bcast_frames + s->rx_mcast_frames +
3125 s->rx_ucast_frames);
3126 case IFCOUNTER_IERRORS:
3127 return (s->rx_err_frames);
3128 case IFCOUNTER_OPACKETS:
3129 return (s->tx_bcast_frames + s->tx_mcast_frames +
3130 s->tx_ucast_frames + s->tx_offload_frames);
3131 case IFCOUNTER_OERRORS:
3132 return (s->tx_drop_frames);
3133 case IFCOUNTER_IBYTES:
3134 return (s->rx_bcast_bytes + s->rx_mcast_bytes + s->rx_ucast_bytes);
3136 case IFCOUNTER_OBYTES:
3137 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
3138 s->tx_ucast_bytes + s->tx_offload_bytes);
3139 case IFCOUNTER_IMCASTS:
3140 return (s->rx_mcast_frames);
3141 case IFCOUNTER_OMCASTS:
3142 return (s->tx_mcast_frames);
3143 case IFCOUNTER_OQDROPS: {
3147 if (vi->flags & VI_INIT_DONE) {
3149 struct sge_txq *txq;
3151 for_each_txq(vi, i, txq)
3152 drops += counter_u64_fetch(txq->r->dropped);
3160 return (if_get_counter_default(ifp, c));
3165 cxgbe_get_counter(if_t ifp, ift_counter c)
3167 struct vi_info *vi = if_getsoftc(ifp);
3168 struct port_info *pi = vi->pi;
3169 struct port_stats *s = &pi->stats;
3171 mtx_lock(&vi->tick_mtx);
3172 cxgbe_refresh_stats(vi);
3173 mtx_unlock(&vi->tick_mtx);
3176 case IFCOUNTER_IPACKETS:
3177 return (s->rx_frames);
3179 case IFCOUNTER_IERRORS:
3180 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
3181 s->rx_fcs_err + s->rx_len_err);
3183 case IFCOUNTER_OPACKETS:
3184 return (s->tx_frames);
3186 case IFCOUNTER_OERRORS:
3187 return (s->tx_error_frames);
3189 case IFCOUNTER_IBYTES:
3190 return (s->rx_octets);
3192 case IFCOUNTER_OBYTES:
3193 return (s->tx_octets);
3195 case IFCOUNTER_IMCASTS:
3196 return (s->rx_mcast_frames);
3198 case IFCOUNTER_OMCASTS:
3199 return (s->tx_mcast_frames);
3201 case IFCOUNTER_IQDROPS:
3202 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3203 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
3204 s->rx_trunc3 + pi->tnl_cong_drops);
3206 case IFCOUNTER_OQDROPS: {
3210 if (vi->flags & VI_INIT_DONE) {
3212 struct sge_txq *txq;
3214 for_each_txq(vi, i, txq)
3215 drops += counter_u64_fetch(txq->r->dropped);
3223 return (if_get_counter_default(ifp, c));
3227 #if defined(KERN_TLS) || defined(RATELIMIT)
3229 cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
3230 struct m_snd_tag **pt)
3234 switch (params->hdr.type) {
3236 case IF_SND_TAG_TYPE_RATE_LIMIT:
3237 error = cxgbe_rate_tag_alloc(ifp, params, pt);
3241 case IF_SND_TAG_TYPE_TLS:
3243 struct vi_info *vi = if_getsoftc(ifp);
3245 if (is_t6(vi->pi->adapter))
3246 error = t6_tls_tag_alloc(ifp, params, pt);
3260 * The kernel picks a media from the list we had provided but we still validate
3264 cxgbe_media_change(if_t ifp)
3266 struct vi_info *vi = if_getsoftc(ifp);
3267 struct port_info *pi = vi->pi;
3268 struct ifmedia *ifm = &pi->media;
3269 struct link_config *lc = &pi->link_cfg;
3270 struct adapter *sc = pi->adapter;
3273 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
3277 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
3278 /* ifconfig .. media autoselect */
3279 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
3280 rc = ENOTSUP; /* AN not supported by transceiver */
3283 lc->requested_aneg = AUTONEG_ENABLE;
3284 lc->requested_speed = 0;
3285 lc->requested_fc |= PAUSE_AUTONEG;
3287 lc->requested_aneg = AUTONEG_DISABLE;
3288 lc->requested_speed =
3289 ifmedia_baudrate(ifm->ifm_media) / 1000000;
3290 lc->requested_fc = 0;
3291 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
3292 lc->requested_fc |= PAUSE_RX;
3293 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
3294 lc->requested_fc |= PAUSE_TX;
3296 if (pi->up_vis > 0 && !hw_off_limits(sc)) {
3297 fixup_link_config(pi);
3298 rc = apply_link_config(pi);
3302 end_synchronized_op(sc, 0);
3307 * Base media word (without ETHER, pause, link active, etc.) for the port at the
3311 port_mword(struct port_info *pi, uint32_t speed)
3314 MPASS(speed & M_FW_PORT_CAP32_SPEED);
3315 MPASS(powerof2(speed));
3317 switch(pi->port_type) {
3318 case FW_PORT_TYPE_BT_SGMII:
3319 case FW_PORT_TYPE_BT_XFI:
3320 case FW_PORT_TYPE_BT_XAUI:
3323 case FW_PORT_CAP32_SPEED_100M:
3325 case FW_PORT_CAP32_SPEED_1G:
3326 return (IFM_1000_T);
3327 case FW_PORT_CAP32_SPEED_10G:
3331 case FW_PORT_TYPE_KX4:
3332 if (speed == FW_PORT_CAP32_SPEED_10G)
3333 return (IFM_10G_KX4);
3335 case FW_PORT_TYPE_CX4:
3336 if (speed == FW_PORT_CAP32_SPEED_10G)
3337 return (IFM_10G_CX4);
3339 case FW_PORT_TYPE_KX:
3340 if (speed == FW_PORT_CAP32_SPEED_1G)
3341 return (IFM_1000_KX);
3343 case FW_PORT_TYPE_KR:
3344 case FW_PORT_TYPE_BP_AP:
3345 case FW_PORT_TYPE_BP4_AP:
3346 case FW_PORT_TYPE_BP40_BA:
3347 case FW_PORT_TYPE_KR4_100G:
3348 case FW_PORT_TYPE_KR_SFP28:
3349 case FW_PORT_TYPE_KR_XLAUI:
3351 case FW_PORT_CAP32_SPEED_1G:
3352 return (IFM_1000_KX);
3353 case FW_PORT_CAP32_SPEED_10G:
3354 return (IFM_10G_KR);
3355 case FW_PORT_CAP32_SPEED_25G:
3356 return (IFM_25G_KR);
3357 case FW_PORT_CAP32_SPEED_40G:
3358 return (IFM_40G_KR4);
3359 case FW_PORT_CAP32_SPEED_50G:
3360 return (IFM_50G_KR2);
3361 case FW_PORT_CAP32_SPEED_100G:
3362 return (IFM_100G_KR4);
3365 case FW_PORT_TYPE_FIBER_XFI:
3366 case FW_PORT_TYPE_FIBER_XAUI:
3367 case FW_PORT_TYPE_SFP:
3368 case FW_PORT_TYPE_QSFP_10G:
3369 case FW_PORT_TYPE_QSA:
3370 case FW_PORT_TYPE_QSFP:
3371 case FW_PORT_TYPE_CR4_QSFP:
3372 case FW_PORT_TYPE_CR_QSFP:
3373 case FW_PORT_TYPE_CR2_QSFP:
3374 case FW_PORT_TYPE_SFP28:
3375 /* Pluggable transceiver */
3376 switch (pi->mod_type) {
3377 case FW_PORT_MOD_TYPE_LR:
3379 case FW_PORT_CAP32_SPEED_1G:
3380 return (IFM_1000_LX);
3381 case FW_PORT_CAP32_SPEED_10G:
3382 return (IFM_10G_LR);
3383 case FW_PORT_CAP32_SPEED_25G:
3384 return (IFM_25G_LR);
3385 case FW_PORT_CAP32_SPEED_40G:
3386 return (IFM_40G_LR4);
3387 case FW_PORT_CAP32_SPEED_50G:
3388 return (IFM_50G_LR2);
3389 case FW_PORT_CAP32_SPEED_100G:
3390 return (IFM_100G_LR4);
3393 case FW_PORT_MOD_TYPE_SR:
3395 case FW_PORT_CAP32_SPEED_1G:
3396 return (IFM_1000_SX);
3397 case FW_PORT_CAP32_SPEED_10G:
3398 return (IFM_10G_SR);
3399 case FW_PORT_CAP32_SPEED_25G:
3400 return (IFM_25G_SR);
3401 case FW_PORT_CAP32_SPEED_40G:
3402 return (IFM_40G_SR4);
3403 case FW_PORT_CAP32_SPEED_50G:
3404 return (IFM_50G_SR2);
3405 case FW_PORT_CAP32_SPEED_100G:
3406 return (IFM_100G_SR4);
3409 case FW_PORT_MOD_TYPE_ER:
3410 if (speed == FW_PORT_CAP32_SPEED_10G)
3411 return (IFM_10G_ER);
3413 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3414 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3416 case FW_PORT_CAP32_SPEED_1G:
3417 return (IFM_1000_CX);
3418 case FW_PORT_CAP32_SPEED_10G:
3419 return (IFM_10G_TWINAX);
3420 case FW_PORT_CAP32_SPEED_25G:
3421 return (IFM_25G_CR);
3422 case FW_PORT_CAP32_SPEED_40G:
3423 return (IFM_40G_CR4);
3424 case FW_PORT_CAP32_SPEED_50G:
3425 return (IFM_50G_CR2);
3426 case FW_PORT_CAP32_SPEED_100G:
3427 return (IFM_100G_CR4);
3430 case FW_PORT_MOD_TYPE_LRM:
3431 if (speed == FW_PORT_CAP32_SPEED_10G)
3432 return (IFM_10G_LRM);
3434 case FW_PORT_MOD_TYPE_NA:
3435 MPASS(0); /* Not pluggable? */
3437 case FW_PORT_MOD_TYPE_ERROR:
3438 case FW_PORT_MOD_TYPE_UNKNOWN:
3439 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
3441 case FW_PORT_MOD_TYPE_NONE:
3445 case FW_PORT_TYPE_NONE:
3449 return (IFM_UNKNOWN);
3453 cxgbe_media_status(if_t ifp, struct ifmediareq *ifmr)
3455 struct vi_info *vi = if_getsoftc(ifp);
3456 struct port_info *pi = vi->pi;
3457 struct adapter *sc = pi->adapter;
3458 struct link_config *lc = &pi->link_cfg;
3460 if (begin_synchronized_op(sc, vi , SLEEP_OK | INTR_OK, "t4med") != 0)
3464 if (pi->up_vis == 0 && !hw_off_limits(sc)) {
3466 * If all the interfaces are administratively down the firmware
3467 * does not report transceiver changes. Refresh port info here
3468 * so that ifconfig displays accurate ifmedia at all times.
3469 * This is the only reason we have a synchronized op in this
3470 * function. Just PORT_LOCK would have been enough otherwise.
3472 t4_update_port_info(pi);
3473 build_medialist(pi);
3477 ifmr->ifm_status = IFM_AVALID;
3478 if (lc->link_ok == false)
3480 ifmr->ifm_status |= IFM_ACTIVE;
3483 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3484 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
3485 if (lc->fc & PAUSE_RX)
3486 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3487 if (lc->fc & PAUSE_TX)
3488 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3489 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
3492 end_synchronized_op(sc, 0);
3496 vcxgbe_probe(device_t dev)
3499 struct vi_info *vi = device_get_softc(dev);
3501 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, vi - vi->pi->vi);
3503 device_set_desc_copy(dev, buf);
3505 return (BUS_PROBE_DEFAULT);
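/*
 * Allocates a firmware virtual interface (with the MAC function that
 * corresponds to the VI's index within the port) and looks up the VI's
 * slice of the RSS table.
 */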
3509 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
3511 int func, index, rc;
3512 uint32_t param, val;
3514 ASSERT_SYNCHRONIZED_OP(sc);
3516 index = vi - pi->vi;
3517 MPASS(index > 0); /* This function deals with _extra_ VIs only */
3518 KASSERT(index < nitems(vi_mac_funcs),
3519 ("%s: VI %s doesn't have a MAC func", __func__,
3520 device_get_nameunit(vi->dev)));
3521 func = vi_mac_funcs[index];
3522 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
3523 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
3525 CH_ERR(vi, "failed to allocate virtual interface %d"
3526 "for port %d: %d\n", index, pi->port_id, -rc);
3531 if (vi->rss_size == 1) {
3533 * This VI didn't get a slice of the RSS table. Reduce the
3534 * number of VIs being created (hw.cxgbe.num_vis) or modify the
3535 * configuration file (nvi, rssnvi for this PF) if this is a
3538 device_printf(vi->dev, "RSS table not available.\n");
3539 vi->rss_base = 0xffff;
3544 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3545 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
3546 V_FW_PARAMS_PARAM_YZ(vi->viid);
3547 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3549 vi->rss_base = 0xffff;
3551 MPASS((val >> 16) == vi->rss_size);
3552 vi->rss_base = val & 0xffff;
3559 vcxgbe_attach(device_t dev)
3562 struct port_info *pi;
3566 vi = device_get_softc(dev);
3570 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
3573 rc = alloc_extra_vi(sc, pi, vi);
3574 end_synchronized_op(sc, 0);
3578 rc = cxgbe_vi_attach(dev, vi);
3580 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
3587 vcxgbe_detach(device_t dev)
3592 vi = device_get_softc(dev);
3595 begin_vi_detach(sc, vi);
3596 cxgbe_vi_detach(vi);
3597 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
3598 end_vi_detach(sc, vi);
3603 static struct callout fatal_callout;
3604 static struct taskqueue *reset_tq;
3607 delayed_panic(void *arg)
3609 struct adapter *sc = arg;
3611 panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
3615 fatal_error_task(void *arg, int pending)
3617 struct adapter *sc = arg;
3623 if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
3629 if (t4_reset_on_fatal_err) {
3630 CH_ALERT(sc, "resetting on fatal error.\n");
3631 rc = reset_adapter(sc);
3632 if (rc == 0 && t4_panic_on_fatal_err) {
3633 CH_ALERT(sc, "reset was successful, "
3634 "system will NOT panic.\n");
3639 if (t4_panic_on_fatal_err) {
3640 CH_ALERT(sc, "panicking on fatal error (after 30s).\n");
3641 callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
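/*
 * Entry point for fatal errors, from both the interrupt handler and the
 * firmware/mailbox code (fw_error distinguishes the two).  Marks the
 * adapter dead and defers the heavyweight recovery work (reset, or a
 * delayed panic) to a task.
 */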
3646 t4_fatal_err(struct adapter *sc, bool fw_error)
3648 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
3651 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR)))
3655 * We are here because of a firmware error/timeout and not
3656 * because of a hardware interrupt. It is possible (although
3657 * not very likely) that an error interrupt was also raised but
3658 * this thread ran first and inhibited t4_intr_err. We walk the
3659 * main INT_CAUSE registers here to make sure we haven't missed
3660 * anything interesting.
3662 t4_slow_intr_handler(sc, verbose);
3663 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
3665 t4_report_fw_error(sc);
3666 log(LOG_ALERT, "%s: encountered fatal error, adapter stopped (%d).\n",
3667 device_get_nameunit(sc->dev), fw_error);
3668 taskqueue_enqueue(reset_tq, &sc->fatal_error_task);
3672 t4_add_adapter(struct adapter *sc)
3674 sx_xlock(&t4_list_lock);
3675 SLIST_INSERT_HEAD(&t4_list, sc, link);
3676 sx_xunlock(&t4_list_lock);
3680 t4_map_bars_0_and_4(struct adapter *sc)
3682 sc->regs_rid = PCIR_BAR(0);
3683 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3684 &sc->regs_rid, RF_ACTIVE);
3685 if (sc->regs_res == NULL) {
3686 device_printf(sc->dev, "cannot map registers.\n");
3689 sc->bt = rman_get_bustag(sc->regs_res);
3690 sc->bh = rman_get_bushandle(sc->regs_res);
3691 sc->mmio_len = rman_get_size(sc->regs_res);
3692 setbit(&sc->doorbells, DOORBELL_KDB);
3694 sc->msix_rid = PCIR_BAR(4);
3695 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3696 &sc->msix_rid, RF_ACTIVE);
3697 if (sc->msix_res == NULL) {
3698 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
3706 t4_map_bar_2(struct adapter *sc)
3710 * T4: only iWARP driver uses the userspace doorbells. There is no need
3711 * to map it if RDMA is disabled.
3713 if (is_t4(sc) && sc->rdmacaps == 0)
3716 sc->udbs_rid = PCIR_BAR(2);
3717 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3718 &sc->udbs_rid, RF_ACTIVE);
3719 if (sc->udbs_res == NULL) {
3720 device_printf(sc->dev, "cannot map doorbell BAR.\n");
3723 sc->udbs_base = rman_get_virtual(sc->udbs_res);
3725 if (chip_id(sc) >= CHELSIO_T5) {
3726 setbit(&sc->doorbells, DOORBELL_UDB);
3727 #if defined(__i386__) || defined(__amd64__)
3728 if (t5_write_combine) {
3732 * Enable write combining on BAR2. This is the
3733 * userspace doorbell BAR and is split into 128B
3734 * (UDBS_SEG_SIZE) doorbell regions, each associated
3735 * with an egress queue. The first 64B has the doorbell
3736 * and the second 64B can be used to submit a tx work
3737 * request with an implicit doorbell.
3740 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
3741 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
3743 clrbit(&sc->doorbells, DOORBELL_UDB);
3744 setbit(&sc->doorbells, DOORBELL_WCWR);
3745 setbit(&sc->doorbells, DOORBELL_UDBWC);
3747 device_printf(sc->dev,
3748 "couldn't enable write combining: %d\n",
3752 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
3753 t4_write_reg(sc, A_SGE_STAT_CFG,
3754 V_STATSOURCE_T5(7) | mode);
3758 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
3763 struct memwin_init {
3768 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
3769 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3770 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3771 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
3774 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
3775 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3776 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3777 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
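/*
 * Programs the PCIe memory window decoders with addresses relative to
 * BAR0 (T4) or within the BAR (T5+).  The rw_initialized check makes
 * this safe to call again, e.g. on resume, to restore the windows.
 */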
3781 setup_memwin(struct adapter *sc)
3783 const struct memwin_init *mw_init;
3790 * Read low 32b of bar0 indirectly via the hardware backdoor
3791 * mechanism. Works from within PCI passthrough environments
3792 * too, where rman_get_start() can return a different value. We
3793 * need to program the T4 memory window decoders with the actual
3794 * addresses that will be coming across the PCIe link.
3796 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
3797 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
3799 mw_init = &t4_memwin[0];
3801 /* T5+ use the relative offset inside the PCIe BAR */
3804 mw_init = &t5_memwin[0];
3807 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
3808 if (!rw_initialized(&mw->mw_lock)) {
3809 rw_init(&mw->mw_lock, "memory window access");
3810 mw->mw_base = mw_init->base;
3811 mw->mw_aperture = mw_init->aperture;
3815 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
3816 (mw->mw_base + bar0) | V_BIR(0) |
3817 V_WINDOW(ilog2(mw->mw_aperture) - 10));
3818 rw_wlock(&mw->mw_lock);
3819 position_memwin(sc, i, mw->mw_curpos);
3820 rw_wunlock(&mw->mw_lock);
3824 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
3828 * Positions the memory window at the given address in the card's address space.
3829 * There are some alignment requirements and the actual position may be at an
3830 * address prior to the requested address. mw->mw_curpos always has the actual
3831 * position of the window.
3834 position_memwin(struct adapter *sc, int idx, uint32_t addr)
3840 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3841 mw = &sc->memwin[idx];
3842 rw_assert(&mw->mw_lock, RA_WLOCKED);
3846 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
3848 pf = V_PFNUM(sc->pf);
3849 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
3851 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
3852 t4_write_reg(sc, reg, mw->mw_curpos | pf);
3853 t4_read_reg(sc, reg); /* flush */
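/*
 * Reads or writes len bytes at addr in the card's address space through
 * memory window idx.  The window lock is taken as a reader and upgraded
 * to a writer only if the window has to be repositioned.
 */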
3857 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
3863 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3865 /* Memory can only be accessed in naturally aligned 4 byte units */
3866 if (addr & 3 || len & 3 || len <= 0)
3869 mw = &sc->memwin[idx];
3871 rw_rlock(&mw->mw_lock);
3872 mw_end = mw->mw_curpos + mw->mw_aperture;
3873 if (addr >= mw_end || addr < mw->mw_curpos) {
3874 /* Will need to reposition the window */
3875 if (!rw_try_upgrade(&mw->mw_lock)) {
3876 rw_runlock(&mw->mw_lock);
3877 rw_wlock(&mw->mw_lock);
3879 rw_assert(&mw->mw_lock, RA_WLOCKED);
3880 position_memwin(sc, idx, addr);
3881 rw_downgrade(&mw->mw_lock);
3882 mw_end = mw->mw_curpos + mw->mw_aperture;
3884 rw_assert(&mw->mw_lock, RA_RLOCKED);
3885 while (addr < mw_end && len > 0) {
3887 v = t4_read_reg(sc, mw->mw_base + addr - mw->mw_curpos);
3889 *val++ = le32toh(v);
3892 t4_write_reg(sc, mw->mw_base + addr -
3893 mw->mw_curpos, htole32(v));
3898 rw_runlock(&mw->mw_lock);
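/*
 * The atid table doubles as its own free list: afree points at the
 * first free entry and each free entry's 'next' points at the one after
 * it.  alloc_atid pops the head and free_atid pushes an entry back,
 * both under atid_lock.
 */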
3905 t4_init_atid_table(struct adapter *sc)
3914 MPASS(t->atid_tab == NULL);
3916 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
3918 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
3919 t->afree = t->atid_tab;
3920 t->atids_in_use = 0;
3921 for (i = 1; i < t->natids; i++)
3922 t->atid_tab[i - 1].next = &t->atid_tab[i];
3923 t->atid_tab[t->natids - 1].next = NULL;
3927 t4_free_atid_table(struct adapter *sc)
3933 KASSERT(t->atids_in_use == 0,
3934 ("%s: %d atids still in use.", __func__, t->atids_in_use));
3936 if (mtx_initialized(&t->atid_lock))
3937 mtx_destroy(&t->atid_lock);
3938 free(t->atid_tab, M_CXGBE);
3943 alloc_atid(struct adapter *sc, void *ctx)
3945 struct tid_info *t = &sc->tids;
3948 mtx_lock(&t->atid_lock);
3950 union aopen_entry *p = t->afree;
3952 atid = p - t->atid_tab;
3953 MPASS(atid <= M_TID_TID);
3958 mtx_unlock(&t->atid_lock);
3963 lookup_atid(struct adapter *sc, int atid)
3965 struct tid_info *t = &sc->tids;
3967 return (t->atid_tab[atid].data);
3971 free_atid(struct adapter *sc, int atid)
3973 struct tid_info *t = &sc->tids;
3974 union aopen_entry *p = &t->atid_tab[atid];
3976 mtx_lock(&t->atid_lock);
3980 mtx_unlock(&t->atid_lock);
3984 queue_tid_release(struct adapter *sc, int tid)
3987 CXGBE_UNIMPLEMENTED("deferred tid release");
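/*
 * Returns a TID to the firmware by sending CPL_TID_RELEASE on the given
 * control queue.  If a work request can't be allocated the release is
 * queued for later (not implemented yet, see queue_tid_release above).
 */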
3991 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
3994 struct cpl_tid_release *req;
3996 wr = alloc_wrqe(sizeof(*req), ctrlq);
3998 queue_tid_release(sc, tid); /* defer */
4003 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
4009 t4_range_cmp(const void *a, const void *b)
4011 return ((const struct t4_range *)a)->start -
4012 ((const struct t4_range *)b)->start;
4016 * Verify that the memory range specified by the addr/len pair is valid within
4017 * the card's address space.
4020 validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
4022 struct t4_range mem_ranges[4], *r, *next;
4023 uint32_t em, addr_len;
4024 int i, n, remaining;
4026 /* Memory can only be accessed in naturally aligned 4 byte units */
4027 if (addr & 3 || len & 3 || len == 0)
4030 /* Enabled memories */
4031 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4035 bzero(r, sizeof(mem_ranges));
4036 if (em & F_EDRAM0_ENABLE) {
4037 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4038 r->size = G_EDRAM0_SIZE(addr_len) << 20;
4040 r->start = G_EDRAM0_BASE(addr_len) << 20;
4041 if (addr >= r->start &&
4042 addr + len <= r->start + r->size)
4048 if (em & F_EDRAM1_ENABLE) {
4049 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4050 r->size = G_EDRAM1_SIZE(addr_len) << 20;
4052 r->start = G_EDRAM1_BASE(addr_len) << 20;
4053 if (addr >= r->start &&
4054 addr + len <= r->start + r->size)
4060 if (em & F_EXT_MEM_ENABLE) {
4061 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4062 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
4064 r->start = G_EXT_MEM_BASE(addr_len) << 20;
4065 if (addr >= r->start &&
4066 addr + len <= r->start + r->size)
4072 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
4073 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
4074 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
4076 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
4077 if (addr >= r->start &&
4078 addr + len <= r->start + r->size)
4084 MPASS(n <= nitems(mem_ranges));
4087 /* Sort and merge the ranges. */
4088 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
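/*
 * Example: with EDC0 at [0, 1GB) and EDC1 at [1GB, 2GB) the two
 * entries merge into a single [0, 2GB) range, so a request that
 * straddles the EDC0/EDC1 boundary is still considered valid.
 */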
4090 /* Start from index 0 and examine the next n - 1 entries. */
4092 for (remaining = n - 1; remaining > 0; remaining--, r++) {
4094 MPASS(r->size > 0); /* r is a valid entry. */
4096 MPASS(next->size > 0); /* and so is the next one. */
4098 while (r->start + r->size >= next->start) {
4099 /* Merge the next one into the current entry. */
4100 r->size = max(r->start + r->size,
4101 next->start + next->size) - r->start;
4102 n--; /* One fewer entry in total. */
4103 if (--remaining == 0)
4104 goto done; /* short circuit */
4107 if (next != r + 1) {
4109 * Some entries were merged into r and next
4110 * points to the first valid entry that couldn't
4113 MPASS(next->size > 0); /* must be valid */
4114 memcpy(r + 1, next, remaining * sizeof(*r));
4117 * This is so that the size assertions in the
4118 * next iteration of the loop do the right
4119 * thing for entries that were pulled up and are
4122 MPASS(n < nitems(mem_ranges));
4123 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
4124 sizeof(struct t4_range));
4129 /* Done merging the ranges. */
4132 for (i = 0; i < n; i++, r++) {
4133 if (addr >= r->start &&
4134 addr + len <= r->start + r->size)
4143 fwmtype_to_hwmtype(int mtype)
4147 case FW_MEMTYPE_EDC0:
4149 case FW_MEMTYPE_EDC1:
4151 case FW_MEMTYPE_EXTMEM:
4153 case FW_MEMTYPE_EXTMEM1:
4156 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
4161 * Verify that the memory range specified by the memtype/offset/len pair is
4162 * valid and lies entirely within the memtype specified. The global address of
4163 * the start of the range is returned in addr.
4166 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
4169 uint32_t em, addr_len, maddr;
4171 /* Memory can only be accessed in naturally aligned 4 byte units */
4172 if (off & 3 || len & 3 || len == 0)
4175 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4176 switch (fwmtype_to_hwmtype(mtype)) {
4178 if (!(em & F_EDRAM0_ENABLE))
4180 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4181 maddr = G_EDRAM0_BASE(addr_len) << 20;
4184 if (!(em & F_EDRAM1_ENABLE))
4186 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4187 maddr = G_EDRAM1_BASE(addr_len) << 20;
4190 if (!(em & F_EXT_MEM_ENABLE))
4192 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4193 maddr = G_EXT_MEM_BASE(addr_len) << 20;
4196 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
4198 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
4199 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
4205 *addr = maddr + off; /* global address */
4206 return (validate_mem_range(sc, *addr, len));
4210 fixup_devlog_params(struct adapter *sc)
4212 struct devlog_params *dparams = &sc->params.devlog;
4215 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
4216 dparams->size, &dparams->addr);
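/*
 * Total number of interrupt vectors needed for the current queue
 * configuration: the extra vectors (firmware event queue etc.) plus,
 * per port, one vector for each NIC/netmap rx queue and each offload
 * rx queue, on the main VI and on every extra VI.
 */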
4222 update_nirq(struct intrs_and_queues *iaq, int nports)
4225 iaq->nirq = T4_EXTRA_INTR;
4226 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq);
4227 iaq->nirq += nports * iaq->nofldrxq;
4228 iaq->nirq += nports * (iaq->num_vis - 1) *
4229 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
4230 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
4234 * Adjust requirements to fit the number of interrupts available.
4237 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
4241 const int nports = sc->params.nports;
4246 bzero(iaq, sizeof(*iaq));
4247 iaq->intr_type = itype;
4248 iaq->num_vis = t4_num_vis;
4249 iaq->ntxq = t4_ntxq;
4250 iaq->ntxq_vi = t4_ntxq_vi;
4251 iaq->nrxq = t4_nrxq;
4252 iaq->nrxq_vi = t4_nrxq_vi;
4253 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4254 if (is_offload(sc) || is_ethoffload(sc)) {
4255 iaq->nofldtxq = t4_nofldtxq;
4256 iaq->nofldtxq_vi = t4_nofldtxq_vi;
4260 if (is_offload(sc)) {
4261 iaq->nofldrxq = t4_nofldrxq;
4262 iaq->nofldrxq_vi = t4_nofldrxq_vi;
4266 if (t4_native_netmap & NN_MAIN_VI) {
4267 iaq->nnmtxq = t4_nnmtxq;
4268 iaq->nnmrxq = t4_nnmrxq;
4270 if (t4_native_netmap & NN_EXTRA_VI) {
4271 iaq->nnmtxq_vi = t4_nnmtxq_vi;
4272 iaq->nnmrxq_vi = t4_nnmrxq_vi;
4276 update_nirq(iaq, nports);
4277 if (iaq->nirq <= navail &&
4278 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4280 * This is the normal case -- there are enough interrupts for
4287 * If extra VIs have been configured try reducing their count and see if
4290 while (iaq->num_vis > 1) {
4292 update_nirq(iaq, nports);
4293 if (iaq->nirq <= navail &&
4294 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4295 device_printf(sc->dev, "virtual interfaces per port "
4296 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
4297 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
4298 "itype %d, navail %u, nirq %d.\n",
4299 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
4300 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
4301 itype, navail, iaq->nirq);
4307 * Extra VIs will not be created. Log a message if they were requested.
4309 MPASS(iaq->num_vis == 1);
4310 iaq->ntxq_vi = iaq->nrxq_vi = 0;
4311 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
4312 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
4313 if (iaq->num_vis != t4_num_vis) {
4314 device_printf(sc->dev, "extra virtual interfaces disabled. "
4315 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
4316 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
4317 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
4318 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
4322 * Keep reducing the number of NIC rx queues to the next lower power of
4323 * 2 (for even RSS distribution) and halving the TOE rx queues and see
4327 if (iaq->nrxq > 1) {
4330 } while (!powerof2(iaq->nrxq));
4331 if (iaq->nnmrxq > iaq->nrxq)
4332 iaq->nnmrxq = iaq->nrxq;
4334 if (iaq->nofldrxq > 1)
4335 iaq->nofldrxq >>= 1;
4337 old_nirq = iaq->nirq;
4338 update_nirq(iaq, nports);
4339 if (iaq->nirq <= navail &&
4340 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4341 device_printf(sc->dev, "running with reduced number of "
4342 "rx queues because of shortage of interrupts. "
4343 "nrxq=%u, nofldrxq=%u. "
4344 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
4345 iaq->nofldrxq, itype, navail, iaq->nirq);
4348 } while (old_nirq != iaq->nirq);
4350 /* One interrupt for everything. Ugh. */
4351 device_printf(sc->dev, "running with minimal number of queues. "
4352 "itype %d, navail %u.\n", itype, navail);
4356 if (iaq->nofldrxq > 0) {
4363 MPASS(iaq->num_vis > 0);
4364 if (iaq->num_vis > 1) {
4365 MPASS(iaq->nrxq_vi > 0);
4366 MPASS(iaq->ntxq_vi > 0);
4368 MPASS(iaq->nirq > 0);
4369 MPASS(iaq->nrxq > 0);
4370 MPASS(iaq->ntxq > 0);
4371 if (itype == INTR_MSI) {
4372 MPASS(powerof2(iaq->nirq));
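/*
 * Tries the allowed interrupt types from MSI-X down to INTx.  For each
 * type it sizes the queues to fit the vectors available and asks the
 * kernel for that many; if the kernel grants fewer, the request is
 * downshifted and retried with the same type.
 */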
4377 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
4379 int rc, itype, navail, nalloc;
4381 for (itype = INTR_MSIX; itype; itype >>= 1) {
4383 if ((itype & t4_intr_types) == 0)
4384 continue; /* not allowed */
4386 if (itype == INTR_MSIX)
4387 navail = pci_msix_count(sc->dev);
4388 else if (itype == INTR_MSI)
4389 navail = pci_msi_count(sc->dev);
4396 calculate_iaq(sc, iaq, itype, navail);
4399 if (itype == INTR_MSIX)
4400 rc = pci_alloc_msix(sc->dev, &nalloc);
4401 else if (itype == INTR_MSI)
4402 rc = pci_alloc_msi(sc->dev, &nalloc);
4404 if (rc == 0 && nalloc > 0) {
4405 if (nalloc == iaq->nirq)
4409 * Didn't get the number requested. Use whatever number
4410 * the kernel is willing to allocate.
4412 device_printf(sc->dev, "fewer vectors than requested, "
4413 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
4414 itype, iaq->nirq, nalloc);
4415 pci_release_msi(sc->dev);
4420 device_printf(sc->dev,
4421 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
4422 itype, rc, iaq->nirq, nalloc);
4425 device_printf(sc->dev,
4426 "failed to find a usable interrupt type. "
4427 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
4428 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
4433 #define FW_VERSION(chip) ( \
4434 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
4435 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
4436 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
4437 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
4438 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
4440 /* Just enough of fw_hdr to cover all version info. */
4446 __be32 tp_microcode_ver;
4451 __u8 intfver_iscsipdu;
4453 __u8 intfver_fcoepdu;
4456 /* Spot check a couple of fields. */
4457 CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
4458 CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
4459 CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
4469 .kld_name = "t4fw_cfg",
4470 .fw_mod_name = "t4fw",
4472 .chip = FW_HDR_CHIP_T4,
4473 .fw_ver = htobe32(FW_VERSION(T4)),
4474 .intfver_nic = FW_INTFVER(T4, NIC),
4475 .intfver_vnic = FW_INTFVER(T4, VNIC),
4476 .intfver_ofld = FW_INTFVER(T4, OFLD),
4477 .intfver_ri = FW_INTFVER(T4, RI),
4478 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
4479 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4480 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
4481 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4485 .kld_name = "t5fw_cfg",
4486 .fw_mod_name = "t5fw",
4488 .chip = FW_HDR_CHIP_T5,
4489 .fw_ver = htobe32(FW_VERSION(T5)),
4490 .intfver_nic = FW_INTFVER(T5, NIC),
4491 .intfver_vnic = FW_INTFVER(T5, VNIC),
4492 .intfver_ofld = FW_INTFVER(T5, OFLD),
4493 .intfver_ri = FW_INTFVER(T5, RI),
4494 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
4495 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4496 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
4497 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4501 .kld_name = "t6fw_cfg",
4502 .fw_mod_name = "t6fw",
4504 .chip = FW_HDR_CHIP_T6,
4505 .fw_ver = htobe32(FW_VERSION(T6)),
4506 .intfver_nic = FW_INTFVER(T6, NIC),
4507 .intfver_vnic = FW_INTFVER(T6, VNIC),
4508 .intfver_ofld = FW_INTFVER(T6, OFLD),
4509 .intfver_ri = FW_INTFVER(T6, RI),
4510 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4511 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4512 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4513 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4518 static struct fw_info *
4519 find_fw_info(int chip)
4523 for (i = 0; i < nitems(fw_info); i++) {
4524 if (fw_info[i].chip == chip)
4525 return (&fw_info[i]);
4531 * Is the given firmware API compatible with the one the driver was compiled
4535 fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
4538 /* short circuit if it's the exact same firmware version */
4539 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
4543 * XXX: Is this too conservative? Perhaps I should limit this to the
4544 * features that are supported in the driver.
4546 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
4547 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
4548 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
4549 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
4557 load_fw_module(struct adapter *sc, const struct firmware **dcfg,
4558 const struct firmware **fw)
4560 struct fw_info *fw_info;
4566 fw_info = find_fw_info(chip_id(sc));
4567 if (fw_info == NULL) {
4568 device_printf(sc->dev,
4569 "unable to look up firmware information for chip %d.\n",
4574 *dcfg = firmware_get(fw_info->kld_name);
4575 if (*dcfg != NULL) {
4577 *fw = firmware_get(fw_info->fw_mod_name);
4585 unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
4586 const struct firmware *fw)
4590 firmware_put(fw, FIRMWARE_UNLOAD);
4592 firmware_put(dcfg, FIRMWARE_UNLOAD);
4597 * 0 means no firmware install attempted.
4598 * ERESTART means a firmware install was attempted and was successful.
4599 * +ve errno means a firmware install was attempted but failed.
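*
* Callers typically treat ERESTART as "the flash now has new firmware,
* re-read the header and redo the handshake" (see the HELLO retry in
* contact_firmware below).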
4602 install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
4603 const struct fw_h *drv_fw, const char *reason, int *already)
4605 const struct firmware *cfg, *fw;
4606 const uint32_t c = be32toh(card_fw->fw_ver);
4609 struct fw_h bundled_fw;
4610 bool load_attempted;
4613 load_attempted = false;
4614 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;
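/*
 * Note (inferred from the uses below): a negative t4_fw_install selects the
 * KLD firmware instead of the compiled-in header for these checks, while the
 * magnitude keeps its usual meaning (0 = never install, 2 = install whenever
 * the versions differ).
 */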
4616 memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
4617 if (t4_fw_install < 0) {
4618 rc = load_fw_module(sc, &cfg, &fw);
4619 if (rc != 0 || fw == NULL) {
4620 device_printf(sc->dev,
4621 "failed to load firmware module: %d. cfg %p, fw %p;"
4622 " will use compiled-in firmware version for"
4623 "hw.cxgbe.fw_install checks.\n",
4626 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
4628 load_attempted = true;
4630 d = be32toh(bundled_fw.fw_ver);
4635 if ((sc->flags & FW_OK) == 0) {
4637 if (c == 0xffffffff) {
4646 if (!fw_compatible(card_fw, &bundled_fw)) {
4647 reason = "incompatible or unusable";
4652 reason = "older than the version bundled with this driver";
4656 if (fw_install == 2 && d != c) {
4657 reason = "different than the version bundled with this driver";
4661 /* No reason to do anything to the firmware already on the card. */
4670 if (fw_install == 0) {
4671 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4672 "but the driver is prohibited from installing a firmware "
4674 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4675 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
4681 * We'll attempt to install a firmware. Load the module first (if it
4682 * hasn't been loaded already).
4684 if (!load_attempted) {
4685 rc = load_fw_module(sc, &cfg, &fw);
4686 if (rc != 0 || fw == NULL) {
4687 device_printf(sc->dev,
4688 "failed to load firmware module: %d. cfg %p, fw %p\n",
4694 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4695 "but the driver cannot take corrective action because it "
4696 "is unable to load the firmware module.\n",
4697 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4698 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
4699 rc = sc->flags & FW_OK ? 0 : ENOENT;
4702 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
4704 MPASS(t4_fw_install > 0);
4705 device_printf(sc->dev,
4706 "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
4707 "expecting (%u.%u.%u.%u) and will not be used.\n",
4708 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
4709 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
4710 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
4711 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
4712 rc = sc->flags & FW_OK ? 0 : EINVAL;
4716 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4717 "installing firmware %u.%u.%u.%u on card.\n",
4718 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4719 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
4720 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
4721 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
4723 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
4725 device_printf(sc->dev, "failed to install firmware: %d\n", rc);
4727 /* Installed successfully, update the cached header too. */
4729 memcpy(card_fw, fw->data, sizeof(*card_fw));
4732 unload_fw_module(sc, cfg, fw);
4738 * Establish contact with the firmware and attempt to become the master driver.
4740 * A firmware will be installed to the card if needed (if the driver is allowed
4744 contact_firmware(struct adapter *sc)
4746 int rc, already = 0;
4747 enum dev_state state;
4748 struct fw_info *fw_info;
4749 struct fw_hdr *card_fw; /* fw on the card */
4750 const struct fw_h *drv_fw;
4752 fw_info = find_fw_info(chip_id(sc));
4753 if (fw_info == NULL) {
4754 device_printf(sc->dev,
4755 "unable to look up firmware information for chip %d.\n",
4759 drv_fw = &fw_info->fw_h;
4761 /* Read the header of the firmware on the card */
4762 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
4764 rc = -t4_get_fw_hdr(sc, card_fw);
4766 device_printf(sc->dev,
4767 "unable to read firmware header from card's flash: %d\n",
4772 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
4779 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
4780 if (rc < 0 || state == DEV_STATE_ERR) {
4782 device_printf(sc->dev,
4783 "failed to connect to the firmware: %d, %d. "
4784 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4786 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4787 "not responding properly to HELLO", &already) == ERESTART)
4792 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
4793 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */
4796 sc->flags |= MASTER_PF;
4797 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4803 } else if (state == DEV_STATE_UNINIT) {
4805 * We didn't get to be the master so we definitely won't be
4806 * configuring the chip. It's a bug if someone else hasn't
4807 * configured it already.
4809 device_printf(sc->dev, "couldn't be master(%d), "
4810 "device not already initialized either(%d). "
4811 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4816 * Some other PF is the master and has configured the chip.
4817 * This is allowed but untested.
4819 device_printf(sc->dev, "PF%d is master, device state %d. "
4820 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4821 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
4826 if (rc != 0 && sc->flags & FW_OK) {
4827 t4_fw_bye(sc, sc->mbox);
4828 sc->flags &= ~FW_OK;
4830 free(card_fw, M_CXGBE);
4835 copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
4836 uint32_t mtype, uint32_t moff)
4838 struct fw_info *fw_info;
4839 const struct firmware *dcfg, *rcfg = NULL;
4840 const uint32_t *cfdata;
4841 uint32_t cflen, addr;
4844 load_fw_module(sc, &dcfg, NULL);
4846 /* Card specific interpretation of "default". */
4847 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4848 if (pci_get_device(sc->dev) == 0x440a)
4849 snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
4851 snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
4854 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4856 device_printf(sc->dev,
4857 "KLD with default config is not available.\n");
4861 cfdata = dcfg->data;
4862 cflen = dcfg->datasize & ~3;
4866 fw_info = find_fw_info(chip_id(sc));
4867 if (fw_info == NULL) {
4868 device_printf(sc->dev,
4869 "unable to look up firmware information for chip %d.\n",
4874 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
4876 rcfg = firmware_get(s);
4878 device_printf(sc->dev,
4879 "unable to load module \"%s\" for configuration "
4880 "profile \"%s\".\n", s, cfg_file);
4884 cfdata = rcfg->data;
4885 cflen = rcfg->datasize & ~3;
4888 if (cflen > FLASH_CFG_MAX_SIZE) {
4889 device_printf(sc->dev,
4890 "config file too long (%d, max allowed is %d).\n",
4891 cflen, FLASH_CFG_MAX_SIZE);
4896 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
4898 device_printf(sc->dev,
4899 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
4900 __func__, mtype, moff, cflen, rc);
4904 write_via_memwin(sc, 2, addr, cfdata, cflen);
4907 firmware_put(rcfg, FIRMWARE_UNLOAD);
4908 unload_fw_module(sc, dcfg, NULL);
4912 struct caps_allowed {
4915 uint16_t switchcaps;
4919 uint16_t cryptocaps;
4924 #define FW_PARAM_DEV(param) \
4925 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4926 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4927 #define FW_PARAM_PFVF(param) \
4928 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4929 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
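/*
 * Example: FW_PARAM_DEV(PORTVEC) builds the 32b parameter token
 * V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 * V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC), which is the form
 * t4_query_params and t4_set_params expect in their param arrays.
 */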
4932 * Provide a configuration profile to the firmware and have it initialize the
4933 * chip accordingly. This may involve uploading a configuration file to the
4937 apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
4938 const struct caps_allowed *caps_allowed)
4941 struct fw_caps_config_cmd caps;
4942 uint32_t mtype, moff, finicsum, cfcsum, param, val;
4944 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
4946 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
4950 bzero(&caps, sizeof(caps));
4951 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4952 F_FW_CMD_REQUEST | F_FW_CMD_READ);
4953 if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
4956 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4957 } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
4958 mtype = FW_MEMTYPE_FLASH;
4959 moff = t4_flash_cfg_addr(sc);
4960 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4961 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4962 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4966 * Ask the firmware where it wants us to upload the config file.
4968 param = FW_PARAM_DEV(CF);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4971 /* No support for config file? Shouldn't happen. */
4972 device_printf(sc->dev,
4973 "failed to query config file location: %d.\n", rc);
4976 mtype = G_FW_PARAMS_PARAM_Y(val);
4977 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
4978 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4979 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4980 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4983 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
4985 device_printf(sc->dev,
4986 "failed to upload config file to card: %d.\n", rc);
4990 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4992 device_printf(sc->dev, "failed to pre-process config file: %d "
4993 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
4997 finicsum = be32toh(caps.finicsum);
4998 cfcsum = be32toh(caps.cfcsum); /* actual */
4999 if (finicsum != cfcsum) {
5000 device_printf(sc->dev,
5001 "WARNING: config file checksum mismatch: %08x %08x\n",
5004 sc->cfcsum = cfcsum;
5005 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);
5008 * Let the firmware know what features will (not) be used so it can tune
5009 * things accordingly.
5011 #define LIMIT_CAPS(x) do { \
5012 caps.x##caps &= htobe16(caps_allowed->x##caps); \
5024 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
5026 * TOE and hashfilters are mutually exclusive. It is a config
5027 * file or firmware bug if both are reported as available. Try
5028 * to cope with the situation in non-debug builds by disabling
5031 MPASS(caps.toecaps == 0);
5038 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5039 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5040 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
5041 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
5043 device_printf(sc->dev,
5044 "failed to process config file: %d.\n", rc);
5048 t4_tweak_chip_settings(sc);
5049 set_params__pre_init(sc);
5051 /* get basic stuff going */
5052 rc = -t4_fw_initialize(sc, sc->mbox);
5054 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
5062 * Partition chip resources for use between various PFs, VFs, etc.
5065 partition_resources(struct adapter *sc)
5067 char cfg_file[sizeof(t4_cfg_file)];
5068 struct caps_allowed caps_allowed;
5072 /* Only the master driver gets to configure the chip resources. */
5073 MPASS(sc->flags & MASTER_PF);
5075 #define COPY_CAPS(x) do { \
5076 caps_allowed.x##caps = t4_##x##caps_allowed; \
5078 bzero(&caps_allowed, sizeof(caps_allowed));
5088 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
5089 snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
5091 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
5092 if (rc != 0 && fallback) {
5094 device_printf(sc->dev,
5095 "failed (%d) to configure card with \"%s\" profile, "
5096 "will fall back to a basic configuration and retry.\n",
5098 snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
5099 bzero(&caps_allowed, sizeof(caps_allowed));
5101 caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
5110 * Retrieve parameters that are needed (or nice to have) very early.
5113 get_params__pre_init(struct adapter *sc)
5116 uint32_t param[2], val[2];
5118 t4_get_version_info(sc);
5120 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
5121 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
5122 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
5123 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
5124 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
5126 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
5127 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
5128 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
5129 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
5130 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
5132 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
5133 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
5134 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
5135 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
5136 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
5138 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
5139 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
5140 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
5141 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
5142 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
5144 param[0] = FW_PARAM_DEV(PORTVEC);
5145 param[1] = FW_PARAM_DEV(CCLK);
5146 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5148 device_printf(sc->dev,
5149 "failed to query parameters (pre_init): %d.\n", rc);
5153 sc->params.portvec = val[0];
5154 sc->params.nports = bitcount32(val[0]);
5155 sc->params.vpd.cclk = val[1];
5157 /* Read device log parameters. */
5158 rc = -t4_init_devlog_params(sc, 1);
5160 fixup_devlog_params(sc);
5162 device_printf(sc->dev,
5163 "failed to get devlog parameters: %d.\n", rc);
5164 rc = 0; /* devlog isn't critical for device operation */
5171 * Any params that need to be set before FW_INITIALIZE.
5174 set_params__pre_init(struct adapter *sc)
5177 uint32_t param, val;
5179 if (chip_id(sc) >= CHELSIO_T6) {
5180 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5183 /* firmwares < 1.20.1.0 do not have this param. */
5184 if (rc == FW_EINVAL &&
5185 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
5189 device_printf(sc->dev,
5190 "failed to enable high priority filters :%d.\n",
5194 param = FW_PARAM_DEV(PPOD_EDRAM);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5196 if (rc == 0 && val == 1) {
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param,
5200 device_printf(sc->dev,
5201 "failed to set PPOD_EDRAM: %d.\n", rc);
/* Enable opaque VIIDs with firmwares that support them. */
5207 param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5210 if (rc == 0 && val == 1)
5211 sc->params.viid_smt_extn_support = true;
5213 sc->params.viid_smt_extn_support = false;
5219 * Retrieve various parameters that are of interest to the driver. The device
5220 * has been initialized by the firmware at this point.
5223 get_params__post_init(struct adapter *sc)
5226 uint32_t param[7], val[7];
5227 struct fw_caps_config_cmd caps;
5229 param[0] = FW_PARAM_PFVF(IQFLINT_START);
5230 param[1] = FW_PARAM_PFVF(EQ_START);
5231 param[2] = FW_PARAM_PFVF(FILTER_START);
5232 param[3] = FW_PARAM_PFVF(FILTER_END);
5233 param[4] = FW_PARAM_PFVF(L2T_START);
5234 param[5] = FW_PARAM_PFVF(L2T_END);
5235 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5236 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5237 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
5238 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
5240 device_printf(sc->dev,
5241 "failed to query parameters (post_init): %d.\n", rc);
5245 sc->sge.iq_start = val[0];
5246 sc->sge.eq_start = val[1];
5247 if ((int)val[3] > (int)val[2]) {
5248 sc->tids.ftid_base = val[2];
5249 sc->tids.ftid_end = val[3];
5250 sc->tids.nftids = val[3] - val[2] + 1;
5252 sc->vres.l2t.start = val[4];
5253 sc->vres.l2t.size = val[5] - val[4] + 1;
5254 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
5255 ("%s: L2 table size (%u) larger than expected (%u)",
5256 __func__, sc->vres.l2t.size, L2T_SIZE));
5257 sc->params.core_vdd = val[6];
5259 param[0] = FW_PARAM_PFVF(IQFLINT_END);
5260 param[1] = FW_PARAM_PFVF(EQ_END);
5261 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5263 device_printf(sc->dev,
5264 "failed to query parameters (post_init2): %d.\n", rc);
5267 MPASS((int)val[0] >= sc->sge.iq_start);
5268 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
5269 MPASS((int)val[1] >= sc->sge.eq_start);
5270 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;
5272 if (chip_id(sc) >= CHELSIO_T6) {
5274 sc->tids.tid_base = t4_read_reg(sc,
5275 A_LE_DB_ACTIVE_TABLE_START_INDEX);
5277 param[0] = FW_PARAM_PFVF(HPFILTER_START);
5278 param[1] = FW_PARAM_PFVF(HPFILTER_END);
5279 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5281 device_printf(sc->dev,
5282 "failed to query hpfilter parameters: %d.\n", rc);
5285 if ((int)val[1] > (int)val[0]) {
5286 sc->tids.hpftid_base = val[0];
5287 sc->tids.hpftid_end = val[1];
5288 sc->tids.nhpftids = val[1] - val[0] + 1;
5291 * These should go off if the layout changes and the
5292 * driver needs to catch up.
5294 MPASS(sc->tids.hpftid_base == 0);
5295 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
5298 param[0] = FW_PARAM_PFVF(RAWF_START);
5299 param[1] = FW_PARAM_PFVF(RAWF_END);
5300 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5302 device_printf(sc->dev,
5303 "failed to query rawf parameters: %d.\n", rc);
5306 if ((int)val[1] > (int)val[0]) {
5307 sc->rawf_base = val[0];
5308 sc->nrawf = val[1] - val[0] + 1;
5313 * MPSBGMAP is queried separately because only recent firmwares support
5314 * it as a parameter and we don't want the compound query above to fail
5315 * on older firmwares.
5317 param[0] = FW_PARAM_DEV(MPSBGMAP);
5319 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5321 sc->params.mps_bg_map = val[0];
5323 sc->params.mps_bg_map = 0;
5326 * Determine whether the firmware supports the filter2 work request.
5327 * This is queried separately for the same reason as MPSBGMAP above.
5329 param[0] = FW_PARAM_DEV(FILTER2_WR);
5331 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5333 sc->params.filter2_wr_support = val[0] != 0;
5335 sc->params.filter2_wr_support = 0;
5338 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
5339 * This is queried separately for the same reason as other params above.
5341 param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5343 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5345 sc->params.ulptx_memwrite_dsgl = val[0] != 0;
5347 sc->params.ulptx_memwrite_dsgl = false;
5349 /* FW_RI_FR_NSMR_TPTE_WR support */
5350 param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5351 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5353 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
5355 sc->params.fr_nsmr_tpte_wr_support = false;
5357 /* Support for 512 SGL entries per FR MR. */
5358 param[0] = FW_PARAM_DEV(DEV_512SGL_MR);
5359 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5361 sc->params.dev_512sgl_mr = val[0] != 0;
5363 sc->params.dev_512sgl_mr = false;
5365 param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
5366 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5368 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0];
5370 sc->params.max_pkts_per_eth_tx_pkts_wr = 15;
5372 param[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5373 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5375 MPASS(val[0] > 0 && val[0] < 256); /* nsched_cls is 8b */
5376 sc->params.nsched_cls = val[0];
5378 sc->params.nsched_cls = sc->chip_params->nsched_cls;
/* get capabilities */
5381 bzero(&caps, sizeof(caps));
5382 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5383 F_FW_CMD_REQUEST | F_FW_CMD_READ);
5384 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
5385 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
5387 device_printf(sc->dev,
5388 "failed to get card capabilities: %d.\n", rc);
5392 #define READ_CAPS(x) do { \
sc->x = be16toh(caps.x); \
5396 READ_CAPS(linkcaps);
5397 READ_CAPS(switchcaps);
5400 READ_CAPS(rdmacaps);
5401 READ_CAPS(cryptocaps);
5402 READ_CAPS(iscsicaps);
5403 READ_CAPS(fcoecaps);
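/*
 * The READ_CAPS uses above copy each big-endian capability word from the
 * firmware reply into the corresponding host-order field in the softc.
 */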
5405 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
5406 MPASS(chip_id(sc) > CHELSIO_T4);
5407 MPASS(sc->toecaps == 0);
5410 param[0] = FW_PARAM_DEV(NTID);
5411 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5413 device_printf(sc->dev,
5414 "failed to query HASHFILTER parameters: %d.\n", rc);
5417 sc->tids.ntids = val[0];
5418 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
5419 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
5420 sc->tids.ntids -= sc->tids.nhpftids;
5422 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
5423 sc->params.hash_filter = 1;
5425 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
5426 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
5427 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
5428 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5429 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
5431 device_printf(sc->dev,
5432 "failed to query NIC parameters: %d.\n", rc);
5435 if ((int)val[1] > (int)val[0]) {
5436 sc->tids.etid_base = val[0];
5437 sc->tids.etid_end = val[1];
5438 sc->tids.netids = val[1] - val[0] + 1;
5439 sc->params.eo_wr_cred = val[2];
5440 sc->params.ethoffload = 1;
5444 /* query offload-related parameters */
5445 param[0] = FW_PARAM_DEV(NTID);
5446 param[1] = FW_PARAM_PFVF(SERVER_START);
5447 param[2] = FW_PARAM_PFVF(SERVER_END);
5448 param[3] = FW_PARAM_PFVF(TDDP_START);
5449 param[4] = FW_PARAM_PFVF(TDDP_END);
5450 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5451 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5453 device_printf(sc->dev,
5454 "failed to query TOE parameters: %d.\n", rc);
5457 sc->tids.ntids = val[0];
5458 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
5459 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
5460 sc->tids.ntids -= sc->tids.nhpftids;
5462 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
5463 if ((int)val[2] > (int)val[1]) {
5464 sc->tids.stid_base = val[1];
5465 sc->tids.nstids = val[2] - val[1] + 1;
5467 sc->vres.ddp.start = val[3];
5468 sc->vres.ddp.size = val[4] - val[3] + 1;
5469 sc->params.ofldq_wr_cred = val[5];
5470 sc->params.offload = 1;
5473 * The firmware attempts memfree TOE configuration for -SO cards
5474 * and will report toecaps=0 if it runs out of resources (this
5475 * depends on the config file). It may not report 0 for other
5476 * capabilities dependent on the TOE in this case. Set them to
5477 * 0 here so that the driver doesn't bother tracking resources
5478 * that will never be used.
5484 param[0] = FW_PARAM_PFVF(STAG_START);
5485 param[1] = FW_PARAM_PFVF(STAG_END);
5486 param[2] = FW_PARAM_PFVF(RQ_START);
5487 param[3] = FW_PARAM_PFVF(RQ_END);
5488 param[4] = FW_PARAM_PFVF(PBL_START);
5489 param[5] = FW_PARAM_PFVF(PBL_END);
5490 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5492 device_printf(sc->dev,
5493 "failed to query RDMA parameters(1): %d.\n", rc);
5496 sc->vres.stag.start = val[0];
5497 sc->vres.stag.size = val[1] - val[0] + 1;
5498 sc->vres.rq.start = val[2];
5499 sc->vres.rq.size = val[3] - val[2] + 1;
5500 sc->vres.pbl.start = val[4];
5501 sc->vres.pbl.size = val[5] - val[4] + 1;
5503 param[0] = FW_PARAM_PFVF(SQRQ_START);
5504 param[1] = FW_PARAM_PFVF(SQRQ_END);
5505 param[2] = FW_PARAM_PFVF(CQ_START);
5506 param[3] = FW_PARAM_PFVF(CQ_END);
5507 param[4] = FW_PARAM_PFVF(OCQ_START);
5508 param[5] = FW_PARAM_PFVF(OCQ_END);
5509 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5511 device_printf(sc->dev,
5512 "failed to query RDMA parameters(2): %d.\n", rc);
5515 sc->vres.qp.start = val[0];
5516 sc->vres.qp.size = val[1] - val[0] + 1;
5517 sc->vres.cq.start = val[2];
5518 sc->vres.cq.size = val[3] - val[2] + 1;
5519 sc->vres.ocq.start = val[4];
5520 sc->vres.ocq.size = val[5] - val[4] + 1;
5522 param[0] = FW_PARAM_PFVF(SRQ_START);
5523 param[1] = FW_PARAM_PFVF(SRQ_END);
5524 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
5525 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5526 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
5528 device_printf(sc->dev,
5529 "failed to query RDMA parameters(3): %d.\n", rc);
5532 sc->vres.srq.start = val[0];
5533 sc->vres.srq.size = val[1] - val[0] + 1;
5534 sc->params.max_ordird_qp = val[2];
5535 sc->params.max_ird_adapter = val[3];
5537 if (sc->iscsicaps) {
5538 param[0] = FW_PARAM_PFVF(ISCSI_START);
5539 param[1] = FW_PARAM_PFVF(ISCSI_END);
5540 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5542 device_printf(sc->dev,
5543 "failed to query iSCSI parameters: %d.\n", rc);
5546 sc->vres.iscsi.start = val[0];
5547 sc->vres.iscsi.size = val[1] - val[0] + 1;
5549 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
5550 param[0] = FW_PARAM_PFVF(TLS_START);
5551 param[1] = FW_PARAM_PFVF(TLS_END);
5552 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5554 device_printf(sc->dev,
5555 "failed to query TLS parameters: %d.\n", rc);
5558 sc->vres.key.start = val[0];
5559 sc->vres.key.size = val[1] - val[0] + 1;
5563 * We've got the params we wanted to query directly from the firmware.
5564 * Grab some others via other means.
5566 t4_init_sge_params(sc);
5567 t4_init_tp_params(sc);
5568 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
5569 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
5571 rc = t4_verify_chip_settings(sc);
5574 t4_init_rx_buf_info(sc);
5581 ktls_tick(void *arg)
5587 tstamp = tcp_ts_getticks();
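/*
 * The 32b timestamp is split across the two TP_SYNC_TIME registers: the
 * upper 31 bits go into TP_SYNC_TIME_HI and the remaining low bit lands
 * in the MSB of TP_SYNC_TIME_LO, hence the >> 1 / << 31 pair below.
 */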
5588 t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
5589 t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
5590 callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
5594 t6_config_kern_tls(struct adapter *sc, bool enable)
5597 uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5598 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) |
5599 V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) |
5600 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param);
5604 CH_ERR(sc, "failed to %s NIC TLS: %d\n",
5605 enable ? "enable" : "disable", rc);
5610 sc->flags |= KERN_TLS_ON;
5611 callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc,
5614 sc->flags &= ~KERN_TLS_ON;
5615 callout_stop(&sc->ktls_tick);
5623 set_params__post_init(struct adapter *sc)
5625 uint32_t mask, param, val;
5630 /* ask for encapsulated CPLs */
5631 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5635 /* Enable 32b port caps if the firmware supports it. */
5636 param = FW_PARAM_PFVF(PORT_CAPS32);
if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
5639 sc->params.port_caps32 = 1;
5641 /* Let filter + maskhash steer to a part of the VI's RSS region. */
5642 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
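/*
 * val is now 2^(MASKSIZE - 1), so V_MASKFILTER(val - 1) below writes a
 * run of MASKSIZE - 1 one-bits into the MASKFILTER field.
 */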
5643 t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
5644 V_MASKFILTER(val - 1));
5646 mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER |
5647 F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN |
5648 F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
5649 F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM;
5651 if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) {
5652 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE,
5653 F_ATTACKFILTERENABLE);
5654 val |= F_DROPERRORATTACK;
5656 if (t4_drop_ip_fragments != 0) {
5657 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP,
5659 val |= F_DROPERRORFRAG;
5661 if (t4_drop_pkts_with_l2_errors != 0)
5662 val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN;
5663 if (t4_drop_pkts_with_l3_errors != 0) {
5664 val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN |
5667 if (t4_drop_pkts_with_l4_errors != 0) {
5668 val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
5669 F_DROPERRORTCPOPT | F_DROPERRORCSUM;
5671 t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val);
* Override the TOE timers with user-provided tunables. This is not the
5676 * recommended way to change the timers (the firmware config file is) so
5677 * these tunables are not documented.
5679 * All the timer tunables are in microseconds.
5681 if (t4_toe_keepalive_idle != 0) {
5682 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
5683 v &= M_KEEPALIVEIDLE;
5684 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
5685 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
5687 if (t4_toe_keepalive_interval != 0) {
5688 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
5689 v &= M_KEEPALIVEINTVL;
5690 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
5691 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
5693 if (t4_toe_keepalive_count != 0) {
5694 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
5695 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
5696 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
5697 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
5698 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
5700 if (t4_toe_rexmt_min != 0) {
5701 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
5703 t4_set_reg_field(sc, A_TP_RXT_MIN,
5704 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
5706 if (t4_toe_rexmt_max != 0) {
5707 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
5709 t4_set_reg_field(sc, A_TP_RXT_MAX,
5710 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
5712 if (t4_toe_rexmt_count != 0) {
5713 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
5714 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
5715 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
5716 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
5717 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
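/*
 * Each TP_TCP_BACKOFF_REG packs four 8b backoff indices, so entry i
 * lives in register (i & ~3) at byte lane (i & 3); the shift below
 * places both the mask and the value in the right lane.
 */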
5719 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
5720 if (t4_toe_rexmt_backoff[i] != -1) {
5721 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
5722 shift = (i & 3) << 3;
5723 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
5724 M_TIMERBACKOFFINDEX0 << shift, v << shift);
5730 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
5731 sc->toecaps & FW_CAPS_CONFIG_TOE) {
5733 * Limit TOE connections to 2 reassembly "islands".
5734 * This is required to permit migrating TOE
* connections to ULP_MODE_TLS.
5737 t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG,
5738 V_PASSMODE(M_PASSMODE), V_PASSMODE(2));
5742 sc->tlst.inline_keys = t4_tls_inline_keys;
5743 sc->tlst.combo_wrs = t4_tls_combo_wrs;
5744 if (t4_kern_tls != 0 && is_t6(sc))
5745 t6_config_kern_tls(sc, true);
5751 #undef FW_PARAM_PFVF
5755 t4_set_desc(struct adapter *sc)
5758 struct adapter_params *p = &sc->params;
5760 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
5762 device_set_desc_copy(sc->dev, buf);
5766 ifmedia_add4(struct ifmedia *ifm, int m)
5769 ifmedia_add(ifm, m, 0, NULL);
5770 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
5771 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
5772 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
5776 * This is the selected media, which is not quite the same as the active media.
5777 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
5778 * and active are not the same, and "media: Ethernet selected" otherwise.
5781 set_current_media(struct port_info *pi)
5783 struct link_config *lc;
5784 struct ifmedia *ifm;
5788 PORT_LOCK_ASSERT_OWNED(pi);
5790 /* Leave current media alone if it's already set to IFM_NONE. */
5792 if (ifm->ifm_cur != NULL &&
5793 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
5797 if (lc->requested_aneg != AUTONEG_DISABLE &&
5798 lc->pcaps & FW_PORT_CAP32_ANEG) {
5799 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
5802 mword = IFM_ETHER | IFM_FDX;
5803 if (lc->requested_fc & PAUSE_TX)
5804 mword |= IFM_ETH_TXPAUSE;
5805 if (lc->requested_fc & PAUSE_RX)
5806 mword |= IFM_ETH_RXPAUSE;
5807 if (lc->requested_speed == 0)
5808 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
5810 speed = lc->requested_speed;
5811 mword |= port_mword(pi, speed_to_fwcap(speed));
5812 ifmedia_set(ifm, mword);
5816 * Returns true if the ifmedia list for the port cannot change.
5819 fixed_ifmedia(struct port_info *pi)
5822 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
5823 pi->port_type == FW_PORT_TYPE_BT_XFI ||
5824 pi->port_type == FW_PORT_TYPE_BT_XAUI ||
5825 pi->port_type == FW_PORT_TYPE_KX4 ||
5826 pi->port_type == FW_PORT_TYPE_KX ||
5827 pi->port_type == FW_PORT_TYPE_KR ||
5828 pi->port_type == FW_PORT_TYPE_BP_AP ||
5829 pi->port_type == FW_PORT_TYPE_BP4_AP ||
5830 pi->port_type == FW_PORT_TYPE_BP40_BA ||
5831 pi->port_type == FW_PORT_TYPE_KR4_100G ||
5832 pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
5833 pi->port_type == FW_PORT_TYPE_KR_XLAUI);
5837 build_medialist(struct port_info *pi)
5840 int unknown, mword, bit;
5841 struct link_config *lc;
5842 struct ifmedia *ifm;
5844 PORT_LOCK_ASSERT_OWNED(pi);
5846 if (pi->flags & FIXED_IFMEDIA)
5850 * Rebuild the ifmedia list.
5853 ifmedia_removeall(ifm);
5855 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */
5856 if (__predict_false(ss == 0)) { /* not supposed to happen. */
5859 MPASS(LIST_EMPTY(&ifm->ifm_list));
5860 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
5861 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
5866 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
5868 MPASS(speed & M_FW_PORT_CAP32_SPEED);
5870 mword = port_mword(pi, speed);
5871 if (mword == IFM_NONE) {
5873 } else if (mword == IFM_UNKNOWN)
5876 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
5879 if (unknown > 0) /* Add one unknown for all unknown media types. */
5880 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
5881 if (lc->pcaps & FW_PORT_CAP32_ANEG)
5882 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
5884 set_current_media(pi);
5888 * Initialize the requested fields in the link config based on driver tunables.
5891 init_link_config(struct port_info *pi)
5893 struct link_config *lc = &pi->link_cfg;
5895 PORT_LOCK_ASSERT_OWNED(pi);
5897 lc->requested_caps = 0;
5898 lc->requested_speed = 0;
5900 if (t4_autoneg == 0)
5901 lc->requested_aneg = AUTONEG_DISABLE;
5902 else if (t4_autoneg == 1)
5903 lc->requested_aneg = AUTONEG_ENABLE;
5905 lc->requested_aneg = AUTONEG_AUTO;
5907 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
5910 if (t4_fec & FEC_AUTO)
5911 lc->requested_fec = FEC_AUTO;
5912 else if (t4_fec == 0)
5913 lc->requested_fec = FEC_NONE;
5915 /* -1 is handled by the FEC_AUTO block above and not here. */
5916 lc->requested_fec = t4_fec &
5917 (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE);
5918 if (lc->requested_fec == 0)
5919 lc->requested_fec = FEC_AUTO;
5921 if (t4_force_fec < 0)
5923 else if (t4_force_fec > 0)
5930 * Makes sure that all requested settings comply with what's supported by the
5931 * port. Returns the number of settings that were invalid and had to be fixed.
5934 fixup_link_config(struct port_info *pi)
5937 struct link_config *lc = &pi->link_cfg;
5940 PORT_LOCK_ASSERT_OWNED(pi);
5942 /* Speed (when not autonegotiating) */
5943 if (lc->requested_speed != 0) {
5944 fwspeed = speed_to_fwcap(lc->requested_speed);
5945 if ((fwspeed & lc->pcaps) == 0) {
5947 lc->requested_speed = 0;
5951 /* Link autonegotiation */
5952 MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
5953 lc->requested_aneg == AUTONEG_DISABLE ||
5954 lc->requested_aneg == AUTONEG_AUTO);
5955 if (lc->requested_aneg == AUTONEG_ENABLE &&
5956 !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
5958 lc->requested_aneg = AUTONEG_AUTO;
5962 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
5963 if (lc->requested_fc & PAUSE_TX &&
5964 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) {
5966 lc->requested_fc &= ~PAUSE_TX;
5968 if (lc->requested_fc & PAUSE_RX &&
5969 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) {
5971 lc->requested_fc &= ~PAUSE_RX;
5973 if (!(lc->requested_fc & PAUSE_AUTONEG) &&
5974 !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) {
5976 lc->requested_fc |= PAUSE_AUTONEG;
5980 if ((lc->requested_fec & FEC_RS &&
5981 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) ||
5982 (lc->requested_fec & FEC_BASER_RS &&
5983 !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) {
5985 lc->requested_fec = FEC_AUTO;
* Apply the requested L1 settings, which are expected to be valid, to the card.
5996 apply_link_config(struct port_info *pi)
5998 struct adapter *sc = pi->adapter;
5999 struct link_config *lc = &pi->link_cfg;
6003 ASSERT_SYNCHRONIZED_OP(sc);
6004 PORT_LOCK_ASSERT_OWNED(pi);
6006 if (lc->requested_aneg == AUTONEG_ENABLE)
6007 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG);
6008 if (!(lc->requested_fc & PAUSE_AUTONEG))
6009 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE);
6010 if (lc->requested_fc & PAUSE_TX)
6011 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX);
6012 if (lc->requested_fc & PAUSE_RX)
6013 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX);
6014 if (lc->requested_fec & FEC_RS)
6015 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS);
6016 if (lc->requested_fec & FEC_BASER_RS)
6017 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
6019 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6021 /* Don't complain if the VF driver gets back an EPERM. */
6022 if (!(sc->flags & IS_VF) || rc != FW_EPERM)
6023 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
6026 * An L1_CFG will almost always result in a link-change event if
6027 * the link is up, and the driver will refresh the actual
6028 * fec/fc/etc. when the notification is processed. If the link
6029 * is down then the actual settings are meaningless.
6031 * This takes care of the case where a change in the L1 settings
6032 * may not result in a notification.
6034 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
6035 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
6040 #define FW_MAC_EXACT_CHUNK 7
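/*
 * t4_alloc_mac_filt takes at most FW_MAC_EXACT_CHUNK exact-match MAC
 * addresses at a time, so add_maddr below flushes the accumulated
 * pointers to the firmware every 7 entries.
 */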
6043 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
6051 add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
6053 struct mcaddr_ctx *ctx = arg;
6054 struct vi_info *vi = if_getsoftc(ctx->ifp);
6055 struct port_info *pi = vi->pi;
6056 struct adapter *sc = pi->adapter;
6061 ctx->mcaddr[ctx->i] = LLADDR(sdl);
6062 MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i]));
6065 if (ctx->i == FW_MAC_EXACT_CHUNK) {
6066 ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del,
6067 ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0);
6071 for (j = 0; j < ctx->i; j++) {
6073 "failed to add mc address"
6075 "%02x:%02x:%02x rc=%d\n",
6076 ctx->mcaddr[j][0], ctx->mcaddr[j][1],
6077 ctx->mcaddr[j][2], ctx->mcaddr[j][3],
6078 ctx->mcaddr[j][4], ctx->mcaddr[j][5],
6091 * Program the port's XGMAC based on parameters in ifnet. The caller also
6092 * indicates which parameters should be programmed (the rest are left alone).
6095 update_mac_settings(if_t ifp, int flags)
6098 struct vi_info *vi = if_getsoftc(ifp);
6099 struct port_info *pi = vi->pi;
6100 struct adapter *sc = pi->adapter;
6101 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
6102 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
6104 ASSERT_SYNCHRONIZED_OP(sc);
6105 KASSERT(flags, ("%s: not told what to update.", __func__));
6107 if (flags & XGMAC_MTU)
6108 mtu = if_getmtu(ifp);
6110 if (flags & XGMAC_PROMISC)
6111 promisc = if_getflags(ifp) & IFF_PROMISC ? 1 : 0;
6113 if (flags & XGMAC_ALLMULTI)
6114 allmulti = if_getflags(ifp) & IFF_ALLMULTI ? 1 : 0;
6116 if (flags & XGMAC_VLANEX)
6117 vlanex = if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING ? 1 : 0;
6119 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
6120 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
6121 allmulti, 1, vlanex, false);
6123 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
6129 if (flags & XGMAC_UCADDR) {
6130 uint8_t ucaddr[ETHER_ADDR_LEN];
6132 bcopy(if_getlladdr(ifp), ucaddr, sizeof(ucaddr));
6133 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
6134 ucaddr, true, &vi->smt_idx);
6137 if_printf(ifp, "change_mac failed: %d\n", rc);
6140 vi->xact_addr_filt = rc;
6145 if (flags & XGMAC_MCADDRS) {
6146 struct epoch_tracker et;
6147 struct mcaddr_ctx ctx;
* Unlike other drivers, we accumulate a list of pointers into
* interface address lists and we need to keep it safe even
* after if_foreach_llmaddr() returns, thus we must enter the net epoch here.
6161 NET_EPOCH_ENTER(et);
6162 if_foreach_llmaddr(ifp, add_maddr, &ctx);
6169 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
6170 ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
6174 for (j = 0; j < ctx.i; j++) {
6176 "failed to add mcast address"
6178 "%02x:%02x:%02x rc=%d\n",
6179 ctx.mcaddr[j][0], ctx.mcaddr[j][1],
6180 ctx.mcaddr[j][2], ctx.mcaddr[j][3],
6181 ctx.mcaddr[j][4], ctx.mcaddr[j][5],
6190 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
6192 if_printf(ifp, "failed to set mcast address hash: %d\n",
6195 /* We clobbered the VXLAN entry if there was one. */
6196 pi->vxlan_tcam_entry = false;
6200 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 &&
6201 pi->vxlan_tcam_entry == false) {
6202 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac,
6203 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
6207 if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n",
6210 MPASS(rc == sc->rawf_base + pi->port_id);
6212 pi->vxlan_tcam_entry = true;
6220 * {begin|end}_synchronized_op must be called from the same thread.
6223 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
6229 /* the caller thinks it's ok to sleep, but is it really? */
6230 if (flags & SLEEP_OK)
6231 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
6232 "begin_synchronized_op");
6243 if (vi && IS_DETACHING(vi)) {
6253 if (!(flags & SLEEP_OK)) {
6258 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
6264 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
6267 sc->last_op = wmesg;
6268 sc->last_op_thr = curthread;
6269 sc->last_op_flags = flags;
6273 if (!(flags & HOLD_LOCK) || rc)
6280 * Tell if_ioctl and if_init that the VI is going away. This is
* a special variant of begin_synchronized_op and must be paired with a
6282 * call to end_vi_detach.
6285 begin_vi_detach(struct adapter *sc, struct vi_info *vi)
6291 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
6294 sc->last_op = "t4detach";
6295 sc->last_op_thr = curthread;
6296 sc->last_op_flags = 0;
6302 end_vi_detach(struct adapter *sc, struct vi_info *vi)
6305 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
6313 * {begin|end}_synchronized_op must be called from the same thread.
6316 end_synchronized_op(struct adapter *sc, int flags)
6319 if (flags & LOCK_HELD)
6320 ADAPTER_LOCK_ASSERT_OWNED(sc);
6324 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
6331 cxgbe_init_synchronized(struct vi_info *vi)
6333 struct port_info *pi = vi->pi;
6334 struct adapter *sc = pi->adapter;
6337 struct sge_txq *txq;
6339 ASSERT_SYNCHRONIZED_OP(sc);
6341 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
6342 return (0); /* already running */
6344 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
6345 return (rc); /* error message displayed already */
6347 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
6348 return (rc); /* error message displayed already */
6350 rc = update_mac_settings(ifp, XGMAC_ALL);
6352 goto done; /* error message displayed already */
6355 if (pi->up_vis == 0) {
6356 t4_update_port_info(pi);
6357 fixup_link_config(pi);
6358 build_medialist(pi);
6359 apply_link_config(pi);
6362 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
6364 if_printf(ifp, "enable_vi failed: %d\n", rc);
* Can't fail from this point onwards. Review cxgbe_uninit_synchronized if this changes.
6374 for_each_txq(vi, i, txq) {
6376 txq->eq.flags |= EQ_ENABLED;
6381 * The first iq of the first port to come up is used for tracing.
6383 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
6384 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
6385 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
6386 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
6387 V_QUEUENUMBER(sc->traceq));
6388 pi->flags |= HAS_TRACEQ;
6393 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
6394 if (pi->link_cfg.link_ok)
6395 t4_os_link_changed(pi);
6398 mtx_lock(&vi->tick_mtx);
6399 if (vi->pi->nvi > 1 || sc->flags & IS_VF)
6400 callout_reset(&vi->tick, hz, vi_tick, vi);
6402 callout_reset(&vi->tick, hz, cxgbe_tick, vi);
6403 mtx_unlock(&vi->tick_mtx);
6406 cxgbe_uninit_synchronized(vi);
6415 cxgbe_uninit_synchronized(struct vi_info *vi)
6417 struct port_info *pi = vi->pi;
6418 struct adapter *sc = pi->adapter;
6421 struct sge_txq *txq;
6423 ASSERT_SYNCHRONIZED_OP(sc);
6425 if (!(vi->flags & VI_INIT_DONE)) {
6426 if (__predict_false(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
6427 KASSERT(0, ("uninited VI is running"));
6428 if_printf(ifp, "uninited VI with running ifnet. "
6429 "vi->flags 0x%016lx, if_flags 0x%08x, "
6430 "if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp),
6431 if_getdrvflags(ifp));
6437 * Disable the VI so that all its data in either direction is discarded
6438 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
6439 * tick) intact as the TP can deliver negative advice or data that it's
* holding in its RAM (for an offloaded connection) even after the VI is disabled.
6443 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
6445 if_printf(ifp, "disable_vi failed: %d\n", rc);
6449 for_each_txq(vi, i, txq) {
6451 txq->eq.flags &= ~EQ_ENABLED;
6455 mtx_lock(&vi->tick_mtx);
6456 callout_stop(&vi->tick);
6457 mtx_unlock(&vi->tick_mtx);
6460 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
6464 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
6466 if (pi->up_vis > 0) {
6471 pi->link_cfg.link_ok = false;
6472 pi->link_cfg.speed = 0;
6473 pi->link_cfg.link_down_rc = 255;
6474 t4_os_link_changed(pi);
6481 * It is ok for this function to fail midway and return right away. t4_detach
6482 * will walk the entire sc->irq list and clean up whatever is valid.
6485 t4_setup_intr_handlers(struct adapter *sc)
6487 int rc, rid, p, q, v;
6490 struct port_info *pi;
6492 struct sge *sge = &sc->sge;
6493 struct sge_rxq *rxq;
6495 struct sge_ofld_rxq *ofld_rxq;
6498 struct sge_nm_rxq *nm_rxq;
6501 int nbuckets = rss_getnumbuckets();
6508 rid = sc->intr_type == INTR_INTX ? 0 : 1;
6509 if (forwarding_intr_to_fwq(sc))
6510 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
6512 /* Multiple interrupts. */
6513 if (sc->flags & IS_VF)
6514 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
6515 ("%s: too few intr.", __func__));
6517 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
6518 ("%s: too few intr.", __func__));
6520 /* The first one is always error intr on PFs */
6521 if (!(sc->flags & IS_VF)) {
6522 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
6529 /* The second one is always the firmware event queue (first on VFs) */
6530 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
6536 for_each_port(sc, p) {
6538 for_each_vi(pi, v, vi) {
6539 vi->first_intr = rid - 1;
6541 if (vi->nnmrxq > 0) {
6542 int n = max(vi->nrxq, vi->nnmrxq);
6544 rxq = &sge->rxq[vi->first_rxq];
6546 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
6548 for (q = 0; q < n; q++) {
6549 snprintf(s, sizeof(s), "%x%c%x", p,
6555 irq->nm_rxq = nm_rxq++;
6557 if (irq->nm_rxq != NULL &&
6559 /* Netmap rx only */
6560 rc = t4_alloc_irq(sc, irq, rid,
6561 t4_nm_intr, irq->nm_rxq, s);
6563 if (irq->nm_rxq != NULL &&
6565 /* NIC and Netmap rx */
6566 rc = t4_alloc_irq(sc, irq, rid,
6567 t4_vi_intr, irq, s);
6570 if (irq->rxq != NULL &&
6571 irq->nm_rxq == NULL) {
6573 rc = t4_alloc_irq(sc, irq, rid,
6574 t4_intr, irq->rxq, s);
6580 bus_bind_intr(sc->dev, irq->res,
6581 rss_getcpu(q % nbuckets));
6589 for_each_rxq(vi, q, rxq) {
6590 snprintf(s, sizeof(s), "%x%c%x", p,
6592 rc = t4_alloc_irq(sc, irq, rid,
6597 bus_bind_intr(sc->dev, irq->res,
6598 rss_getcpu(q % nbuckets));
6606 for_each_ofld_rxq(vi, q, ofld_rxq) {
6607 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
6608 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
6619 MPASS(irq == &sc->irq[sc->intr_count]);
6625 write_global_rss_key(struct adapter *sc)
6629 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
6630 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
6632 CTASSERT(RSS_KEYSIZE == 40);
6634 rss_getkey((void *)&raw_rss_key[0]);
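/*
 * The key is handed to the hardware in reverse 32b-word order and in
 * big-endian form: the loop below converts the kernel's key accordingly
 * before it is written with t4_write_rss_key.
 */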
6635 for (i = 0; i < nitems(rss_key); i++) {
6636 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
6638 t4_write_rss_key(sc, &rss_key[0], -1, 1);
6646 adapter_full_init(struct adapter *sc)
6650 ASSERT_SYNCHRONIZED_OP(sc);
6653 * queues that belong to the adapter (not any particular port).
6655 rc = t4_setup_adapter_queues(sc);
6659 for (i = 0; i < nitems(sc->tq); i++) {
6660 if (sc->tq[i] != NULL)
6662 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
6663 taskqueue_thread_enqueue, &sc->tq[i]);
6664 if (sc->tq[i] == NULL) {
6665 CH_ERR(sc, "failed to allocate task queue %d\n", i);
6668 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
6669 device_get_nameunit(sc->dev), i);
6672 if (!(sc->flags & IS_VF)) {
6673 write_global_rss_key(sc);
6680 adapter_init(struct adapter *sc)
6684 ASSERT_SYNCHRONIZED_OP(sc);
6685 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
6686 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
6687 ("%s: FULL_INIT_DONE already", __func__));
6689 rc = adapter_full_init(sc);
6691 adapter_full_uninit(sc);
6693 sc->flags |= FULL_INIT_DONE;
6702 adapter_full_uninit(struct adapter *sc)
6706 t4_teardown_adapter_queues(sc);
6708 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
6709 taskqueue_free(sc->tq[i]);
6713 sc->flags &= ~FULL_INIT_DONE;
6717 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
6718 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
6719 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
6720 RSS_HASHTYPE_RSS_UDP_IPV6)
6722 /* Translates kernel hash types to hardware. */
6724 hashconfig_to_hashen(int hashconfig)
6728 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
6729 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
6730 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
6731 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
6732 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
6733 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
6734 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
6736 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
6737 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
6738 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
6740 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
6741 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
6742 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
6743 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
6748 /* Translates hardware hash types to kernel. */
6750 hashen_to_hashconfig(int hashen)
6754 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
6756 * If UDP hashing was enabled it must have been enabled for
6757 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
6758 * enabling any 4-tuple hash is nonsense configuration.
6760 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6761 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
6763 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
6764 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
6765 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
6766 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
6768 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
6769 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
6770 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
6771 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
6772 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
6773 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
6774 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
6775 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
6777 return (hashconfig);
6785 vi_full_init(struct vi_info *vi)
6787 struct adapter *sc = vi->adapter;
6788 struct sge_rxq *rxq;
6791 int nbuckets = rss_getnumbuckets();
6792 int hashconfig = rss_gethashconfig();
6796 ASSERT_SYNCHRONIZED_OP(sc);
6799 * Allocate tx/rx/fl queues for this VI.
6801 rc = t4_setup_vi_queues(vi);
6806 * Setup RSS for this VI. Save a copy of the RSS table for later use.
6808 if (vi->nrxq > vi->rss_size) {
6809 CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); "
6810 "some queues will never receive traffic.\n", vi->nrxq,
6812 } else if (vi->rss_size % vi->nrxq) {
6813 CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); "
6814 "expect uneven traffic distribution.\n", vi->nrxq,
6818 if (vi->nrxq != nbuckets) {
6819 CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d);"
6820 "performance will be impacted.\n", vi->nrxq, nbuckets);
6823 if (vi->rss == NULL)
6824 vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE,
6826 for (i = 0; i < vi->rss_size;) {
6828 j = rss_get_indirection_to_bucket(i);
6830 rxq = &sc->sge.rxq[vi->first_rxq + j];
6831 vi->rss[i++] = rxq->iq.abs_id;
6833 for_each_rxq(vi, j, rxq) {
6834 vi->rss[i++] = rxq->iq.abs_id;
6835 if (i == vi->rss_size)
6841 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
6842 vi->rss, vi->rss_size);
6844 CH_ERR(vi, "rss_config failed: %d\n", rc);
6849 vi->hashen = hashconfig_to_hashen(hashconfig);
6852 * We may have had to enable some hashes even though the global config
6853 * wants them disabled. This is a potential problem that must be
6854 * reported to the user.
6856 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
6859 * If we consider only the supported hash types, then the enabled hashes
6860 * are a superset of the requested hashes. In other words, there cannot
6861 * be any supported hash that was requested but not enabled, but there
6862 * can be hashes that were not requested but had to be enabled.
6864 extra &= SUPPORTED_RSS_HASHTYPES;
6865 MPASS((extra & hashconfig) == 0);
6869 "global RSS config (0x%x) cannot be accommodated.\n",
6872 if (extra & RSS_HASHTYPE_RSS_IPV4)
6873 CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n");
6874 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
6875 CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n");
6876 if (extra & RSS_HASHTYPE_RSS_IPV6)
6877 CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n");
6878 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
6879 CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n");
6880 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
6881 CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n");
6882 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
6883 CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n");
6885 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
6886 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
6887 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6888 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
6890 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0],
6893 CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc);
6901 vi_init(struct vi_info *vi)
6905 ASSERT_SYNCHRONIZED_OP(vi->adapter);
6906 KASSERT((vi->flags & VI_INIT_DONE) == 0,
6907 ("%s: VI_INIT_DONE already", __func__));
6909 rc = vi_full_init(vi);
6913 vi->flags |= VI_INIT_DONE;
6922 vi_full_uninit(struct vi_info *vi)
6925 if (vi->flags & VI_INIT_DONE) {
6927 free(vi->rss, M_CXGBE);
6928 free(vi->nm_rss, M_CXGBE);
6931 t4_teardown_vi_queues(vi);
6932 vi->flags &= ~VI_INIT_DONE;
6936 quiesce_txq(struct sge_txq *txq)
6938 struct sge_eq *eq = &txq->eq;
6939 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
6941 MPASS(eq->flags & EQ_SW_ALLOCATED);
6942 MPASS(!(eq->flags & EQ_ENABLED));
6944 /* Wait for the mp_ring to empty. */
6945 while (!mp_ring_is_idle(txq->r)) {
6946 mp_ring_check_drainage(txq->r, 4096);
6947 pause("rquiesce", 1);
6949 MPASS(txq->txp.npkt == 0);
6951 if (eq->flags & EQ_HW_ALLOCATED) {
6953 * Hardware is alive and working normally. Wait for it to
* finish and then wait for the driver to catch up and reclaim all descriptors.
6957 while (spg->cidx != htobe16(eq->pidx))
6958 pause("equiesce", 1);
6959 while (eq->cidx != eq->pidx)
6960 pause("dquiesce", 1);
6963 * Hardware is unavailable. Discard all pending tx and reclaim
6964 * descriptors directly.
6967 while (eq->cidx != eq->pidx) {
6968 struct mbuf *m, *nextpkt;
6969 struct tx_sdesc *txsd;
6971 txsd = &txq->sdesc[eq->cidx];
6972 for (m = txsd->m; m != NULL; m = nextpkt) {
6973 nextpkt = m->m_nextpkt;
m->m_nextpkt = NULL;
m_freem(m);
}
IDXINCR(eq->cidx, txsd->desc_used, eq->sidx);
6979 spg->pidx = spg->cidx = htobe16(eq->cidx);
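/*
 * A sketch of the index convention the waits above rely on (the helper
 * is hypothetical; the field names are this driver's): eq->pidx is the
 * driver's producer index, eq->cidx its consumer index, and the status
 * page holds the hardware's consumer index in big-endian form.  The
 * queue is fully drained only when all three agree:
 *
 *	static inline bool
 *	eq_drained(const struct sge_eq *eq, const struct sge_qstat *spg)
 *	{
 *		return (spg->cidx == htobe16(eq->pidx) &&
 *		    eq->cidx == eq->pidx);
 *	}
 */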
6985 quiesce_wrq(struct sge_wrq *wrq)
6992 quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
6994 /* Synchronize with the interrupt handler */
while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
pause("iqfree", 1);
if (fl != NULL) {
MPASS(iq->flags & IQ_HAS_FL);
7001 mtx_lock(&sc->sfl_lock);
FL_LOCK(fl);
fl->flags |= FL_DOOMED;
FL_UNLOCK(fl);
7005 callout_stop(&sc->sfl_callout);
7006 mtx_unlock(&sc->sfl_lock);
7008 KASSERT((fl->flags & FL_STARVING) == 0,
7009 ("%s: still starving", __func__));
7011 /* Release all buffers if hardware is no longer available. */
7012 if (!(iq->flags & IQ_HW_ALLOCATED))
free_fl_buffers(sc, fl);
}
}
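/*
 * The IQS_IDLE -> IQS_DISABLED cmpset above pairs with the interrupt
 * path, which claims a queue IQS_IDLE -> IQS_BUSY and releases it
 * IQS_BUSY -> IQS_IDLE.  A sketch of that other side (illustrative
 * only; service_iq() stands in for the actual rx processing):
 *
 *	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
 *		service_iq(iq, 0);
 *		atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
 *	}
 *
 * Once IQS_DISABLED is stored here, the handler's cmpset from IQS_IDLE
 * can no longer succeed, so no new servicing begins.
 */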
7018 * Wait for all activity on all the queues of the VI to complete. It is assumed
7019 * that no new work is being enqueued by the hardware or the driver. That part
7020 * should be arranged before calling this function.
7023 quiesce_vi(struct vi_info *vi)
7026 struct adapter *sc = vi->adapter;
7027 struct sge_rxq *rxq;
7028 struct sge_txq *txq;
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
#endif
7032 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7033 struct sge_ofld_txq *ofld_txq;
if (!(vi->flags & VI_INIT_DONE))
return;
for_each_txq(vi, i, txq) {
quiesce_txq(txq);
}
7043 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7044 for_each_ofld_txq(vi, i, ofld_txq) {
quiesce_wrq(&ofld_txq->wrq);
}
#endif
7049 for_each_rxq(vi, i, rxq) {
quiesce_iq_fl(sc, &rxq->iq, &rxq->fl);
}
#ifdef TCP_OFFLOAD
for_each_ofld_rxq(vi, i, ofld_rxq) {
quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl);
}
#endif
}
7061 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
7062 driver_intr_t *handler, void *arg, char *name)
7067 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
7068 RF_SHAREABLE | RF_ACTIVE);
7069 if (irq->res == NULL) {
7070 device_printf(sc->dev,
"failed to allocate IRQ for rid %d, name %s.\n", rid, name);
return (ENOMEM);
}
7075 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
7076 NULL, handler, arg, &irq->tag);
if (rc != 0) {
device_printf(sc->dev,
"failed to set up interrupt for rid %d, name %s: %d\n",
rid, name, rc);
} else if (name)
bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);

return (rc);
}
7088 t4_free_irq(struct adapter *sc, struct irq *irq)
if (irq->tag)
bus_teardown_intr(sc->dev, irq->res, irq->tag);
if (irq->res)
bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
bzero(irq, sizeof(*irq));

return (0);
}
7101 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
7104 regs->version = chip_id(sc) | chip_rev(sc) << 10;
7105 t4_get_regs(sc, buf, regs->len);
7108 #define A_PL_INDIR_CMD 0x1f8
7110 #define S_PL_AUTOINC 31
7111 #define M_PL_AUTOINC 0x1U
7112 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
7113 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
7115 #define S_PL_VFID 20
7116 #define M_PL_VFID 0xffU
7117 #define V_PL_VFID(x) ((x) << S_PL_VFID)
7118 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
7121 #define M_PL_ADDR 0xfffffU
7122 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
7123 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
7125 #define A_PL_INDIR_DATA 0x1fc
7128 read_vf_stat(struct adapter *sc, u_int vin, int reg)
7132 if (sc->flags & IS_VF) {
7133 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
7134 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
7136 mtx_assert(&sc->reg_lock, MA_OWNED);
7137 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
7138 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
7139 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
7140 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
7142 return (((uint64_t)stats[1]) << 32 | stats[0]);
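/*
 * For illustration: with V_PL_AUTOINC(1) set, each read of
 * A_PL_INDIR_DATA returns the register currently addressed by
 * A_PL_INDIR_CMD and then advances the address, which is why the two
 * back-to-back reads above yield the _L and _H halves of one 64-bit MPS
 * counter.  The same pattern for any low/high pair (reg_lo is a
 * placeholder for the low half's offset):
 *
 *	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
 *	    V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg_lo)));
 *	lo = t4_read_reg(sc, A_PL_INDIR_DATA);
 *	hi = t4_read_reg(sc, A_PL_INDIR_DATA);
 *	val = (uint64_t)hi << 32 | lo;
 */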
7146 t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
7149 #define GET_STAT(name) \
7150 read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
7152 if (!(sc->flags & IS_VF))
7153 mtx_lock(&sc->reg_lock);
7154 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
7155 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
7156 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
7157 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
7158 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
7159 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
7160 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
7161 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
7162 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
7163 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
7164 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
7165 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
7166 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
7167 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
7168 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
7169 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
7170 if (!(sc->flags & IS_VF))
7171 mtx_unlock(&sc->reg_lock);
7177 t4_clr_vi_stats(struct adapter *sc, u_int vin)
7181 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
7182 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
7183 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
7184 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
7185 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
7189 vi_refresh_stats(struct vi_info *vi)
struct timeval tv;
const struct timeval interval = {0, 250000}; /* 250ms */

mtx_assert(&vi->tick_mtx, MA_OWNED);

if (vi->flags & VI_SKIP_STATS)
return;

getmicrotime(&tv);
timevalsub(&tv, &interval);
if (timevalcmp(&tv, &vi->last_refreshed, <))
return;
7204 t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats);
7205 getmicrotime(&vi->last_refreshed);
7209 cxgbe_refresh_stats(struct vi_info *vi)
u_int i, v, tnl_cong_drops, chan_map;
struct timeval tv;
const struct timeval interval = {0, 250000}; /* 250ms */
struct port_info *pi;
struct adapter *sc;
7217 mtx_assert(&vi->tick_mtx, MA_OWNED);
if (vi->flags & VI_SKIP_STATS)
return;

getmicrotime(&tv);
timevalsub(&tv, &interval);
if (timevalcmp(&tv, &vi->last_refreshed, <))
return;

pi = vi->pi;
sc = vi->adapter;
tnl_cong_drops = 0;
t4_get_port_stats(sc, pi->port_id, &pi->stats);
chan_map = pi->rx_e_chan_map;
while (chan_map) {
i = ffs(chan_map) - 1;
7234 mtx_lock(&sc->reg_lock);
7235 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
7236 A_TP_MIB_TNL_CNG_DROP_0 + i);
7237 mtx_unlock(&sc->reg_lock);
7238 tnl_cong_drops += v;
chan_map &= ~(1 << i);
}
7241 pi->tnl_cong_drops = tnl_cong_drops;
7242 getmicrotime(&vi->last_refreshed);
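/*
 * The channel-map walk above is the standard "iterate over set bits"
 * idiom.  A self-contained sketch of the pattern:
 *
 *	u_int map = 0x29;		(bits 0, 3, and 5 set)
 *	while (map) {
 *		int bit = ffs(map) - 1;	(lowest set bit first)
 *		...use bit...
 *		map &= ~(1 << bit);	(clear it and continue)
 *	}
 */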
7246 cxgbe_tick(void *arg)
7248 struct vi_info *vi = arg;
7250 MPASS(IS_MAIN_VI(vi));
7251 mtx_assert(&vi->tick_mtx, MA_OWNED);
7253 cxgbe_refresh_stats(vi);
7254 callout_schedule(&vi->tick, hz);
static void
vi_tick(void *arg)
{
struct vi_info *vi = arg;
7262 mtx_assert(&vi->tick_mtx, MA_OWNED);
7264 vi_refresh_stats(vi);
7265 callout_schedule(&vi->tick, hz);
7269 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
7271 static char *caps_decoder[] = {
7272 "\20\001IPMI\002NCSI", /* 0: NBM */
7273 "\20\001PPP\002QFC\003DCBX", /* 1: link */
7274 "\20\001INGRESS\002EGRESS", /* 2: switch */
7275 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
7276 "\006HASHFILTER\007ETHOFLD",
7277 "\20\001TOE", /* 4: TOE */
7278 "\20\001RDDP\002RDMAC", /* 5: RDMA */
7279 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
7280 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
7281 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
"\007T10DIF"
"\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
"\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */
"\004TLS_HW",
7286 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
"\004PO_INITIATOR\005PO_TARGET",
};
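/*
 * These are "%b" bit-field decoding strings (see printf(9)): the first
 * character is the numeric output base (\20 = 16, i.e. hex) and each
 * <bit-number><name> pair that follows names one bit, counting from 1.
 * For illustration, decoding the value 3 with the switch decoder above:
 *
 *	sbuf_printf(sb, "%b", 3, "\20\001INGRESS\002EGRESS");
 *
 * prints "3<INGRESS,EGRESS>".
 */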
7291 t4_sysctls(struct adapter *sc)
7293 struct sysctl_ctx_list *ctx = &sc->ctx;
7294 struct sysctl_oid *oid;
7295 struct sysctl_oid_list *children, *c0;
static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
7301 oid = device_get_sysctl_tree(sc->dev);
7302 c0 = children = SYSCTL_CHILDREN(oid);
7304 sc->sc_do_rxcopy = 1;
7305 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
7306 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
7308 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
7309 sc->params.nports, "# of ports");
7311 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
7312 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells,
7313 (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A",
7314 "available doorbells");
7316 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
sc->params.vpd.cclk, "core clock frequency (in kHz)");
7319 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
7320 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
7321 sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val),
7322 sysctl_int_array, "A", "interrupt holdoff timer values (us)");
7324 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
7325 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
7326 sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val),
7327 sysctl_int_array, "A", "interrupt holdoff packet counter values");
7329 t4_sge_sysctls(sc, ctx, children);
7331 sc->lro_timeout = 100;
7332 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
7333 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
7335 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
7336 &sc->debug_flags, 0, "flags to enable runtime debugging");
7338 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
7339 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
7341 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
7342 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
if (sc->flags & IS_VF)
return;
7347 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
7348 NULL, chip_rev(sc), "chip hardware revision");
7350 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
7351 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
7353 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
7354 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
7356 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
7357 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
7359 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
7360 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
7362 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
7363 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
7365 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
7366 sc->er_version, 0, "expansion ROM version");
7368 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
7369 sc->bs_version, 0, "bootstrap firmware version");
7371 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
7372 NULL, sc->params.scfg_vers, "serial config version");
7374 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
7375 NULL, sc->params.vpd_vers, "VPD version");
7377 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
7378 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
7380 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
7381 sc->cfcsum, "config file checksum");
7383 #define SYSCTL_CAP(name, n, text) \
7384 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
7385 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \
7386 (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \
7387 "available " text " capabilities")
7389 SYSCTL_CAP(nbmcaps, 0, "NBM");
7390 SYSCTL_CAP(linkcaps, 1, "link");
7391 SYSCTL_CAP(switchcaps, 2, "switch");
7392 SYSCTL_CAP(niccaps, 3, "NIC");
7393 SYSCTL_CAP(toecaps, 4, "TCP offload");
7394 SYSCTL_CAP(rdmacaps, 5, "RDMA");
7395 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
7396 SYSCTL_CAP(cryptocaps, 7, "crypto");
7397 SYSCTL_CAP(fcoecaps, 8, "FCoE");
7400 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
7401 NULL, sc->tids.nftids, "number of filters");
7403 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7404 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7405 sysctl_temperature, "I", "chip temperature (in Celsius)");
7406 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor",
7407 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
sysctl_reset_sensor, "I", "reset the chip's temperature sensor");
7410 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
7411 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7412 sysctl_loadavg, "A",
7413 "microprocessor load averages (debug firmwares only)");
7415 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
7416 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
7417 "I", "core Vdd (in mV)");
7419 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
7420 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS,
7421 sysctl_cpus, "A", "local CPUs");
7423 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
7424 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS,
7425 sysctl_cpus, "A", "preferred CPUs for interrupts");
7427 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
7428 &sc->swintr, 0, "software triggered interrupts");
7430 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset",
7431 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I",
7432 "1 = reset adapter, 0 = zero reset counter");
7435 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
7437 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
7438 CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
7439 "logs and miscellaneous information");
7440 children = SYSCTL_CHILDREN(oid);
7442 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
7443 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7444 sysctl_cctrl, "A", "congestion control");
7446 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
7447 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7448 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
7450 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
7451 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
7452 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
7454 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
7455 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
7456 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
7458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
7459 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
7460 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
7462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
7463 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
7464 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
7466 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
7467 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
7468 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
7470 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
7471 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7472 sysctl_cim_la, "A", "CIM logic analyzer");
7474 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
7475 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7476 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
7478 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
7479 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7480 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
7482 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
7483 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7484 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
7486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
7487 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7488 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
7490 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
7491 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7492 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
7494 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
7495 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7496 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
7498 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
7499 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7500 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
7502 if (chip_id(sc) > CHELSIO_T4) {
7503 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
7504 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7505 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
7506 "CIM OBQ 6 (SGE0-RX)");
7508 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
7509 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7510 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
7511 "CIM OBQ 7 (SGE1-RX)");
7514 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
7515 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7516 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
7518 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
7519 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7520 sysctl_cim_qcfg, "A", "CIM queue configuration");
7522 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
7523 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7524 sysctl_cpl_stats, "A", "CPL statistics");
7526 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
7527 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7528 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
7530 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats",
7531 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7532 sysctl_tid_stats, "A", "tid stats");
7534 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
7535 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7536 sysctl_devlog, "A", "firmware's device log");
7538 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
7539 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7540 sysctl_fcoe_stats, "A", "FCoE statistics");
7542 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
7543 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
sysctl_hw_sched, "A", "hardware scheduler");
7546 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
7547 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7548 sysctl_l2t, "A", "hardware L2 table");
7550 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
7551 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7552 sysctl_smt, "A", "hardware source MAC table");
7555 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
7556 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7557 sysctl_clip, "A", "active CLIP table entries");
7560 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
7561 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7562 sysctl_lb_stats, "A", "loopback statistics");
7564 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
7565 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7566 sysctl_meminfo, "A", "memory regions");
7568 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
7569 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7570 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
7571 "A", "MPS TCAM entries");
7573 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
7574 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7575 sysctl_path_mtus, "A", "path MTUs");
7577 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
7578 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7579 sysctl_pm_stats, "A", "PM statistics");
7581 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
7582 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7583 sysctl_rdma_stats, "A", "RDMA statistics");
7585 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
7586 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7587 sysctl_tcp_stats, "A", "TCP statistics");
7589 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
7590 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7591 sysctl_tids, "A", "TID information");
7593 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
7594 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7595 sysctl_tp_err_stats, "A", "TP error statistics");
7597 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats",
7598 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7599 sysctl_tnl_stats, "A", "TP tunnel statistics");
7601 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
7602 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7603 sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask");
7605 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
7606 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7607 sysctl_tp_la, "A", "TP logic analyzer");
7609 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
7610 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7611 sysctl_tx_rate, "A", "Tx rate");
7613 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
7614 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7615 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
7617 if (chip_id(sc) >= CHELSIO_T5) {
7618 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
7619 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7620 sysctl_wcwr_stats, "A", "write combined work requests");
7628 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls",
7629 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters");
7630 children = SYSCTL_CHILDREN(oid);
7632 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys",
7633 CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS "
"keys in work requests (1) or attempt to store TLS keys "
"in card memory.");
7638 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs",
7639 CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
"combine TCB field updates with TLS record work "
"requests.");
7646 if (is_offload(sc)) {
7653 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe",
7654 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters");
7655 children = SYSCTL_CHILDREN(oid);
7657 sc->tt.cong_algorithm = -1;
7658 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
7659 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
"(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
"3 = highspeed)");
7664 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
7665 &sc->tt.sndbuf, 0, "hardware send buffer");
7668 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
7669 CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, "");
7670 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
7671 &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");
7673 sc->tt.rx_coalesce = -1;
7674 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
7675 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
7678 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT |
7679 CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
7680 "Inline TLS allowed");
7682 sc->tt.tx_align = -1;
7683 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
7684 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
7686 sc->tt.tx_zcopy = 0;
7687 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
7688 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
7689 "Enable zero-copy aio_write(2)");
7691 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
7692 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7693 "cop_managed_offloading", CTLFLAG_RW,
7694 &sc->tt.cop_managed_offloading, 0,
7695 "COP (Connection Offload Policy) controls all TOE offload");
7697 sc->tt.autorcvbuf_inc = 16 * 1024;
7698 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
7699 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
7700 "autorcvbuf increment");
7702 sc->tt.update_hc_on_pmtu_change = 1;
7703 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7704 "update_hc_on_pmtu_change", CTLFLAG_RW,
7705 &sc->tt.update_hc_on_pmtu_change, 0,
7706 "Update hostcache entry if the PMTU changes");
7709 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iso", CTLFLAG_RW,
7710 &sc->tt.iso, 0, "Enable iSCSI segmentation offload");
7712 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
7713 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7714 sysctl_tp_tick, "A", "TP timer tick (us)");
7716 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
7717 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
7718 sysctl_tp_tick, "A", "TCP timestamp tick (us)");
7720 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
7721 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
7722 sysctl_tp_tick, "A", "DACK tick (us)");
7724 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
7725 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7726 sysctl_tp_dack_timer, "IU", "DACK timer (us)");
7728 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
7729 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7730 A_TP_RXT_MIN, sysctl_tp_timer, "LU",
7731 "Minimum retransmit interval (us)");
7733 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
7734 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7735 A_TP_RXT_MAX, sysctl_tp_timer, "LU",
7736 "Maximum retransmit interval (us)");
7738 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
7739 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7740 A_TP_PERS_MIN, sysctl_tp_timer, "LU",
7741 "Persist timer min (us)");
7743 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
7744 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7745 A_TP_PERS_MAX, sysctl_tp_timer, "LU",
7746 "Persist timer max (us)");
7748 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
7749 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7750 A_TP_KEEP_IDLE, sysctl_tp_timer, "LU",
7751 "Keepalive idle timer (us)");
7753 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
7754 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7755 A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
7756 "Keepalive interval timer (us)");
7758 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
7759 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7760 A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)");
7762 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
7763 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7764 A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
7765 "FINWAIT2 timer (us)");
7767 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
7768 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7769 S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
7770 "Number of SYN retransmissions before abort");
7772 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
7773 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7774 S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
7775 "Number of retransmissions before abort");
7777 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
7778 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7779 S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
7780 "Number of keepalive probes before abort");
7782 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
7783 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7784 "TOE retransmit backoffs");
7785 children = SYSCTL_CHILDREN(oid);
7786 for (i = 0; i < 16; i++) {
7787 snprintf(s, sizeof(s), "%u", i);
7788 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
7789 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7790 i, sysctl_tp_backoff, "IU",
7791 "TOE retransmit backoff");
7798 vi_sysctls(struct vi_info *vi)
7800 struct sysctl_ctx_list *ctx = &vi->ctx;
7801 struct sysctl_oid *oid;
7802 struct sysctl_oid_list *children;
7805 * dev.v?(cxgbe|cxl).X.
7807 oid = device_get_sysctl_tree(vi->dev);
7808 children = SYSCTL_CHILDREN(oid);
7810 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
vi->viid, "VI identifier");
7812 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
7813 &vi->nrxq, 0, "# of rx queues");
7814 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
7815 &vi->ntxq, 0, "# of tx queues");
7816 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
7817 &vi->first_rxq, 0, "index of first rx queue");
7818 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
7819 &vi->first_txq, 0, "index of first tx queue");
7820 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
7821 vi->rss_base, "start of RSS indirection table");
7822 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
7823 vi->rss_size, "size of RSS indirection table");
7825 if (IS_MAIN_VI(vi)) {
7826 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
7827 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7828 sysctl_noflowq, "IU",
7829 "Reserve queue 0 for non-flowid packets");
7832 if (vi->adapter->flags & IS_VF) {
7833 MPASS(vi->flags & TX_USES_VM_WR);
7834 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD,
7835 NULL, 1, "use VM work requests for transmit");
} else {
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
7838 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
sysctl_tx_vm_wr, "I", "use VM work requests for transmit");
}
#ifdef TCP_OFFLOAD
if (vi->nofldrxq != 0) {
7844 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
&vi->nofldrxq, 0,
"# of rx queues for offloaded TCP connections");
7847 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
7848 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
7849 "index of first TOE rx queue");
7850 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
7851 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7852 sysctl_holdoff_tmr_idx_ofld, "I",
7853 "holdoff timer index for TOE queues");
7854 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
7855 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7856 sysctl_holdoff_pktc_idx_ofld, "I",
"holdoff packet counter index for TOE queues");
}
#endif
7860 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7861 if (vi->nofldtxq != 0) {
7862 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
&vi->nofldtxq, 0,
"# of tx queues for TOE/ETHOFLD");
7865 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
7866 CTLFLAG_RD, &vi->first_ofld_txq, 0,
"index of first TOE/ETHOFLD tx queue");
}
#endif
#ifdef DEV_NETMAP
if (vi->nnmrxq != 0) {
7872 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
7873 &vi->nnmrxq, 0, "# of netmap rx queues");
7874 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
7875 &vi->nnmtxq, 0, "# of netmap tx queues");
7876 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
7877 CTLFLAG_RD, &vi->first_nm_rxq, 0,
7878 "index of first netmap rx queue");
7879 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
7880 CTLFLAG_RD, &vi->first_nm_txq, 0,
"index of first netmap tx queue");
}
#endif
7885 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
7886 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7887 sysctl_holdoff_tmr_idx, "I", "holdoff timer index");
7888 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
7889 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7890 sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index");
7892 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
7893 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7894 sysctl_qsize_rxq, "I", "rx queue size");
7895 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
7896 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7897 sysctl_qsize_txq, "I", "tx queue size");
7901 cxgbe_sysctls(struct port_info *pi)
7903 struct sysctl_ctx_list *ctx = &pi->ctx;
7904 struct sysctl_oid *oid;
7905 struct sysctl_oid_list *children, *children2;
7906 struct adapter *sc = pi->adapter;
static char *tc_flags = "\20\1USER";
7914 oid = device_get_sysctl_tree(pi->dev);
7915 children = SYSCTL_CHILDREN(oid);
7917 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc",
7918 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7919 sysctl_linkdnrc, "A", "reason why link is down");
7920 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
7921 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7922 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7923 sysctl_btphy, "I", "PHY temperature (in Celsius)");
7924 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
7925 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1,
7926 sysctl_btphy, "I", "PHY firmware version");
7929 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
7930 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7931 sysctl_pause_settings, "A",
7932 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
7933 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_fec",
7934 CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_link_fec, "A",
7935 "FEC in use on the link");
7936 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "requested_fec",
7937 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7938 sysctl_requested_fec, "A",
7939 "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)");
7940 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec",
7941 CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A",
7942 "FEC recommended by the cable/transceiver");
7943 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
7944 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7945 sysctl_autoneg, "I",
7946 "autonegotiation (-1 = not supported)");
7947 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_fec",
7948 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7949 sysctl_force_fec, "I", "when to use FORCE_FEC bit for link config");
7951 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rcaps", CTLFLAG_RD,
7952 &pi->link_cfg.requested_caps, 0, "L1 config requested by driver");
7953 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD,
7954 &pi->link_cfg.pcaps, 0, "port capabilities");
7955 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD,
7956 &pi->link_cfg.acaps, 0, "advertised capabilities");
7957 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD,
7958 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities");
7960 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
7961 port_top_speed(pi), "max speed (in Gbps)");
7962 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
7963 pi->mps_bg_map, "MPS buffer group map");
7964 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
7965 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
7966 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_c_chan", CTLFLAG_RD, NULL,
7967 pi->rx_c_chan, "TP rx c-channel");
7969 if (sc->flags & IS_VF)
7973 * dev.(cxgbe|cxl).X.tc.
7975 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc",
7976 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7977 "Tx scheduler traffic classes (cl_rl)");
7978 children2 = SYSCTL_CHILDREN(oid);
7979 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
7980 CTLFLAG_RW, &pi->sched_params->pktsize, 0,
"pktsize for per-flow cl-rl (0 means up to the driver)");
7982 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
7983 CTLFLAG_RW, &pi->sched_params->burstsize, 0,
7984 "burstsize for per-flow cl-rl (0 means up to the driver)");
7985 for (i = 0; i < sc->params.nsched_cls; i++) {
7986 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
7988 snprintf(name, sizeof(name), "%d", i);
7989 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
7990 SYSCTL_CHILDREN(oid), OID_AUTO, name,
7991 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class"));
7992 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "state",
7993 CTLFLAG_RD, &tc->state, 0, "current state");
7994 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
7995 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags,
7996 (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags");
7997 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
7998 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
7999 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
8000 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
8001 (pi->port_id << 16) | i, sysctl_tc_params, "A",
8002 "traffic class parameters");
8006 * dev.cxgbe.X.stats.
8008 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
8009 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics");
8010 children = SYSCTL_CHILDREN(oid);
8011 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
8012 &pi->tx_parse_error, 0,
8013 "# of tx packets with invalid length or # of segments");
8015 #define T4_REGSTAT(name, stat, desc) \
8016 SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
8017 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
8018 (is_t4(sc) ? PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L) : \
8019 T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L)), \
8020 sysctl_handle_t4_reg64, "QU", desc)
8022 /* We get these from port_stats and they may be stale by up to 1s */
8023 #define T4_PORTSTAT(name, desc) \
8024 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
8025 &pi->stats.name, desc)
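/*
 * For illustration, T4_REGSTAT(tx_octets, TX_PORT_BYTES, desc) below
 * expands to roughly the following (the is_t4() ternary selects the T4
 * or T5+ register layout at runtime):
 *
 *	SYSCTL_ADD_OID(ctx, children, OID_AUTO, "tx_octets",
 *	    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
 *	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L),
 *	    sysctl_handle_t4_reg64, "QU", desc);
 *
 * so these stats hit the MPS registers on every read, while the
 * T4_PORTSTAT values come from the cached pi->stats and may be up to 1s
 * stale.
 */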
8027 T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
8028 T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
8029 T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
8030 T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
8031 T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
8032 T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
8033 T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
8034 T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
8035 T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
8036 T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
8037 T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
8038 T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
8039 T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
8040 T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
8041 T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
8042 T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
8043 T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
8044 T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
8045 T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
8046 T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
8047 T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
8048 T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
8049 T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
8051 T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
8052 T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
8053 T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
8054 T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
8055 T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
8056 T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
8057 T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
if (is_t6(sc)) {
T4_PORTSTAT(rx_fcs_err,
8060 "# of frames received with bad FCS since last link up");
} else {
T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
"# of frames received with bad FCS");
}
8065 T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
8066 T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
8067 T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
8068 T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
8069 T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
8070 T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
8071 T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
8072 T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
8073 T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
8074 T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
8075 T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
8076 T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
8077 T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
8078 T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
8079 T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
8080 T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
8081 T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
8082 T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
8083 T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
8085 T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
8086 T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
8087 T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
8088 T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
8089 T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
8090 T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
8091 T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
8092 T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
8099 sysctl_int_array(SYSCTL_HANDLER_ARGS)
8101 int rc, *i, space = 0;
8104 sbuf_new_for_sysctl(&sb, NULL, 64, req);
8105 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
if (space)
sbuf_printf(&sb, " ");
sbuf_printf(&sb, "%d", *i);
space = 1;
}
8111 rc = sbuf_finish(&sb);
8117 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
8122 rc = sysctl_wire_old_buffer(req, 0);
8126 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8130 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
8131 rc = sbuf_finish(sb);
8138 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
8143 rc = sysctl_wire_old_buffer(req, 0);
8147 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8151 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
8152 rc = sbuf_finish(sb);
8159 sysctl_btphy(SYSCTL_HANDLER_ARGS)
8161 struct port_info *pi = arg1;
int op = arg2;
struct adapter *sc = pi->adapter;
8167 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
if (rc)
return (rc);
if (hw_off_limits(sc))
rc = ENXIO;
else {
8173 /* XXX: magic numbers */
8174 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e,
8175 op ? 0x20 : 0xc820, &v);
}
end_synchronized_op(sc, 0);
if (rc)
return (rc);
if (op == 0)
v /= 256;

rc = sysctl_handle_int(oidp, &v, 0, req);
8188 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
8190 struct vi_info *vi = arg1;
8193 val = vi->rsrv_noflowq;
8194 rc = sysctl_handle_int(oidp, &val, 0, req);
8195 if (rc != 0 || req->newptr == NULL)
8198 if ((val >= 1) && (vi->ntxq > 1))
8199 vi->rsrv_noflowq = 1;
else
vi->rsrv_noflowq = 0;
8207 sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
8209 struct vi_info *vi = arg1;
8210 struct adapter *sc = vi->adapter;
8213 MPASS(!(sc->flags & IS_VF));
8215 val = vi->flags & TX_USES_VM_WR ? 1 : 0;
8216 rc = sysctl_handle_int(oidp, &val, 0, req);
8217 if (rc != 0 || req->newptr == NULL)
8220 if (val != 0 && val != 1)
8223 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
if (hw_off_limits(sc))
rc = ENXIO;
else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) {
8231 * We don't want parse_pkt to run with one setting (VF or PF)
8232 * and then eth_tx to see a different setting but still use
8233 * stale information calculated by parse_pkt.
rc = EBUSY;
} else {
struct port_info *pi = vi->pi;
8238 struct sge_txq *txq;
8240 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr;
if (val) {
vi->flags |= TX_USES_VM_WR;
8244 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
8245 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
8246 V_TXPKT_INTF(pi->tx_chan));
8247 if (!(sc->flags & IS_VF))
npkt--;
} else {
vi->flags &= ~TX_USES_VM_WR;
8251 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
8252 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
8253 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
8254 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
8256 for_each_txq(vi, i, txq) {
8257 txq->cpl_ctrl0 = ctrl0;
8258 txq->txp.max_npkt = npkt;
}
}
end_synchronized_op(sc, LOCK_HELD);
return (rc);
}
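/*
 * Most sysctl handlers in this file follow the bracket used above.  A
 * minimal sketch of the idiom (the "t4xxxx" wmesg is a placeholder):
 *
 *	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 *	    "t4xxxx");
 *	if (rc)
 *		return (rc);
 *	...sleepable, single-threaded section; check hw_off_limits(sc)
 *	   before touching hardware...
 *	end_synchronized_op(sc, LOCK_HELD);
 *
 * The flags passed to end_synchronized_op() must match how the op was
 * begun: LOCK_HELD pairs with HOLD_LOCK, 0 with the plain variant.
 */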
8266 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
8268 struct vi_info *vi = arg1;
8269 struct adapter *sc = vi->adapter;
8271 struct sge_rxq *rxq;
8276 rc = sysctl_handle_int(oidp, &idx, 0, req);
8277 if (rc != 0 || req->newptr == NULL)
8280 if (idx < 0 || idx >= SGE_NTIMERS)
8283 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8288 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
8289 for_each_rxq(vi, i, rxq) {
8290 #ifdef atomic_store_rel_8
8291 atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
rxq->iq.intr_params = v;
#endif
}
vi->tmr_idx = idx;
8298 end_synchronized_op(sc, LOCK_HELD);
8303 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
8305 struct vi_info *vi = arg1;
8306 struct adapter *sc = vi->adapter;
8311 rc = sysctl_handle_int(oidp, &idx, 0, req);
8312 if (rc != 0 || req->newptr == NULL)
8315 if (idx < -1 || idx >= SGE_NCOUNTERS)
8318 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8323 if (vi->flags & VI_INIT_DONE)
8324 rc = EBUSY; /* cannot be changed once the queues are created */
8328 end_synchronized_op(sc, LOCK_HELD);
8333 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
8335 struct vi_info *vi = arg1;
8336 struct adapter *sc = vi->adapter;
8339 qsize = vi->qsize_rxq;
8341 rc = sysctl_handle_int(oidp, &qsize, 0, req);
8342 if (rc != 0 || req->newptr == NULL)
8345 if (qsize < 128 || (qsize & 7))
8348 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8353 if (vi->flags & VI_INIT_DONE)
8354 rc = EBUSY; /* cannot be changed once the queues are created */
8356 vi->qsize_rxq = qsize;
8358 end_synchronized_op(sc, LOCK_HELD);
8363 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
8365 struct vi_info *vi = arg1;
8366 struct adapter *sc = vi->adapter;
8369 qsize = vi->qsize_txq;
8371 rc = sysctl_handle_int(oidp, &qsize, 0, req);
8372 if (rc != 0 || req->newptr == NULL)
8375 if (qsize < 128 || qsize > 65536)
8378 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8383 if (vi->flags & VI_INIT_DONE)
8384 rc = EBUSY; /* cannot be changed once the queues are created */
8386 vi->qsize_txq = qsize;
8388 end_synchronized_op(sc, LOCK_HELD);
8393 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
8395 struct port_info *pi = arg1;
8396 struct adapter *sc = pi->adapter;
8397 struct link_config *lc = &pi->link_cfg;
8400 if (req->newptr == NULL) {
8402 static char *bits = "\20\1RX\2TX\3AUTO";
8404 rc = sysctl_wire_old_buffer(req, 0);
8408 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8413 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
8414 (lc->requested_fc & PAUSE_AUTONEG), bits);
8416 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
8417 PAUSE_RX | PAUSE_AUTONEG), bits);
8419 rc = sbuf_finish(sb);
8425 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
8429 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
8435 if (s[0] < '0' || s[0] > '9')
8436 return (EINVAL); /* not a number */
n = s[0] - '0';
if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
8439 return (EINVAL); /* some other bit is set too */
8441 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8445 if (!hw_off_limits(sc)) {
8447 lc->requested_fc = n;
8448 fixup_link_config(pi);
8450 rc = apply_link_config(pi);
8451 set_current_media(pi);
8454 end_synchronized_op(sc, 0);
8461 sysctl_link_fec(SYSCTL_HANDLER_ARGS)
8463 struct port_info *pi = arg1;
8464 struct link_config *lc = &pi->link_cfg;
8467 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
8469 rc = sysctl_wire_old_buffer(req, 0);
8473 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8477 sbuf_printf(sb, "%b", lc->fec, bits);
8479 sbuf_printf(sb, "no link");
8480 rc = sbuf_finish(sb);
8487 sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
8489 struct port_info *pi = arg1;
8490 struct adapter *sc = pi->adapter;
8491 struct link_config *lc = &pi->link_cfg;
8495 if (req->newptr == NULL) {
8497 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
8498 "\5RSVD3\6auto\7module";
8500 rc = sysctl_wire_old_buffer(req, 0);
8504 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8508 sbuf_printf(sb, "%b", lc->requested_fec, bits);
8509 rc = sbuf_finish(sb);
8515 snprintf(s, sizeof(s), "%d",
8516 lc->requested_fec == FEC_AUTO ? -1 :
8517 lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE));
8519 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
8523 n = strtol(&s[0], NULL, 0);
if (n < 0 || n & FEC_AUTO)
n = FEC_AUTO;
8526 else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE))
return (EINVAL); /* some other bit is set too */
8529 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8534 old = lc->requested_fec;
if (n == FEC_AUTO)
lc->requested_fec = FEC_AUTO;
8537 else if (n == 0 || n == FEC_NONE)
8538 lc->requested_fec = FEC_NONE;
else {
if ((lc->pcaps |
V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) !=
lc->pcaps) {
rc = ENOTSUP;
goto done;
}
lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
FEC_MODULE);
}
8549 if (!hw_off_limits(sc)) {
8550 fixup_link_config(pi);
8551 if (pi->up_vis > 0) {
8552 rc = apply_link_config(pi);
8554 lc->requested_fec = old;
8555 if (rc == FW_EPROTO)
8562 end_synchronized_op(sc, 0);
8569 sysctl_module_fec(SYSCTL_HANDLER_ARGS)
8571 struct port_info *pi = arg1;
8572 struct adapter *sc = pi->adapter;
8573 struct link_config *lc = &pi->link_cfg;
8577 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
8579 rc = sysctl_wire_old_buffer(req, 0);
8583 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8587 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) {
8591 if (hw_off_limits(sc)) {
8596 if (pi->up_vis == 0) {
8598 * If all the interfaces are administratively down the firmware
8599 * does not report transceiver changes. Refresh port info here.
8600 * This is the only reason we have a synchronized op in this
8601 * function. Just PORT_LOCK would have been enough otherwise.
8603 t4_update_port_info(pi);
8607 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
8608 !fec_supported(lc->pcaps)) {
8609 sbuf_printf(sb, "n/a");
8613 sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
8615 rc = sbuf_finish(sb);
8619 end_synchronized_op(sc, 0);
8625 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
8627 struct port_info *pi = arg1;
8628 struct adapter *sc = pi->adapter;
8629 struct link_config *lc = &pi->link_cfg;
8632 if (lc->pcaps & FW_PORT_CAP32_ANEG)
8633 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
8636 rc = sysctl_handle_int(oidp, &val, 0, req);
8637 if (rc != 0 || req->newptr == NULL)
8640 val = AUTONEG_DISABLE;
8642 val = AUTONEG_ENABLE;
8646 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8651 if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
8655 lc->requested_aneg = val;
8656 if (!hw_off_limits(sc)) {
8657 fixup_link_config(pi);
8659 rc = apply_link_config(pi);
8660 set_current_media(pi);
8664 end_synchronized_op(sc, 0);
8669 sysctl_force_fec(SYSCTL_HANDLER_ARGS)
8671 struct port_info *pi = arg1;
8672 struct adapter *sc = pi->adapter;
8673 struct link_config *lc = &pi->link_cfg;
8676 val = lc->force_fec;
8677 MPASS(val >= -1 && val <= 1);
8678 rc = sysctl_handle_int(oidp, &val, 0, req);
8679 if (rc != 0 || req->newptr == NULL)
8681 if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC))
8683 if (val < -1 || val > 1)
8686 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff");
8690 lc->force_fec = val;
8691 if (!hw_off_limits(sc)) {
8692 fixup_link_config(pi);
8694 rc = apply_link_config(pi);
8697 end_synchronized_op(sc, 0);
8702 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
8704 struct adapter *sc = arg1;
8708 mtx_lock(&sc->reg_lock);
8709 if (hw_off_limits(sc))
8713 val = t4_read_reg64(sc, reg);
8715 mtx_unlock(&sc->reg_lock);
8717 rc = sysctl_handle_64(oidp, &val, 0, req);
8722 sysctl_temperature(SYSCTL_HANDLER_ARGS)
8724 struct adapter *sc = arg1;
8726 uint32_t param, val;
8728 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
8731 if (hw_off_limits(sc))
8734 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8735 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8736 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
8737 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
8739 end_synchronized_op(sc, 0);
8743 /* unknown is returned as 0 but we display -1 in that case */
8744 t = val == 0 ? -1 : val;
8746 rc = sysctl_handle_int(oidp, &t, 0, req);
8751 sysctl_vdd(SYSCTL_HANDLER_ARGS)
8753 struct adapter *sc = arg1;
8755 uint32_t param, val;
8757 if (sc->params.core_vdd == 0) {
8758 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8762 if (hw_off_limits(sc))
8765 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8766 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8767 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
8768 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
8771 end_synchronized_op(sc, 0);
8774 sc->params.core_vdd = val;
8777 return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
8781 sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
8783 struct adapter *sc = arg1;
8785 uint32_t param, val;
8787 v = sc->sensor_resets;
8788 rc = sysctl_handle_int(oidp, &v, 0, req);
8789 if (rc != 0 || req->newptr == NULL || v <= 0)
8792 if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
8793 chip_id(sc) < CHELSIO_T5)
8796 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
8799 if (hw_off_limits(sc))
8802 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8803 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8804 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
8806 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
8808 end_synchronized_op(sc, 0);
8810 sc->sensor_resets++;
8815 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
8817 struct adapter *sc = arg1;
8820 uint32_t param, val;
8822 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
8825 if (hw_off_limits(sc))
8828 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8829 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
8830 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
8832 end_synchronized_op(sc, 0);
8836 rc = sysctl_wire_old_buffer(req, 0);
8840 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8844 if (val == 0xffffffff) {
8845 /* Only debug and custom firmwares report load averages. */
8846 sbuf_printf(sb, "not available");
8848 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
8849 (val >> 16) & 0xff);
8851 rc = sbuf_finish(sb);
8858 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
8860 struct adapter *sc = arg1;
8863 uint16_t incr[NMTUS][NCCTRL_WIN];
8864 static const char *dec_fac[] = {
8865 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
8869 rc = sysctl_wire_old_buffer(req, 0);
8873 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8877 mtx_lock(&sc->reg_lock);
8878 if (hw_off_limits(sc))
8881 t4_read_cong_tbl(sc, incr);
8882 mtx_unlock(&sc->reg_lock);
8886 for (i = 0; i < NCCTRL_WIN; ++i) {
8887 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
8888 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
8889 incr[5][i], incr[6][i], incr[7][i]);
8890 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
8891 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
8892 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
8893 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
8896 rc = sbuf_finish(sb);
8902 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
8903 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
8904 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
8905 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
8909 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
8911 struct adapter *sc = arg1;
8913 int rc, i, n, qid = arg2;
8916 u_int cim_num_obq = sc->chip_params->cim_num_obq;
8918 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
8919 ("%s: bad qid %d\n", __func__, qid));
8921 if (qid < CIM_NUM_IBQ) {
/* inbound queue */
qtype = "IBQ";
n = 4 * CIM_IBQ_SIZE;
8925 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
8926 mtx_lock(&sc->reg_lock);
8927 if (hw_off_limits(sc))
8930 rc = t4_read_cim_ibq(sc, qid, buf, n);
8931 mtx_unlock(&sc->reg_lock);
} else {
/* outbound queue */
qtype = "OBQ";
qid -= CIM_NUM_IBQ;
n = 4 * cim_num_obq * CIM_OBQ_SIZE;
8937 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
8938 mtx_lock(&sc->reg_lock);
8939 if (hw_off_limits(sc))
8942 rc = t4_read_cim_obq(sc, qid, buf, n);
8943 mtx_unlock(&sc->reg_lock);
8950 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
8952 rc = sysctl_wire_old_buffer(req, 0);
8956 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
8963 for (i = 0, p = buf; i < n; i += 16, p += 4)
sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
p[2], p[3]);
8967 rc = sbuf_finish(sb);
8975 sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
8979 sbuf_printf(sb, "Status Data PC%s",
8980 cfg & F_UPDBGLACAPTPCONLY ? "" :
8981 " LS0Stat LS0Addr LS0Data");
8983 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
8984 if (cfg & F_UPDBGLACAPTPCONLY) {
sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
p[6], p[7]);
8987 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
8988 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
8989 p[4] & 0xff, p[5] >> 8);
8990 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
8991 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8992 p[1] & 0xf, p[2] >> 4);
"\n %02x %x%07x %x%07x %08x %08x "
"%08x%08x%08x%08x",
8997 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
p[6], p[7]);
9005 sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
9009 sbuf_printf(sb, "Status Inst Data PC%s",
9010 cfg & F_UPDBGLACAPTPCONLY ? "" :
9011 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
9013 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
9014 if (cfg & F_UPDBGLACAPTPCONLY) {
9015 sbuf_printf(sb, "\n %02x %08x %08x %08x",
9016 p[3] & 0xff, p[2], p[1], p[0]);
9017 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
9018 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
9019 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
9020 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
9021 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
9022 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
9025 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
9026 "%08x %08x %08x %08x %08x %08x",
9027 (p[9] >> 16) & 0xff,
9028 p[9] & 0xffff, p[8] >> 16,
9029 p[8] & 0xffff, p[7] >> 16,
9030 p[7] & 0xffff, p[6] >> 16,
9031 p[2], p[1], p[0], p[5], p[4], p[3]);
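/*
 * Note the entry widths above: T4/T5 logic-analyzer entries are 8 words
 * (sbuf_cim_la4 steps p += 8) while T6 entries are 10 words
 * (sbuf_cim_la6 steps p += 10); sbuf_cim_la() below picks the decoder by
 * chip_id().
 */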
9037 sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
9042 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
9043 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
9048 mtx_lock(&sc->reg_lock);
9049 if (hw_off_limits(sc))
9052 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
9054 rc = -t4_cim_read_la(sc, buf, NULL);
9056 mtx_unlock(&sc->reg_lock);
9058 if (chip_id(sc) < CHELSIO_T6)
9059 sbuf_cim_la4(sc, sb, buf, cfg);
9061 sbuf_cim_la6(sc, sb, buf, cfg);
9068 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
9070 struct adapter *sc = arg1;
9074 rc = sysctl_wire_old_buffer(req, 0);
9077 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9081 rc = sbuf_cim_la(sc, sb, M_WAITOK);
9083 rc = sbuf_finish(sb);
9089 dump_cim_regs(struct adapter *sc)
9091 log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n",
9092 device_get_nameunit(sc->dev),
9093 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
9094 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
9095 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2),
9096 t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN),
9097 t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA));
9098 log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n",
9099 device_get_nameunit(sc->dev),
9100 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
9101 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
9102 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0 + 0x800),
9103 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1 + 0x800),
9104 t4_read_reg(sc, A_EDC_H_BIST_CMD_LEN));
9108 dump_cimla(struct adapter *sc)
9113 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
9114 log(LOG_DEBUG, "%s: failed to generate CIM LA dump.\n",
9115 device_get_nameunit(sc->dev));
9118 rc = sbuf_cim_la(sc, &sb, M_WAITOK);
9120 rc = sbuf_finish(&sb);
9122 log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s\n",
9123 device_get_nameunit(sc->dev), sbuf_data(&sb));
9130 t4_os_cim_err(struct adapter *sc)
9132 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
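/*
 * Sketch of the intended flow (an assumption based on the helpers above):
 * shared code calls t4_os_cim_err() from a context that cannot sleep, and
 * a later sleepable context notices ADAP_CIM_ERR in sc->error_flags and
 * captures state with dump_cim_regs()/dump_cimla() for post-mortem
 * analysis.
 */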
9136 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
9138 struct adapter *sc = arg1;
9144 rc = sysctl_wire_old_buffer(req, 0);
9148 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9152 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
9155 mtx_lock(&sc->reg_lock);
9156 if (hw_off_limits(sc))
9159 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
9160 mtx_unlock(&sc->reg_lock);
9165 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
9166 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
9170 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
9171 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
9172 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
9173 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
9174 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
9175 (p[1] >> 2) | ((p[2] & 3) << 30),
9176 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
9179 rc = sbuf_finish(sb);
9187 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
9189 struct adapter *sc = arg1;
9195 rc = sysctl_wire_old_buffer(req, 0);
9199 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9203 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
9206 mtx_lock(&sc->reg_lock);
9207 if (hw_off_limits(sc))
9210 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
9211 mtx_unlock(&sc->reg_lock);
9216 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
9217 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
9218 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
9219 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
9220 p[4], p[3], p[2], p[1], p[0]);
9223 sbuf_printf(sb, "\n\nCntl ID Data");
9224 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
9225 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
9226 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
9229 rc = sbuf_finish(sb);
9237 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
9239 struct adapter *sc = arg1;
9242 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
9243 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
9244 uint16_t thres[CIM_NUM_IBQ];
9245 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
9246 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
9247 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
9249 cim_num_obq = sc->chip_params->cim_num_obq;
9251 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
9252 obq_rdaddr = A_UP_OBQ_0_REALADDR;
9254 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
9255 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
9257 nq = CIM_NUM_IBQ + cim_num_obq;
9259 mtx_lock(&sc->reg_lock);
9260 if (hw_off_limits(sc))
9263 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
9265 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq,
9268 t4_read_cimq_cfg(sc, base, size, thres);
9271 mtx_unlock(&sc->reg_lock);
9275 rc = sysctl_wire_old_buffer(req, 0);
9279 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
9284 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
9286 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
9287 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
9288 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
9289 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
9290 G_QUEREMFLITS(p[2]) * 16);
9291 for ( ; i < nq; i++, p += 4, wr += 2)
9292 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
9293 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
9294 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
9295 G_QUEREMFLITS(p[2]) * 16);
9297 rc = sbuf_finish(sb);
9304 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
9306 struct adapter *sc = arg1;
9309 struct tp_cpl_stats stats;
9311 rc = sysctl_wire_old_buffer(req, 0);
9315 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9319 mtx_lock(&sc->reg_lock);
9320 if (hw_off_limits(sc))
9323 t4_tp_get_cpl_stats(sc, &stats, 0);
9324 mtx_unlock(&sc->reg_lock);
9328 if (sc->chip_params->nchan > 2) {
9329 sbuf_printf(sb, " channel 0 channel 1"
9330 " channel 2 channel 3");
9331 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
9332 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
9333 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
9334 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
9336 sbuf_printf(sb, " channel 0 channel 1");
9337 sbuf_printf(sb, "\nCPL requests: %10u %10u",
9338 stats.req[0], stats.req[1]);
9339 sbuf_printf(sb, "\nCPL responses: %10u %10u",
9340 stats.rsp[0], stats.rsp[1]);
9343 rc = sbuf_finish(sb);
9350 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
9352 struct adapter *sc = arg1;
9355 struct tp_usm_stats stats;
9357 rc = sysctl_wire_old_buffer(req, 0);
9361 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9365 mtx_lock(&sc->reg_lock);
9366 if (hw_off_limits(sc))
9369 t4_get_usm_stats(sc, &stats, 1);
9370 mtx_unlock(&sc->reg_lock);
9372 sbuf_printf(sb, "Frames: %u\n", stats.frames);
9373 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
9374 sbuf_printf(sb, "Drops: %u", stats.drops);
9375 rc = sbuf_finish(sb);
9383 sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
9385 struct adapter *sc = arg1;
9388 struct tp_tid_stats stats;
9390 rc = sysctl_wire_old_buffer(req, 0);
9394 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9398 mtx_lock(&sc->reg_lock);
9399 if (hw_off_limits(sc))
9402 t4_tp_get_tid_stats(sc, &stats, 1);
9403 mtx_unlock(&sc->reg_lock);
9405 sbuf_printf(sb, "Delete: %u\n", stats.del);
9406 sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
9407 sbuf_printf(sb, "Active: %u\n", stats.act);
9408 sbuf_printf(sb, "Passive: %u", stats.pas);
9409 rc = sbuf_finish(sb);
9416 static const char * const devlog_level_strings[] = {
9417 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
9418 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
9419 [FW_DEVLOG_LEVEL_ERR] = "ERR",
9420 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
9421 [FW_DEVLOG_LEVEL_INFO] = "INFO",
9422 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
9425 static const char * const devlog_facility_strings[] = {
9426 [FW_DEVLOG_FACILITY_CORE] = "CORE",
9427 [FW_DEVLOG_FACILITY_CF] = "CF",
9428 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
9429 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
9430 [FW_DEVLOG_FACILITY_RES] = "RES",
9431 [FW_DEVLOG_FACILITY_HW] = "HW",
9432 [FW_DEVLOG_FACILITY_FLR] = "FLR",
9433 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
9434 [FW_DEVLOG_FACILITY_PHY] = "PHY",
9435 [FW_DEVLOG_FACILITY_MAC] = "MAC",
9436 [FW_DEVLOG_FACILITY_PORT] = "PORT",
9437 [FW_DEVLOG_FACILITY_VI] = "VI",
9438 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
9439 [FW_DEVLOG_FACILITY_ACL] = "ACL",
9440 [FW_DEVLOG_FACILITY_TM] = "TM",
9441 [FW_DEVLOG_FACILITY_QFC] = "QFC",
9442 [FW_DEVLOG_FACILITY_DCB] = "DCB",
9443 [FW_DEVLOG_FACILITY_ETH] = "ETH",
9444 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
9445 [FW_DEVLOG_FACILITY_RI] = "RI",
9446 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
9447 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
9448 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
9449 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
9450 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
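/*
 * With the two tables above, a log entry of level FW_DEVLOG_LEVEL_INFO
 * and facility FW_DEVLOG_FACILITY_HW renders roughly as (illustrative
 * values, per the "%10d %15ju %8s %8s " format used below):
 *
 *	      Seq#          Tstamp    Level Facility Message
 *	        42      1234567890     INFO       HW <e->fmt with params>
 */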
9454 sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
9456 int i, j, rc, nentries, first = 0;
9457 struct devlog_params *dparams = &sc->params.devlog;
9458 struct fw_devlog_e *buf, *e;
9459 uint64_t ftstamp = UINT64_MAX;
9461 if (dparams->addr == 0)
9464 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
9465 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
9469 mtx_lock(&sc->reg_lock);
9470 if (hw_off_limits(sc))
9473 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
9475 mtx_unlock(&sc->reg_lock);
9479 nentries = dparams->size / sizeof(struct fw_devlog_e);
9480 for (i = 0; i < nentries; i++) {
9483 if (e->timestamp == 0)
9486 e->timestamp = be64toh(e->timestamp);
9487 e->seqno = be32toh(e->seqno);
9488 for (j = 0; j < 8; j++)
9489 e->params[j] = be32toh(e->params[j]);
9491 if (e->timestamp < ftstamp) {
9492 ftstamp = e->timestamp;
9497 if (buf[first].timestamp == 0)
9498 goto done; /* nothing in the log */
9500 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
9501 "Seq#", "Tstamp", "Level", "Facility", "Message");
9506 if (e->timestamp == 0)
9509 sbuf_printf(sb, "%10d %15ju %8s %8s ",
9510 e->seqno, e->timestamp,
9511 (e->level < nitems(devlog_level_strings) ?
9512 devlog_level_strings[e->level] : "UNKNOWN"),
9513 (e->facility < nitems(devlog_facility_strings) ?
9514 devlog_facility_strings[e->facility] : "UNKNOWN"));
9515 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
9516 e->params[2], e->params[3], e->params[4],
9517 e->params[5], e->params[6], e->params[7]);
9519 if (++i == nentries)
9521 } while (i != first);
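/*
 * The scan above treats the devlog as a ring buffer: the first loop
 * byte-swaps entries and finds the slot with the smallest timestamp
 * ('first'), then the print loop walks all nentries slots starting there,
 * wrapping via (++i == nentries) and skipping slots whose timestamp is
 * still 0 (never written).
 */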
9528 sysctl_devlog(SYSCTL_HANDLER_ARGS)
9530 struct adapter *sc = arg1;
9534 rc = sysctl_wire_old_buffer(req, 0);
9537 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9541 rc = sbuf_devlog(sc, sb, M_WAITOK);
9543 rc = sbuf_finish(sb);
9549 dump_devlog(struct adapter *sc)
9554 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
9555 log(LOG_DEBUG, "%s: failed to generate devlog dump.\n",
9556 device_get_nameunit(sc->dev));
9559 rc = sbuf_devlog(sc, &sb, M_WAITOK);
9561 rc = sbuf_finish(&sb);
9563 log(LOG_DEBUG, "%s: device log follows.\n%s",
9564 device_get_nameunit(sc->dev), sbuf_data(&sb));
9571 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
9573 struct adapter *sc = arg1;
9576 struct tp_fcoe_stats stats[MAX_NCHAN];
9577 int i, nchan = sc->chip_params->nchan;
9579 rc = sysctl_wire_old_buffer(req, 0);
9583 mtx_lock(&sc->reg_lock);
9584 if (hw_off_limits(sc))
9587 for (i = 0; i < nchan; i++)
9588 t4_get_fcoe_stats(sc, i, &stats[i], 1);
9590 mtx_unlock(&sc->reg_lock);
9594 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9599 sbuf_printf(sb, " channel 0 channel 1"
9600 " channel 2 channel 3");
9601 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
9602 stats[0].octets_ddp, stats[1].octets_ddp,
9603 stats[2].octets_ddp, stats[3].octets_ddp);
9604 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
9605 stats[0].frames_ddp, stats[1].frames_ddp,
9606 stats[2].frames_ddp, stats[3].frames_ddp);
9607 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
9608 stats[0].frames_drop, stats[1].frames_drop,
9609 stats[2].frames_drop, stats[3].frames_drop);
9611 sbuf_printf(sb, " channel 0 channel 1");
9612 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
9613 stats[0].octets_ddp, stats[1].octets_ddp);
9614 sbuf_printf(sb, "\nframesDDP: %16u %16u",
9615 stats[0].frames_ddp, stats[1].frames_ddp);
9616 sbuf_printf(sb, "\nframesDrop: %16u %16u",
9617 stats[0].frames_drop, stats[1].frames_drop);
9620 rc = sbuf_finish(sb);
9627 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
9629 struct adapter *sc = arg1;
9632 unsigned int map, kbps, ipg, mode;
9633 unsigned int pace_tab[NTX_SCHED];
9635 rc = sysctl_wire_old_buffer(req, 0);
9639 sb = sbuf_new_for_sysctl(NULL, NULL, 512, req);
9643 mtx_lock(&sc->reg_lock);
9644 if (hw_off_limits(sc)) {
9649 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
9650 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
9651 t4_read_pace_tbl(sc, pace_tab);
9653 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
9654 "Class IPG (0.1 ns) Flow IPG (us)");
9656 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
9657 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
9658 sbuf_printf(sb, "\n %u %-5s %u ", i,
9659 (mode & (1 << i)) ? "flow" : "class", map & 3);
9661 sbuf_printf(sb, "%9u ", kbps);
9663 sbuf_printf(sb, " disabled ");
9666 sbuf_printf(sb, "%13u ", ipg);
9668 sbuf_printf(sb, " disabled ");
9671 sbuf_printf(sb, "%10u", pace_tab[i]);
9673 sbuf_printf(sb, " disabled");
9675 rc = sbuf_finish(sb);
9677 mtx_unlock(&sc->reg_lock);
9683 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
9685 struct adapter *sc = arg1;
9689 struct lb_port_stats s[2];
9690 static const char *stat_name[] = {
9691 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
9692 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
9693 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
9694 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
9695 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
9696 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
9697 "BG2FramesTrunc:", "BG3FramesTrunc:"
9700 rc = sysctl_wire_old_buffer(req, 0);
9704 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9708 memset(s, 0, sizeof(s));
9710 for (i = 0; i < sc->chip_params->nchan; i += 2) {
9711 mtx_lock(&sc->reg_lock);
9712 if (hw_off_limits(sc))
9715 t4_get_lb_stats(sc, i, &s[0]);
9716 t4_get_lb_stats(sc, i + 1, &s[1]);
9718 mtx_unlock(&sc->reg_lock);
9724 sbuf_printf(sb, "%s Loopback %u"
9725 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
9727 for (j = 0; j < nitems(stat_name); j++)
9728 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
9732 rc = sbuf_finish(sb);
9739 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
9742 struct port_info *pi = arg1;
9743 struct link_config *lc = &pi->link_cfg;
9746 rc = sysctl_wire_old_buffer(req, 0);
9749 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
9753 if (lc->link_ok || lc->link_down_rc == 255)
9754 sbuf_printf(sb, "n/a");
9756 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
9758 rc = sbuf_finish(sb);
9771 mem_desc_cmp(const void *a, const void *b)
9773 const u_int v1 = ((const struct mem_desc *)a)->base;
9774 const u_int v2 = ((const struct mem_desc *)b)->base;
9785 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
9793 size = to - from + 1;
9797 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
9798 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
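/*
 * Example output (illustrative): a region named "TCBs:" covering
 * 0x1000000-0x1ffffff prints as
 *
 *	TCBs:           0x1000000-0x1ffffff [16777216]
 */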
9802 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
9804 struct adapter *sc = arg1;
9807 uint32_t lo, hi, used, free, alloc;
9808 static const char *memory[] = {
9809 "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
9811 static const char *region[] = {
9812 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
9813 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
9814 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
9815 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
9816 "RQUDP region:", "PBL region:", "TXPBL region:",
9817 "TLSKey region:", "DBVFIFO region:", "ULPRX state:",
9818 "ULPTX state:", "On-chip queues:",
9820 struct mem_desc avail[4];
9821 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
9822 struct mem_desc *md = mem;
9824 rc = sysctl_wire_old_buffer(req, 0);
9828 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9832 for (i = 0; i < nitems(mem); i++) {
9837 mtx_lock(&sc->reg_lock);
9838 if (hw_off_limits(sc)) {
9843 /* Find and sort the populated memory ranges */
9845 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
9846 if (lo & F_EDRAM0_ENABLE) {
9847 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
9848 avail[i].base = G_EDRAM0_BASE(hi) << 20;
9849 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
9853 if (lo & F_EDRAM1_ENABLE) {
9854 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
9855 avail[i].base = G_EDRAM1_BASE(hi) << 20;
9856 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
9860 if (lo & F_EXT_MEM_ENABLE) {
9861 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
9862 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
9863 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
9864 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
9867 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
9868 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
9869 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
9870 avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
9874 if (is_t6(sc) && lo & F_HMA_MUX) {
9875 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
9876 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
9877 avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
9881 MPASS(i <= nitems(avail));
9882 if (!i) /* no memory available */
9884 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
9886 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
9887 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
9888 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
9889 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
9890 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
9891 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
9892 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
9893 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
9894 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
9896 /* the next few have explicit upper bounds */
9897 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
9898 md->limit = md->base - 1 +
9899 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
9900 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
9903 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
9904 md->limit = md->base - 1 +
9905 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
9906 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
9909 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
9910 if (chip_id(sc) <= CHELSIO_T5)
9911 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
9913 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
9917 md->idx = nitems(region); /* hide it */
9921 #define ulp_region(reg) \
9922 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
9923 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
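/*
 * Token pasting makes each use read the matching pair of limit registers;
 * ulp_region(RX_ISCSI), for example, expands to reads of
 * A_ULP_RX_ISCSI_LLIMIT and A_ULP_RX_ISCSI_ULIMIT and advances md by one.
 */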
9925 ulp_region(RX_ISCSI);
9926 ulp_region(RX_TDDP);
9928 ulp_region(RX_STAG);
9930 ulp_region(RX_RQUDP);
9933 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
9934 ulp_region(RX_TLS_KEY);
9940 md->idx = nitems(region);
9943 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
9944 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
9947 if (sge_ctrl & F_VFIFO_ENABLE)
9948 size = fifo_size << 2;
9950 size = G_T6_DBVFIFO_SIZE(fifo_size) << 6;
9953 md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR);
9954 md->limit = md->base + size - 1;
9956 md->idx = nitems(region);
9960 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
9963 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
9967 md->base = sc->vres.ocq.start;
9968 if (sc->vres.ocq.size)
9969 md->limit = md->base + sc->vres.ocq.size - 1;
9971 md->idx = nitems(region); /* hide it */
9974 /* add any address-space holes; there can be up to 3 */
9975 for (n = 0; n < i - 1; n++)
9976 if (avail[n].limit < avail[n + 1].base)
9977 (md++)->base = avail[n].limit;
9979 (md++)->base = avail[n].limit;
9982 MPASS(n <= nitems(mem));
9983 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
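/*
 * At this point mem[] holds one entry per region base plus a synthetic
 * entry per gap between populated memories (their idx >= nitems(region)
 * marks them as holes).  Sorting by base lets the loop below derive each
 * region's limit from its successor's base.
 */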
9985 for (lo = 0; lo < i; lo++)
9986 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
9987 avail[lo].limit - 1);
9989 sbuf_printf(sb, "\n");
9990 for (i = 0; i < n; i++) {
9991 if (mem[i].idx >= nitems(region))
9992 continue; /* skip holes */
9994 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
9995 mem_region_show(sb, region[mem[i].idx], mem[i].base,
9999 sbuf_printf(sb, "\n");
10000 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
10001 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
10002 mem_region_show(sb, "uP RAM:", lo, hi);
10004 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
10005 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
10006 mem_region_show(sb, "uP Extmem2:", lo, hi);
10008 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
10009 for (i = 0, free = 0; i < 2; i++)
10010 free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
10011 sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
10012 G_PMRXMAXPAGE(lo), free,
10013 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
10014 (lo & F_PMRXNUMCHN) ? 2 : 1);
10016 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
10017 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
10018 for (i = 0, free = 0; i < 4; i++)
10019 free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
10020 sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
10021 G_PMTXMAXPAGE(lo), free,
10022 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
10023 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
10024 sbuf_printf(sb, "%u p-structs (%u free)\n",
10025 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
10026 G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));
10028 for (i = 0; i < 4; i++) {
10029 if (chip_id(sc) > CHELSIO_T5)
10030 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
10032 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
10034 used = G_T5_USED(lo);
10035 alloc = G_T5_ALLOC(lo);
10038 alloc = G_ALLOC(lo);
10040 /* For T6 these are MAC buffer groups */
10041 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
10044 for (i = 0; i < sc->chip_params->nchan; i++) {
10045 if (chip_id(sc) > CHELSIO_T5)
10046 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
10048 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
10050 used = G_T5_USED(lo);
10051 alloc = G_T5_ALLOC(lo);
10054 alloc = G_ALLOC(lo);
10056 /* For T6 these are MAC buffer groups */
10058 "\nLoopback %d using %u pages out of %u allocated",
10062 mtx_unlock(&sc->reg_lock);
10064 rc = sbuf_finish(sb);
10070 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
10074 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
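/*
 * Rough sketch of the TCAM (x, y) convention assumed here: the entry's
 * match value is recovered from y (the memcpy above extracts the 48-bit
 * MAC from the 64-bit word) while the don't-care mask is derived from x
 * and y together in the elided lines.
 */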
10078 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
10080 struct adapter *sc = arg1;
10084 MPASS(chip_id(sc) <= CHELSIO_T5);
10086 rc = sysctl_wire_old_buffer(req, 0);
10090 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10095 "Idx Ethernet address Mask Vld Ports PF"
10096 " VF Replication P0 P1 P2 P3 ML");
10097 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
10098 uint64_t tcamx, tcamy, mask;
10099 uint32_t cls_lo, cls_hi;
10100 uint8_t addr[ETHER_ADDR_LEN];
10102 mtx_lock(&sc->reg_lock);
10103 if (hw_off_limits(sc))
10106 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
10107 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
10109 mtx_unlock(&sc->reg_lock);
10114 tcamxy2valmask(tcamx, tcamy, addr, &mask);
10115 mtx_lock(&sc->reg_lock);
10116 if (hw_off_limits(sc))
10119 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
10120 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
10122 mtx_unlock(&sc->reg_lock);
10125 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
10126 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
10127 addr[3], addr[4], addr[5], (uintmax_t)mask,
10128 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
10129 G_PORTMAP(cls_hi), G_PF(cls_lo),
10130 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
10132 if (cls_lo & F_REPLICATE) {
10133 struct fw_ldst_cmd ldst_cmd;
10135 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10136 ldst_cmd.op_to_addrspace =
10137 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
10138 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10139 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
10140 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
10141 ldst_cmd.u.mps.rplc.fid_idx =
10142 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
10143 V_FW_LDST_CMD_IDX(i));
10145 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
10149 if (hw_off_limits(sc))
10152 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
10153 sizeof(ldst_cmd), &ldst_cmd);
10154 end_synchronized_op(sc, 0);
10158 sbuf_printf(sb, " %08x %08x %08x %08x",
10159 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
10160 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
10161 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
10162 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
10165 sbuf_printf(sb, "%36s", "");
10167 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
10168 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
10169 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
10173 (void) sbuf_finish(sb);
10175 rc = sbuf_finish(sb);
10182 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
10184 struct adapter *sc = arg1;
10188 MPASS(chip_id(sc) > CHELSIO_T5);
10190 rc = sysctl_wire_old_buffer(req, 0);
10194 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10198 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
10199 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
10201 " P0 P1 P2 P3 ML\n");
10203 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
10204 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
10206 uint64_t tcamx, tcamy, val, mask;
10207 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
10208 uint8_t addr[ETHER_ADDR_LEN];
10210 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
10212 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
10214 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
10215 mtx_lock(&sc->reg_lock);
10216 if (hw_off_limits(sc))
10219 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
10220 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
10221 tcamy = G_DMACH(val) << 32;
10222 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
10223 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
10225 mtx_unlock(&sc->reg_lock);
10229 lookup_type = G_DATALKPTYPE(data2);
10230 port_num = G_DATAPORTNUM(data2);
10231 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10232 /* Inner header VNI */
10233 vniy = ((data2 & F_DATAVIDH2) << 23) |
10234 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
10235 dip_hit = data2 & F_DATADIPHIT;
10240 vlan_vld = data2 & F_DATAVIDH2;
10241 ivlan = G_VIDL(val);
10244 ctl |= V_CTLXYBITSEL(1);
10245 mtx_lock(&sc->reg_lock);
10246 if (hw_off_limits(sc))
10249 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
10250 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
10251 tcamx = G_DMACH(val) << 32;
10252 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
10253 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
10255 mtx_unlock(&sc->reg_lock);
10259 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10260 /* Inner header VNI mask */
10261 vnix = ((data2 & F_DATAVIDH2) << 23) |
10262 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
10268 tcamxy2valmask(tcamx, tcamy, addr, &mask);
10270 mtx_lock(&sc->reg_lock);
10271 if (hw_off_limits(sc))
10274 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
10275 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
10277 mtx_unlock(&sc->reg_lock);
10281 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10282 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
10283 "%012jx %06x %06x - - %3c"
10284 " I %4x %3c %#x%4u%4d", i, addr[0],
10285 addr[1], addr[2], addr[3], addr[4], addr[5],
10286 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
10287 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
10288 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
10289 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
10291 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
10292 "%012jx - - ", i, addr[0], addr[1],
10293 addr[2], addr[3], addr[4], addr[5],
10297 sbuf_printf(sb, "%4u Y ", ivlan);
10299 sbuf_printf(sb, " - N ");
10301 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
10302 lookup_type ? 'I' : 'O', port_num,
10303 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
10304 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
10305 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
10309 if (cls_lo & F_T6_REPLICATE) {
10310 struct fw_ldst_cmd ldst_cmd;
10312 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10313 ldst_cmd.op_to_addrspace =
10314 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
10315 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10316 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
10317 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
10318 ldst_cmd.u.mps.rplc.fid_idx =
10319 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
10320 V_FW_LDST_CMD_IDX(i));
10322 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
10326 if (hw_off_limits(sc))
10329 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
10330 sizeof(ldst_cmd), &ldst_cmd);
10331 end_synchronized_op(sc, 0);
10335 sbuf_printf(sb, " %08x %08x %08x %08x"
10336 " %08x %08x %08x %08x",
10337 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
10338 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
10339 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
10340 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
10341 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
10342 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
10343 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
10344 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
10347 sbuf_printf(sb, "%72s", "");
10349 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
10350 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
10351 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
10352 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
10356 (void) sbuf_finish(sb);
10358 rc = sbuf_finish(sb);
10365 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
10367 struct adapter *sc = arg1;
10370 uint16_t mtus[NMTUS];
10372 rc = sysctl_wire_old_buffer(req, 0);
10376 mtx_lock(&sc->reg_lock);
10377 if (hw_off_limits(sc))
10380 t4_read_mtu_tbl(sc, mtus, NULL);
10381 mtx_unlock(&sc->reg_lock);
10385 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10389 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
10390 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
10391 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
10392 mtus[14], mtus[15]);
10394 rc = sbuf_finish(sb);
10401 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
10403 struct adapter *sc = arg1;
10406 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
10407 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
10408 static const char *tx_stats[MAX_PM_NSTATS] = {
10409 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
10410 "Tx FIFO wait", NULL, "Tx latency"
10412 static const char *rx_stats[MAX_PM_NSTATS] = {
10413 "Read:", "Write bypass:", "Write mem:", "Flush:",
10414 "Rx FIFO wait", NULL, "Rx latency"
10417 rc = sysctl_wire_old_buffer(req, 0);
10421 mtx_lock(&sc->reg_lock);
10422 if (hw_off_limits(sc))
10425 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
10426 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
10428 mtx_unlock(&sc->reg_lock);
10432 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10436 sbuf_printf(sb, " Tx pcmds Tx bytes");
10437 for (i = 0; i < 4; i++) {
10438 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10442 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
10443 for (i = 0; i < 4; i++) {
10444 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10448 if (chip_id(sc) > CHELSIO_T5) {
10450 "\n Total wait Total occupancy");
10451 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10453 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10457 MPASS(i < nitems(tx_stats));
10460 "\n Reads Total wait");
10461 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10463 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10467 rc = sbuf_finish(sb);
10474 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
10476 struct adapter *sc = arg1;
10479 struct tp_rdma_stats stats;
10481 rc = sysctl_wire_old_buffer(req, 0);
10485 mtx_lock(&sc->reg_lock);
10486 if (hw_off_limits(sc))
10489 t4_tp_get_rdma_stats(sc, &stats, 0);
10490 mtx_unlock(&sc->reg_lock);
10494 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10498 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
10499 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
10501 rc = sbuf_finish(sb);
10508 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
10510 struct adapter *sc = arg1;
10513 struct tp_tcp_stats v4, v6;
10515 rc = sysctl_wire_old_buffer(req, 0);
10519 mtx_lock(&sc->reg_lock);
10520 if (hw_off_limits(sc))
10523 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
10524 mtx_unlock(&sc->reg_lock);
10528 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10534 sbuf_printf(sb, "OutRsts: %20u %20u\n",
10535 v4.tcp_out_rsts, v6.tcp_out_rsts);
10536 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
10537 v4.tcp_in_segs, v6.tcp_in_segs);
10538 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
10539 v4.tcp_out_segs, v6.tcp_out_segs);
10540 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
10541 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
10543 rc = sbuf_finish(sb);
10550 sysctl_tids(SYSCTL_HANDLER_ARGS)
10552 struct adapter *sc = arg1;
10556 struct tid_info *t = &sc->tids;
10558 rc = sysctl_wire_old_buffer(req, 0);
10562 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10567 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
10572 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
10573 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
10577 bool hashen = false;
10579 mtx_lock(&sc->reg_lock);
10580 if (hw_off_limits(sc))
10582 else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
10584 if (chip_id(sc) <= CHELSIO_T5) {
10585 x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
10586 y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
10588 x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
10589 y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
10592 mtx_unlock(&sc->reg_lock);
10596 sbuf_printf(sb, "TID range: ");
10599 sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1);
10600 sbuf_printf(sb, "%u-%u", y, t->ntids - 1);
10602 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base +
10605 sbuf_printf(sb, ", in use: %u\n",
10606 atomic_load_acq_int(&t->tids_in_use));
10610 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
10611 t->stid_base + t->nstids - 1, t->stids_in_use);
10615 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
10616 t->ftid_end, t->ftids_in_use);
10620 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
10621 t->etid_base + t->netids - 1, t->etids_in_use);
10624 mtx_lock(&sc->reg_lock);
10625 if (hw_off_limits(sc))
10628 x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4);
10629 y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6);
10631 mtx_unlock(&sc->reg_lock);
10634 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y);
10637 rc = sbuf_finish(sb);
10639 (void)sbuf_finish(sb);
10646 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
10648 struct adapter *sc = arg1;
10651 struct tp_err_stats stats;
10653 rc = sysctl_wire_old_buffer(req, 0);
10657 mtx_lock(&sc->reg_lock);
10658 if (hw_off_limits(sc))
10661 t4_tp_get_err_stats(sc, &stats, 0);
10662 mtx_unlock(&sc->reg_lock);
10666 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10670 if (sc->chip_params->nchan > 2) {
10671 sbuf_printf(sb, " channel 0 channel 1"
10672 " channel 2 channel 3\n");
10673 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
10674 stats.mac_in_errs[0], stats.mac_in_errs[1],
10675 stats.mac_in_errs[2], stats.mac_in_errs[3]);
10676 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
10677 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
10678 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
10679 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
10680 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
10681 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
10682 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
10683 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
10684 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
10685 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
10686 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
10687 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
10688 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
10689 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
10690 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
10691 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
10692 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
10693 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
10694 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
10695 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
10696 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
10698 sbuf_printf(sb, " channel 0 channel 1\n");
10699 sbuf_printf(sb, "macInErrs: %10u %10u\n",
10700 stats.mac_in_errs[0], stats.mac_in_errs[1]);
10701 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
10702 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
10703 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
10704 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
10705 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
10706 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
10707 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
10708 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
10709 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
10710 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
10711 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
10712 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
10713 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
10714 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
10717 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
10718 stats.ofld_no_neigh, stats.ofld_cong_defer);
10720 rc = sbuf_finish(sb);
10727 sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
10729 struct adapter *sc = arg1;
10732 struct tp_tnl_stats stats;
10734 rc = sysctl_wire_old_buffer(req, 0);
10738 mtx_lock(&sc->reg_lock);
10739 if (hw_off_limits(sc))
10742 t4_tp_get_tnl_stats(sc, &stats, 1);
10743 mtx_unlock(&sc->reg_lock);
10747 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10751 if (sc->chip_params->nchan > 2) {
10752 sbuf_printf(sb, " channel 0 channel 1"
10753 " channel 2 channel 3\n");
10754 sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n",
10755 stats.out_pkt[0], stats.out_pkt[1],
10756 stats.out_pkt[2], stats.out_pkt[3]);
10757 sbuf_printf(sb, "InPkts: %10u %10u %10u %10u",
10758 stats.in_pkt[0], stats.in_pkt[1],
10759 stats.in_pkt[2], stats.in_pkt[3]);
10761 sbuf_printf(sb, " channel 0 channel 1\n");
10762 sbuf_printf(sb, "OutPkts: %10u %10u\n",
10763 stats.out_pkt[0], stats.out_pkt[1]);
10764 sbuf_printf(sb, "InPkts: %10u %10u",
10765 stats.in_pkt[0], stats.in_pkt[1]);
10768 rc = sbuf_finish(sb);
10775 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
10777 struct adapter *sc = arg1;
10778 struct tp_params *tpp = &sc->params.tp;
10782 mask = tpp->la_mask >> 16;
10783 rc = sysctl_handle_int(oidp, &mask, 0, req);
10784 if (rc != 0 || req->newptr == NULL)
10788 mtx_lock(&sc->reg_lock);
10789 if (hw_off_limits(sc))
10792 tpp->la_mask = mask << 16;
10793 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U,
10796 mtx_unlock(&sc->reg_lock);
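/*
 * Example: writing 3 to this sysctl stores 0x30000 in tpp->la_mask and in
 * the top half of A_TP_DBG_LA_CONFIG; the 0xffff0000 field mask ensures
 * the register's low 16 bits (e.g. the DBGLAMODE field read by
 * sysctl_tp_la() below) are left untouched.
 */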
10801 struct field_desc {
10808 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
10814 uint64_t mask = (1ULL << f->width) - 1;
10815 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
10816 ((uintmax_t)v >> f->start) & mask);
10818 if (line_size + len >= 79) {
10820 sbuf_printf(sb, "\n ");
10822 sbuf_printf(sb, "%s ", buf);
10823 line_size += len + 1;
10826 sbuf_printf(sb, "\n");
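/*
 * Decoding example: for f = { "State", 56, 4 }, field_desc_show() builds
 * mask = (1ULL << 4) - 1 and emits "State: <(v >> 56) & 0xf>", folding
 * output lines at roughly 79 columns.
 */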
10829 static const struct field_desc tp_la0[] = {
10830 { "RcfOpCodeOut", 60, 4 },
10831 { "State", 56, 4 },
10832 { "WcfState", 52, 4 },
10833 { "RcfOpcSrcOut", 50, 2 },
10834 { "CRxError", 49, 1 },
10835 { "ERxError", 48, 1 },
10836 { "SanityFailed", 47, 1 },
10837 { "SpuriousMsg", 46, 1 },
10838 { "FlushInputMsg", 45, 1 },
10839 { "FlushInputCpl", 44, 1 },
10840 { "RssUpBit", 43, 1 },
10841 { "RssFilterHit", 42, 1 },
10843 { "InitTcb", 31, 1 },
10844 { "LineNumber", 24, 7 },
10846 { "EdataOut", 22, 1 },
10848 { "CdataOut", 20, 1 },
10849 { "EreadPdu", 19, 1 },
10850 { "CreadPdu", 18, 1 },
10851 { "TunnelPkt", 17, 1 },
10852 { "RcfPeerFin", 16, 1 },
10853 { "RcfReasonOut", 12, 4 },
10854 { "TxCchannel", 10, 2 },
10855 { "RcfTxChannel", 8, 2 },
10856 { "RxEchannel", 6, 2 },
10857 { "RcfRxChannel", 5, 1 },
10858 { "RcfDataOutSrdy", 4, 1 },
10859 { "RxDvld", 3, 1 },
10860 { "RxOoDvld", 2, 1 },
10861 { "RxCongestion", 1, 1 },
10862 { "TxCongestion", 0, 1 },
10866 static const struct field_desc tp_la1[] = {
10867 { "CplCmdIn", 56, 8 },
10868 { "CplCmdOut", 48, 8 },
10869 { "ESynOut", 47, 1 },
10870 { "EAckOut", 46, 1 },
10871 { "EFinOut", 45, 1 },
10872 { "ERstOut", 44, 1 },
10873 { "SynIn", 43, 1 },
10874 { "AckIn", 42, 1 },
10875 { "FinIn", 41, 1 },
10876 { "RstIn", 40, 1 },
10877 { "DataIn", 39, 1 },
10878 { "DataInVld", 38, 1 },
10879 { "PadIn", 37, 1 },
10880 { "RxBufEmpty", 36, 1 },
10881 { "RxDdp", 35, 1 },
10882 { "RxFbCongestion", 34, 1 },
10883 { "TxFbCongestion", 33, 1 },
10884 { "TxPktSumSrdy", 32, 1 },
10885 { "RcfUlpType", 28, 4 },
10886 { "Eread", 27, 1 },
10887 { "Ebypass", 26, 1 },
10888 { "Esave", 25, 1 },
10889 { "Static0", 24, 1 },
10890 { "Cread", 23, 1 },
10891 { "Cbypass", 22, 1 },
10892 { "Csave", 21, 1 },
10893 { "CPktOut", 20, 1 },
10894 { "RxPagePoolFull", 18, 2 },
10895 { "RxLpbkPkt", 17, 1 },
10896 { "TxLpbkPkt", 16, 1 },
10897 { "RxVfValid", 15, 1 },
10898 { "SynLearned", 14, 1 },
10899 { "SetDelEntry", 13, 1 },
10900 { "SetInvEntry", 12, 1 },
10901 { "CpcmdDvld", 11, 1 },
10902 { "CpcmdSave", 10, 1 },
10903 { "RxPstructsFull", 8, 2 },
10904 { "EpcmdDvld", 7, 1 },
10905 { "EpcmdFlush", 6, 1 },
10906 { "EpcmdTrimPrefix", 5, 1 },
10907 { "EpcmdTrimPostfix", 4, 1 },
10908 { "ERssIp4Pkt", 3, 1 },
10909 { "ERssIp6Pkt", 2, 1 },
10910 { "ERssTcpUdpPkt", 1, 1 },
10911 { "ERssFceFipPkt", 0, 1 },
10915 static const struct field_desc tp_la2[] = {
10916 { "CplCmdIn", 56, 8 },
10917 { "MpsVfVld", 55, 1 },
10918 { "MpsPf", 52, 3 },
10919 { "MpsVf", 44, 8 },
10920 { "SynIn", 43, 1 },
10921 { "AckIn", 42, 1 },
10922 { "FinIn", 41, 1 },
10923 { "RstIn", 40, 1 },
10924 { "DataIn", 39, 1 },
10925 { "DataInVld", 38, 1 },
10926 { "PadIn", 37, 1 },
10927 { "RxBufEmpty", 36, 1 },
10928 { "RxDdp", 35, 1 },
10929 { "RxFbCongestion", 34, 1 },
10930 { "TxFbCongestion", 33, 1 },
10931 { "TxPktSumSrdy", 32, 1 },
10932 { "RcfUlpType", 28, 4 },
10933 { "Eread", 27, 1 },
10934 { "Ebypass", 26, 1 },
10935 { "Esave", 25, 1 },
10936 { "Static0", 24, 1 },
10937 { "Cread", 23, 1 },
10938 { "Cbypass", 22, 1 },
10939 { "Csave", 21, 1 },
10940 { "CPktOut", 20, 1 },
10941 { "RxPagePoolFull", 18, 2 },
10942 { "RxLpbkPkt", 17, 1 },
10943 { "TxLpbkPkt", 16, 1 },
10944 { "RxVfValid", 15, 1 },
10945 { "SynLearned", 14, 1 },
10946 { "SetDelEntry", 13, 1 },
10947 { "SetInvEntry", 12, 1 },
10948 { "CpcmdDvld", 11, 1 },
10949 { "CpcmdSave", 10, 1 },
10950 { "RxPstructsFull", 8, 2 },
10951 { "EpcmdDvld", 7, 1 },
10952 { "EpcmdFlush", 6, 1 },
10953 { "EpcmdTrimPrefix", 5, 1 },
10954 { "EpcmdTrimPostfix", 4, 1 },
10955 { "ERssIp4Pkt", 3, 1 },
10956 { "ERssIp6Pkt", 2, 1 },
10957 { "ERssTcpUdpPkt", 1, 1 },
10958 { "ERssFceFipPkt", 0, 1 },
10963 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
10966 field_desc_show(sb, *p, tp_la0);
10970 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
10974 sbuf_printf(sb, "\n");
10975 field_desc_show(sb, p[0], tp_la0);
10976 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
10977 field_desc_show(sb, p[1], tp_la0);
10981 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
10985 sbuf_printf(sb, "\n");
10986 field_desc_show(sb, p[0], tp_la0);
10987 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
10988 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
10992 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
10994 struct adapter *sc = arg1;
10999 void (*show_func)(struct sbuf *, uint64_t *, int);
11001 rc = sysctl_wire_old_buffer(req, 0);
11005 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11009 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
11011 mtx_lock(&sc->reg_lock);
11012 if (hw_off_limits(sc))
11015 t4_tp_read_la(sc, buf, NULL);
11016 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
11019 show_func = tp_la_show2;
11023 show_func = tp_la_show3;
11027 show_func = tp_la_show;
11030 mtx_unlock(&sc->reg_lock);
11035 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
11036 (*show_func)(sb, p, i);
11037 rc = sbuf_finish(sb);
11040 free(buf, M_CXGBE);
11045 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
11047 struct adapter *sc = arg1;
11050 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
11052 rc = sysctl_wire_old_buffer(req, 0);
11056 mtx_lock(&sc->reg_lock);
11057 if (hw_off_limits(sc))
11060 t4_get_chan_txrate(sc, nrate, orate);
11061 mtx_unlock(&sc->reg_lock);
11065 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
11069 if (sc->chip_params->nchan > 2) {
11070 sbuf_printf(sb, " channel 0 channel 1"
11071 " channel 2 channel 3\n");
11072 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
11073 nrate[0], nrate[1], nrate[2], nrate[3]);
11074 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
11075 orate[0], orate[1], orate[2], orate[3]);
11077 sbuf_printf(sb, " channel 0 channel 1\n");
11078 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
11079 nrate[0], nrate[1]);
11080 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
11081 orate[0], orate[1]);
11084 rc = sbuf_finish(sb);
11091 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
11093 struct adapter *sc = arg1;
11098 rc = sysctl_wire_old_buffer(req, 0);
11102 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11106 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
11107 M_ZERO | M_WAITOK);
11109 mtx_lock(&sc->reg_lock);
11110 if (hw_off_limits(sc))
11113 t4_ulprx_read_la(sc, buf);
11114 mtx_unlock(&sc->reg_lock);
11119 sbuf_printf(sb, " Pcmd Type Message"
11121 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
11122 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
11123 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
11125 rc = sbuf_finish(sb);
11128 free(buf, M_CXGBE);
11133 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
11135 struct adapter *sc = arg1;
11138 uint32_t cfg, s1, s2;
11140 MPASS(chip_id(sc) >= CHELSIO_T5);
11142 rc = sysctl_wire_old_buffer(req, 0);
11146 mtx_lock(&sc->reg_lock);
11147 if (hw_off_limits(sc))
11150 cfg = t4_read_reg(sc, A_SGE_STAT_CFG);
11151 s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL);
11152 s2 = t4_read_reg(sc, A_SGE_STAT_MATCH);
11154 mtx_unlock(&sc->reg_lock);
11158 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11162 if (G_STATSOURCE_T5(cfg) == 7) {
11165 mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg);
11167 sbuf_printf(sb, "total %d, incomplete %d", s1, s2);
11168 else if (mode == 1)
11169 sbuf_printf(sb, "total %d, data overflow %d", s1, s2);
11171 sbuf_printf(sb, "unknown mode %d", mode);
11173 rc = sbuf_finish(sb);
11180 sysctl_cpus(SYSCTL_HANDLER_ARGS)
11182 struct adapter *sc = arg1;
11183 enum cpu_sets op = arg2;
11188 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
11191 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
11195 rc = sysctl_wire_old_buffer(req, 0);
11199 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11204 sbuf_printf(sb, "%d ", i);
11205 rc = sbuf_finish(sb);
11212 sysctl_reset(SYSCTL_HANDLER_ARGS)
11214 struct adapter *sc = arg1;
11218 val = atomic_load_int(&sc->num_resets);
11219 rc = sysctl_handle_int(oidp, &val, 0, req);
11220 if (rc != 0 || req->newptr == NULL)
11224 /* Zero out the counter that tracks resets. */
11225 atomic_store_int(&sc->num_resets, 0);
11230 return (EINVAL); /* 0 or 1 are the only legal values */
11232 if (hw_off_limits(sc)) /* harmless race */
11235 taskqueue_enqueue(reset_tq, &sc->reset_task);
11241 sysctl_tls(SYSCTL_HANDLER_ARGS)
11243 struct adapter *sc = arg1;
11245 struct vi_info *vi;
11248 rc = sysctl_handle_int(oidp, &v, 0, req);
11249 if (rc != 0 || req->newptr == NULL)
11252 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
11255 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls");
11258 if (hw_off_limits(sc))
11262 for_each_port(sc, i) {
11263 for_each_vi(sc->port[i], j, vi) {
11264 if (vi->flags & VI_INIT_DONE)
11265 t4_update_fl_bufsize(vi->ifp);
11269 end_synchronized_op(sc, 0);
11276 unit_conv(char *buf, size_t len, u_int val, u_int factor)
11278 u_int rem = val % factor;
11281 snprintf(buf, len, "%u", val / factor);
11283 while (rem % 10 == 0)
11285 snprintf(buf, len, "%u.%u", val / factor, rem);
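/*
 * Examples (relying on the elided else branch that strips trailing zeroes
 * from rem): unit_conv(buf, len, 4000, 1000) yields "4" and
 * unit_conv(buf, len, 4500, 1000) yields "4.5".
 */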
11290 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
11292 struct adapter *sc = arg1;
11295 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11297 mtx_lock(&sc->reg_lock);
11298 if (hw_off_limits(sc))
11301 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
11302 mtx_unlock(&sc->reg_lock);
11303 if (res == (u_int)-1)
11309 re = G_TIMERRESOLUTION(res);
11312 /* TCP timestamp tick */
11313 re = G_TIMESTAMPRESOLUTION(res);
11317 re = G_DELAYEDACKRESOLUTION(res);
11323 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
11325 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
11329 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
11331 struct adapter *sc = arg1;
11333 u_int dack_tmr, dack_re, v;
11334 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11336 mtx_lock(&sc->reg_lock);
11337 if (hw_off_limits(sc))
11341 dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc,
11342 A_TP_TIMER_RESOLUTION));
11343 dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER);
11345 mtx_unlock(&sc->reg_lock);
11349 v = ((cclk_ps << dack_re) / 1000000) * dack_tmr;
11351 return (sysctl_handle_int(oidp, &v, 0, req));
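/*
 * Units in the conversion above (assuming vpd.cclk is in kHz): cclk_ps is
 * the core-clock period in picoseconds, (cclk_ps << dack_re) is one
 * delayed-ACK tick in ps, dividing by 10^6 gives microseconds per tick,
 * and multiplying by the A_TP_DACK_TIMER count gives the delayed-ACK
 * timer in microseconds.
 */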
11355 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
11357 struct adapter *sc = arg1;
11358 int rc, reg = arg2;
11360 u_long tp_tick_us, v;
11361 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11363 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
11364 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
11365 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
11366 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
11368 mtx_lock(&sc->reg_lock);
11369 if (hw_off_limits(sc))
11373 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
11374 tp_tick_us = (cclk_ps << tre) / 1000000;
11375 if (reg == A_TP_INIT_SRTT)
11376 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
11378 v = tp_tick_us * t4_read_reg(sc, reg);
11380 mtx_unlock(&sc->reg_lock);
11384 return (sysctl_handle_long(oidp, &v, 0, req));
11388 * All fields in TP_SHIFT_CNT are 4 bits wide; the field's starting bit
11389 * position is passed to this function in arg2.
11392 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
11394 struct adapter *sc = arg1;
11395 int rc, idx = arg2;
11398 MPASS(idx >= 0 && idx <= 24);
11400 mtx_lock(&sc->reg_lock);
11401 if (hw_off_limits(sc))
11405 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
11407 mtx_unlock(&sc->reg_lock);
11411 return (sysctl_handle_int(oidp, &v, 0, req));
11415 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
11417 struct adapter *sc = arg1;
11418 int rc, idx = arg2;
11421 MPASS(idx >= 0 && idx < 16);
11423 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
11424 shift = (idx & 3) << 3;
11425 mtx_lock(&sc->reg_lock);
11426 if (hw_off_limits(sc))
11430 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
11432 mtx_unlock(&sc->reg_lock);
11436 return (sysctl_handle_int(oidp, &v, 0, req));
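/*
 * Worked example of the indexing above: idx = 5 gives r = REG0 + 4 (the
 * second 32-bit backoff register, assuming the four registers are
 * consecutive) and shift = 8, so backoff index 5 lives in byte 1 of that
 * register.
 */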
11440 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
11442 struct vi_info *vi = arg1;
11443 struct adapter *sc = vi->adapter;
11445 struct sge_ofld_rxq *ofld_rxq;
11448 idx = vi->ofld_tmr_idx;
11450 rc = sysctl_handle_int(oidp, &idx, 0, req);
11451 if (rc != 0 || req->newptr == NULL)
11454 if (idx < 0 || idx >= SGE_NTIMERS)
11457 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
11462 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
11463 for_each_ofld_rxq(vi, i, ofld_rxq) {
11464 #ifdef atomic_store_rel_8
11465 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
11467 ofld_rxq->iq.intr_params = v;
11470 vi->ofld_tmr_idx = idx;
11472 end_synchronized_op(sc, LOCK_HELD);
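/*
 * intr_params encoding used above: V_QINTR_TIMER_IDX(idx) selects one of
 * the SGE_NTIMERS holdoff timers and V_QINTR_CNT_EN() also arms the
 * packet-count threshold when a pktc index is configured.  The 8-bit
 * value is published with a release store where the platform provides
 * atomic_store_rel_8 so the rx path sees a consistent update.
 */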
11477 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
11479 struct vi_info *vi = arg1;
11480 struct adapter *sc = vi->adapter;
11483 idx = vi->ofld_pktc_idx;
11485 rc = sysctl_handle_int(oidp, &idx, 0, req);
11486 if (rc != 0 || req->newptr == NULL)
11489 if (idx < -1 || idx >= SGE_NCOUNTERS)
11492 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
11497 if (vi->flags & VI_INIT_DONE)
11498 rc = EBUSY; /* cannot be changed once the queues are created */
11500 vi->ofld_pktc_idx = idx;
11502 end_synchronized_op(sc, LOCK_HELD);
11508 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
11512 if (cntxt->cid > M_CTXTQID)
11515 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
11516 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
11519 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
11523 if (hw_off_limits(sc)) {
11528 if (sc->flags & FW_OK) {
11529 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
11536 * Read via firmware failed or wasn't even attempted. Read directly via
11539 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
11541 end_synchronized_op(sc, 0);
11546 load_fw(struct adapter *sc, struct t4_data *fw)
11551 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
11555 if (hw_off_limits(sc)) {
11561 * The firmware, with the sole exception of the memory parity error
11562 * handler, runs from memory and not flash. It is almost always safe to
11563 * install new firmware on a running system. Just set bit 1 in
11564 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
11566 if (sc->flags & FULL_INIT_DONE &&
11567 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
11572 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
11574 rc = copyin(fw->data, fw_data, fw->len);
11576 rc = -t4_load_fw(sc, fw_data, fw->len);
11578 free(fw_data, M_CXGBE);
11580 end_synchronized_op(sc, 0);
11585 load_cfg(struct adapter *sc, struct t4_data *cfg)
11588 uint8_t *cfg_data = NULL;
11590 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
11594 if (hw_off_limits(sc)) {
11599 if (cfg->len == 0) {
11601 rc = -t4_load_cfg(sc, NULL, 0);
11605 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
11607 rc = copyin(cfg->data, cfg_data, cfg->len);
11609 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
11611 free(cfg_data, M_CXGBE);
11613 end_synchronized_op(sc, 0);
11618 load_boot(struct adapter *sc, struct t4_bootrom *br)
11621 uint8_t *br_data = NULL;
11624 if (br->len > 1024 * 1024)
11627 if (br->pf_offset == 0) {
11629 if (br->pfidx_addr > 7)
11631 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
11632 A_PCIE_PF_EXPROM_OFST)));
11633 } else if (br->pf_offset == 1) {
11635 offset = G_OFFSET(br->pfidx_addr);
11640 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
11644 if (hw_off_limits(sc)) {
11649 if (br->len == 0) {
11651 rc = -t4_load_boot(sc, NULL, offset, 0);
11655 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
11657 rc = copyin(br->data, br_data, br->len);
11659 rc = -t4_load_boot(sc, br_data, offset, br->len);
11661 free(br_data, M_CXGBE);
11663 end_synchronized_op(sc, 0);
11668 load_bootcfg(struct adapter *sc, struct t4_data *bc)
11671 uint8_t *bc_data = NULL;
11673 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
11677 if (hw_off_limits(sc)) {
11682 if (bc->len == 0) {
11684 rc = -t4_load_bootcfg(sc, NULL, 0);
11688 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
11690 rc = copyin(bc->data, bc_data, bc->len);
11692 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
11694 free(bc_data, M_CXGBE);
11696 end_synchronized_op(sc, 0);
11701 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
11704 struct cudbg_init *cudbg;
11705 void *handle, *buf;
11707 /* buf can be large; don't block if no memory is available */
11708 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
11712 handle = cudbg_alloc_handle();
11713 if (handle == NULL) {
11718 cudbg = cudbg_get_init(handle);
11720 cudbg->print = (cudbg_print_cb)printf;
11723 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
11724 __func__, dump->wr_flash, dump->len, dump->data);
11727 if (dump->wr_flash)
11728 cudbg->use_flash = 1;
11729 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
11730 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
11732 rc = cudbg_collect(handle, buf, &dump->len);
11736 rc = copyout(buf, dump->data, dump->len);
11738 cudbg_free_handle(handle);
11739 free(buf, M_CXGBE);
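/*
 * Editor's sketch: collecting a cudbg dump through CHELSIO_T4_CUDBG_DUMP.
 * struct t4_cudbg_dump and the ioctl macro come from the driver's
 * t4_ioctl.h (an assumption); the all-ones bitmap ("collect everything")
 * and the 32MB buffer size are illustrative guesses, not values dictated
 * by this file.  On success the driver rewrites d.len in place with the
 * number of bytes actually collected.
 *
 *	struct t4_cudbg_dump d;
 *	void *buf;
 *
 *	memset(&d, 0, sizeof(d));
 *	memset(d.bitmap, 0xff, sizeof(d.bitmap));	// assumed: all entities
 *	d.len = 32 * 1024 * 1024;
 *	buf = calloc(1, d.len);
 *	d.data = buf;
 *	d.wr_flash = 0;			// don't write the dump to flash
 *	if (ioctl(fd, CHELSIO_T4_CUDBG_DUMP, &d) == 0)
 *		printf("collected %u bytes\n", d.len);
 */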
free_offload_policy(struct t4_offload_policy *op)

	struct offload_rule *r;

	for (i = 0; i < op->nrules; i++, r++) {
		free(r->bpf_prog.bf_insns, M_CXGBE);

	free(op->rule, M_CXGBE);

set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)

	struct t4_offload_policy *op, *old;
	struct bpf_program *bf;
	const struct offload_settings *s;
	struct offload_rule *r;

	if (!is_offload(sc))

	if (uop->nrules == 0) {
		/* Delete installed policies. */

	} else if (uop->nrules > 256) { /* arbitrary */

	/* Copy userspace offload policy to kernel */
	op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
	op->nrules = uop->nrules;
	len = op->nrules * sizeof(struct offload_rule);
	op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
	rc = copyin(uop->rule, op->rule, len);
	if (rc) {
		free(op->rule, M_CXGBE);
		free(op, M_CXGBE);
		return (rc);
	}

	for (i = 0; i < op->nrules; i++, r++) {

		/* Validate open_type */
		if (r->open_type != OPEN_TYPE_LISTEN &&
		    r->open_type != OPEN_TYPE_ACTIVE &&
		    r->open_type != OPEN_TYPE_PASSIVE &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			/*
			 * Rules 0 to i have malloc'd filters that need to be
			 * freed.  Rules i+1 to nrules have userspace pointers
			 * and should be left alone.
			 */
			free_offload_policy(op);

		/* Validate settings */
		if ((s->offload != 0 && s->offload != 1) ||
		    s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
		    s->sched_class < -1 ||
		    s->sched_class >= sc->params.nsched_cls) {

		u = bf->bf_insns;	/* userspace ptr */
		bf->bf_insns = NULL;
		if (bf->bf_len == 0) {
			/* legal, matches everything */
			continue;
		}
		len = bf->bf_len * sizeof(*bf->bf_insns);
		bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
		rc = copyin(u, bf->bf_insns, len);

		if (!bpf_validate(bf->bf_insns, bf->bf_len)) {

	rw_wlock(&sc->policy_lock);
	old = sc->policy;
	sc->policy = op;
	rw_wunlock(&sc->policy_lock);
	free_offload_policy(old);
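/*
 * Editor's sketch: per the nrules == 0 branch above, an empty policy
 * deletes whatever is installed.  CHELSIO_T4_SET_OFLD_POLICY and struct
 * t4_offload_policy are from the driver's t4_ioctl.h (assumed available).
 *
 *	struct t4_offload_policy op;
 *
 *	memset(&op, 0, sizeof(op));	// nrules == 0, rule == NULL
 *	if (ioctl(fd, CHELSIO_T4_SET_OFLD_POLICY, &op) == -1)
 *		err(1, "delete offload policy");
 */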
#define MAX_READ_BUF_SIZE (128 * 1024)

read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)

	uint32_t addr, remaining, n;

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		rc = ENXIO;
	else
		rc = validate_mem_range(sc, mr->addr, mr->len);
	mtx_unlock(&sc->reg_lock);

	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);

	remaining = mr->len;
	addr = mr->addr;
	dst = (void *)mr->data;

	while (remaining) {
		n = min(remaining, MAX_READ_BUF_SIZE);
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else
			read_via_memwin(sc, 2, addr, buf, n);
		mtx_unlock(&sc->reg_lock);

		rc = copyout(buf, dst, n);

	free(buf, M_CXGBE);

#undef MAX_READ_BUF_SIZE
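/*
 * Editor's sketch: reading adapter memory with CHELSIO_T4_GET_MEM.  The
 * driver validates {addr, len} against the card's memory map and copies
 * the result out in chunks of at most 128KB, so one ioctl can cover an
 * arbitrarily large (valid) range.  Struct/macro names are from the
 * driver's t4_ioctl.h (assumed); the address below is illustrative and
 * must pass validate_mem_range() on real hardware.
 *
 *	struct t4_mem_range mr;
 *
 *	mr.addr = 0;
 *	mr.len = 4096;
 *	mr.data = malloc(mr.len);
 *	if (ioctl(fd, CHELSIO_T4_GET_MEM, &mr) == -1)
 *		err(1, "CHELSIO_T4_GET_MEM");
 */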
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)

	if (i2cd->len > sizeof(i2cd->data))

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");

	if (hw_off_limits(sc))

	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);
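/*
 * Editor's sketch: dumping the first bytes of a transceiver's i2c space
 * via CHELSIO_T4_GET_I2C.  0xa0 is the conventional SFF-8472 i2c address,
 * an assumption rather than anything this file dictates; len is capped at
 * sizeof(i2cd->data) by the checks above.
 *
 *	struct t4_i2c_data i2cd;
 *
 *	memset(&i2cd, 0, sizeof(i2cd));
 *	i2cd.port_id = 0;
 *	i2cd.dev_addr = 0xa0;	// assumed: SFF-8472 module address
 *	i2cd.offset = 0;
 *	i2cd.len = 8;
 *	if (ioctl(fd, CHELSIO_T4_GET_I2C, &i2cd) == 0)
 *		for (int i = 0; i < i2cd.len; i++)
 *			printf("%02x ", i2cd.data[i]);
 */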
clear_stats(struct adapter *sc, u_int port_id)

	int i, v, chan_map;
	struct port_info *pi;
	struct vi_info *vi;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
	struct sge_wrq *wrq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_ofld_txq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	if (port_id >= sc->params.nports)
		return (EINVAL);
	pi = sc->port[port_id];

	mtx_lock(&sc->reg_lock);
	if (!hw_off_limits(sc)) {
		t4_clr_port_stats(sc, pi->tx_chan);

		if (pi->fcs_reg != -1)
			pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);

		pi->stats.rx_fcs_err = 0;

		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->vin);
		}
		chan_map = pi->rx_e_chan_map;
		while (chan_map) {
			i = ffs(chan_map) - 1;
			t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
			chan_map &= ~(1 << i);
		}
	}
	mtx_unlock(&sc->reg_lock);
	pi->tx_parse_error = 0;
	pi->tnl_cong_drops = 0;

	/*
	 * Since this command accepts a port, clear stats for
	 * all VIs on this port.
	 */
	for_each_vi(pi, v, vi) {
		if (vi->flags & VI_INIT_DONE) {
			for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->vlan_extraction = 0;
				rxq->vxlan_rxcsum = 0;

				rxq->fl.cl_allocated = 0;
				rxq->fl.cl_recycled = 0;
				rxq->fl.cl_fast_recycled = 0;
			}
			for_each_txq(vi, i, txq) {
				txq->vlan_insertion = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts0_wrs = 0;
				txq->txpkts1_wrs = 0;
				txq->txpkts0_pkts = 0;
				txq->txpkts1_pkts = 0;
				txq->txpkts_flush = 0;
				txq->vxlan_tso_wrs = 0;
				txq->vxlan_txcsum = 0;
				txq->kern_tls_records = 0;
				txq->kern_tls_short = 0;
				txq->kern_tls_partial = 0;
				txq->kern_tls_full = 0;
				txq->kern_tls_octets = 0;
				txq->kern_tls_waste = 0;
				txq->kern_tls_options = 0;
				txq->kern_tls_header = 0;
				txq->kern_tls_fin = 0;
				txq->kern_tls_fin_short = 0;
				txq->kern_tls_cbc = 0;
				txq->kern_tls_gcm = 0;
				mp_ring_reset_stats(txq->r);
			}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			for_each_ofld_txq(vi, i, ofld_txq) {
				ofld_txq->wrq.tx_wrs_direct = 0;
				ofld_txq->wrq.tx_wrs_copied = 0;
				counter_u64_zero(ofld_txq->tx_iscsi_pdus);
				counter_u64_zero(ofld_txq->tx_iscsi_octets);
				counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs);
				counter_u64_zero(ofld_txq->tx_toe_tls_records);
				counter_u64_zero(ofld_txq->tx_toe_tls_octets);
			}
#endif
#ifdef TCP_OFFLOAD
			for_each_ofld_rxq(vi, i, ofld_rxq) {
				ofld_rxq->fl.cl_allocated = 0;
				ofld_rxq->fl.cl_recycled = 0;
				ofld_rxq->fl.cl_fast_recycled = 0;
				counter_u64_zero(
				    ofld_rxq->rx_iscsi_ddp_setup_ok);
				counter_u64_zero(
				    ofld_rxq->rx_iscsi_ddp_setup_error);
				ofld_rxq->rx_iscsi_ddp_pdus = 0;
				ofld_rxq->rx_iscsi_ddp_octets = 0;
				ofld_rxq->rx_iscsi_fl_pdus = 0;
				ofld_rxq->rx_iscsi_fl_octets = 0;
				ofld_rxq->rx_toe_tls_records = 0;
				ofld_rxq->rx_toe_tls_octets = 0;
			}
#endif

			if (IS_MAIN_VI(vi)) {
				wrq = &sc->sge.ctrlq[pi->port_id];
				wrq->tx_wrs_direct = 0;
				wrq->tx_wrs_copied = 0;
			}
		}
	}

	return (0);
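/*
 * Editor's sketch: CHELSIO_T4_CLEAR_STATS takes a bare port index; the
 * ioctl dispatch later in this file passes *(uint32_t *)data straight to
 * clear_stats() above.
 *
 *	uint32_t port_id = 0;
 *
 *	if (ioctl(fd, CHELSIO_T4_CLEAR_STATS, &port_id) == -1)
 *		err(1, "CHELSIO_T4_CLEAR_STATS");
 */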
hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)

	struct in6_addr in6;

	bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr));
	if (t4_get_clip_entry(sc, &in6, true) != NULL)

release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)

	struct in6_addr in6;

	bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr));
	return (t4_release_clip_addr(sc, &in6));
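/*
 * Editor's sketch: holding and releasing a CLIP (compressed local IP)
 * entry from userspace.  ca.addr must hold the 16 raw bytes of the IPv6
 * address, matching the bcopy() into in6.s6_addr above; 2001:db8::1 is a
 * documentation address used purely as an example.
 *
 *	#include <arpa/inet.h>
 *
 *	struct t4_clip_addr ca;
 *
 *	memset(&ca, 0, sizeof(ca));
 *	inet_pton(AF_INET6, "2001:db8::1", &ca.addr[0]);
 *	if (ioctl(fd, CHELSIO_T4_HOLD_CLIP_ADDR, &ca) == -1)
 *		err(1, "hold clip");
 *	// ... later, drop the reference:
 *	if (ioctl(fd, CHELSIO_T4_RELEASE_CLIP_ADDR, &ca) == -1)
 *		err(1, "release clip");
 */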
t4_os_find_pci_capability(struct adapter *sc, int cap)

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);

t4_os_pci_save_state(struct adapter *sc)

	device_t dev = sc->dev;
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);

t4_os_pci_restore_state(struct adapter *sc)

	device_t dev = sc->dev;
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);

t4_os_portmod_changed(struct port_info *pi)

	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
	    ("%s: port_type %u", __func__, pi->port_type));

	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
		build_medialist(pi);
		if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
			fixup_link_config(pi);
			apply_link_config(pi);
		}
		end_synchronized_op(sc, LOCK_HELD);
	}

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
		    port_top_speed(pi), mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}

t4_os_link_changed(struct port_info *pi)

	struct vi_info *vi;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *sc = pi->adapter;

	PORT_LOCK_ASSERT_OWNED(pi);

	if (lc->speed > 25000 ||
	    (lc->speed == 25000 && lc->fec == FEC_RS)) {
		pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
		    A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
	} else {
		pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
		    A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
	}
	pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
	pi->stats.rx_fcs_err = 0;

	MPASS(pi->fcs_reg != -1);
	MPASS(pi->fcs_base == 0);

	for_each_vi(pi, v, vi) {
		if (lc->link_ok) {
			if_setbaudrate(ifp, IF_Mbps(lc->speed));
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}

t4_iterate(void (*func)(struct adapter *, void *), void *arg)

	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);

t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)

	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			rc = EINVAL;
		mtx_unlock(&sc->reg_lock);
		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				rc = EINVAL;
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			rc = EINVAL;
		mtx_unlock(&sc->reg_lock);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = t4_get_regs_len(sc);
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		mtx_lock(&sc->reg_lock);
		if (hw_off_limits(sc))
			rc = ENXIO;
		else
			get_regs(sc, regs, buf);
		mtx_unlock(&sc->reg_lock);
		if (rc == 0)
			rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MASK:
		rc = set_filter_mask(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS:
		rc = clear_stats(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_SCHED_CLASS:
		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_LOAD_CFG:
		rc = load_cfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_LOAD_BOOT:
		rc = load_boot(sc, (struct t4_bootrom *)data);
		break;
	case CHELSIO_T4_LOAD_BOOTCFG:
		rc = load_bootcfg(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_CUDBG_DUMP:
		rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
		break;
	case CHELSIO_T4_SET_OFLD_POLICY:
		rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
		break;
	case CHELSIO_T4_HOLD_CLIP_ADDR:
		rc = hold_clip_addr(sc, (struct t4_clip_addr *)data);
		break;
	case CHELSIO_T4_RELEASE_CLIP_ADDR:
		rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
		break;
	default:
		rc = ENOTTY;
	}

	return (rc);
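/*
 * Editor's example (not part of the driver): driving the register ioctls
 * above from userspace.  CHELSIO_T4_GETREG and struct t4_reg come from the
 * driver's t4_ioctl.h; the /dev/t4nex0 node name is an assumption based on
 * the nexus device name, and root privilege is required because of the
 * priv_check(PRIV_DRIVER) above.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <err.h>
 *	#include "t4_ioctl.h"	// from the driver sources (assumption)
 *
 *	int
 *	main(void)
 *	{
 *		struct t4_reg reg;
 *		int fd;
 *
 *		if ((fd = open("/dev/t4nex0", O_RDWR)) == -1)
 *			err(1, "open");
 *		reg.addr = 0x0;	// must be 4-byte aligned and within mmio_len
 *		reg.size = 4;	// 4 or 8, per the GETREG case above
 *		if (ioctl(fd, CHELSIO_T4_GETREG, &reg) == -1)
 *			err(1, "CHELSIO_T4_GETREG");
 *		printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 *		return (0);
 *	}
 */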
toe_capability(struct vi_info *vi, bool enable)

	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);
	if (hw_off_limits(sc))
		return (ENXIO);

	if (sc->flags & KERN_TLS_ON && is_t6(sc)) {
		struct port_info *p;

		/*
		 * Reconfigure hardware for TOE if TXTLS is not enabled
		 * on any interface on this adapter.
		 */
		for_each_port(sc, i) {
			p = sc->port[i];
			for_each_vi(p, j, v) {
				if (if_getcapenable(v->ifp) & IFCAP_TXTLS) {
					CH_WARN(sc,
					    "%s has NIC TLS enabled.\n",
					    device_get_nameunit(v->dev));
				}
			}
		}

		CH_WARN(sc, "Disable NIC TLS on all interfaces "
		    "associated with this adapter before "
		    "trying to enable TOE.\n");

		rc = t6_config_kern_tls(sc, false);
	}

	if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) {
		/* TOE is already enabled. */
		return (0);
	}

	/*
	 * We need the port's queues around so that we're able to send
	 * and receive CPLs to/from the TOE even if the ifnet for this
	 * port has never been UP'd administratively.
	 */
	if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
		return (rc);
	if (!(pi->vi[0].flags & VI_INIT_DONE) &&
	    ((rc = vi_init(&pi->vi[0])) != 0))
		return (rc);

	if (isset(&sc->offload_map, pi->port_id)) {
		/* TOE is enabled on another VI of this port. */

	if (!uld_active(sc, ULD_TOM)) {
		rc = t4_activate_uld(sc, ULD_TOM);
		if (rc == EAGAIN) {
			CH_WARN(sc,
			    "You must kldload t4_tom.ko before trying "
			    "to enable TOE on a cxgbe interface.\n");
		}
		if (rc != 0)
			return (rc);
		KASSERT(sc->tom_softc != NULL,
		    ("%s: TOM activated but softc NULL", __func__));
		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM activated but flag not set", __func__));
	}

	/* Activate iWARP and iSCSI too, if the modules are loaded. */
	if (!uld_active(sc, ULD_IWARP))
		(void) t4_activate_uld(sc, ULD_IWARP);
	if (!uld_active(sc, ULD_ISCSI))
		(void) t4_activate_uld(sc, ULD_ISCSI);

	setbit(&sc->offload_map, pi->port_id);

	if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
		return (0);

	KASSERT(uld_active(sc, ULD_TOM),
	    ("%s: TOM never initialized?", __func__));
	clrbit(&sc->offload_map, pi->port_id);
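/*
 * Editor's example: toe_capability() is reached when userspace toggles
 * IFCAP_TOE on a port's ifnet, e.g. "ifconfig cxgbe0 toe".  The same thing
 * done programmatically with the stock FreeBSD capability ioctls (nothing
 * driver-specific below; only the interface name is an assumption):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "cxgbe0", sizeof(ifr.ifr_name));
 *	if (ioctl(s, SIOCGIFCAP, &ifr) == 0) {
 *		ifr.ifr_reqcap = ifr.ifr_curcap | IFCAP_TOE;
 *		ioctl(s, SIOCSIFCAP, &ifr);	// ends up in toe_capability()
 *	}
 */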
/*
 * Add an upper layer driver to the global list.
 */
t4_register_uld(struct uld_info *ui)

	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			break;
		}
	}
	if (rc == 0)
		SLIST_INSERT_HEAD(&t4_uld_list, ui, link);

	sx_xunlock(&t4_uld_list_lock);

t4_unregister_uld(struct uld_info *ui)

	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				break;
			}
			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			break;
		}
	}

	sx_xunlock(&t4_uld_list_lock);

t4_activate_uld(struct adapter *sc, int id)

	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);

	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0)
				setbit(&sc->active_ulds, id);
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

t4_deactivate_uld(struct adapter *sc, int id)

	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				clrbit(&sc->active_ulds, id);
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

t4_deactivate_all_uld(struct adapter *sc)

	struct uld_info *ui;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld");
	if (rc != 0)
		return (ENXIO);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (isset(&sc->active_ulds, ui->uld_id)) {
			rc = ui->deactivate(sc);
			if (rc != 0)
				break;
			clrbit(&sc->active_ulds, ui->uld_id);
		}
	}

	sx_sunlock(&t4_uld_list_lock);
	end_synchronized_op(sc, 0);

t4_async_event(struct adapter *sc)

	struct uld_info *ui;

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
		return;
	sx_slock(&t4_uld_list_lock);
	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == ULD_IWARP) {
			ui->async_event(sc);
			break;
		}
	}
	sx_sunlock(&t4_uld_list_lock);
	end_synchronized_op(sc, 0);

uld_active(struct adapter *sc, int uld_id)

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));

ktls_capability(struct adapter *sc, bool enable)

	ASSERT_SYNCHRONIZED_OP(sc);

	if (hw_off_limits(sc))
		return (ENXIO);

	if (enable) {
		if (sc->flags & KERN_TLS_ON)
			return (0);	/* already on */
		if (sc->offload_map != 0) {
			CH_WARN(sc,
			    "Disable TOE on all interfaces associated with "
			    "this adapter before trying to enable NIC TLS.\n");
			return (EAGAIN);
		}
		return (t6_config_kern_tls(sc, true));
	}

	/*
	 * Nothing to do for disable.  If TOE is enabled sometime later
	 * then toe_capability will reconfigure the hardware.
	 */
	return (0);

/*
 * t  = ptr to tunable.
 * nc = number of CPUs.
 * c  = compiled in default for that tunable.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
	int nq;

	if (*t > 0)
		return;
	nq = *t < 0 ? -*t : c;
	*t = min(nq, nc);
}
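/*
 * Editor's example of the convention above: a positive tunable is taken
 * as-is (the function returns early), a negative one requests the absolute
 * value, and an unset (zero) tunable falls back to the compiled-in
 * default; either way the result is capped at the CPU count.  The numbers
 * below are illustrative only.
 *
 *	int ntxq = -32;
 *	calculate_nqueues(&ntxq, 8, 16);	// ntxq = min(32, 8) = 8
 *	ntxq = 0;
 *	calculate_nqueues(&ntxq, 8, 16);	// ntxq = min(16, 8) = 8
 *	ntxq = 4;
 *	calculate_nqueues(&ntxq, 8, 16);	// explicit setting kept: 4
 */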
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

#ifdef RSS
	if (t4_ntxq < 1)
		t4_ntxq = rss_getnumbuckets();
#endif
	calculate_nqueues(&t4_ntxq, nc, NTXQ);

	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);

#ifdef RSS
	if (t4_nrxq < 1)
		t4_nrxq = rss_getnumbuckets();
#endif
	calculate_nqueues(&t4_nrxq, nc, NRXQ);

	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
#endif
#ifdef TCP_OFFLOAD
	calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
#endif

#if defined(TCP_OFFLOAD) || defined(KERN_TLS)
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef TCP_OFFLOAD
	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}

	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
		t4_tmr_idx_ofld = TMR_IDX_OFLD;

	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
#else
	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ);
	calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ);
	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

	if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
		t4_tmr_idx = TMR_IDX;

	if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
		t4_pktc_idx = PKTC_IDX;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The rest are additional virtual
	 * interfaces on the same physical port.  Note that the main VI does
	 * not have native netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis < 1)
		t4_num_vis = 1;
	if (t4_num_vis > nitems(vi_mac_funcs)) {
		t4_num_vis = nitems(vi_mac_funcs);
		printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
	}

	if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
		pcie_relaxed_ordering = 1;
#if defined(__i386__) || defined(__amd64__)
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			pcie_relaxed_ordering = 0;
#endif
	}
}

t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);

	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];

		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}

t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)
		return;

	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d %15ju %8s %8s ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}

static DB_DEFINE_TABLE(show, t4, show_t4);

DB_TABLE_COMMAND_FLAGS(show_t4, devlog, db_show_devlog, CS_OWN)
{
	device_t dev;
	int t;

	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
	} else {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}

DB_TABLE_COMMAND_FLAGS(show_t4, tcb, db_show_t4tcb, CS_OWN)
{
	device_t dev;
	int t, tid;

	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
	}
	if (t == tNUMBER) {
		tid = db_tok_number;
	} else {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}

static eventhandler_tag vxlan_start_evtag;
static eventhandler_tag vxlan_stop_evtag;

struct vxlan_evargs {
	if_t ifp;
	uint16_t port;
};

static void
enable_vxlan_rx(struct adapter *sc)
{
	int i, rc;
	struct port_info *pi;
	uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};

	ASSERT_SYNCHRONIZED_OP(sc);

	t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) |
	    F_VXLAN_EN);
	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi->vxlan_tcam_entry == true)
			continue;
		rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac,
		    match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
		    true);
		if (rc < 0) {
			rc = -rc;
			CH_ERR(&pi->vi[0],
			    "failed to add VXLAN TCAM entry: %d.\n", rc);
		} else {
			MPASS(rc == sc->rawf_base + pi->port_id);
			pi->vxlan_tcam_entry = true;
		}
	}
}

static void
t4_vxlan_start(struct adapter *sc, void *arg)
{
	struct vxlan_evargs *v = arg;

	if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
		return;
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0)
		return;

	if (sc->vxlan_refcount == 0) {
		sc->vxlan_port = v->port;
		sc->vxlan_refcount = 1;
		if (!hw_off_limits(sc))
			enable_vxlan_rx(sc);
	} else if (sc->vxlan_port == v->port) {
		sc->vxlan_refcount++;
	} else {
		CH_ERR(sc, "VXLAN already configured on port %d; "
		    "ignoring attempt to configure it on port %d\n",
		    sc->vxlan_port, v->port);
	}
	end_synchronized_op(sc, 0);
}

static void
t4_vxlan_stop(struct adapter *sc, void *arg)
{
	struct vxlan_evargs *v = arg;

	if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
		return;
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0)
		return;

	/*
	 * VXLANs may have been configured before the driver was loaded so we
	 * may see more stops than starts.  This is not handled cleanly but at
	 * least we keep the refcount sane.
	 */
	if (sc->vxlan_port != v->port)
		goto done;
	if (sc->vxlan_refcount == 0) {
		CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; "
		    "ignoring attempt to stop it again.\n", sc->vxlan_port);
	} else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc))
		t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0);
done:
	end_synchronized_op(sc, 0);
}

static void
t4_vxlan_start_handler(void *arg __unused, if_t ifp,
    sa_family_t family, u_int port)
{
	struct vxlan_evargs v;

	MPASS(family == AF_INET || family == AF_INET6);
	v.ifp = ifp;
	v.port = port;

	t4_iterate(t4_vxlan_start, &v);
}

static void
t4_vxlan_stop_handler(void *arg __unused, if_t ifp, sa_family_t family,
    u_int port)
{
	struct vxlan_evargs v;

	MPASS(family == AF_INET || family == AF_INET6);
	v.ifp = ifp;
	v.port = port;

	t4_iterate(t4_vxlan_stop, &v);
}
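/*
 * Editor's sketch: t4_iterate() (defined earlier in this file) walks every
 * attached adapter under t4_list_lock, exactly as the two VXLAN handlers
 * above use it.  A hypothetical callback counting adapters with active
 * VXLAN state would look like this:
 *
 *	static void
 *	count_vxlan(struct adapter *sc, void *arg)
 *	{
 *		int *n = arg;
 *
 *		if (sc->vxlan_refcount > 0)
 *			(*n)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_vxlan, &n);
 */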
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
			    t4_filter_rpl, CPL_COOKIE_FILTER);
			t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
			    do_l2t_write_rpl, CPL_COOKIE_FILTER);
			t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
			    t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
			    t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
			    t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
			t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
			    do_smt_write_rpl);
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
			callout_init(&fatal_callout, 1);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();

			vxlan_start_evtag =
			    EVENTHANDLER_REGISTER(vxlan_start,
			    t4_vxlan_start_handler, NULL,
			    EVENTHANDLER_PRI_ANY);
			vxlan_stop_evtag =
			    EVENTHANDLER_REGISTER(vxlan_stop,
			    t4_vxlan_stop_handler, NULL,
			    EVENTHANDLER_PRI_ANY);
			reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK,
			    taskqueue_thread_enqueue, &reset_tq);
			taskqueue_start_threads(&reset_tq, 1, PI_SOFT,
			    /* ... */);
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			taskqueue_free(reset_tq);
			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif

			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				EVENTHANDLER_DEREGISTER(vxlan_start,
				    vxlan_start_evtag);
				EVENTHANDLER_DEREGISTER(vxlan_stop,
				    vxlan_stop_evtag);
				t4_tracer_modunload();
#ifdef KERN_TLS
				t6_ktls_modunload();
#endif
#ifdef INET6
				t4_clip_modunload();
#endif
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

DRIVER_MODULE(t4nex, pci, t4_driver, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, crypto, 1, 1, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
MODULE_VERSION(cc, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
MODULE_VERSION(vcc, 1);