1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2011 Chelsio Communications, Inc.
6 * Written by: Navdeep Parhar <np@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
30 #include <sys/cdefs.h>
33 #include "opt_inet6.h"
34 #include "opt_kern_tls.h"
35 #include "opt_ratelimit.h"
38 #include <sys/param.h>
41 #include <sys/kernel.h>
43 #include <sys/eventhandler.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
47 #include <sys/taskqueue.h>
48 #include <sys/pciio.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pcivar.h>
51 #include <dev/pci/pci_private.h>
52 #include <sys/firmware.h>
55 #include <sys/socket.h>
56 #include <sys/sockio.h>
57 #include <sys/sysctl.h>
58 #include <net/ethernet.h>
60 #include <net/if_types.h>
61 #include <net/if_dl.h>
62 #include <net/if_vlan_var.h>
64 #include <net/rss_config.h>
66 #include <netinet/in.h>
67 #include <netinet/ip.h>
69 #include <netinet/tcp_seq.h>
71 #if defined(__i386__) || defined(__amd64__)
72 #include <machine/md_var.h>
73 #include <machine/cputypes.h>
74 #include <vm/vm.h>
75 #include <vm/pmap.h>
76 #endif
77 #ifdef DDB
78 #include <ddb/ddb.h>
79 #include <ddb/db_lex.h>
80 #endif
82 #include "common/common.h"
83 #include "common/t4_msg.h"
84 #include "common/t4_regs.h"
85 #include "common/t4_regs_values.h"
86 #include "cudbg/cudbg.h"
90 #include "t4_mp_ring.h"
94 /* T4 bus driver interface */
95 static int t4_probe(device_t);
96 static int t4_attach(device_t);
97 static int t4_detach(device_t);
98 static int t4_child_location(device_t, device_t, struct sbuf *);
99 static int t4_ready(device_t);
100 static int t4_read_port_device(device_t, int, device_t *);
101 static int t4_suspend(device_t);
102 static int t4_resume(device_t);
103 static int t4_reset_prepare(device_t, device_t);
104 static int t4_reset_post(device_t, device_t);
105 static device_method_t t4_methods[] = {
106 DEVMETHOD(device_probe, t4_probe),
107 DEVMETHOD(device_attach, t4_attach),
108 DEVMETHOD(device_detach, t4_detach),
109 DEVMETHOD(device_suspend, t4_suspend),
110 DEVMETHOD(device_resume, t4_resume),
112 DEVMETHOD(bus_child_location, t4_child_location),
113 DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
114 DEVMETHOD(bus_reset_post, t4_reset_post),
116 DEVMETHOD(t4_is_main_ready, t4_ready),
117 DEVMETHOD(t4_read_port_device, t4_read_port_device),
121 static driver_t t4_driver = {
124 sizeof(struct adapter)
128 /* T4 port (cxgbe) interface */
129 static int cxgbe_probe(device_t);
130 static int cxgbe_attach(device_t);
131 static int cxgbe_detach(device_t);
132 device_method_t cxgbe_methods[] = {
133 DEVMETHOD(device_probe, cxgbe_probe),
134 DEVMETHOD(device_attach, cxgbe_attach),
135 DEVMETHOD(device_detach, cxgbe_detach),
138 static driver_t cxgbe_driver = {
141 sizeof(struct port_info)
144 /* T4 VI (vcxgbe) interface */
145 static int vcxgbe_probe(device_t);
146 static int vcxgbe_attach(device_t);
147 static int vcxgbe_detach(device_t);
148 static device_method_t vcxgbe_methods[] = {
149 DEVMETHOD(device_probe, vcxgbe_probe),
150 DEVMETHOD(device_attach, vcxgbe_attach),
151 DEVMETHOD(device_detach, vcxgbe_detach),
154 static driver_t vcxgbe_driver = {
157 sizeof(struct vi_info)
160 static d_ioctl_t t4_ioctl;
162 static struct cdevsw t4_cdevsw = {
163 .d_version = D_VERSION,
168 /* T5 bus driver interface */
169 static int t5_probe(device_t);
170 static device_method_t t5_methods[] = {
171 DEVMETHOD(device_probe, t5_probe),
172 DEVMETHOD(device_attach, t4_attach),
173 DEVMETHOD(device_detach, t4_detach),
174 DEVMETHOD(device_suspend, t4_suspend),
175 DEVMETHOD(device_resume, t4_resume),
177 DEVMETHOD(bus_child_location, t4_child_location),
178 DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
179 DEVMETHOD(bus_reset_post, t4_reset_post),
181 DEVMETHOD(t4_is_main_ready, t4_ready),
182 DEVMETHOD(t4_read_port_device, t4_read_port_device),
186 static driver_t t5_driver = {
189 sizeof(struct adapter)
193 /* T5 port (cxl) interface */
194 static driver_t cxl_driver = {
197 sizeof(struct port_info)
200 /* T5 VI (vcxl) interface */
201 static driver_t vcxl_driver = {
204 sizeof(struct vi_info)
207 /* T6 bus driver interface */
208 static int t6_probe(device_t);
209 static device_method_t t6_methods[] = {
210 DEVMETHOD(device_probe, t6_probe),
211 DEVMETHOD(device_attach, t4_attach),
212 DEVMETHOD(device_detach, t4_detach),
213 DEVMETHOD(device_suspend, t4_suspend),
214 DEVMETHOD(device_resume, t4_resume),
216 DEVMETHOD(bus_child_location, t4_child_location),
217 DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
218 DEVMETHOD(bus_reset_post, t4_reset_post),
220 DEVMETHOD(t4_is_main_ready, t4_ready),
221 DEVMETHOD(t4_read_port_device, t4_read_port_device),
225 static driver_t t6_driver = {
228 sizeof(struct adapter)
232 /* T6 port (cc) interface */
233 static driver_t cc_driver = {
236 sizeof(struct port_info)
239 /* T6 VI (vcc) interface */
240 static driver_t vcc_driver = {
243 sizeof(struct vi_info)
246 /* ifnet interface */
247 static void cxgbe_init(void *);
248 static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
249 static int cxgbe_transmit(if_t, struct mbuf *);
250 static void cxgbe_qflush(if_t);
251 #if defined(KERN_TLS) || defined(RATELIMIT)
252 static int cxgbe_snd_tag_alloc(if_t, union if_snd_tag_alloc_params *,
253 struct m_snd_tag **);
256 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
258 /*
259 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
260 * then ADAPTER_LOCK, then t4_uld_list_lock.
261 */
262 static struct sx t4_list_lock;
263 SLIST_HEAD(, adapter) t4_list;
265 static struct sx t4_uld_list_lock;
266 SLIST_HEAD(, uld_info) t4_uld_list;
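/*
 * Illustrative sketch (not part of the driver): honoring the lock order
 * documented above while walking the adapter list and taking a per-adapter
 * lock.  The names match the declarations above; the function itself is
 * hypothetical and kept out of the build.
 */
#if 0
static void
example_walk_adapters(void)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);		/* 1. list lock first */
	SLIST_FOREACH(sc, &t4_list, link) {
		ADAPTER_LOCK(sc);		/* 2. then the adapter lock */
		/* ... inspect sc ... */
		ADAPTER_UNLOCK(sc);
	}
	sx_sunlock(&t4_list_lock);
}
#endif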
269 /*
270 * Tunables. See tweak_tunables() too.
271 *
272 * Each tunable is set to a default value here if it's known at compile-time.
273 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
274 * provide a reasonable default (up to n) when the driver is loaded.
275 *
276 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
277 * T5 are under hw.cxl.
278 */
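/*
 * Example (illustrative): any of these tunables can be overridden from
 * /boot/loader.conf before the driver loads, e.g.
 *
 *	hw.cxgbe.ntxq="8"		# 8 NIC tx queues per port
 *	hw.cxgbe.holdoff_timer_idx="0"
 *
 * A tunable left at its -n default is resolved by tweak_tunables() at load
 * time; a hypothetical sketch of that convention (not the actual code):
 *
 *	if (t4_ntxq < 1)
 *		t4_ntxq = min(mp_ncpus, -t4_ntxq);
 */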
279 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
280 "cxgbe(4) parameters");
281 SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
282 "cxgbe(4) T5+ parameters");
283 SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
284 "cxgbe(4) TOE parameters");
287 * Number of queues for tx and rx, NIC and offload.
291 SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
292 "Number of TX queues per port");
293 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */
297 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
298 "Number of RX queues per port");
299 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */
302 static int t4_ntxq_vi = -NTXQ_VI;
303 SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
304 "Number of TX queues per VI");
307 static int t4_nrxq_vi = -NRXQ_VI;
308 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
309 "Number of RX queues per VI");
311 static int t4_rsrv_noflowq = 0;
312 SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
313 0, "Reserve TX queue 0 of each VI for non-flowid packets");
315 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
317 static int t4_nofldtxq = -NOFLDTXQ;
318 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
319 "Number of offload TX queues per port");
322 static int t4_nofldrxq = -NOFLDRXQ;
323 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
324 "Number of offload RX queues per port");
326 #define NOFLDTXQ_VI 1
327 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
328 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
329 "Number of offload TX queues per VI");
331 #define NOFLDRXQ_VI 1
332 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
333 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
334 "Number of offload RX queues per VI");
336 #define TMR_IDX_OFLD 1
337 int t4_tmr_idx_ofld = TMR_IDX_OFLD;
338 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
339 &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");
341 #define PKTC_IDX_OFLD (-1)
342 int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
343 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
344 &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues");
346 /* 0 means chip/fw default, non-zero number is value in microseconds */
347 static u_long t4_toe_keepalive_idle = 0;
348 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
349 &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");
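/*
 * Example (illustrative): these TOE knobs take microseconds, so a 75 second
 * idle time before the first keepalive probe would be configured as
 *
 *	hw.cxgbe.toe.keepalive_idle="75000000"
 *
 * in /boot/loader.conf.  Leaving it at 0 keeps the chip/firmware default.
 */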
351 /* 0 means chip/fw default, non-zero number is value in microseconds */
352 static u_long t4_toe_keepalive_interval = 0;
353 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
354 &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");
356 /* 0 means chip/fw default, non-zero number is # of keepalives before abort */
357 static int t4_toe_keepalive_count = 0;
358 SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
359 &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");
361 /* 0 means chip/fw default, non-zero number is value in microseconds */
362 static u_long t4_toe_rexmt_min = 0;
363 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
364 &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");
366 /* 0 means chip/fw default, non-zero number is value in microseconds */
367 static u_long t4_toe_rexmt_max = 0;
368 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
369 &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");
371 /* 0 means chip/fw default, non-zero number is # of rexmt before abort */
372 static int t4_toe_rexmt_count = 0;
373 SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
374 &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");
376 /* -1 means chip/fw default, other values are raw backoff values to use */
377 static int t4_toe_rexmt_backoff[16] = {
378 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
380 SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff,
381 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
382 "cxgbe(4) TOE retransmit backoff values");
383 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
384 &t4_toe_rexmt_backoff[0], 0, "");
385 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
386 &t4_toe_rexmt_backoff[1], 0, "");
387 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
388 &t4_toe_rexmt_backoff[2], 0, "");
389 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
390 &t4_toe_rexmt_backoff[3], 0, "");
391 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
392 &t4_toe_rexmt_backoff[4], 0, "");
393 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
394 &t4_toe_rexmt_backoff[5], 0, "");
395 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
396 &t4_toe_rexmt_backoff[6], 0, "");
397 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
398 &t4_toe_rexmt_backoff[7], 0, "");
399 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
400 &t4_toe_rexmt_backoff[8], 0, "");
401 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
402 &t4_toe_rexmt_backoff[9], 0, "");
403 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
404 &t4_toe_rexmt_backoff[10], 0, "");
405 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
406 &t4_toe_rexmt_backoff[11], 0, "");
407 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
408 &t4_toe_rexmt_backoff[12], 0, "");
409 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
410 &t4_toe_rexmt_backoff[13], 0, "");
411 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
412 &t4_toe_rexmt_backoff[14], 0, "");
413 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
414 &t4_toe_rexmt_backoff[15], 0, "");
416 int t4_ddp_rcvbuf_len = 256 * 1024;
417 SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_len, CTLFLAG_RWTUN,
418 &t4_ddp_rcvbuf_len, 0, "length of each DDP RX buffer");
420 unsigned int t4_ddp_rcvbuf_cache = 4;
421 SYSCTL_UINT(_hw_cxgbe_toe, OID_AUTO, ddp_rcvbuf_cache, CTLFLAG_RWTUN,
422 &t4_ddp_rcvbuf_cache, 0,
423 "maximum number of free DDP RX buffers to cache per connection");
427 #define NN_MAIN_VI (1 << 0) /* Native netmap on the main VI */
428 #define NN_EXTRA_VI (1 << 1) /* Native netmap on the extra VI(s) */
429 static int t4_native_netmap = NN_EXTRA_VI;
430 SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
431 0, "Native netmap support. bit 0 = main VI, bit 1 = extra VIs");
434 static int t4_nnmtxq = -NNMTXQ;
435 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
436 "Number of netmap TX queues");
439 static int t4_nnmrxq = -NNMRXQ;
440 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
441 "Number of netmap RX queues");
444 static int t4_nnmtxq_vi = -NNMTXQ_VI;
445 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
446 "Number of netmap TX queues per VI");
449 static int t4_nnmrxq_vi = -NNMRXQ_VI;
450 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
451 "Number of netmap RX queues per VI");
455 * Holdoff parameters for ports.
458 int t4_tmr_idx = TMR_IDX;
459 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
460 0, "Holdoff timer index");
461 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */
463 #define PKTC_IDX (-1)
464 int t4_pktc_idx = PKTC_IDX;
465 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
466 0, "Holdoff packet counter index");
467 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */
470 * Size (# of entries) of each tx and rx queue.
472 unsigned int t4_qsize_txq = TX_EQ_QSIZE;
473 SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
474 "Number of descriptors in each TX queue");
476 unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
477 SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
478 "Number of descriptors in each RX queue");
481 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
483 int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
484 SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
485 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");
488 * Configuration file. All the _CF names here are special.
490 #define DEFAULT_CF "default"
491 #define BUILTIN_CF "built-in"
492 #define FLASH_CF "flash"
493 #define UWIRE_CF "uwire"
494 #define FPGA_CF "fpga"
495 static char t4_cfg_file[32] = DEFAULT_CF;
496 SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
497 sizeof(t4_cfg_file), "Firmware configuration file");
500 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
501 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
502 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
503 * mark or when signalled to do so, 0 to never emit PAUSE.
504 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
505 * negotiated settings will override rx_pause/tx_pause.
506 * Otherwise rx_pause/tx_pause are applied forcibly.
508 static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
509 SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
510 &t4_pause_settings, 0,
511 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
514 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
515 * -1 to run with the firmware default. Same as FEC_AUTO (bit 5)
518 static int t4_fec = -1;
519 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
520 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
523 * Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
524 * issues to the firmware. If the firmware doesn't support FORCE_FEC then the
525 * driver runs as if this is set to 0.
526 * -1 to set FORCE_FEC iff requested_fec != AUTO. Multiple FEC bits are okay.
527 * 0 to never set FORCE_FEC. requested_fec = AUTO means use the hint from the
528 * transceiver. Multiple FEC bits may not be okay but will be passed on to
529 * the firmware anyway (may result in l1cfg errors with old firmwares).
530 * 1 to always set FORCE_FEC. Multiple FEC bits are okay. requested_fec = AUTO
531 * means set all FEC bits that are valid for the speed.
533 static int t4_force_fec = -1;
534 SYSCTL_INT(_hw_cxgbe, OID_AUTO, force_fec, CTLFLAG_RDTUN, &t4_force_fec, 0,
535 "Controls the use of FORCE_FEC bit in L1 configuration.");
538 * Link autonegotiation.
539 * -1 to run with the firmware default.
543 static int t4_autoneg = -1;
544 SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
545 "Link autonegotiation");
548 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
549 * encouraged respectively). '-n' is the same as 'n' except the firmware
550 * version used in the checks is read from the firmware bundled with the driver.
552 static int t4_fw_install = 1;
553 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
554 "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");
557 * ASIC features that will be used. Disable the ones you don't want so that the
558 * chip resources aren't wasted on features that will not be used.
560 static int t4_nbmcaps_allowed = 0;
561 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
562 &t4_nbmcaps_allowed, 0, "Default NBM capabilities");
564 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
565 SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
566 &t4_linkcaps_allowed, 0, "Default link capabilities");
568 static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
569 FW_CAPS_CONFIG_SWITCH_EGRESS;
570 SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
571 &t4_switchcaps_allowed, 0, "Default switch capabilities");
574 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
575 FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
577 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
578 FW_CAPS_CONFIG_NIC_HASHFILTER;
580 SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
581 &t4_niccaps_allowed, 0, "Default NIC capabilities");
583 static int t4_toecaps_allowed = -1;
584 SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
585 &t4_toecaps_allowed, 0, "Default TCP offload capabilities");
587 static int t4_rdmacaps_allowed = -1;
588 SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
589 &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");
591 static int t4_cryptocaps_allowed = -1;
592 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
593 &t4_cryptocaps_allowed, 0, "Default crypto capabilities");
595 static int t4_iscsicaps_allowed = -1;
596 SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
597 &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");
599 static int t4_fcoecaps_allowed = 0;
600 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
601 &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");
603 static int t5_write_combine = 0;
604 SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
605 0, "Use WC instead of UC for BAR2");
607 static int t4_num_vis = 1;
608 SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
609 "Number of VIs per port");
612 * PCIe Relaxed Ordering.
613 * -1: driver should figure out a good value.
618 static int pcie_relaxed_ordering = -1;
619 SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
620 &pcie_relaxed_ordering, 0,
621 "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");
623 static int t4_panic_on_fatal_err = 0;
624 SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN,
625 &t4_panic_on_fatal_err, 0, "panic on fatal errors");
627 static int t4_reset_on_fatal_err = 0;
628 SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN,
629 &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors");
631 static int t4_clock_gate_on_suspend = 0;
632 SYSCTL_INT(_hw_cxgbe, OID_AUTO, clock_gate_on_suspend, CTLFLAG_RWTUN,
633 &t4_clock_gate_on_suspend, 0, "gate the clock on suspend");
635 static int t4_tx_vm_wr = 0;
636 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
637 "Use VM work requests to transmit packets.");
640 * Set to non-zero to enable the attack filter. A packet that matches any of
641 * these conditions will get dropped on ingress:
642 * 1) IP && source address == destination address.
643 * 2) TCP/IP && source address is not a unicast address.
644 * 3) TCP/IP && destination address is not a unicast address.
645 * 4) IP && source address is loopback (127.x.y.z).
646 * 5) IP && destination address is loopback (127.x.y.z).
647 * 6) IPv6 && source address == destination address.
648 * 7) IPv6 && source address is not a unicast address.
649 * 8) IPv6 && source address is loopback (::1/128).
650 * 9) IPv6 && destination address is loopback (::1/128).
651 * 10) IPv6 && source address is unspecified (::/128).
652 * 11) IPv6 && destination address is unspecified (::/128).
653 * 12) TCP/IPv6 && source address is multicast (ff00::/8).
654 * 13) TCP/IPv6 && destination address is multicast (ff00::/8).
656 static int t4_attack_filter = 0;
657 SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN,
658 &t4_attack_filter, 0, "Drop suspicious traffic");
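/*
 * Example (illustrative): setting hw.cxgbe.attack_filter="1" in
 * /boot/loader.conf arms the checks listed above, so e.g. an IP packet whose
 * source address equals its destination address (condition 1, the classic
 * "land attack" pattern) is dropped by the chip before the host sees it.
 */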
660 static int t4_drop_ip_fragments = 0;
661 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN,
662 &t4_drop_ip_fragments, 0, "Drop IP fragments");
664 static int t4_drop_pkts_with_l2_errors = 1;
665 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN,
666 &t4_drop_pkts_with_l2_errors, 0,
667 "Drop all frames with Layer 2 length or checksum errors");
669 static int t4_drop_pkts_with_l3_errors = 0;
670 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN,
671 &t4_drop_pkts_with_l3_errors, 0,
672 "Drop all frames with IP version, length, or checksum errors");
674 static int t4_drop_pkts_with_l4_errors = 0;
675 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
676 &t4_drop_pkts_with_l4_errors, 0,
677 "Drop all frames with Layer 4 length, checksum, or other errors");
683 static int t4_cop_managed_offloading = 0;
684 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
685 &t4_cop_managed_offloading, 0,
686 "COP (Connection Offload Policy) controls all TOE offload");
691 * This enables KERN_TLS for all adapters if set.
693 static int t4_kern_tls = 0;
694 SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0,
695 "Enable KERN_TLS mode for T6 adapters");
697 SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
698 "cxgbe(4) KERN_TLS parameters");
700 static int t4_tls_inline_keys = 0;
701 SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
702 &t4_tls_inline_keys, 0,
703 "Always pass TLS keys in work requests (1) or attempt to store TLS keys "
706 static int t4_tls_combo_wrs = 0;
707 SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
708 0, "Attempt to combine TCB field updates with TLS record work requests.");
711 /* Functions used by VIs to obtain unique MAC addresses for each VI. */
712 static int vi_mac_funcs[] = {
716 FW_VI_FUNC_OPENISCSI,
722 struct intrs_and_queues {
723 uint16_t intr_type; /* INTx, MSI, or MSI-X */
724 uint16_t num_vis; /* number of VIs for each port */
725 uint16_t nirq; /* Total # of vectors */
726 uint16_t ntxq; /* # of NIC txq's for each port */
727 uint16_t nrxq; /* # of NIC rxq's for each port */
728 uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */
729 uint16_t nofldrxq; /* # of TOE rxq's for each port */
730 uint16_t nnmtxq; /* # of netmap txq's */
731 uint16_t nnmrxq; /* # of netmap rxq's */
733 /* The vcxgbe/vcxl interfaces use these and not the ones above. */
734 uint16_t ntxq_vi; /* # of NIC txq's */
735 uint16_t nrxq_vi; /* # of NIC rxq's */
736 uint16_t nofldtxq_vi; /* # of TOE txq's */
737 uint16_t nofldrxq_vi; /* # of TOE rxq's */
738 uint16_t nnmtxq_vi; /* # of netmap txq's */
739 uint16_t nnmrxq_vi; /* # of netmap rxq's */
742 static void setup_memwin(struct adapter *);
743 static void position_memwin(struct adapter *, int, uint32_t);
744 static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
745 static int fwmtype_to_hwmtype(int);
746 static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
748 static int fixup_devlog_params(struct adapter *);
749 static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
750 static int contact_firmware(struct adapter *);
751 static int partition_resources(struct adapter *);
752 static int get_params__pre_init(struct adapter *);
753 static int set_params__pre_init(struct adapter *);
754 static int get_params__post_init(struct adapter *);
755 static int set_params__post_init(struct adapter *);
756 static void t4_set_desc(struct adapter *);
757 static bool fixed_ifmedia(struct port_info *);
758 static void build_medialist(struct port_info *);
759 static void init_link_config(struct port_info *);
760 static int fixup_link_config(struct port_info *);
761 static int apply_link_config(struct port_info *);
762 static int cxgbe_init_synchronized(struct vi_info *);
763 static int cxgbe_uninit_synchronized(struct vi_info *);
764 static int adapter_full_init(struct adapter *);
765 static void adapter_full_uninit(struct adapter *);
766 static int vi_full_init(struct vi_info *);
767 static void vi_full_uninit(struct vi_info *);
768 static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *);
769 static void quiesce_txq(struct sge_txq *);
770 static void quiesce_wrq(struct sge_wrq *);
771 static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *);
772 static void quiesce_vi(struct vi_info *);
773 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
774 driver_intr_t *, void *, char *);
775 static int t4_free_irq(struct adapter *, struct irq *);
776 static void t4_init_atid_table(struct adapter *);
777 static void t4_free_atid_table(struct adapter *);
778 static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
779 static void vi_refresh_stats(struct vi_info *);
780 static void cxgbe_refresh_stats(struct vi_info *);
781 static void cxgbe_tick(void *);
782 static void vi_tick(void *);
783 static void cxgbe_sysctls(struct port_info *);
784 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
785 static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
786 static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
787 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
788 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
789 static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS);
790 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
791 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
792 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
793 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
794 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
795 static int sysctl_link_fec(SYSCTL_HANDLER_ARGS);
796 static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
797 static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
798 static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
799 static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
800 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
801 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
802 static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
803 static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
804 static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
805 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
806 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
807 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
808 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
809 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
810 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
811 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
812 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
813 static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
814 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
815 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
816 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
817 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
818 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
819 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
820 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
821 static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
822 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
823 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
824 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
825 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
826 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
827 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
828 static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS);
829 static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
830 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
831 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
832 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
833 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
834 static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
835 static int sysctl_reset(SYSCTL_HANDLER_ARGS);
837 static int sysctl_tls(SYSCTL_HANDLER_ARGS);
838 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
839 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
840 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
841 static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
842 static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
843 static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
844 static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
846 static int get_sge_context(struct adapter *, struct t4_sge_context *);
847 static int load_fw(struct adapter *, struct t4_data *);
848 static int load_cfg(struct adapter *, struct t4_data *);
849 static int load_boot(struct adapter *, struct t4_bootrom *);
850 static int load_bootcfg(struct adapter *, struct t4_data *);
851 static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
852 static void free_offload_policy(struct t4_offload_policy *);
853 static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
854 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
855 static int read_i2c(struct adapter *, struct t4_i2c_data *);
856 static int clear_stats(struct adapter *, u_int);
857 static int hold_clip_addr(struct adapter *, struct t4_clip_addr *);
858 static int release_clip_addr(struct adapter *, struct t4_clip_addr *);
860 static int toe_capability(struct vi_info *, bool);
861 static int t4_deactivate_all_uld(struct adapter *);
862 static void t4_async_event(struct adapter *);
865 static int ktls_capability(struct adapter *, bool);
867 static int mod_event(module_t, int, void *);
868 static int notify_siblings(device_t, int);
869 static uint64_t vi_get_counter(if_t, ift_counter);
870 static uint64_t cxgbe_get_counter(if_t, ift_counter);
871 static void enable_vxlan_rx(struct adapter *);
872 static void reset_adapter_task(void *, int);
873 static void fatal_error_task(void *, int);
874 static void dump_devlog(struct adapter *);
875 static void dump_cim_regs(struct adapter *);
876 static void dump_cimla(struct adapter *);
882 {0xa000, "Chelsio Terminator 4 FPGA"},
883 {0x4400, "Chelsio T440-dbg"},
884 {0x4401, "Chelsio T420-CR"},
885 {0x4402, "Chelsio T422-CR"},
886 {0x4403, "Chelsio T440-CR"},
887 {0x4404, "Chelsio T420-BCH"},
888 {0x4405, "Chelsio T440-BCH"},
889 {0x4406, "Chelsio T440-CH"},
890 {0x4407, "Chelsio T420-SO"},
891 {0x4408, "Chelsio T420-CX"},
892 {0x4409, "Chelsio T420-BT"},
893 {0x440a, "Chelsio T404-BT"},
894 {0x440e, "Chelsio T440-LP-CR"},
896 {0xb000, "Chelsio Terminator 5 FPGA"},
897 {0x5400, "Chelsio T580-dbg"},
898 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
899 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
900 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
901 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
902 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
903 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
904 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
905 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
906 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
907 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
908 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
909 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
910 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
911 {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */
912 {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */
913 {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */
914 {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */
917 {0x5483, "Custom T540-CR"},
918 {0x5484, "Custom T540-BT"},
920 {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
921 {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */
922 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
923 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
924 {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
925 {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
926 {0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */
927 {0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */
928 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
929 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
930 {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
931 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
932 {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
933 {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
934 {0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */
935 {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
938 {0x6480, "Custom T6225-CR"},
939 {0x6481, "Custom T62100-CR"},
940 {0x6482, "Custom T6225-CR"},
941 {0x6483, "Custom T62100-CR"},
942 {0x6484, "Custom T64100-CR"},
943 {0x6485, "Custom T6240-SO"},
944 {0x6486, "Custom T6225-SO-CR"},
945 {0x6487, "Custom T6225-CR"},
949 /*
950 * service_iq_fl() has an iq and needs the fl. Offset of fl from the iq should
951 * be exactly the same for both rxq and ofld_rxq.
952 */
953 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
954 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
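/*
 * Illustrative sketch: because the offsets asserted above are identical,
 * code that is handed only the iq can recover the enclosing rx queue and its
 * freelist the same way whether it is a NIC or an offload queue
 * (hypothetical helper, not part of the driver):
 *
 *	struct sge_rxq *rxq = __containerof(iq, struct sge_rxq, iq);
 *	struct sge_fl *fl = &rxq->fl;
 */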
956 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
959 t4_probe(device_t dev)
962 uint16_t v = pci_get_vendor(dev);
963 uint16_t d = pci_get_device(dev);
964 uint8_t f = pci_get_function(dev);
966 if (v != PCI_VENDOR_ID_CHELSIO)
969 /* Attach only to PF0 of the FPGA */
970 if (d == 0xa000 && f != 0)
973 for (i = 0; i < nitems(t4_pciids); i++) {
974 if (d == t4_pciids[i].device) {
975 device_set_desc(dev, t4_pciids[i].desc);
976 return (BUS_PROBE_DEFAULT);
984 t5_probe(device_t dev)
987 uint16_t v = pci_get_vendor(dev);
988 uint16_t d = pci_get_device(dev);
989 uint8_t f = pci_get_function(dev);
991 if (v != PCI_VENDOR_ID_CHELSIO)
994 /* Attach only to PF0 of the FPGA */
995 if (d == 0xb000 && f != 0)
998 for (i = 0; i < nitems(t5_pciids); i++) {
999 if (d == t5_pciids[i].device) {
1000 device_set_desc(dev, t5_pciids[i].desc);
1001 return (BUS_PROBE_DEFAULT);
1009 t6_probe(device_t dev)
1012 uint16_t v = pci_get_vendor(dev);
1013 uint16_t d = pci_get_device(dev);
1015 if (v != PCI_VENDOR_ID_CHELSIO)
1018 for (i = 0; i < nitems(t6_pciids); i++) {
1019 if (d == t6_pciids[i].device) {
1020 device_set_desc(dev, t6_pciids[i].desc);
1021 return (BUS_PROBE_DEFAULT);
1029 t5_attribute_workaround(device_t dev)
1035 * The T5 chips do not properly echo the No Snoop and Relaxed
1036 * Ordering attributes when replying to a TLP from a Root
1037 * Port. As a workaround, find the parent Root Port and
1038 * disable No Snoop and Relaxed Ordering. Note that this
1039 * affects all devices under this root port.
1041 root_port = pci_find_pcie_root_port(dev);
1042 if (root_port == NULL) {
1043 device_printf(dev, "Unable to find parent root port\n");
1047 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
1048 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
1049 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
1051 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
1052 device_get_nameunit(root_port));
1055 static const struct devnames devnames[] = {
1057 .nexus_name = "t4nex",
1058 .ifnet_name = "cxgbe",
1059 .vi_ifnet_name = "vcxgbe",
1060 .pf03_drv_name = "t4iov",
1061 .vf_nexus_name = "t4vf",
1062 .vf_ifnet_name = "cxgbev"
1064 .nexus_name = "t5nex",
1065 .ifnet_name = "cxl",
1066 .vi_ifnet_name = "vcxl",
1067 .pf03_drv_name = "t5iov",
1068 .vf_nexus_name = "t5vf",
1069 .vf_ifnet_name = "cxlv"
1071 .nexus_name = "t6nex",
1073 .vi_ifnet_name = "vcc",
1074 .pf03_drv_name = "t6iov",
1075 .vf_nexus_name = "t6vf",
1076 .vf_ifnet_name = "ccv"
1081 t4_init_devnames(struct adapter *sc)
1086 if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
1087 sc->names = &devnames[id - CHELSIO_T4];
1089 device_printf(sc->dev, "chip id %d is not supported.\n", id);
1095 t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
1097 const char *parent, *name;
1102 parent = device_get_nameunit(sc->dev);
1103 name = sc->names->ifnet_name;
1104 while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
1105 if (resource_long_value(name, unit, "port", &value) == 0 &&
1106 value == pi->port_id)
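/*
 * Example (illustrative): the resource_* lookups above implement unit wiring
 * via /boot/device.hints.  To pin port 3 of the second T4 nexus to cxgbe5:
 *
 *	hint.cxgbe.5.at="t4nex1"
 *	hint.cxgbe.5.port="3"
 *
 * The interface name comes from sc->names->ifnet_name, so T5 and T6 cards
 * use "cxl" and "cc" hints instead.
 */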
1113 t4_calibration(void *arg)
1116 struct clock_sync *cur, *nex;
1121 sc = (struct adapter *)arg;
1123 KASSERT((hw_off_limits(sc) == 0), ("hw_off_limits at t4_calibration"));
1124 hw = t4_read_reg64(sc, A_SGE_TIMESTAMP_LO);
1127 cur = &sc->cal_info[sc->cal_current];
1128 next_up = (sc->cal_current + 1) % CNT_CAL_INFO;
1129 nex = &sc->cal_info[next_up];
1130 if (__predict_false(sc->cal_count == 0)) {
1131 /* First time in, just get the values in */
1138 if (cur->hw_cur == hw) {
1139 /* The clock is not advancing? */
1141 atomic_store_rel_int(&cur->gen, 0);
1145 seqc_write_begin(&nex->gen);
1146 nex->hw_prev = cur->hw_cur;
1147 nex->sbt_prev = cur->sbt_cur;
1150 seqc_write_end(&nex->gen);
1151 sc->cal_current = next_up;
1153 callout_reset_sbt_curcpu(&sc->cal_callout, SBT_1S, 0, t4_calibration,
1158 t4_calibration_start(struct adapter *sc)
1160 /*
1161 * If we have not done a calibration yet
1162 * then do one now; otherwise start the
1163 * periodic calibration timer.
1164 */
1167 for (i = 0; i < CNT_CAL_INFO; i++) {
1168 sc->cal_info[i].gen = 0;
1170 sc->cal_current = 0;
1177 t4_attach(device_t dev)
1180 int rc = 0, i, j, rqidx, tqidx, nports;
1181 struct make_dev_args mda;
1182 struct intrs_and_queues iaq;
1185 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1192 int nm_rqidx, nm_tqidx;
1196 sc = device_get_softc(dev);
1198 sysctl_ctx_init(&sc->ctx);
1199 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
1201 if ((pci_get_device(dev) & 0xff00) == 0x5400)
1202 t5_attribute_workaround(dev);
1203 pci_enable_busmaster(dev);
1204 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
1207 pci_set_max_read_req(dev, 4096);
1208 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
1209 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
1210 if (pcie_relaxed_ordering == 0 &&
1211 (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
1212 v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
1213 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
1214 } else if (pcie_relaxed_ordering == 1 &&
1215 (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
1216 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
1217 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
1221 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
1222 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
1224 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
1225 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
1226 device_get_nameunit(dev));
1228 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
1229 device_get_nameunit(dev));
1230 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
1233 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
1234 TAILQ_INIT(&sc->sfl);
1235 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
1237 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
1240 rw_init(&sc->policy_lock, "connection offload policy");
1242 callout_init(&sc->ktls_tick, 1);
1244 callout_init(&sc->cal_callout, 1);
1246 refcount_init(&sc->vxlan_refcount, 0);
1248 TASK_INIT(&sc->reset_task, 0, reset_adapter_task, sc);
1249 TASK_INIT(&sc->fatal_error_task, 0, fatal_error_task, sc);
1251 sc->ctrlq_oid = SYSCTL_ADD_NODE(&sc->ctx,
1252 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq",
1253 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues");
1254 sc->fwq_oid = SYSCTL_ADD_NODE(&sc->ctx,
1255 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq",
1256 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue");
1258 rc = t4_map_bars_0_and_4(sc);
1260 goto done; /* error message displayed already */
1262 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
1264 /* Prepare the adapter for operation. */
1265 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
1266 rc = -t4_prep_adapter(sc, buf);
1269 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
1274 * This is the real PF# to which we're attaching. Works from within PCI
1275 * passthrough environments too, where pci_get_function() could return a
1276 * different PF# depending on the passthrough configuration. We need to
1277 * use the real PF# in all our communication with the firmware.
1279 j = t4_read_reg(sc, A_PL_WHOAMI);
1280 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
1283 t4_init_devnames(sc);
1284 if (sc->names == NULL) {
1286 goto done; /* error message displayed already */
1290 * Do this really early, with the memory windows set up even before the
1291 * character device. The userland tool's register i/o and mem read
1292 * will work even in "recovery mode".
1295 if (t4_init_devlog_params(sc, 0) == 0)
1296 fixup_devlog_params(sc);
1297 make_dev_args_init(&mda);
1298 mda.mda_devsw = &t4_cdevsw;
1299 mda.mda_uid = UID_ROOT;
1300 mda.mda_gid = GID_WHEEL;
1301 mda.mda_mode = 0600;
1302 mda.mda_si_drv1 = sc;
1303 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
1304 if (rc != 0)
1305 device_printf(dev, "failed to create nexus char device: %d.\n",
1306 rc);
1308 /* Go no further if recovery mode has been requested. */
1309 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
1310 device_printf(dev, "recovery mode.\n");
1314 #if defined(__i386__)
1315 if ((cpu_feature & CPUID_CX8) == 0) {
1316 device_printf(dev, "64 bit atomics not available.\n");
1322 /* Contact the firmware and try to become the master driver. */
1323 rc = contact_firmware(sc);
1325 goto done; /* error message displayed already */
1326 MPASS(sc->flags & FW_OK);
1328 rc = get_params__pre_init(sc);
1330 goto done; /* error message displayed already */
1332 if (sc->flags & MASTER_PF) {
1333 rc = partition_resources(sc);
1335 goto done; /* error message displayed already */
1338 rc = get_params__post_init(sc);
1340 goto done; /* error message displayed already */
1342 rc = set_params__post_init(sc);
1344 goto done; /* error message displayed already */
1346 rc = t4_map_bar_2(sc);
1348 goto done; /* error message displayed already */
1350 rc = t4_create_dma_tag(sc);
1352 goto done; /* error message displayed already */
1355 * First pass over all the ports - allocate VIs and initialize some
1356 * basic parameters like mac address, port type, etc.
1358 for_each_port(sc, i) {
1359 struct port_info *pi;
1361 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
1364 /* These must be set before t4_port_init */
1368 * XXX: vi[0] is special so we can't delay this allocation until
1369 * pi->nvi's final value is known.
1371 pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
1372 M_ZERO | M_WAITOK);
1374 /*
1375 * Allocate the "main" VI and initialize parameters
1376 * like the MAC address.
1377 */
1378 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
1380 device_printf(dev, "unable to initialize port %d: %d\n",
1381 i, rc);
1382 free(pi->vi, M_CXGBE);
1388 if (is_bt(pi->port_type))
1389 setbit(&sc->bt_map, pi->tx_chan);
1391 MPASS(!isset(&sc->bt_map, pi->tx_chan));
1393 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
1394 device_get_nameunit(dev), i);
1395 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
1396 sc->chan_map[pi->tx_chan] = i;
1398 /*
1399 * The MPS counter for FCS errors doesn't work correctly on the
1400 * T6 so we use the MAC counter here. Which MAC is in use
1401 * depends on the link settings which will be known when the
1402 * link comes up.
1403 */
1407 pi->fcs_reg = t4_port_reg(sc, pi->tx_chan,
1408 A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
1412 /* All VIs on this port share this media. */
1413 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
1414 cxgbe_media_status);
1417 init_link_config(pi);
1418 fixup_link_config(pi);
1419 build_medialist(pi);
1420 if (fixed_ifmedia(pi))
1421 pi->flags |= FIXED_IFMEDIA;
1424 pi->dev = device_add_child(dev, sc->names->ifnet_name,
1425 t4_ifnet_unit(sc, pi));
1426 if (pi->dev == NULL) {
1428 "failed to add device for port %d.\n", i);
1432 pi->vi[0].dev = pi->dev;
1433 device_set_softc(pi->dev, pi);
1437 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
1439 nports = sc->params.nports;
1440 rc = cfg_itype_and_nqueues(sc, &iaq);
1442 goto done; /* error message displayed already */
1444 num_vis = iaq.num_vis;
1445 sc->intr_type = iaq.intr_type;
1446 sc->intr_count = iaq.nirq;
1449 s->nrxq = nports * iaq.nrxq;
1450 s->ntxq = nports * iaq.ntxq;
1452 s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
1453 s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
1455 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
1456 s->neq += nports; /* ctrl queues: 1 per port */
1457 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
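/*
 * Worked example (illustrative): a 2-port adapter with num_vis = 1,
 * iaq.ntxq = iaq.nrxq = 8 and no offload or netmap queues ends up with
 * s->ntxq = s->nrxq = 2 * 8 = 16, s->neq = 16 + 16 + 2 (ctrl) = 34 and
 * s->niq = 16 + 1 (firmware event queue) = 17.
 */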
1458 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1459 if (is_offload(sc) || is_ethoffload(sc)) {
1460 s->nofldtxq = nports * iaq.nofldtxq;
1462 s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
1463 s->neq += s->nofldtxq;
1465 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq),
1466 M_CXGBE, M_ZERO | M_WAITOK);
1470 if (is_offload(sc)) {
1471 s->nofldrxq = nports * iaq.nofldrxq;
1473 s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
1474 s->neq += s->nofldrxq; /* free list */
1475 s->niq += s->nofldrxq;
1477 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1478 M_CXGBE, M_ZERO | M_WAITOK);
1484 if (t4_native_netmap & NN_MAIN_VI) {
1485 s->nnmrxq += nports * iaq.nnmrxq;
1486 s->nnmtxq += nports * iaq.nnmtxq;
1488 if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
1489 s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
1490 s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
1492 s->neq += s->nnmtxq + s->nnmrxq;
1493 s->niq += s->nnmrxq;
1495 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1496 M_CXGBE, M_ZERO | M_WAITOK);
1497 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1498 M_CXGBE, M_ZERO | M_WAITOK);
1500 MPASS(s->niq <= s->iqmap_sz);
1501 MPASS(s->neq <= s->eqmap_sz);
1503 s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
1505 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1507 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1509 s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
1511 s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
1514 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1517 t4_init_l2t(sc, M_WAITOK);
1518 t4_init_smt(sc, M_WAITOK);
1519 t4_init_tx_sched(sc);
1520 t4_init_atid_table(sc);
1522 t4_init_etid_table(sc);
1525 t4_init_clip_table(sc);
1527 if (sc->vres.key.size != 0)
1528 sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
1529 sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
1532 * Second pass over the ports. This time we know the number of rx and
1533 * tx queues that each port should get.
1536 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1543 nm_rqidx = nm_tqidx = 0;
1545 for_each_port(sc, i) {
1546 struct port_info *pi = sc->port[i];
1553 for_each_vi(pi, j, vi) {
1556 vi->first_intr = -1;
1557 vi->qsize_rxq = t4_qsize_rxq;
1558 vi->qsize_txq = t4_qsize_txq;
1560 vi->first_rxq = rqidx;
1561 vi->first_txq = tqidx;
1562 vi->tmr_idx = t4_tmr_idx;
1563 vi->pktc_idx = t4_pktc_idx;
1564 vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
1565 vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
1570 if (j == 0 && vi->ntxq > 1)
1571 vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
1573 vi->rsrv_noflowq = 0;
1575 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1576 vi->first_ofld_txq = ofld_tqidx;
1577 vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
1578 ofld_tqidx += vi->nofldtxq;
1581 vi->ofld_tmr_idx = t4_tmr_idx_ofld;
1582 vi->ofld_pktc_idx = t4_pktc_idx_ofld;
1583 vi->first_ofld_rxq = ofld_rqidx;
1584 vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
1586 ofld_rqidx += vi->nofldrxq;
1589 vi->first_nm_rxq = nm_rqidx;
1590 vi->first_nm_txq = nm_tqidx;
1592 vi->nnmrxq = iaq.nnmrxq;
1593 vi->nnmtxq = iaq.nnmtxq;
1595 vi->nnmrxq = iaq.nnmrxq_vi;
1596 vi->nnmtxq = iaq.nnmtxq_vi;
1598 nm_rqidx += vi->nnmrxq;
1599 nm_tqidx += vi->nnmtxq;
1604 rc = t4_setup_intr_handlers(sc);
1607 "failed to setup interrupt handlers: %d\n", rc);
1611 rc = bus_generic_probe(dev);
1613 device_printf(dev, "failed to probe child drivers: %d\n", rc);
1618 * Ensure thread-safe mailbox access (in debug builds).
1620 * So far this was the only thread accessing the mailbox but various
1621 * ifnets and sysctls are about to be created and their handlers/ioctls
1622 * will access the mailbox from different threads.
1624 sc->flags |= CHK_MBOX_ACCESS;
1626 rc = bus_generic_attach(dev);
1629 "failed to attach all child ports: %d\n", rc);
1632 t4_calibration_start(sc);
1635 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1636 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1637 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1638 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1639 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1643 notify_siblings(dev, 0);
1646 if (rc != 0 && sc->cdev) {
1647 /* cdev was created and so cxgbetool works; recover that way. */
1649 "error during attach, adapter is now in recovery mode.\n");
1654 t4_detach_common(dev);
1662 t4_child_location(device_t bus, device_t dev, struct sbuf *sb)
1665 struct port_info *pi;
1668 sc = device_get_softc(bus);
1669 for_each_port(sc, i) {
1671 if (pi != NULL && pi->dev == dev) {
1672 sbuf_printf(sb, "port=%d", pi->port_id);
1680 t4_ready(device_t dev)
1684 sc = device_get_softc(dev);
1685 if (sc->flags & FW_OK)
1691 t4_read_port_device(device_t dev, int port, device_t *child)
1694 struct port_info *pi;
1696 sc = device_get_softc(dev);
1697 if (port < 0 || port >= MAX_NPORTS)
1699 pi = sc->port[port];
1700 if (pi == NULL || pi->dev == NULL)
1707 notify_siblings(device_t dev, int detaching)
1713 for (i = 0; i < PCI_FUNCMAX; i++) {
1714 if (i == pci_get_function(dev))
1716 sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
1717 pci_get_slot(dev), i);
1718 if (sibling == NULL || !device_is_attached(sibling))
1721 error = T4_DETACH_CHILD(sibling);
1723 (void)T4_ATTACH_CHILD(sibling);
1734 t4_detach(device_t dev)
1738 rc = notify_siblings(dev, 1);
1741 "failed to detach sibling devices: %d\n", rc);
1745 return (t4_detach_common(dev));
1749 t4_detach_common(device_t dev)
1752 struct port_info *pi;
1755 sc = device_get_softc(dev);
1758 rc = t4_deactivate_all_uld(sc);
1761 "failed to detach upper layer drivers: %d\n", rc);
1767 destroy_dev(sc->cdev);
1771 sx_xlock(&t4_list_lock);
1772 SLIST_REMOVE(&t4_list, sc, adapter, link);
1773 sx_xunlock(&t4_list_lock);
1775 sc->flags &= ~CHK_MBOX_ACCESS;
1776 if (sc->flags & FULL_INIT_DONE) {
1777 if (!(sc->flags & IS_VF))
1778 t4_intr_disable(sc);
1781 if (device_is_attached(dev)) {
1782 rc = bus_generic_detach(dev);
1785 "failed to detach child devices: %d\n", rc);
1790 for (i = 0; i < sc->intr_count; i++)
1791 t4_free_irq(sc, &sc->irq[i]);
1793 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1794 t4_free_tx_sched(sc);
1796 for (i = 0; i < MAX_NPORTS; i++) {
1799 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1801 device_delete_child(dev, pi->dev);
1803 mtx_destroy(&pi->pi_lock);
1804 free(pi->vi, M_CXGBE);
1808 callout_stop(&sc->cal_callout);
1809 callout_drain(&sc->cal_callout);
1810 device_delete_children(dev);
1811 sysctl_ctx_free(&sc->ctx);
1812 adapter_full_uninit(sc);
1814 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1815 t4_fw_bye(sc, sc->mbox);
1817 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1818 pci_release_msi(dev);
1821 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1825 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1829 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1833 t4_free_l2t(sc->l2t);
1835 t4_free_smt(sc->smt);
1836 t4_free_atid_table(sc);
1838 t4_free_etid_table(sc);
1841 vmem_destroy(sc->key_map);
1843 t4_destroy_clip_table(sc);
1846 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1847 free(sc->sge.ofld_txq, M_CXGBE);
1850 free(sc->sge.ofld_rxq, M_CXGBE);
1853 free(sc->sge.nm_rxq, M_CXGBE);
1854 free(sc->sge.nm_txq, M_CXGBE);
1856 free(sc->irq, M_CXGBE);
1857 free(sc->sge.rxq, M_CXGBE);
1858 free(sc->sge.txq, M_CXGBE);
1859 free(sc->sge.ctrlq, M_CXGBE);
1860 free(sc->sge.iqmap, M_CXGBE);
1861 free(sc->sge.eqmap, M_CXGBE);
1862 free(sc->tids.ftid_tab, M_CXGBE);
1863 free(sc->tids.hpftid_tab, M_CXGBE);
1864 free_hftid_hash(&sc->tids);
1865 free(sc->tids.tid_tab, M_CXGBE);
1866 t4_destroy_dma_tag(sc);
1868 callout_drain(&sc->ktls_tick);
1869 callout_drain(&sc->sfl_callout);
1870 if (mtx_initialized(&sc->tids.ftid_lock)) {
1871 mtx_destroy(&sc->tids.ftid_lock);
1872 cv_destroy(&sc->tids.ftid_cv);
1874 if (mtx_initialized(&sc->tids.atid_lock))
1875 mtx_destroy(&sc->tids.atid_lock);
1876 if (mtx_initialized(&sc->ifp_lock))
1877 mtx_destroy(&sc->ifp_lock);
1879 if (rw_initialized(&sc->policy_lock)) {
1880 rw_destroy(&sc->policy_lock);
1882 if (sc->policy != NULL)
1883 free_offload_policy(sc->policy);
1887 for (i = 0; i < NUM_MEMWIN; i++) {
1888 struct memwin *mw = &sc->memwin[i];
1890 if (rw_initialized(&mw->mw_lock))
1891 rw_destroy(&mw->mw_lock);
1894 mtx_destroy(&sc->sfl_lock);
1895 mtx_destroy(&sc->reg_lock);
1896 mtx_destroy(&sc->sc_lock);
1898 bzero(sc, sizeof(*sc));
1904 ok_to_reset(struct adapter *sc)
1906 struct tid_info *t = &sc->tids;
1907 struct port_info *pi;
1910 int caps = IFCAP_TOE | IFCAP_NETMAP | IFCAP_TXRTLMT;
1913 caps |= IFCAP_TXTLS;
1915 ASSERT_SYNCHRONIZED_OP(sc);
1916 MPASS(!(sc->flags & IS_VF));
1918 for_each_port(sc, i) {
1920 for_each_vi(pi, j, vi) {
1921 if (if_getcapenable(vi->ifp) & caps)
1926 if (atomic_load_int(&t->tids_in_use) > 0)
1928 if (atomic_load_int(&t->stids_in_use) > 0)
1930 if (atomic_load_int(&t->atids_in_use) > 0)
1932 if (atomic_load_int(&t->ftids_in_use) > 0)
1934 if (atomic_load_int(&t->hpftids_in_use) > 0)
1936 if (atomic_load_int(&t->etids_in_use) > 0)
1943 stop_adapter(struct adapter *sc)
1945 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED)))
1946 return (1); /* Already stopped. */
1947 return (t4_shutdown_adapter(sc));
1951 t4_suspend(device_t dev)
1953 struct adapter *sc = device_get_softc(dev);
1954 struct port_info *pi;
1957 struct sge_rxq *rxq;
1958 struct sge_txq *txq;
1959 struct sge_wrq *wrq;
1961 struct sge_ofld_rxq *ofld_rxq;
1963 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1964 struct sge_ofld_txq *ofld_txq;
1968 CH_ALERT(sc, "suspend requested\n");
1970 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus");
1974 /* XXX: Can the kernel call suspend repeatedly without resume? */
1975 MPASS(!hw_off_limits(sc));
1977 if (!ok_to_reset(sc)) {
1978 /* XXX: should list what resource is preventing suspend. */
1979 CH_ERR(sc, "not safe to suspend.\n");
1984 /* No more DMA or interrupts. */
1987 /* Quiesce all activity. */
1988 for_each_port(sc, i) {
1990 pi->vxlan_tcam_entry = false;
1993 if (pi->up_vis > 0) {
1995 * t4_shutdown_adapter has already shut down all the
1996 * PHYs but it also disables interrupts and DMA so there
1997 * won't be a link interrupt. So we update the state
1998 * manually and inform the kernel.
2000 pi->link_cfg.link_ok = false;
2001 t4_os_link_changed(pi);
2005 for_each_vi(pi, j, vi) {
2006 vi->xact_addr_filt = -1;
2007 mtx_lock(&vi->tick_mtx);
2008 vi->flags |= VI_SKIP_STATS;
2009 mtx_unlock(&vi->tick_mtx);
2010 if (!(vi->flags & VI_INIT_DONE))
2014 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2015 mtx_lock(&vi->tick_mtx);
2016 callout_stop(&vi->tick);
2017 mtx_unlock(&vi->tick_mtx);
2018 callout_drain(&vi->tick);
2022 * Note that the HW is not available.
2024 for_each_txq(vi, k, txq) {
2026 txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED);
2029 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2030 for_each_ofld_txq(vi, k, ofld_txq) {
2031 ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
2034 for_each_rxq(vi, k, rxq) {
2035 rxq->iq.flags &= ~IQ_HW_ALLOCATED;
2037 #if defined(TCP_OFFLOAD)
2038 for_each_ofld_rxq(vi, k, ofld_rxq) {
2039 ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED;
2046 if (sc->flags & FULL_INIT_DONE) {
2048 wrq = &sc->sge.ctrlq[i];
2049 wrq->eq.flags &= ~EQ_HW_ALLOCATED;
2053 if (sc->flags & FULL_INIT_DONE) {
2054 /* Firmware event queue */
2055 sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED;
2056 quiesce_iq_fl(sc, &sc->sge.fwq, NULL);
2059 /* Stop calibration */
2060 callout_stop(&sc->cal_callout);
2061 callout_drain(&sc->cal_callout);
2063 /* Mark the adapter totally off limits. */
2064 mtx_lock(&sc->reg_lock);
2065 atomic_set_int(&sc->error_flags, HW_OFF_LIMITS);
2066 sc->flags &= ~(FW_OK | MASTER_PF);
2067 sc->reset_thread = NULL;
2068 mtx_unlock(&sc->reg_lock);
2070 if (t4_clock_gate_on_suspend) {
2071 t4_set_reg_field(sc, A_PMU_PART_CG_PWRMODE, F_MA_PART_CGEN |
2072 F_LE_PART_CGEN | F_EDC1_PART_CGEN | F_EDC0_PART_CGEN |
2073 F_TP_PART_CGEN | F_PDP_PART_CGEN | F_SGE_PART_CGEN, 0);
2076 CH_ALERT(sc, "suspend completed.\n");
2078 end_synchronized_op(sc, 0);
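/*
 * Snapshot of the capabilities and parameters negotiated with the firmware,
 * taken before a reset so that the post-reset configuration can be checked
 * against it.
 */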
2082 struct adapter_pre_reset_state {
2086 uint16_t switchcaps;
2090 uint16_t cryptocaps;
2097 struct adapter_params params;
2098 struct t4_virt_res vres;
2099 struct tid_info tids;
2108 save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
2111 ASSERT_SYNCHRONIZED_OP(sc);
2113 o->flags = sc->flags;
2115 o->nbmcaps = sc->nbmcaps;
2116 o->linkcaps = sc->linkcaps;
2117 o->switchcaps = sc->switchcaps;
2118 o->niccaps = sc->niccaps;
2119 o->toecaps = sc->toecaps;
2120 o->rdmacaps = sc->rdmacaps;
2121 o->cryptocaps = sc->cryptocaps;
2122 o->iscsicaps = sc->iscsicaps;
2123 o->fcoecaps = sc->fcoecaps;
2125 o->cfcsum = sc->cfcsum;
2126 MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file));
2127 memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file));
2129 o->params = sc->params;
2134 o->rawf_base = sc->rawf_base;
2135 o->nrawf = sc->nrawf;
2139 compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
2143 ASSERT_SYNCHRONIZED_OP(sc);
2146 #define COMPARE_CAPS(c) do { \
2147 if (o->c##caps != sc->c##caps) { \
2148 CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \
2155 COMPARE_CAPS(switch);
2159 COMPARE_CAPS(crypto);
2160 COMPARE_CAPS(iscsi);
2164 /* Firmware config file */
2165 if (o->cfcsum != sc->cfcsum) {
2166 CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file,
2167 o->cfcsum, sc->cfg_file, sc->cfcsum);
2171 #define COMPARE_PARAM(p, name) do { \
2172 if (o->p != sc->p) { \
2173 CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \
2177 COMPARE_PARAM(sge.iq_start, iq_start);
2178 COMPARE_PARAM(sge.eq_start, eq_start);
2179 COMPARE_PARAM(tids.ftid_base, ftid_base);
2180 COMPARE_PARAM(tids.ftid_end, ftid_end);
2181 COMPARE_PARAM(tids.nftids, nftids);
2182 COMPARE_PARAM(vres.l2t.start, l2t_start);
2183 COMPARE_PARAM(vres.l2t.size, l2t_size);
2184 COMPARE_PARAM(sge.iqmap_sz, iqmap_sz);
2185 COMPARE_PARAM(sge.eqmap_sz, eqmap_sz);
2186 COMPARE_PARAM(tids.tid_base, tid_base);
2187 COMPARE_PARAM(tids.hpftid_base, hpftid_base);
2188 COMPARE_PARAM(tids.hpftid_end, hpftid_end);
2189 COMPARE_PARAM(tids.nhpftids, nhpftids);
2190 COMPARE_PARAM(rawf_base, rawf_base);
2191 COMPARE_PARAM(nrawf, nrawf);
2192 COMPARE_PARAM(params.mps_bg_map, mps_bg_map);
2193 COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support);
2194 COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl);
2195 COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support);
2196 COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr);
2197 COMPARE_PARAM(tids.ntids, ntids);
2198 COMPARE_PARAM(tids.etid_base, etid_base);
2199 COMPARE_PARAM(tids.etid_end, etid_end);
2200 COMPARE_PARAM(tids.netids, netids);
2201 COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred);
2202 COMPARE_PARAM(params.ethoffload, ethoffload);
2203 COMPARE_PARAM(tids.natids, natids);
2204 COMPARE_PARAM(tids.stid_base, stid_base);
2205 COMPARE_PARAM(vres.ddp.start, ddp_start);
2206 COMPARE_PARAM(vres.ddp.size, ddp_size);
2207 COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred);
2208 COMPARE_PARAM(vres.stag.start, stag_start);
2209 COMPARE_PARAM(vres.stag.size, stag_size);
2210 COMPARE_PARAM(vres.rq.start, rq_start);
2211 COMPARE_PARAM(vres.rq.size, rq_size);
2212 COMPARE_PARAM(vres.pbl.start, pbl_start);
2213 COMPARE_PARAM(vres.pbl.size, pbl_size);
2214 COMPARE_PARAM(vres.qp.start, qp_start);
2215 COMPARE_PARAM(vres.qp.size, qp_size);
2216 COMPARE_PARAM(vres.cq.start, cq_start);
2217 COMPARE_PARAM(vres.cq.size, cq_size);
2218 COMPARE_PARAM(vres.ocq.start, ocq_start);
2219 COMPARE_PARAM(vres.ocq.size, ocq_size);
2220 COMPARE_PARAM(vres.srq.start, srq_start);
2221 COMPARE_PARAM(vres.srq.size, srq_size);
2222 COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp);
2223 COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter);
2224 COMPARE_PARAM(vres.iscsi.start, iscsi_start);
2225 COMPARE_PARAM(vres.iscsi.size, iscsi_size);
2226 COMPARE_PARAM(vres.key.start, key_start);
2227 COMPARE_PARAM(vres.key.size, key_size);
2228 #undef COMPARE_PARAM
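/*
 * Resume: re-establish contact with the firmware, verify that the device
 * came back with the same capabilities and parameters, and then rebuild
 * the VIs, queues, MAC settings, and link state.
 */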
2234 t4_resume(device_t dev)
2236 struct adapter *sc = device_get_softc(dev);
2237 struct adapter_pre_reset_state *old_state = NULL;
2238 struct port_info *pi;
2241 struct sge_txq *txq;
2244 CH_ALERT(sc, "resume requested.\n");
2246 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res");
2249 MPASS(hw_off_limits(sc));
2250 MPASS((sc->flags & FW_OK) == 0);
2251 MPASS((sc->flags & MASTER_PF) == 0);
2252 MPASS(sc->reset_thread == NULL);
2253 sc->reset_thread = curthread;
2255 /* Register access is expected to work by the time we're here. */
2256 if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) {
2257 CH_ERR(sc, "%s: can't read device registers\n", __func__);
2262 /* Note that HW_OFF_LIMITS is cleared a bit later. */
2263 atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR | ADAP_STOPPED);
2265 /* Restore memory window. */
2268 /* Go no further if recovery mode has been requested. */
2269 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
2270 CH_ALERT(sc, "recovery mode on resume.\n");
2272 mtx_lock(&sc->reg_lock);
2273 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
2274 mtx_unlock(&sc->reg_lock);
2278 old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK);
2279 save_caps_and_params(sc, old_state);
2281 /* Reestablish contact with firmware and become the primary PF. */
2282 rc = contact_firmware(sc);
2284 goto done; /* error message displayed already */
2285 MPASS(sc->flags & FW_OK);
2287 if (sc->flags & MASTER_PF) {
2288 rc = partition_resources(sc);
2290 goto done; /* error message displayed already */
2293 rc = get_params__post_init(sc);
2295 goto done; /* error message displayed already */
2297 rc = set_params__post_init(sc);
2299 goto done; /* error message displayed already */
2301 rc = compare_caps_and_params(sc, old_state);
2303 goto done; /* error message displayed already */
2305 for_each_port(sc, i) {
2308 MPASS(pi->vi != NULL);
2309 MPASS(pi->vi[0].dev == pi->dev);
2311 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
2314 "failed to re-initialize port %d: %d\n", i, rc);
2317 MPASS(sc->chan_map[pi->tx_chan] == i);
2320 fixup_link_config(pi);
2321 build_medialist(pi);
2323 for_each_vi(pi, j, vi) {
2326 rc = alloc_extra_vi(sc, pi, vi);
2329 "failed to re-allocate extra VI: %d\n", rc);
2336 * Interrupts and queues are about to be enabled and other threads will
2337 * want to access the hardware too. It is safe to do so. Note that
2338 * this thread is still in the middle of a synchronized_op.
2340 mtx_lock(&sc->reg_lock);
2341 atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
2342 mtx_unlock(&sc->reg_lock);
2344 if (sc->flags & FULL_INIT_DONE) {
2345 rc = adapter_full_init(sc);
2347 CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc);
2351 if (sc->vxlan_refcount > 0)
2352 enable_vxlan_rx(sc);
2354 for_each_port(sc, i) {
2356 for_each_vi(pi, j, vi) {
2357 mtx_lock(&vi->tick_mtx);
2358 vi->flags &= ~VI_SKIP_STATS;
2359 mtx_unlock(&vi->tick_mtx);
2360 if (!(vi->flags & VI_INIT_DONE))
2362 rc = vi_full_init(vi);
2364 CH_ERR(vi, "failed to re-initialize "
2365 "interface: %d\n", rc);
2370 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
2373 * Note that we do not setup multicast addresses
2374 * in the first pass. This ensures that the
2375 * unicast DMACs for all VIs on all ports get an MPS TCAM entry.
2378 rc = update_mac_settings(ifp, XGMAC_ALL &
2381 CH_ERR(vi, "failed to re-configure MAC: %d\n", rc);
2384 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true,
2387 CH_ERR(vi, "failed to re-enable VI: %d\n", rc);
2390 for_each_txq(vi, k, txq) {
2392 txq->eq.flags |= EQ_ENABLED;
2395 mtx_lock(&vi->tick_mtx);
2396 callout_schedule(&vi->tick, hz);
2397 mtx_unlock(&vi->tick_mtx);
2400 if (pi->up_vis > 0) {
2401 t4_update_port_info(pi);
2402 fixup_link_config(pi);
2403 build_medialist(pi);
2404 apply_link_config(pi);
2405 if (pi->link_cfg.link_ok)
2406 t4_os_link_changed(pi);
2411 /* Now reprogram the L2 multicast addresses. */
2412 for_each_port(sc, i) {
2414 for_each_vi(pi, j, vi) {
2415 if (!(vi->flags & VI_INIT_DONE))
2418 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
2420 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2422 CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc);
2423 rc = 0; /* carry on */
2429 /* Reset all calibration */
2430 t4_calibration_start(sc);
2435 CH_ALERT(sc, "resume completed.\n");
2437 end_synchronized_op(sc, 0);
2438 free(old_state, M_CXGBE);
2443 t4_reset_prepare(device_t dev, device_t child)
2445 struct adapter *sc = device_get_softc(dev);
2447 CH_ALERT(sc, "reset_prepare.\n");
2452 t4_reset_post(device_t dev, device_t child)
2454 struct adapter *sc = device_get_softc(dev);
2456 CH_ALERT(sc, "reset_post.\n");
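/*
 * Resets the adapter via the parent bus (BUS_RESET_CHILD), which ends up
 * suspending, resetting, and resuming the device.  The reset is considered
 * successful only if the incarnation number advanced and no error flags
 * are set afterwards.
 */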
2461 reset_adapter(struct adapter *sc)
2463 int rc, oldinc, error_flags;
2465 CH_ALERT(sc, "reset requested.\n");
2467 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1");
2471 if (hw_off_limits(sc)) {
2472 CH_ERR(sc, "adapter is suspended, use resume (not reset).\n");
2477 if (!ok_to_reset(sc)) {
2478 /* XXX: should list what resource is preventing reset. */
2479 CH_ERR(sc, "not safe to reset.\n");
2485 oldinc = sc->incarnation;
2486 end_synchronized_op(sc, 0);
2488 return (rc); /* Error logged already. */
2490 atomic_add_int(&sc->num_resets, 1);
2492 rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0);
2495 CH_ERR(sc, "bus_reset_child failed: %d.\n", rc);
2497 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst2");
2500 error_flags = atomic_load_int(&sc->error_flags);
2501 if (sc->incarnation > oldinc && error_flags == 0) {
2502 CH_ALERT(sc, "bus_reset_child succeeded.\n");
2504 CH_ERR(sc, "adapter did not reset properly, flags "
2505 "0x%08x, error_flags 0x%08x.\n", sc->flags,
2509 end_synchronized_op(sc, 0);
2516 reset_adapter_task(void *arg, int pending)
2518 /* XXX: t4_async_event here? */
2523 cxgbe_probe(device_t dev)
2526 struct port_info *pi = device_get_softc(dev);
2528 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
2529 device_set_desc_copy(dev, buf);
2531 return (BUS_PROBE_DEFAULT);
2534 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
2535 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
2536 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
2537 IFCAP_HWRXTSTMP | IFCAP_MEXTPG)
2538 #define T4_CAP_ENABLE (T4_CAP)
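/*
 * Common attach code for a port's main VI and any extra VIs: creates the
 * sysctl queue nodes, allocates and configures the ifnet (capabilities,
 * TSO limits, counters), and registers a pfil head for inbound packets.
 */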
2541 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
2545 struct sysctl_ctx_list *ctx = &vi->ctx;
2546 struct sysctl_oid_list *children;
2547 struct pfil_head_args pa;
2548 struct adapter *sc = vi->adapter;
2550 sysctl_ctx_init(ctx);
2551 children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev));
2552 vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq",
2553 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues");
2554 vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq",
2555 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues");
2557 vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq",
2558 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues");
2559 vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq",
2560 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues");
2563 vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq",
2564 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues");
2566 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2567 vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq",
2568 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues");
2571 vi->xact_addr_filt = -1;
2572 mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF);
2573 callout_init_mtx(&vi->tick, &vi->tick_mtx, 0);
2574 if (sc->flags & IS_VF || t4_tx_vm_wr != 0)
2575 vi->flags |= TX_USES_VM_WR;
2577 /* Allocate an ifnet and set it up */
2578 ifp = if_alloc_dev(IFT_ETHER, dev);
2580 device_printf(dev, "Cannot allocate ifnet\n");
2584 if_setsoftc(ifp, vi);
2586 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2587 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2589 if_setinitfn(ifp, cxgbe_init);
2590 if_setioctlfn(ifp, cxgbe_ioctl);
2591 if_settransmitfn(ifp, cxgbe_transmit);
2592 if_setqflushfn(ifp, cxgbe_qflush);
2593 if (vi->pi->nvi > 1 || sc->flags & IS_VF)
2594 if_setgetcounterfn(ifp, vi_get_counter);
2596 if_setgetcounterfn(ifp, cxgbe_get_counter);
2597 #if defined(KERN_TLS) || defined(RATELIMIT)
2598 if_setsndtagallocfn(ifp, cxgbe_snd_tag_alloc);
2601 if_setratelimitqueryfn(ifp, cxgbe_ratelimit_query);
2604 if_setcapabilities(ifp, T4_CAP);
2605 if_setcapenable(ifp, T4_CAP_ENABLE);
2606 if_sethwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
2607 CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2608 if (chip_id(sc) >= CHELSIO_T6) {
2609 if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
2610 if_setcapenablebit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
2611 if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
2612 CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
2613 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN, 0);
2617 if (vi->nofldrxq != 0)
2618 if_setcapabilitiesbit(ifp, IFCAP_TOE, 0);
2621 if (is_ethoffload(sc) && vi->nofldtxq != 0) {
2622 if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT, 0);
2623 if_setcapenablebit(ifp, IFCAP_TXRTLMT, 0);
2627 if_sethwtsomax(ifp, IP_MAXPACKET);
2628 if (vi->flags & TX_USES_VM_WR)
2629 if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_VM_TSO);
2631 if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_TSO);
2633 if (is_ethoffload(sc) && vi->nofldtxq != 0)
2634 if_sethwtsomaxsegcount(ifp, TX_SGL_SEGS_EO_TSO);
2636 if_sethwtsomaxsegsize(ifp, 65536);
2639 if_setcapabilitiesbit(ifp, IFCAP_TXTLS, 0);
2640 if (sc->flags & KERN_TLS_ON || !is_t6(sc))
2641 if_setcapenablebit(ifp, IFCAP_TXTLS, 0);
2645 ether_ifattach(ifp, vi->hw_addr);
2647 if (vi->nnmrxq != 0)
2648 cxgbe_nm_attach(vi);
2650 sb = sbuf_new_auto();
2651 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
2652 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
2653 switch (if_getcapabilities(ifp) & (IFCAP_TOE | IFCAP_TXRTLMT)) {
2655 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
2657 case IFCAP_TOE | IFCAP_TXRTLMT:
2658 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
2661 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
2666 if (if_getcapabilities(ifp) & IFCAP_TOE)
2667 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
2670 if (if_getcapabilities(ifp) & IFCAP_NETMAP)
2671 sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
2672 vi->nnmtxq, vi->nnmrxq);
2675 device_printf(dev, "%s\n", sbuf_data(sb));
2680 pa.pa_version = PFIL_VERSION;
2681 pa.pa_flags = PFIL_IN;
2682 pa.pa_type = PFIL_TYPE_ETHERNET;
2683 pa.pa_headname = if_name(ifp);
2684 vi->pfil = pfil_head_register(&pa);
2690 cxgbe_attach(device_t dev)
2692 struct port_info *pi = device_get_softc(dev);
2693 struct adapter *sc = pi->adapter;
2697 sysctl_ctx_init(&pi->ctx);
2699 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
2703 for_each_vi(pi, i, vi) {
2706 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
2707 if (vi->dev == NULL) {
2708 device_printf(dev, "failed to add VI %d\n", i);
2711 device_set_softc(vi->dev, vi);
2716 bus_generic_attach(dev);
2722 cxgbe_vi_detach(struct vi_info *vi)
2726 if (vi->pfil != NULL) {
2727 pfil_head_unregister(vi->pfil);
2731 ether_ifdetach(ifp);
2733 /* Let detach proceed even if these fail. */
2735 if (if_getcapabilities(ifp) & IFCAP_NETMAP)
2736 cxgbe_nm_detach(vi);
2738 cxgbe_uninit_synchronized(vi);
2739 callout_drain(&vi->tick);
2740 mtx_destroy(&vi->tick_mtx);
2741 sysctl_ctx_free(&vi->ctx);
2749 cxgbe_detach(device_t dev)
2751 struct port_info *pi = device_get_softc(dev);
2752 struct adapter *sc = pi->adapter;
2755 /* Detach the extra VIs first. */
2756 rc = bus_generic_detach(dev);
2759 device_delete_children(dev);
2761 sysctl_ctx_free(&pi->ctx);
2762 begin_vi_detach(sc, &pi->vi[0]);
2763 if (pi->flags & HAS_TRACEQ) {
2764 sc->traceq = -1; /* cloner should not create ifnet */
2765 t4_tracer_port_detach(sc);
2767 cxgbe_vi_detach(&pi->vi[0]);
2768 ifmedia_removeall(&pi->media);
2769 end_vi_detach(sc, &pi->vi[0]);
2775 cxgbe_init(void *arg)
2777 struct vi_info *vi = arg;
2778 struct adapter *sc = vi->adapter;
2780 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
2782 cxgbe_init_synchronized(vi);
2783 end_synchronized_op(sc, 0);
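/*
 * ifnet ioctl handler.  Operations that touch the hardware run inside a
 * synchronized op and are skipped (or fail) while the hardware is off
 * limits.
 */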
2787 cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
2789 int rc = 0, mtu, flags;
2790 struct vi_info *vi = if_getsoftc(ifp);
2791 struct port_info *pi = vi->pi;
2792 struct adapter *sc = pi->adapter;
2793 struct ifreq *ifr = (struct ifreq *)data;
2799 if (mtu < ETHERMIN || mtu > MAX_MTU)
2802 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
2805 if_setmtu(ifp, mtu);
2806 if (vi->flags & VI_INIT_DONE) {
2807 t4_update_fl_bufsize(ifp);
2808 if (!hw_off_limits(sc) &&
2809 if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2810 rc = update_mac_settings(ifp, XGMAC_MTU);
2812 end_synchronized_op(sc, 0);
2816 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
2820 if (hw_off_limits(sc)) {
2825 if (if_getflags(ifp) & IFF_UP) {
2826 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2827 flags = vi->if_flags;
2828 if ((if_getflags(ifp) ^ flags) &
2829 (IFF_PROMISC | IFF_ALLMULTI)) {
2830 rc = update_mac_settings(ifp,
2831 XGMAC_PROMISC | XGMAC_ALLMULTI);
2834 rc = cxgbe_init_synchronized(vi);
2836 vi->if_flags = if_getflags(ifp);
2837 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2838 rc = cxgbe_uninit_synchronized(vi);
2840 end_synchronized_op(sc, 0);
2845 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
2848 if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2849 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2850 end_synchronized_op(sc, 0);
2854 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
2858 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2859 if (mask & IFCAP_TXCSUM) {
2860 if_togglecapenable(ifp, IFCAP_TXCSUM);
2861 if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
2863 if (IFCAP_TSO4 & if_getcapenable(ifp) &&
2864 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
2865 mask &= ~IFCAP_TSO4;
2866 if_setcapenablebit(ifp, 0, IFCAP_TSO4);
2868 "tso4 disabled due to -txcsum.\n");
2871 if (mask & IFCAP_TXCSUM_IPV6) {
2872 if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
2873 if_togglehwassist(ifp, CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2875 if (IFCAP_TSO6 & if_getcapenable(ifp) &&
2876 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
2877 mask &= ~IFCAP_TSO6;
2878 if_setcapenablebit(ifp, 0, IFCAP_TSO6);
2880 "tso6 disabled due to -txcsum6.\n");
2883 if (mask & IFCAP_RXCSUM)
2884 if_togglecapenable(ifp, IFCAP_RXCSUM);
2885 if (mask & IFCAP_RXCSUM_IPV6)
2886 if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
2889 * Note that we leave CSUM_TSO alone (it is always set). The
2890 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
2891 * sending a TSO request our way, so it's sufficient to toggle IFCAP_TSOx only.
2894 if (mask & IFCAP_TSO4) {
2895 if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
2896 !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
2897 if_printf(ifp, "enable txcsum first.\n");
2901 if_togglecapenable(ifp, IFCAP_TSO4);
2903 if (mask & IFCAP_TSO6) {
2904 if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
2905 !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
2906 if_printf(ifp, "enable txcsum6 first.\n");
2910 if_togglecapenable(ifp, IFCAP_TSO6);
2912 if (mask & IFCAP_LRO) {
2913 #if defined(INET) || defined(INET6)
2915 struct sge_rxq *rxq;
2917 if_togglecapenable(ifp, IFCAP_LRO);
2918 for_each_rxq(vi, i, rxq) {
2919 if (if_getcapenable(ifp) & IFCAP_LRO)
2920 rxq->iq.flags |= IQ_LRO_ENABLED;
2922 rxq->iq.flags &= ~IQ_LRO_ENABLED;
2927 if (mask & IFCAP_TOE) {
2928 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TOE;
2930 rc = toe_capability(vi, enable);
2934 if_togglecapenable(ifp, mask);
2937 if (mask & IFCAP_VLAN_HWTAGGING) {
2938 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2939 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2940 rc = update_mac_settings(ifp, XGMAC_VLANEX);
2942 if (mask & IFCAP_VLAN_MTU) {
2943 if_togglecapenable(ifp, IFCAP_VLAN_MTU);
2945 /* Need to find out how to disable auto-mtu-inflation */
2947 if (mask & IFCAP_VLAN_HWTSO)
2948 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2949 if (mask & IFCAP_VLAN_HWCSUM)
2950 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2952 if (mask & IFCAP_TXRTLMT)
2953 if_togglecapenable(ifp, IFCAP_TXRTLMT);
2955 if (mask & IFCAP_HWRXTSTMP) {
2957 struct sge_rxq *rxq;
2959 if_togglecapenable(ifp, IFCAP_HWRXTSTMP);
2960 for_each_rxq(vi, i, rxq) {
2961 if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP)
2962 rxq->iq.flags |= IQ_RX_TIMESTAMP;
2964 rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
2967 if (mask & IFCAP_MEXTPG)
2968 if_togglecapenable(ifp, IFCAP_MEXTPG);
2971 if (mask & IFCAP_TXTLS) {
2972 int enable = (if_getcapenable(ifp) ^ mask) & IFCAP_TXTLS;
2974 rc = ktls_capability(sc, enable);
2978 if_togglecapenable(ifp, mask & IFCAP_TXTLS);
2981 if (mask & IFCAP_VXLAN_HWCSUM) {
2982 if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM);
2983 if_togglehwassist(ifp, CSUM_INNER_IP6_UDP |
2984 CSUM_INNER_IP6_TCP | CSUM_INNER_IP |
2985 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP);
2987 if (mask & IFCAP_VXLAN_HWTSO) {
2988 if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO);
2989 if_togglehwassist(ifp, CSUM_INNER_IP6_TSO |
2993 #ifdef VLAN_CAPABILITIES
2994 VLAN_CAPABILITIES(ifp);
2997 end_synchronized_op(sc, 0);
3003 rc = ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
3007 struct ifi2creq i2c;
3009 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
3012 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
3016 if (i2c.len > sizeof(i2c.data)) {
3020 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
3023 if (hw_off_limits(sc))
3026 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
3027 i2c.offset, i2c.len, &i2c.data[0]);
3028 end_synchronized_op(sc, 0);
3030 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
3035 rc = ether_ioctl(ifp, cmd, data);
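/*
 * if_transmit method: parse/validate the frame, pick a tx queue based on
 * the mbuf's flowid, and enqueue it on that queue's mp_ring.
 */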
3042 cxgbe_transmit(if_t ifp, struct mbuf *m)
3044 struct vi_info *vi = if_getsoftc(ifp);
3045 struct port_info *pi = vi->pi;
3047 struct sge_txq *txq;
3052 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
3053 #if defined(KERN_TLS) || defined(RATELIMIT)
3054 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
3055 MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
3058 if (__predict_false(pi->link_cfg.link_ok == false)) {
3063 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR);
3064 if (__predict_false(rc != 0)) {
3065 if (__predict_true(rc == EINPROGRESS)) {
3066 /* queued by parse_pkt */
3071 MPASS(m == NULL); /* was freed already */
3072 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
3078 txq = &sc->sge.txq[vi->first_txq];
3079 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
3080 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
3084 rc = mp_ring_enqueue(txq->r, items, 1, 256);
3085 if (__predict_false(rc != 0))
3092 cxgbe_qflush(if_t ifp)
3094 struct vi_info *vi = if_getsoftc(ifp);
3095 struct sge_txq *txq;
3098 /* queues do not exist if !VI_INIT_DONE. */
3099 if (vi->flags & VI_INIT_DONE) {
3100 for_each_txq(vi, i, txq) {
3102 txq->eq.flags |= EQ_QFLUSH;
3104 while (!mp_ring_is_idle(txq->r)) {
3105 mp_ring_check_drainage(txq->r, 4096);
3109 txq->eq.flags &= ~EQ_QFLUSH;
3117 vi_get_counter(if_t ifp, ift_counter c)
3119 struct vi_info *vi = if_getsoftc(ifp);
3120 struct fw_vi_stats_vf *s = &vi->stats;
3122 mtx_lock(&vi->tick_mtx);
3123 vi_refresh_stats(vi);
3124 mtx_unlock(&vi->tick_mtx);
3127 case IFCOUNTER_IPACKETS:
3128 return (s->rx_bcast_frames + s->rx_mcast_frames +
3129 s->rx_ucast_frames);
3130 case IFCOUNTER_IERRORS:
3131 return (s->rx_err_frames);
3132 case IFCOUNTER_OPACKETS:
3133 return (s->tx_bcast_frames + s->tx_mcast_frames +
3134 s->tx_ucast_frames + s->tx_offload_frames);
3135 case IFCOUNTER_OERRORS:
3136 return (s->tx_drop_frames);
3137 case IFCOUNTER_IBYTES:
3138 return (s->rx_bcast_bytes + s->rx_mcast_bytes +
3140 case IFCOUNTER_OBYTES:
3141 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
3142 s->tx_ucast_bytes + s->tx_offload_bytes);
3143 case IFCOUNTER_IMCASTS:
3144 return (s->rx_mcast_frames);
3145 case IFCOUNTER_OMCASTS:
3146 return (s->tx_mcast_frames);
3147 case IFCOUNTER_OQDROPS: {
3151 if (vi->flags & VI_INIT_DONE) {
3153 struct sge_txq *txq;
3155 for_each_txq(vi, i, txq)
3156 drops += counter_u64_fetch(txq->r->dropped);
3164 return (if_get_counter_default(ifp, c));
3169 cxgbe_get_counter(if_t ifp, ift_counter c)
3171 struct vi_info *vi = if_getsoftc(ifp);
3172 struct port_info *pi = vi->pi;
3173 struct port_stats *s = &pi->stats;
3175 mtx_lock(&vi->tick_mtx);
3176 cxgbe_refresh_stats(vi);
3177 mtx_unlock(&vi->tick_mtx);
3180 case IFCOUNTER_IPACKETS:
3181 return (s->rx_frames);
3183 case IFCOUNTER_IERRORS:
3184 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
3185 s->rx_fcs_err + s->rx_len_err);
3187 case IFCOUNTER_OPACKETS:
3188 return (s->tx_frames);
3190 case IFCOUNTER_OERRORS:
3191 return (s->tx_error_frames);
3193 case IFCOUNTER_IBYTES:
3194 return (s->rx_octets);
3196 case IFCOUNTER_OBYTES:
3197 return (s->tx_octets);
3199 case IFCOUNTER_IMCASTS:
3200 return (s->rx_mcast_frames);
3202 case IFCOUNTER_OMCASTS:
3203 return (s->tx_mcast_frames);
3205 case IFCOUNTER_IQDROPS:
3206 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
3207 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
3208 s->rx_trunc3 + pi->tnl_cong_drops);
3210 case IFCOUNTER_OQDROPS: {
3214 if (vi->flags & VI_INIT_DONE) {
3216 struct sge_txq *txq;
3218 for_each_txq(vi, i, txq)
3219 drops += counter_u64_fetch(txq->r->dropped);
3227 return (if_get_counter_default(ifp, c));
3231 #if defined(KERN_TLS) || defined(RATELIMIT)
3233 cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
3234 struct m_snd_tag **pt)
3238 switch (params->hdr.type) {
3240 case IF_SND_TAG_TYPE_RATE_LIMIT:
3241 error = cxgbe_rate_tag_alloc(ifp, params, pt);
3245 case IF_SND_TAG_TYPE_TLS:
3247 struct vi_info *vi = if_getsoftc(ifp);
3249 if (is_t6(vi->pi->adapter))
3250 error = t6_tls_tag_alloc(ifp, params, pt);
3264 * The kernel picks a media from the list we had provided but we still validate
3268 cxgbe_media_change(if_t ifp)
3270 struct vi_info *vi = if_getsoftc(ifp);
3271 struct port_info *pi = vi->pi;
3272 struct ifmedia *ifm = &pi->media;
3273 struct link_config *lc = &pi->link_cfg;
3274 struct adapter *sc = pi->adapter;
3277 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
3281 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
3282 /* ifconfig .. media autoselect */
3283 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
3284 rc = ENOTSUP; /* AN not supported by transceiver */
3287 lc->requested_aneg = AUTONEG_ENABLE;
3288 lc->requested_speed = 0;
3289 lc->requested_fc |= PAUSE_AUTONEG;
3291 lc->requested_aneg = AUTONEG_DISABLE;
3292 lc->requested_speed =
3293 ifmedia_baudrate(ifm->ifm_media) / 1000000;
3294 lc->requested_fc = 0;
3295 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
3296 lc->requested_fc |= PAUSE_RX;
3297 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
3298 lc->requested_fc |= PAUSE_TX;
3300 if (pi->up_vis > 0 && !hw_off_limits(sc)) {
3301 fixup_link_config(pi);
3302 rc = apply_link_config(pi);
3306 end_synchronized_op(sc, 0);
3311 * Base media word (without ETHER, pause, link active, etc.) for the port at the given speed.
3315 port_mword(struct port_info *pi, uint32_t speed)
3318 MPASS(speed & M_FW_PORT_CAP32_SPEED);
3319 MPASS(powerof2(speed));
3321 switch(pi->port_type) {
3322 case FW_PORT_TYPE_BT_SGMII:
3323 case FW_PORT_TYPE_BT_XFI:
3324 case FW_PORT_TYPE_BT_XAUI:
3327 case FW_PORT_CAP32_SPEED_100M:
3329 case FW_PORT_CAP32_SPEED_1G:
3330 return (IFM_1000_T);
3331 case FW_PORT_CAP32_SPEED_10G:
3335 case FW_PORT_TYPE_KX4:
3336 if (speed == FW_PORT_CAP32_SPEED_10G)
3337 return (IFM_10G_KX4);
3339 case FW_PORT_TYPE_CX4:
3340 if (speed == FW_PORT_CAP32_SPEED_10G)
3341 return (IFM_10G_CX4);
3343 case FW_PORT_TYPE_KX:
3344 if (speed == FW_PORT_CAP32_SPEED_1G)
3345 return (IFM_1000_KX);
3347 case FW_PORT_TYPE_KR:
3348 case FW_PORT_TYPE_BP_AP:
3349 case FW_PORT_TYPE_BP4_AP:
3350 case FW_PORT_TYPE_BP40_BA:
3351 case FW_PORT_TYPE_KR4_100G:
3352 case FW_PORT_TYPE_KR_SFP28:
3353 case FW_PORT_TYPE_KR_XLAUI:
3355 case FW_PORT_CAP32_SPEED_1G:
3356 return (IFM_1000_KX);
3357 case FW_PORT_CAP32_SPEED_10G:
3358 return (IFM_10G_KR);
3359 case FW_PORT_CAP32_SPEED_25G:
3360 return (IFM_25G_KR);
3361 case FW_PORT_CAP32_SPEED_40G:
3362 return (IFM_40G_KR4);
3363 case FW_PORT_CAP32_SPEED_50G:
3364 return (IFM_50G_KR2);
3365 case FW_PORT_CAP32_SPEED_100G:
3366 return (IFM_100G_KR4);
3369 case FW_PORT_TYPE_FIBER_XFI:
3370 case FW_PORT_TYPE_FIBER_XAUI:
3371 case FW_PORT_TYPE_SFP:
3372 case FW_PORT_TYPE_QSFP_10G:
3373 case FW_PORT_TYPE_QSA:
3374 case FW_PORT_TYPE_QSFP:
3375 case FW_PORT_TYPE_CR4_QSFP:
3376 case FW_PORT_TYPE_CR_QSFP:
3377 case FW_PORT_TYPE_CR2_QSFP:
3378 case FW_PORT_TYPE_SFP28:
3379 /* Pluggable transceiver */
3380 switch (pi->mod_type) {
3381 case FW_PORT_MOD_TYPE_LR:
3383 case FW_PORT_CAP32_SPEED_1G:
3384 return (IFM_1000_LX);
3385 case FW_PORT_CAP32_SPEED_10G:
3386 return (IFM_10G_LR);
3387 case FW_PORT_CAP32_SPEED_25G:
3388 return (IFM_25G_LR);
3389 case FW_PORT_CAP32_SPEED_40G:
3390 return (IFM_40G_LR4);
3391 case FW_PORT_CAP32_SPEED_50G:
3392 return (IFM_50G_LR2);
3393 case FW_PORT_CAP32_SPEED_100G:
3394 return (IFM_100G_LR4);
3397 case FW_PORT_MOD_TYPE_SR:
3399 case FW_PORT_CAP32_SPEED_1G:
3400 return (IFM_1000_SX);
3401 case FW_PORT_CAP32_SPEED_10G:
3402 return (IFM_10G_SR);
3403 case FW_PORT_CAP32_SPEED_25G:
3404 return (IFM_25G_SR);
3405 case FW_PORT_CAP32_SPEED_40G:
3406 return (IFM_40G_SR4);
3407 case FW_PORT_CAP32_SPEED_50G:
3408 return (IFM_50G_SR2);
3409 case FW_PORT_CAP32_SPEED_100G:
3410 return (IFM_100G_SR4);
3413 case FW_PORT_MOD_TYPE_ER:
3414 if (speed == FW_PORT_CAP32_SPEED_10G)
3415 return (IFM_10G_ER);
3417 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
3418 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
3420 case FW_PORT_CAP32_SPEED_1G:
3421 return (IFM_1000_CX);
3422 case FW_PORT_CAP32_SPEED_10G:
3423 return (IFM_10G_TWINAX);
3424 case FW_PORT_CAP32_SPEED_25G:
3425 return (IFM_25G_CR);
3426 case FW_PORT_CAP32_SPEED_40G:
3427 return (IFM_40G_CR4);
3428 case FW_PORT_CAP32_SPEED_50G:
3429 return (IFM_50G_CR2);
3430 case FW_PORT_CAP32_SPEED_100G:
3431 return (IFM_100G_CR4);
3434 case FW_PORT_MOD_TYPE_LRM:
3435 if (speed == FW_PORT_CAP32_SPEED_10G)
3436 return (IFM_10G_LRM);
3438 case FW_PORT_MOD_TYPE_NA:
3439 MPASS(0); /* Not pluggable? */
3441 case FW_PORT_MOD_TYPE_ERROR:
3442 case FW_PORT_MOD_TYPE_UNKNOWN:
3443 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
3445 case FW_PORT_MOD_TYPE_NONE:
3449 case FW_PORT_TYPE_NONE:
3453 return (IFM_UNKNOWN);
3457 cxgbe_media_status(if_t ifp, struct ifmediareq *ifmr)
3459 struct vi_info *vi = if_getsoftc(ifp);
3460 struct port_info *pi = vi->pi;
3461 struct adapter *sc = pi->adapter;
3462 struct link_config *lc = &pi->link_cfg;
3464 if (begin_synchronized_op(sc, vi , SLEEP_OK | INTR_OK, "t4med") != 0)
3468 if (pi->up_vis == 0 && !hw_off_limits(sc)) {
3470 * If all the interfaces are administratively down the firmware
3471 * does not report transceiver changes. Refresh port info here
3472 * so that ifconfig displays accurate ifmedia at all times.
3473 * This is the only reason we have a synchronized op in this
3474 * function. Just PORT_LOCK would have been enough otherwise.
3476 t4_update_port_info(pi);
3477 build_medialist(pi);
3481 ifmr->ifm_status = IFM_AVALID;
3482 if (lc->link_ok == false)
3484 ifmr->ifm_status |= IFM_ACTIVE;
3487 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3488 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
3489 if (lc->fc & PAUSE_RX)
3490 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
3491 if (lc->fc & PAUSE_TX)
3492 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
3493 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
3496 end_synchronized_op(sc, 0);
3500 vcxgbe_probe(device_t dev)
3503 struct vi_info *vi = device_get_softc(dev);
3505 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
3507 device_set_desc_copy(dev, buf);
3509 return (BUS_PROBE_DEFAULT);
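/*
 * Allocates a firmware virtual interface for an extra VI (index > 0 within
 * the port) and looks up its RSS slice, if it was given one.
 */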
3513 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
3515 int func, index, rc;
3516 uint32_t param, val;
3518 ASSERT_SYNCHRONIZED_OP(sc);
3520 index = vi - pi->vi;
3521 MPASS(index > 0); /* This function deals with _extra_ VIs only */
3522 KASSERT(index < nitems(vi_mac_funcs),
3523 ("%s: VI %s doesn't have a MAC func", __func__,
3524 device_get_nameunit(vi->dev)));
3525 func = vi_mac_funcs[index];
3526 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
3527 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
3529 CH_ERR(vi, "failed to allocate virtual interface %d "
3530 "for port %d: %d\n", index, pi->port_id, -rc);
3535 if (vi->rss_size == 1) {
3537 * This VI didn't get a slice of the RSS table. Reduce the
3538 * number of VIs being created (hw.cxgbe.num_vis) or modify the
3539 * configuration file (nvi, rssnvi for this PF) if this is a problem.
3542 device_printf(vi->dev, "RSS table not available.\n");
3543 vi->rss_base = 0xffff;
3548 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3549 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
3550 V_FW_PARAMS_PARAM_YZ(vi->viid);
3551 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3553 vi->rss_base = 0xffff;
3555 MPASS((val >> 16) == vi->rss_size);
3556 vi->rss_base = val & 0xffff;
3563 vcxgbe_attach(device_t dev)
3566 struct port_info *pi;
3570 vi = device_get_softc(dev);
3574 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
3577 rc = alloc_extra_vi(sc, pi, vi);
3578 end_synchronized_op(sc, 0);
3582 rc = cxgbe_vi_attach(dev, vi);
3584 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
3591 vcxgbe_detach(device_t dev)
3596 vi = device_get_softc(dev);
3599 begin_vi_detach(sc, vi);
3600 cxgbe_vi_detach(vi);
3601 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
3602 end_vi_detach(sc, vi);
3607 static struct callout fatal_callout;
3608 static struct taskqueue *reset_tq;
3611 delayed_panic(void *arg)
3613 struct adapter *sc = arg;
3615 panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
3619 fatal_error_task(void *arg, int pending)
3621 struct adapter *sc = arg;
3627 if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
3633 if (t4_reset_on_fatal_err) {
3634 CH_ALERT(sc, "resetting on fatal error.\n");
3635 rc = reset_adapter(sc);
3636 if (rc == 0 && t4_panic_on_fatal_err) {
3637 CH_ALERT(sc, "reset was successful, "
3638 "system will NOT panic.\n");
3643 if (t4_panic_on_fatal_err) {
3644 CH_ALERT(sc, "panicking on fatal error (after 30s).\n");
3645 callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
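/*
 * Fatal error entry point.  Only the first caller logs the error and
 * schedules the fatal error task; subsequent calls are no-ops.
 */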
3650 t4_fatal_err(struct adapter *sc, bool fw_error)
3652 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
3655 if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_FATAL_ERR)))
3659 * We are here because of a firmware error/timeout and not
3660 * because of a hardware interrupt. It is possible (although
3661 * not very likely) that an error interrupt was also raised but
3662 * this thread ran first and inhibited t4_intr_err. We walk the
3663 * main INT_CAUSE registers here to make sure we haven't missed
3664 * anything interesting.
3666 t4_slow_intr_handler(sc, verbose);
3667 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
3669 t4_report_fw_error(sc);
3670 log(LOG_ALERT, "%s: encountered fatal error, adapter stopped (%d).\n",
3671 device_get_nameunit(sc->dev), fw_error);
3672 taskqueue_enqueue(reset_tq, &sc->fatal_error_task);
3676 t4_add_adapter(struct adapter *sc)
3678 sx_xlock(&t4_list_lock);
3679 SLIST_INSERT_HEAD(&t4_list, sc, link);
3680 sx_xunlock(&t4_list_lock);
3684 t4_map_bars_0_and_4(struct adapter *sc)
3686 sc->regs_rid = PCIR_BAR(0);
3687 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3688 &sc->regs_rid, RF_ACTIVE);
3689 if (sc->regs_res == NULL) {
3690 device_printf(sc->dev, "cannot map registers.\n");
3693 sc->bt = rman_get_bustag(sc->regs_res);
3694 sc->bh = rman_get_bushandle(sc->regs_res);
3695 sc->mmio_len = rman_get_size(sc->regs_res);
3696 setbit(&sc->doorbells, DOORBELL_KDB);
3698 sc->msix_rid = PCIR_BAR(4);
3699 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3700 &sc->msix_rid, RF_ACTIVE);
3701 if (sc->msix_res == NULL) {
3702 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
3710 t4_map_bar_2(struct adapter *sc)
3714 * T4: only iWARP driver uses the userspace doorbells. There is no need
3715 * to map it if RDMA is disabled.
3717 if (is_t4(sc) && sc->rdmacaps == 0)
3720 sc->udbs_rid = PCIR_BAR(2);
3721 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3722 &sc->udbs_rid, RF_ACTIVE);
3723 if (sc->udbs_res == NULL) {
3724 device_printf(sc->dev, "cannot map doorbell BAR.\n");
3727 sc->udbs_base = rman_get_virtual(sc->udbs_res);
3729 if (chip_id(sc) >= CHELSIO_T5) {
3730 setbit(&sc->doorbells, DOORBELL_UDB);
3731 #if defined(__i386__) || defined(__amd64__)
3732 if (t5_write_combine) {
3736 * Enable write combining on BAR2. This is the
3737 * userspace doorbell BAR and is split into 128B
3738 * (UDBS_SEG_SIZE) doorbell regions, each associated
3739 * with an egress queue. The first 64B has the doorbell
3740 * and the second 64B can be used to submit a tx work
3741 * request with an implicit doorbell.
3744 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
3745 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
3747 clrbit(&sc->doorbells, DOORBELL_UDB);
3748 setbit(&sc->doorbells, DOORBELL_WCWR);
3749 setbit(&sc->doorbells, DOORBELL_UDBWC);
3751 device_printf(sc->dev,
3752 "couldn't enable write combining: %d\n",
3756 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
3757 t4_write_reg(sc, A_SGE_STAT_CFG,
3758 V_STATSOURCE_T5(7) | mode);
3762 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
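/*
 * Memory windows: apertures in BAR0 through which the driver reads and
 * writes the card's internal address space.
 */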
3767 struct memwin_init {
3772 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
3773 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3774 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3775 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
3778 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
3779 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3780 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3781 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
3785 setup_memwin(struct adapter *sc)
3787 const struct memwin_init *mw_init;
3794 * Read low 32b of bar0 indirectly via the hardware backdoor
3795 * mechanism. Works from within PCI passthrough environments
3796 * too, where rman_get_start() can return a different value. We
3797 * need to program the T4 memory window decoders with the actual
3798 * addresses that will be coming across the PCIe link.
3800 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
3801 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
3803 mw_init = &t4_memwin[0];
3805 /* T5+ use the relative offset inside the PCIe BAR */
3808 mw_init = &t5_memwin[0];
3811 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
3812 if (!rw_initialized(&mw->mw_lock)) {
3813 rw_init(&mw->mw_lock, "memory window access");
3814 mw->mw_base = mw_init->base;
3815 mw->mw_aperture = mw_init->aperture;
3819 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
3820 (mw->mw_base + bar0) | V_BIR(0) |
3821 V_WINDOW(ilog2(mw->mw_aperture) - 10));
3822 rw_wlock(&mw->mw_lock);
3823 position_memwin(sc, i, mw->mw_curpos);
3824 rw_wunlock(&mw->mw_lock);
3828 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
3832 * Positions the memory window at the given address in the card's address space.
3833 * There are some alignment requirements and the actual position may be at an
3834 * address prior to the requested address. mw->mw_curpos always has the actual
3835 * position of the window.
3838 position_memwin(struct adapter *sc, int idx, uint32_t addr)
3844 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3845 mw = &sc->memwin[idx];
3846 rw_assert(&mw->mw_lock, RA_WLOCKED);
3850 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
3852 pf = V_PFNUM(sc->pf);
3853 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
3855 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
3856 t4_write_reg(sc, reg, mw->mw_curpos | pf);
3857 t4_read_reg(sc, reg); /* flush */
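/*
 * Reads or writes 32b words through the given memory window.  If the target
 * range is outside the window's current position the lock is upgraded and
 * the window is repositioned before continuing.
 */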
3861 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
3867 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3869 /* Memory can only be accessed in naturally aligned 4 byte units */
3870 if (addr & 3 || len & 3 || len <= 0)
3873 mw = &sc->memwin[idx];
3875 rw_rlock(&mw->mw_lock);
3876 mw_end = mw->mw_curpos + mw->mw_aperture;
3877 if (addr >= mw_end || addr < mw->mw_curpos) {
3878 /* Will need to reposition the window */
3879 if (!rw_try_upgrade(&mw->mw_lock)) {
3880 rw_runlock(&mw->mw_lock);
3881 rw_wlock(&mw->mw_lock);
3883 rw_assert(&mw->mw_lock, RA_WLOCKED);
3884 position_memwin(sc, idx, addr);
3885 rw_downgrade(&mw->mw_lock);
3886 mw_end = mw->mw_curpos + mw->mw_aperture;
3888 rw_assert(&mw->mw_lock, RA_RLOCKED);
3889 while (addr < mw_end && len > 0) {
3891 v = t4_read_reg(sc, mw->mw_base + addr -
3893 *val++ = le32toh(v);
3896 t4_write_reg(sc, mw->mw_base + addr -
3897 mw->mw_curpos, htole32(v));
3902 rw_runlock(&mw->mw_lock);
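/*
 * The atid table is kept as a free list: an unused entry stores a pointer
 * to the next free entry, and an allocated entry stores the caller's
 * context instead.
 */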
3909 t4_init_atid_table(struct adapter *sc)
3918 MPASS(t->atid_tab == NULL);
3920 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
3922 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
3923 t->afree = t->atid_tab;
3924 t->atids_in_use = 0;
3925 for (i = 1; i < t->natids; i++)
3926 t->atid_tab[i - 1].next = &t->atid_tab[i];
3927 t->atid_tab[t->natids - 1].next = NULL;
3931 t4_free_atid_table(struct adapter *sc)
3937 KASSERT(t->atids_in_use == 0,
3938 ("%s: %d atids still in use.", __func__, t->atids_in_use));
3940 if (mtx_initialized(&t->atid_lock))
3941 mtx_destroy(&t->atid_lock);
3942 free(t->atid_tab, M_CXGBE);
3947 alloc_atid(struct adapter *sc, void *ctx)
3949 struct tid_info *t = &sc->tids;
3952 mtx_lock(&t->atid_lock);
3954 union aopen_entry *p = t->afree;
3956 atid = p - t->atid_tab;
3957 MPASS(atid <= M_TID_TID);
3962 mtx_unlock(&t->atid_lock);
3967 lookup_atid(struct adapter *sc, int atid)
3969 struct tid_info *t = &sc->tids;
3971 return (t->atid_tab[atid].data);
3975 free_atid(struct adapter *sc, int atid)
3977 struct tid_info *t = &sc->tids;
3978 union aopen_entry *p = &t->atid_tab[atid];
3980 mtx_lock(&t->atid_lock);
3984 mtx_unlock(&t->atid_lock);
3988 queue_tid_release(struct adapter *sc, int tid)
3991 CXGBE_UNIMPLEMENTED("deferred tid release");
3995 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
3998 struct cpl_tid_release *req;
4000 wr = alloc_wrqe(sizeof(*req), ctrlq);
4002 queue_tid_release(sc, tid); /* defer */
4007 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
4013 t4_range_cmp(const void *a, const void *b)
4015 return ((const struct t4_range *)a)->start -
4016 ((const struct t4_range *)b)->start;
4020 * Verify that the memory range specified by the addr/len pair is valid within
4021 * the card's address space.
4024 validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
4026 struct t4_range mem_ranges[4], *r, *next;
4027 uint32_t em, addr_len;
4028 int i, n, remaining;
4030 /* Memory can only be accessed in naturally aligned 4 byte units */
4031 if (addr & 3 || len & 3 || len == 0)
4034 /* Enabled memories */
4035 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4039 bzero(r, sizeof(mem_ranges));
4040 if (em & F_EDRAM0_ENABLE) {
4041 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4042 r->size = G_EDRAM0_SIZE(addr_len) << 20;
4044 r->start = G_EDRAM0_BASE(addr_len) << 20;
4045 if (addr >= r->start &&
4046 addr + len <= r->start + r->size)
4052 if (em & F_EDRAM1_ENABLE) {
4053 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4054 r->size = G_EDRAM1_SIZE(addr_len) << 20;
4056 r->start = G_EDRAM1_BASE(addr_len) << 20;
4057 if (addr >= r->start &&
4058 addr + len <= r->start + r->size)
4064 if (em & F_EXT_MEM_ENABLE) {
4065 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4066 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
4068 r->start = G_EXT_MEM_BASE(addr_len) << 20;
4069 if (addr >= r->start &&
4070 addr + len <= r->start + r->size)
4076 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
4077 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
4078 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
4080 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
4081 if (addr >= r->start &&
4082 addr + len <= r->start + r->size)
4088 MPASS(n <= nitems(mem_ranges));
4091 /* Sort and merge the ranges. */
4092 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
4094 /* Start from index 0 and examine the next n - 1 entries. */
4096 for (remaining = n - 1; remaining > 0; remaining--, r++) {
4098 MPASS(r->size > 0); /* r is a valid entry. */
4100 MPASS(next->size > 0); /* and so is the next one. */
4102 while (r->start + r->size >= next->start) {
4103 /* Merge the next one into the current entry. */
4104 r->size = max(r->start + r->size,
4105 next->start + next->size) - r->start;
4106 n--; /* One fewer entry in total. */
4107 if (--remaining == 0)
4108 goto done; /* short circuit */
4111 if (next != r + 1) {
4113 * Some entries were merged into r and next
4114 * points to the first valid entry that couldn't be merged.
4117 MPASS(next->size > 0); /* must be valid */
4118 memcpy(r + 1, next, remaining * sizeof(*r));
4121 * This is so that the foo->size assertion in the
4122 * next iteration of the loop does the right
4123 * thing for entries that were pulled up and are no longer valid.
4126 MPASS(n < nitems(mem_ranges));
4127 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
4128 sizeof(struct t4_range));
4133 /* Done merging the ranges. */
4136 for (i = 0; i < n; i++, r++) {
4137 if (addr >= r->start &&
4138 addr + len <= r->start + r->size)
4147 fwmtype_to_hwmtype(int mtype)
4151 case FW_MEMTYPE_EDC0:
4153 case FW_MEMTYPE_EDC1:
4155 case FW_MEMTYPE_EXTMEM:
4157 case FW_MEMTYPE_EXTMEM1:
4160 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
4165 * Verify that the memory range specified by the memtype/offset/len pair is
4166 * valid and lies entirely within the memtype specified. The global address of
4167 * the start of the range is returned in addr.
4170 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
4173 uint32_t em, addr_len, maddr;
4175 /* Memory can only be accessed in naturally aligned 4 byte units */
4176 if (off & 3 || len & 3 || len == 0)
4179 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
4180 switch (fwmtype_to_hwmtype(mtype)) {
4182 if (!(em & F_EDRAM0_ENABLE))
4184 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
4185 maddr = G_EDRAM0_BASE(addr_len) << 20;
4188 if (!(em & F_EDRAM1_ENABLE))
4190 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
4191 maddr = G_EDRAM1_BASE(addr_len) << 20;
4194 if (!(em & F_EXT_MEM_ENABLE))
4196 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
4197 maddr = G_EXT_MEM_BASE(addr_len) << 20;
4200 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
4202 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
4203 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
4209 *addr = maddr + off; /* global address */
4210 return (validate_mem_range(sc, *addr, len));
4214 fixup_devlog_params(struct adapter *sc)
4216 struct devlog_params *dparams = &sc->params.devlog;
4219 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
4220 dparams->size, &dparams->addr);
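/*
 * Number of interrupt vectors needed: the adapter-wide extras plus, for
 * each port, enough vectors for the NIC/netmap and offload rx queues of
 * the main VI and of every extra VI.
 */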
4226 update_nirq(struct intrs_and_queues *iaq, int nports)
4229 iaq->nirq = T4_EXTRA_INTR;
4230 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq);
4231 iaq->nirq += nports * iaq->nofldrxq;
4232 iaq->nirq += nports * (iaq->num_vis - 1) *
4233 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
4234 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
4238 * Adjust requirements to fit the number of interrupts available.
4241 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
4245 const int nports = sc->params.nports;
4250 bzero(iaq, sizeof(*iaq));
4251 iaq->intr_type = itype;
4252 iaq->num_vis = t4_num_vis;
4253 iaq->ntxq = t4_ntxq;
4254 iaq->ntxq_vi = t4_ntxq_vi;
4255 iaq->nrxq = t4_nrxq;
4256 iaq->nrxq_vi = t4_nrxq_vi;
4257 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4258 if (is_offload(sc) || is_ethoffload(sc)) {
4259 iaq->nofldtxq = t4_nofldtxq;
4260 iaq->nofldtxq_vi = t4_nofldtxq_vi;
4264 if (is_offload(sc)) {
4265 iaq->nofldrxq = t4_nofldrxq;
4266 iaq->nofldrxq_vi = t4_nofldrxq_vi;
4270 if (t4_native_netmap & NN_MAIN_VI) {
4271 iaq->nnmtxq = t4_nnmtxq;
4272 iaq->nnmrxq = t4_nnmrxq;
4274 if (t4_native_netmap & NN_EXTRA_VI) {
4275 iaq->nnmtxq_vi = t4_nnmtxq_vi;
4276 iaq->nnmrxq_vi = t4_nnmrxq_vi;
4280 update_nirq(iaq, nports);
4281 if (iaq->nirq <= navail &&
4282 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4284 * This is the normal case -- there are enough interrupts for everything.
4291 * If extra VIs have been configured try reducing their count and see if that helps.
4294 while (iaq->num_vis > 1) {
4296 update_nirq(iaq, nports);
4297 if (iaq->nirq <= navail &&
4298 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4299 device_printf(sc->dev, "virtual interfaces per port "
4300 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
4301 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
4302 "itype %d, navail %u, nirq %d.\n",
4303 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
4304 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
4305 itype, navail, iaq->nirq);
4311 * Extra VIs will not be created. Log a message if they were requested.
4313 MPASS(iaq->num_vis == 1);
4314 iaq->ntxq_vi = iaq->nrxq_vi = 0;
4315 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
4316 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
4317 if (iaq->num_vis != t4_num_vis) {
4318 device_printf(sc->dev, "extra virtual interfaces disabled. "
4319 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
4320 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
4321 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
4322 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
4326 * Keep reducing the number of NIC rx queues to the next lower power of
4327 * 2 (for even RSS distribution) and halving the TOE rx queues and see if that works.
4331 if (iaq->nrxq > 1) {
4334 } while (!powerof2(iaq->nrxq));
4335 if (iaq->nnmrxq > iaq->nrxq)
4336 iaq->nnmrxq = iaq->nrxq;
4338 if (iaq->nofldrxq > 1)
4339 iaq->nofldrxq >>= 1;
4341 old_nirq = iaq->nirq;
4342 update_nirq(iaq, nports);
4343 if (iaq->nirq <= navail &&
4344 (itype != INTR_MSI || powerof2(iaq->nirq))) {
4345 device_printf(sc->dev, "running with reduced number of "
4346 "rx queues because of shortage of interrupts. "
4347 "nrxq=%u, nofldrxq=%u. "
4348 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
4349 iaq->nofldrxq, itype, navail, iaq->nirq);
4352 } while (old_nirq != iaq->nirq);
4354 /* One interrupt for everything. Ugh. */
4355 device_printf(sc->dev, "running with minimal number of queues. "
4356 "itype %d, navail %u.\n", itype, navail);
4360 if (iaq->nofldrxq > 0) {
4367 MPASS(iaq->num_vis > 0);
4368 if (iaq->num_vis > 1) {
4369 MPASS(iaq->nrxq_vi > 0);
4370 MPASS(iaq->ntxq_vi > 0);
4372 MPASS(iaq->nirq > 0);
4373 MPASS(iaq->nrxq > 0);
4374 MPASS(iaq->ntxq > 0);
4375 if (itype == INTR_MSI) {
4376 MPASS(powerof2(iaq->nirq));
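/*
 * Try MSI-X, then MSI, then INTx.  For each allowed type, calculate the
 * queue/vector requirements and allocate the vectors; if fewer vectors are
 * granted, recalculate with the smaller count and try again.
 */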
4381 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
4383 int rc, itype, navail, nalloc;
4385 for (itype = INTR_MSIX; itype; itype >>= 1) {
4387 if ((itype & t4_intr_types) == 0)
4388 continue; /* not allowed */
4390 if (itype == INTR_MSIX)
4391 navail = pci_msix_count(sc->dev);
4392 else if (itype == INTR_MSI)
4393 navail = pci_msi_count(sc->dev);
4400 calculate_iaq(sc, iaq, itype, navail);
4403 if (itype == INTR_MSIX)
4404 rc = pci_alloc_msix(sc->dev, &nalloc);
4405 else if (itype == INTR_MSI)
4406 rc = pci_alloc_msi(sc->dev, &nalloc);
4408 if (rc == 0 && nalloc > 0) {
4409 if (nalloc == iaq->nirq)
4413 * Didn't get the number requested. Use whatever number
4414 * the kernel is willing to allocate.
4416 device_printf(sc->dev, "fewer vectors than requested, "
4417 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
4418 itype, iaq->nirq, nalloc);
4419 pci_release_msi(sc->dev);
4424 device_printf(sc->dev,
4425 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
4426 itype, rc, iaq->nirq, nalloc);
4429 device_printf(sc->dev,
4430 "failed to find a usable interrupt type. "
4431 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
4432 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
4437 #define FW_VERSION(chip) ( \
4438 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
4439 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
4440 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
4441 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
4442 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
4444 /* Just enough of fw_hdr to cover all version info. */
4450 __be32 tp_microcode_ver;
4455 __u8 intfver_iscsipdu;
4457 __u8 intfver_fcoepdu;
4460 /* Spot check a couple of fields. */
4461 CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
4462 CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
4463 CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
4473 .kld_name = "t4fw_cfg",
4474 .fw_mod_name = "t4fw",
4476 .chip = FW_HDR_CHIP_T4,
4477 .fw_ver = htobe32(FW_VERSION(T4)),
4478 .intfver_nic = FW_INTFVER(T4, NIC),
4479 .intfver_vnic = FW_INTFVER(T4, VNIC),
4480 .intfver_ofld = FW_INTFVER(T4, OFLD),
4481 .intfver_ri = FW_INTFVER(T4, RI),
4482 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
4483 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4484 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
4485 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4489 .kld_name = "t5fw_cfg",
4490 .fw_mod_name = "t5fw",
4492 .chip = FW_HDR_CHIP_T5,
4493 .fw_ver = htobe32(FW_VERSION(T5)),
4494 .intfver_nic = FW_INTFVER(T5, NIC),
4495 .intfver_vnic = FW_INTFVER(T5, VNIC),
4496 .intfver_ofld = FW_INTFVER(T5, OFLD),
4497 .intfver_ri = FW_INTFVER(T5, RI),
4498 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
4499 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4500 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
4501 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4505 .kld_name = "t6fw_cfg",
4506 .fw_mod_name = "t6fw",
4508 .chip = FW_HDR_CHIP_T6,
4509 .fw_ver = htobe32(FW_VERSION(T6)),
4510 .intfver_nic = FW_INTFVER(T6, NIC),
4511 .intfver_vnic = FW_INTFVER(T6, VNIC),
4512 .intfver_ofld = FW_INTFVER(T6, OFLD),
4513 .intfver_ri = FW_INTFVER(T6, RI),
4514 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4515 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4516 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4517 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4522 static struct fw_info *
4523 find_fw_info(int chip)
4527 for (i = 0; i < nitems(fw_info); i++) {
4528 if (fw_info[i].chip == chip)
4529 return (&fw_info[i]);
4535 * Is the given firmware API compatible with the one the driver was compiled
4539 fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
4542 /* short circuit if it's the exact same firmware version */
4543 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
4547 * XXX: Is this too conservative? Perhaps I should limit this to the
4548 * features that are supported in the driver.
4550 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
4551 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
4552 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
4553 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
4561 load_fw_module(struct adapter *sc, const struct firmware **dcfg,
4562 const struct firmware **fw)
4564 struct fw_info *fw_info;
4570 fw_info = find_fw_info(chip_id(sc));
4571 if (fw_info == NULL) {
4572 device_printf(sc->dev,
4573 "unable to look up firmware information for chip %d.\n",
4578 *dcfg = firmware_get(fw_info->kld_name);
4579 if (*dcfg != NULL) {
4581 *fw = firmware_get(fw_info->fw_mod_name);
4589 unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
4590 const struct firmware *fw)
4594 firmware_put(fw, FIRMWARE_UNLOAD);
4596 firmware_put(dcfg, FIRMWARE_UNLOAD);
4601 * 0 means no firmware install attempted.
4602 * ERESTART means a firmware install was attempted and was successful.
4603 * +ve errno means a firmware install was attempted but failed.
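 *
 * Callers such as contact_firmware() below typically retry their handshake
 * with the firmware when ERESTART is returned.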
4606 install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
4607 const struct fw_h *drv_fw, const char *reason, int *already)
4609 const struct firmware *cfg, *fw;
4610 const uint32_t c = be32toh(card_fw->fw_ver);
4613 struct fw_h bundled_fw;
4614 bool load_attempted;
4617 load_attempted = false;
4618 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;
4620 memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
4621 if (t4_fw_install < 0) {
4622 rc = load_fw_module(sc, &cfg, &fw);
4623 if (rc != 0 || fw == NULL) {
4624 device_printf(sc->dev,
4625 "failed to load firmware module: %d. cfg %p, fw %p;"
4626 " will use compiled-in firmware version for"
4627 "hw.cxgbe.fw_install checks.\n",
4630 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
4632 load_attempted = true;
4634 d = be32toh(bundled_fw.fw_ver);
4639 if ((sc->flags & FW_OK) == 0) {
4641 if (c == 0xffffffff) {
4650 if (!fw_compatible(card_fw, &bundled_fw)) {
4651 reason = "incompatible or unusable";
4656 reason = "older than the version bundled with this driver";
4660 if (fw_install == 2 && d != c) {
4661 reason = "different than the version bundled with this driver";
4665 /* No reason to do anything to the firmware already on the card. */
4674 if (fw_install == 0) {
4675 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4676 "but the driver is prohibited from installing a firmware "
4678 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4679 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
4685 * We'll attempt to install a firmware. Load the module first (if it
4686 * hasn't been loaded already).
4688 if (!load_attempted) {
4689 rc = load_fw_module(sc, &cfg, &fw);
4690 if (rc != 0 || fw == NULL) {
4691 device_printf(sc->dev,
4692 "failed to load firmware module: %d. cfg %p, fw %p\n",
4698 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4699 "but the driver cannot take corrective action because it "
4700 "is unable to load the firmware module.\n",
4701 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4702 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
4703 rc = sc->flags & FW_OK ? 0 : ENOENT;
4706 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
4708 MPASS(t4_fw_install > 0);
4709 device_printf(sc->dev,
4710 "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
4711 "expecting (%u.%u.%u.%u) and will not be used.\n",
4712 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
4713 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
4714 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
4715 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
4716 rc = sc->flags & FW_OK ? 0 : EINVAL;
4720 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
4721 "installing firmware %u.%u.%u.%u on card.\n",
4722 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
4723 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
4724 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
4725 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
4727 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
4729 device_printf(sc->dev, "failed to install firmware: %d\n", rc);
4731 /* Installed successfully, update the cached header too. */
4733 memcpy(card_fw, fw->data, sizeof(*card_fw));
4736 unload_fw_module(sc, cfg, fw);
4742 * Establish contact with the firmware and attempt to become the master driver.
4744 * A firmware will be installed to the card if needed (if the driver is allowed
4748 contact_firmware(struct adapter *sc)
4750 int rc, already = 0;
4751 enum dev_state state;
4752 struct fw_info *fw_info;
4753 struct fw_hdr *card_fw; /* fw on the card */
4754 const struct fw_h *drv_fw;
4756 fw_info = find_fw_info(chip_id(sc));
4757 if (fw_info == NULL) {
4758 device_printf(sc->dev,
4759 "unable to look up firmware information for chip %d.\n",
4763 drv_fw = &fw_info->fw_h;
4765 /* Read the header of the firmware on the card */
4766 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
4768 rc = -t4_get_fw_hdr(sc, card_fw);
4770 device_printf(sc->dev,
4771 "unable to read firmware header from card's flash: %d\n",
4776 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
4783 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
4784 if (rc < 0 || state == DEV_STATE_ERR) {
4786 device_printf(sc->dev,
4787 "failed to connect to the firmware: %d, %d. "
4788 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4790 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4791 "not responding properly to HELLO", &already) == ERESTART)
4796 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
4797 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */
4800 sc->flags |= MASTER_PF;
4801 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4807 } else if (state == DEV_STATE_UNINIT) {
4809 * We didn't get to be the master so we definitely won't be
4810 * configuring the chip. It's a bug if someone else hasn't
4811 * configured it already.
4813 device_printf(sc->dev, "couldn't be master(%d), "
4814 "device not already initialized either(%d). "
4815 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4820 * Some other PF is the master and has configured the chip.
4821 * This is allowed but untested.
4823 device_printf(sc->dev, "PF%d is master, device state %d. "
4824 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4825 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
4830 if (rc != 0 && sc->flags & FW_OK) {
4831 t4_fw_bye(sc, sc->mbox);
4832 sc->flags &= ~FW_OK;
4834 free(card_fw, M_CXGBE);
4839 copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
4840 uint32_t mtype, uint32_t moff)
4842 struct fw_info *fw_info;
4843 const struct firmware *dcfg, *rcfg = NULL;
4844 const uint32_t *cfdata;
4845 uint32_t cflen, addr;
4848 load_fw_module(sc, &dcfg, NULL);
4850 /* Card specific interpretation of "default". */
4851 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4852 if (pci_get_device(sc->dev) == 0x440a)
4853 snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
4855 snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
4858 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4860 device_printf(sc->dev,
4861 "KLD with default config is not available.\n");
4865 cfdata = dcfg->data;
4866 cflen = dcfg->datasize & ~3;
4870 fw_info = find_fw_info(chip_id(sc));
4871 if (fw_info == NULL) {
4872 device_printf(sc->dev,
4873 "unable to look up firmware information for chip %d.\n",
4878 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
4880 rcfg = firmware_get(s);
4882 device_printf(sc->dev,
4883 "unable to load module \"%s\" for configuration "
4884 "profile \"%s\".\n", s, cfg_file);
4888 cfdata = rcfg->data;
4889 cflen = rcfg->datasize & ~3;
4892 if (cflen > FLASH_CFG_MAX_SIZE) {
4893 device_printf(sc->dev,
4894 "config file too long (%d, max allowed is %d).\n",
4895 cflen, FLASH_CFG_MAX_SIZE);
4900 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
4902 device_printf(sc->dev,
4903 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
4904 __func__, mtype, moff, cflen, rc);
4908 write_via_memwin(sc, 2, addr, cfdata, cflen);
4911 firmware_put(rcfg, FIRMWARE_UNLOAD);
4912 unload_fw_module(sc, dcfg, NULL);
4916 struct caps_allowed {
4919 uint16_t switchcaps;
4923 uint16_t cryptocaps;
4928 #define FW_PARAM_DEV(param) \
4929 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4930 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4931 #define FW_PARAM_PFVF(param) \
4932 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4933 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
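/*
 * For example, FW_PARAM_DEV(PORTVEC) builds the id of the device-wide PORTVEC
 * parameter and FW_PARAM_PFVF(FILTER_START) the id of this function's
 * FILTER_START parameter; both forms are handed to t4_query_params() and
 * t4_set_params() in the routines below.
 */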
4936 * Provide a configuration profile to the firmware and have it initialize the
4937 * chip accordingly. This may involve uploading a configuration file to the
4941 apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
4942 const struct caps_allowed *caps_allowed)
4945 struct fw_caps_config_cmd caps;
4946 uint32_t mtype, moff, finicsum, cfcsum, param, val;
4948 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
4950 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
4954 bzero(&caps, sizeof(caps));
4955 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4956 F_FW_CMD_REQUEST | F_FW_CMD_READ);
4957 if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
4960 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4961 } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
4962 mtype = FW_MEMTYPE_FLASH;
4963 moff = t4_flash_cfg_addr(sc);
4964 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4965 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4966 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4970 * Ask the firmware where it wants us to upload the config file.
4972 param = FW_PARAM_DEV(CF);
4973 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
4975 /* No support for config file? Shouldn't happen. */
4976 device_printf(sc->dev,
4977 "failed to query config file location: %d.\n", rc);
4980 mtype = G_FW_PARAMS_PARAM_Y(val);
4981 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
4982 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4983 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4984 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4987 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
4989 device_printf(sc->dev,
4990 "failed to upload config file to card: %d.\n", rc);
4994 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4996 device_printf(sc->dev, "failed to pre-process config file: %d "
4997 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
5001 finicsum = be32toh(caps.finicsum);
5002 cfcsum = be32toh(caps.cfcsum); /* actual */
5003 if (finicsum != cfcsum) {
5004 device_printf(sc->dev,
5005 "WARNING: config file checksum mismatch: %08x %08x\n",
5008 sc->cfcsum = cfcsum;
5009 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);
5012 * Let the firmware know what features will (not) be used so it can tune
5013 * things accordingly.
5015 #define LIMIT_CAPS(x) do { \
5016 caps.x##caps &= htobe16(caps_allowed->x##caps); \
5028 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
5030 * TOE and hashfilters are mutually exclusive. It is a config
5031 * file or firmware bug if both are reported as available. Try
5032 * to cope with the situation in non-debug builds by disabling
5035 MPASS(caps.toecaps == 0);
5042 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5043 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5044 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
5045 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
5047 device_printf(sc->dev,
5048 "failed to process config file: %d.\n", rc);
5052 t4_tweak_chip_settings(sc);
5053 set_params__pre_init(sc);
5055 /* get basic stuff going */
5056 rc = -t4_fw_initialize(sc, sc->mbox);
5058 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
5066 * Partition chip resources for use between various PFs, VFs, etc.
5069 partition_resources(struct adapter *sc)
5071 char cfg_file[sizeof(t4_cfg_file)];
5072 struct caps_allowed caps_allowed;
5076 /* Only the master driver gets to configure the chip resources. */
5077 MPASS(sc->flags & MASTER_PF);
5079 #define COPY_CAPS(x) do { \
5080 caps_allowed.x##caps = t4_##x##caps_allowed; \
5082 bzero(&caps_allowed, sizeof(caps_allowed));
5092 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
5093 snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
5095 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
5096 if (rc != 0 && fallback) {
5098 device_printf(sc->dev,
5099 "failed (%d) to configure card with \"%s\" profile, "
5100 "will fall back to a basic configuration and retry.\n",
5102 snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
5103 bzero(&caps_allowed, sizeof(caps_allowed));
5105 caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
5114 * Retrieve parameters that are needed (or nice to have) very early.
5117 get_params__pre_init(struct adapter *sc)
5120 uint32_t param[2], val[2];
5122 t4_get_version_info(sc);
5124 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
5125 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
5126 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
5127 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
5128 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
5130 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
5131 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
5132 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
5133 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
5134 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
5136 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
5137 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
5138 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
5139 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
5140 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
5142 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
5143 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
5144 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
5145 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
5146 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
5148 param[0] = FW_PARAM_DEV(PORTVEC);
5149 param[1] = FW_PARAM_DEV(CCLK);
5150 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5152 device_printf(sc->dev,
5153 "failed to query parameters (pre_init): %d.\n", rc);
5157 sc->params.portvec = val[0];
5158 sc->params.nports = bitcount32(val[0]);
5159 sc->params.vpd.cclk = val[1];
5161 /* Read device log parameters. */
5162 rc = -t4_init_devlog_params(sc, 1);
5164 fixup_devlog_params(sc);
5166 device_printf(sc->dev,
5167 "failed to get devlog parameters: %d.\n", rc);
5168 rc = 0; /* devlog isn't critical for device operation */
5175 * Any params that need to be set before FW_INITIALIZE.
5178 set_params__pre_init(struct adapter *sc)
5181 uint32_t param, val;
5183 if (chip_id(sc) >= CHELSIO_T6) {
5184 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
5186 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
5187 /* firmwares < 1.20.1.0 do not have this param. */
5188 if (rc == FW_EINVAL &&
5189 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
5193 device_printf(sc->dev,
5194 "failed to enable high priority filters :%d.\n",
5198 param = FW_PARAM_DEV(PPOD_EDRAM);
5199 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
5200 if (rc == 0 && val == 1) {
5201 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m,
5204 device_printf(sc->dev,
5205 "failed to set PPOD_EDRAM: %d.\n", rc);
5210 /* Enable opaque VIIDs with firmwares that support it. */
5211 param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5213 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
5214 if (rc == 0 && val == 1)
5215 sc->params.viid_smt_extn_support = true;
5217 sc->params.viid_smt_extn_support = false;
5223 * Retrieve various parameters that are of interest to the driver. The device
5224 * has been initialized by the firmware at this point.
5227 get_params__post_init(struct adapter *sc)
5230 uint32_t param[7], val[7];
5231 struct fw_caps_config_cmd caps;
5233 param[0] = FW_PARAM_PFVF(IQFLINT_START);
5234 param[1] = FW_PARAM_PFVF(EQ_START);
5235 param[2] = FW_PARAM_PFVF(FILTER_START);
5236 param[3] = FW_PARAM_PFVF(FILTER_END);
5237 param[4] = FW_PARAM_PFVF(L2T_START);
5238 param[5] = FW_PARAM_PFVF(L2T_END);
5239 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5240 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5241 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
5242 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
5244 device_printf(sc->dev,
5245 "failed to query parameters (post_init): %d.\n", rc);
5249 sc->sge.iq_start = val[0];
5250 sc->sge.eq_start = val[1];
5251 if ((int)val[3] > (int)val[2]) {
5252 sc->tids.ftid_base = val[2];
5253 sc->tids.ftid_end = val[3];
5254 sc->tids.nftids = val[3] - val[2] + 1;
5256 sc->vres.l2t.start = val[4];
5257 sc->vres.l2t.size = val[5] - val[4] + 1;
5258 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
5259 ("%s: L2 table size (%u) larger than expected (%u)",
5260 __func__, sc->vres.l2t.size, L2T_SIZE));
5261 sc->params.core_vdd = val[6];
5263 param[0] = FW_PARAM_PFVF(IQFLINT_END);
5264 param[1] = FW_PARAM_PFVF(EQ_END);
5265 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5267 device_printf(sc->dev,
5268 "failed to query parameters (post_init2): %d.\n", rc);
5271 MPASS((int)val[0] >= sc->sge.iq_start);
5272 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
5273 MPASS((int)val[1] >= sc->sge.eq_start);
5274 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;
5276 if (chip_id(sc) >= CHELSIO_T6) {
5278 sc->tids.tid_base = t4_read_reg(sc,
5279 A_LE_DB_ACTIVE_TABLE_START_INDEX);
5281 param[0] = FW_PARAM_PFVF(HPFILTER_START);
5282 param[1] = FW_PARAM_PFVF(HPFILTER_END);
5283 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5285 device_printf(sc->dev,
5286 "failed to query hpfilter parameters: %d.\n", rc);
5289 if ((int)val[1] > (int)val[0]) {
5290 sc->tids.hpftid_base = val[0];
5291 sc->tids.hpftid_end = val[1];
5292 sc->tids.nhpftids = val[1] - val[0] + 1;
5295 * These assertions should fire if the tid layout changes and the
5296 * driver needs to catch up.
5298 MPASS(sc->tids.hpftid_base == 0);
5299 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
5302 param[0] = FW_PARAM_PFVF(RAWF_START);
5303 param[1] = FW_PARAM_PFVF(RAWF_END);
5304 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5306 device_printf(sc->dev,
5307 "failed to query rawf parameters: %d.\n", rc);
5310 if ((int)val[1] > (int)val[0]) {
5311 sc->rawf_base = val[0];
5312 sc->nrawf = val[1] - val[0] + 1;
5317 * MPSBGMAP is queried separately because only recent firmwares support
5318 * it as a parameter and we don't want the compound query above to fail
5319 * on older firmwares.
5321 param[0] = FW_PARAM_DEV(MPSBGMAP);
5323 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5325 sc->params.mps_bg_map = val[0];
5327 sc->params.mps_bg_map = 0;
5330 * Determine whether the firmware supports the filter2 work request.
5331 * This is queried separately for the same reason as MPSBGMAP above.
5333 param[0] = FW_PARAM_DEV(FILTER2_WR);
5335 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5337 sc->params.filter2_wr_support = val[0] != 0;
5339 sc->params.filter2_wr_support = 0;
5342 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
5343 * This is queried separately for the same reason as other params above.
5345 param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5347 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5349 sc->params.ulptx_memwrite_dsgl = val[0] != 0;
5351 sc->params.ulptx_memwrite_dsgl = false;
5353 /* FW_RI_FR_NSMR_TPTE_WR support */
5354 param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5355 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5357 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
5359 sc->params.fr_nsmr_tpte_wr_support = false;
5361 /* Support for 512 SGL entries per FR MR. */
5362 param[0] = FW_PARAM_DEV(DEV_512SGL_MR);
5363 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5365 sc->params.dev_512sgl_mr = val[0] != 0;
5367 sc->params.dev_512sgl_mr = false;
5369 param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
5370 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5372 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0];
5374 sc->params.max_pkts_per_eth_tx_pkts_wr = 15;
5376 param[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5377 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5379 MPASS(val[0] > 0 && val[0] < 256); /* nsched_cls is 8b */
5380 sc->params.nsched_cls = val[0];
5382 sc->params.nsched_cls = sc->chip_params->nsched_cls;
5384 /* get capabilities */
5385 bzero(&caps, sizeof(caps));
5386 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5387 F_FW_CMD_REQUEST | F_FW_CMD_READ);
5388 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
5389 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
5391 device_printf(sc->dev,
5392 "failed to get card capabilities: %d.\n", rc);
5396 #define READ_CAPS(x) do { \
5397 sc->x = htobe16(caps.x); \
5400 READ_CAPS(linkcaps);
5401 READ_CAPS(switchcaps);
5404 READ_CAPS(rdmacaps);
5405 READ_CAPS(cryptocaps);
5406 READ_CAPS(iscsicaps);
5407 READ_CAPS(fcoecaps);
5409 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
5410 MPASS(chip_id(sc) > CHELSIO_T4);
5411 MPASS(sc->toecaps == 0);
5414 param[0] = FW_PARAM_DEV(NTID);
5415 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
5417 device_printf(sc->dev,
5418 "failed to query HASHFILTER parameters: %d.\n", rc);
5421 sc->tids.ntids = val[0];
5422 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
5423 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
5424 sc->tids.ntids -= sc->tids.nhpftids;
5426 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
5427 sc->params.hash_filter = 1;
5429 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
5430 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
5431 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
5432 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5433 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
5435 device_printf(sc->dev,
5436 "failed to query NIC parameters: %d.\n", rc);
5439 if ((int)val[1] > (int)val[0]) {
5440 sc->tids.etid_base = val[0];
5441 sc->tids.etid_end = val[1];
5442 sc->tids.netids = val[1] - val[0] + 1;
5443 sc->params.eo_wr_cred = val[2];
5444 sc->params.ethoffload = 1;
5448 /* query offload-related parameters */
5449 param[0] = FW_PARAM_DEV(NTID);
5450 param[1] = FW_PARAM_PFVF(SERVER_START);
5451 param[2] = FW_PARAM_PFVF(SERVER_END);
5452 param[3] = FW_PARAM_PFVF(TDDP_START);
5453 param[4] = FW_PARAM_PFVF(TDDP_END);
5454 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5455 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5457 device_printf(sc->dev,
5458 "failed to query TOE parameters: %d.\n", rc);
5461 sc->tids.ntids = val[0];
5462 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
5463 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
5464 sc->tids.ntids -= sc->tids.nhpftids;
5466 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
5467 if ((int)val[2] > (int)val[1]) {
5468 sc->tids.stid_base = val[1];
5469 sc->tids.nstids = val[2] - val[1] + 1;
5471 sc->vres.ddp.start = val[3];
5472 sc->vres.ddp.size = val[4] - val[3] + 1;
5473 sc->params.ofldq_wr_cred = val[5];
5474 sc->params.offload = 1;
5477 * The firmware attempts memfree TOE configuration for -SO cards
5478 * and will report toecaps=0 if it runs out of resources (this
5479 * depends on the config file). It may not report 0 for other
5480 * capabilities dependent on the TOE in this case. Set them to
5481 * 0 here so that the driver doesn't bother tracking resources
5482 * that will never be used.
5488 param[0] = FW_PARAM_PFVF(STAG_START);
5489 param[1] = FW_PARAM_PFVF(STAG_END);
5490 param[2] = FW_PARAM_PFVF(RQ_START);
5491 param[3] = FW_PARAM_PFVF(RQ_END);
5492 param[4] = FW_PARAM_PFVF(PBL_START);
5493 param[5] = FW_PARAM_PFVF(PBL_END);
5494 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5496 device_printf(sc->dev,
5497 "failed to query RDMA parameters(1): %d.\n", rc);
5500 sc->vres.stag.start = val[0];
5501 sc->vres.stag.size = val[1] - val[0] + 1;
5502 sc->vres.rq.start = val[2];
5503 sc->vres.rq.size = val[3] - val[2] + 1;
5504 sc->vres.pbl.start = val[4];
5505 sc->vres.pbl.size = val[5] - val[4] + 1;
5507 param[0] = FW_PARAM_PFVF(SQRQ_START);
5508 param[1] = FW_PARAM_PFVF(SQRQ_END);
5509 param[2] = FW_PARAM_PFVF(CQ_START);
5510 param[3] = FW_PARAM_PFVF(CQ_END);
5511 param[4] = FW_PARAM_PFVF(OCQ_START);
5512 param[5] = FW_PARAM_PFVF(OCQ_END);
5513 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
5515 device_printf(sc->dev,
5516 "failed to query RDMA parameters(2): %d.\n", rc);
5519 sc->vres.qp.start = val[0];
5520 sc->vres.qp.size = val[1] - val[0] + 1;
5521 sc->vres.cq.start = val[2];
5522 sc->vres.cq.size = val[3] - val[2] + 1;
5523 sc->vres.ocq.start = val[4];
5524 sc->vres.ocq.size = val[5] - val[4] + 1;
5526 param[0] = FW_PARAM_PFVF(SRQ_START);
5527 param[1] = FW_PARAM_PFVF(SRQ_END);
5528 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
5529 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5530 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
5532 device_printf(sc->dev,
5533 "failed to query RDMA parameters(3): %d.\n", rc);
5536 sc->vres.srq.start = val[0];
5537 sc->vres.srq.size = val[1] - val[0] + 1;
5538 sc->params.max_ordird_qp = val[2];
5539 sc->params.max_ird_adapter = val[3];
5541 if (sc->iscsicaps) {
5542 param[0] = FW_PARAM_PFVF(ISCSI_START);
5543 param[1] = FW_PARAM_PFVF(ISCSI_END);
5544 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5546 device_printf(sc->dev,
5547 "failed to query iSCSI parameters: %d.\n", rc);
5550 sc->vres.iscsi.start = val[0];
5551 sc->vres.iscsi.size = val[1] - val[0] + 1;
5553 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
5554 param[0] = FW_PARAM_PFVF(TLS_START);
5555 param[1] = FW_PARAM_PFVF(TLS_END);
5556 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
5558 device_printf(sc->dev,
5559 "failed to query TLS parameters: %d.\n", rc);
5562 sc->vres.key.start = val[0];
5563 sc->vres.key.size = val[1] - val[0] + 1;
5567 * We've got the params we wanted to query directly from the firmware.
5568 * Grab some others via other means.
5570 t4_init_sge_params(sc);
5571 t4_init_tp_params(sc);
5572 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
5573 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
5575 rc = t4_verify_chip_settings(sc);
5578 t4_init_rx_buf_info(sc);
5585 ktls_tick(void *arg)
5591 tstamp = tcp_ts_getticks();
5592 t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
5593 t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
5594 callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
5598 t6_config_kern_tls(struct adapter *sc, bool enable)
5601 uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5602 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) |
5603 V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) |
5604 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);
5606 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, ¶m);
5608 CH_ERR(sc, "failed to %s NIC TLS: %d\n",
5609 enable ? "enable" : "disable", rc);
5614 sc->flags |= KERN_TLS_ON;
5615 callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc,
5618 sc->flags &= ~KERN_TLS_ON;
5619 callout_stop(&sc->ktls_tick);
5627 set_params__post_init(struct adapter *sc)
5629 uint32_t mask, param, val;
5634 /* ask for encapsulated CPLs */
5635 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5637 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
5639 /* Enable 32b port caps if the firmware supports it. */
5640 param = FW_PARAM_PFVF(PORT_CAPS32);
5642 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val) == 0)
5643 sc->params.port_caps32 = 1;
5645 /* Let filter + maskhash steer to a part of the VI's RSS region. */
5646 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
5647 t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
5648 V_MASKFILTER(val - 1));
5650 mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER |
5651 F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN |
5652 F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
5653 F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM;
5655 if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) {
5656 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE,
5657 F_ATTACKFILTERENABLE);
5658 val |= F_DROPERRORATTACK;
5660 if (t4_drop_ip_fragments != 0) {
5661 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP,
5663 val |= F_DROPERRORFRAG;
5665 if (t4_drop_pkts_with_l2_errors != 0)
5666 val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN;
5667 if (t4_drop_pkts_with_l3_errors != 0) {
5668 val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN |
5671 if (t4_drop_pkts_with_l4_errors != 0) {
5672 val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
5673 F_DROPERRORTCPOPT | F_DROPERRORCSUM;
5675 t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val);
5679 * Override the TOE timers with user provided tunables. This is not the
5680 * recommended way to change the timers (the firmware config file is) so
5681 * these tunables are not documented.
5683 * All the timer tunables are in microseconds.
5685 if (t4_toe_keepalive_idle != 0) {
5686 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
5687 v &= M_KEEPALIVEIDLE;
5688 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
5689 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
5691 if (t4_toe_keepalive_interval != 0) {
5692 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
5693 v &= M_KEEPALIVEINTVL;
5694 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
5695 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
5697 if (t4_toe_keepalive_count != 0) {
5698 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
5699 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
5700 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
5701 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
5702 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
5704 if (t4_toe_rexmt_min != 0) {
5705 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
5707 t4_set_reg_field(sc, A_TP_RXT_MIN,
5708 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
5710 if (t4_toe_rexmt_max != 0) {
5711 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
5713 t4_set_reg_field(sc, A_TP_RXT_MAX,
5714 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
5716 if (t4_toe_rexmt_count != 0) {
5717 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
5718 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
5719 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
5720 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
5721 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
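/*
 * Each TP_TCP_BACKOFF register packs four 8-bit backoff indices, so the
 * loop below picks the register with (i & ~3) and the byte lane within it
 * with ((i & 3) << 3).
 */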
5723 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
5724 if (t4_toe_rexmt_backoff[i] != -1) {
5725 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
5726 shift = (i & 3) << 3;
5727 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
5728 M_TIMERBACKOFFINDEX0 << shift, v << shift);
5734 * Limit TOE connections to 2 reassembly "islands". This is
5735 * required to permit migrating TOE connections to either
6736 * ULP_MODE_TCPDDP or ULP_MODE_TLS.
5738 t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG, V_PASSMODE(M_PASSMODE),
5743 sc->tlst.inline_keys = t4_tls_inline_keys;
5744 sc->tlst.combo_wrs = t4_tls_combo_wrs;
5745 if (t4_kern_tls != 0 && is_t6(sc))
5746 t6_config_kern_tls(sc, true);
5752 #undef FW_PARAM_PFVF
5756 t4_set_desc(struct adapter *sc)
5759 struct adapter_params *p = &sc->params;
5761 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
5763 device_set_desc_copy(sc->dev, buf);
5767 ifmedia_add4(struct ifmedia *ifm, int m)
5770 ifmedia_add(ifm, m, 0, NULL);
5771 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
5772 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
5773 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
5777 * This is the selected media, which is not quite the same as the active media.
5778 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
5779 * and active are not the same, and "media: Ethernet selected" otherwise.
5782 set_current_media(struct port_info *pi)
5784 struct link_config *lc;
5785 struct ifmedia *ifm;
5789 PORT_LOCK_ASSERT_OWNED(pi);
5791 /* Leave current media alone if it's already set to IFM_NONE. */
5793 if (ifm->ifm_cur != NULL &&
5794 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
5798 if (lc->requested_aneg != AUTONEG_DISABLE &&
5799 lc->pcaps & FW_PORT_CAP32_ANEG) {
5800 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
5803 mword = IFM_ETHER | IFM_FDX;
5804 if (lc->requested_fc & PAUSE_TX)
5805 mword |= IFM_ETH_TXPAUSE;
5806 if (lc->requested_fc & PAUSE_RX)
5807 mword |= IFM_ETH_RXPAUSE;
5808 if (lc->requested_speed == 0)
5809 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
5811 speed = lc->requested_speed;
5812 mword |= port_mword(pi, speed_to_fwcap(speed));
5813 ifmedia_set(ifm, mword);
5817 * Returns true if the ifmedia list for the port cannot change.
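 * (These are BASE-T and backplane ports with no pluggable module, so the set
 * of media they can report is fixed at attach time.)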
5820 fixed_ifmedia(struct port_info *pi)
5823 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
5824 pi->port_type == FW_PORT_TYPE_BT_XFI ||
5825 pi->port_type == FW_PORT_TYPE_BT_XAUI ||
5826 pi->port_type == FW_PORT_TYPE_KX4 ||
5827 pi->port_type == FW_PORT_TYPE_KX ||
5828 pi->port_type == FW_PORT_TYPE_KR ||
5829 pi->port_type == FW_PORT_TYPE_BP_AP ||
5830 pi->port_type == FW_PORT_TYPE_BP4_AP ||
5831 pi->port_type == FW_PORT_TYPE_BP40_BA ||
5832 pi->port_type == FW_PORT_TYPE_KR4_100G ||
5833 pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
5834 pi->port_type == FW_PORT_TYPE_KR_XLAUI);
5838 build_medialist(struct port_info *pi)
5841 int unknown, mword, bit;
5842 struct link_config *lc;
5843 struct ifmedia *ifm;
5845 PORT_LOCK_ASSERT_OWNED(pi);
5847 if (pi->flags & FIXED_IFMEDIA)
5851 * Rebuild the ifmedia list.
5854 ifmedia_removeall(ifm);
5856 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */
5857 if (__predict_false(ss == 0)) { /* not supposed to happen. */
5860 MPASS(LIST_EMPTY(&ifm->ifm_list));
5861 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
5862 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
5867 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
5869 MPASS(speed & M_FW_PORT_CAP32_SPEED);
5871 mword = port_mword(pi, speed);
5872 if (mword == IFM_NONE) {
5874 } else if (mword == IFM_UNKNOWN)
5877 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
5880 if (unknown > 0) /* Add one unknown for all unknown media types. */
5881 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
5882 if (lc->pcaps & FW_PORT_CAP32_ANEG)
5883 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
5885 set_current_media(pi);
5889 * Initialize the requested fields in the link config based on driver tunables.
5892 init_link_config(struct port_info *pi)
5894 struct link_config *lc = &pi->link_cfg;
5896 PORT_LOCK_ASSERT_OWNED(pi);
5898 lc->requested_caps = 0;
5899 lc->requested_speed = 0;
5901 if (t4_autoneg == 0)
5902 lc->requested_aneg = AUTONEG_DISABLE;
5903 else if (t4_autoneg == 1)
5904 lc->requested_aneg = AUTONEG_ENABLE;
5906 lc->requested_aneg = AUTONEG_AUTO;
5908 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
5911 if (t4_fec & FEC_AUTO)
5912 lc->requested_fec = FEC_AUTO;
5913 else if (t4_fec == 0)
5914 lc->requested_fec = FEC_NONE;
5916 /* -1 is handled by the FEC_AUTO block above and not here. */
5917 lc->requested_fec = t4_fec &
5918 (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE);
5919 if (lc->requested_fec == 0)
5920 lc->requested_fec = FEC_AUTO;
5922 if (t4_force_fec < 0)
5924 else if (t4_force_fec > 0)
5931 * Makes sure that all requested settings comply with what's supported by the
5932 * port. Returns the number of settings that were invalid and had to be fixed.
5935 fixup_link_config(struct port_info *pi)
5938 struct link_config *lc = &pi->link_cfg;
5941 PORT_LOCK_ASSERT_OWNED(pi);
5943 /* Speed (when not autonegotiating) */
5944 if (lc->requested_speed != 0) {
5945 fwspeed = speed_to_fwcap(lc->requested_speed);
5946 if ((fwspeed & lc->pcaps) == 0) {
5948 lc->requested_speed = 0;
5952 /* Link autonegotiation */
5953 MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
5954 lc->requested_aneg == AUTONEG_DISABLE ||
5955 lc->requested_aneg == AUTONEG_AUTO);
5956 if (lc->requested_aneg == AUTONEG_ENABLE &&
5957 !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
5959 lc->requested_aneg = AUTONEG_AUTO;
5963 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
5964 if (lc->requested_fc & PAUSE_TX &&
5965 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) {
5967 lc->requested_fc &= ~PAUSE_TX;
5969 if (lc->requested_fc & PAUSE_RX &&
5970 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) {
5972 lc->requested_fc &= ~PAUSE_RX;
5974 if (!(lc->requested_fc & PAUSE_AUTONEG) &&
5975 !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) {
5977 lc->requested_fc |= PAUSE_AUTONEG;
5981 if ((lc->requested_fec & FEC_RS &&
5982 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) ||
5983 (lc->requested_fec & FEC_BASER_RS &&
5984 !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) {
5986 lc->requested_fec = FEC_AUTO;
5993 * Apply the requested L1 settings, which are expected to be valid, to the
5997 apply_link_config(struct port_info *pi)
5999 struct adapter *sc = pi->adapter;
6000 struct link_config *lc = &pi->link_cfg;
6004 ASSERT_SYNCHRONIZED_OP(sc);
6005 PORT_LOCK_ASSERT_OWNED(pi);
6007 if (lc->requested_aneg == AUTONEG_ENABLE)
6008 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG);
6009 if (!(lc->requested_fc & PAUSE_AUTONEG))
6010 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE);
6011 if (lc->requested_fc & PAUSE_TX)
6012 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX);
6013 if (lc->requested_fc & PAUSE_RX)
6014 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX);
6015 if (lc->requested_fec & FEC_RS)
6016 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS);
6017 if (lc->requested_fec & FEC_BASER_RS)
6018 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
6020 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
6022 /* Don't complain if the VF driver gets back an EPERM. */
6023 if (!(sc->flags & IS_VF) || rc != FW_EPERM)
6024 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
6027 * An L1_CFG will almost always result in a link-change event if
6028 * the link is up, and the driver will refresh the actual
6029 * fec/fc/etc. when the notification is processed. If the link
6030 * is down then the actual settings are meaningless.
6032 * This takes care of the case where a change in the L1 settings
6033 * may not result in a notification.
6035 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
6036 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
6041 #define FW_MAC_EXACT_CHUNK 7
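/*
 * The firmware's VI MAC command can only carry a handful of exact-match
 * entries per mailbox, so multicast addresses are programmed in chunks of
 * FW_MAC_EXACT_CHUNK (see add_maddr() below).
 */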
6044 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
6052 add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
6054 struct mcaddr_ctx *ctx = arg;
6055 struct vi_info *vi = if_getsoftc(ctx->ifp);
6056 struct port_info *pi = vi->pi;
6057 struct adapter *sc = pi->adapter;
6062 ctx->mcaddr[ctx->i] = LLADDR(sdl);
6063 MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i]));
6066 if (ctx->i == FW_MAC_EXACT_CHUNK) {
6067 ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del,
6068 ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0);
6072 for (j = 0; j < ctx->i; j++) {
6074 "failed to add mc address"
6076 "%02x:%02x:%02x rc=%d\n",
6077 ctx->mcaddr[j][0], ctx->mcaddr[j][1],
6078 ctx->mcaddr[j][2], ctx->mcaddr[j][3],
6079 ctx->mcaddr[j][4], ctx->mcaddr[j][5],
6092 * Program the port's XGMAC based on parameters in ifnet. The caller also
6093 * indicates which parameters should be programmed (the rest are left alone).
6096 update_mac_settings(if_t ifp, int flags)
6099 struct vi_info *vi = if_getsoftc(ifp);
6100 struct port_info *pi = vi->pi;
6101 struct adapter *sc = pi->adapter;
6102 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
6103 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
6105 ASSERT_SYNCHRONIZED_OP(sc);
6106 KASSERT(flags, ("%s: not told what to update.", __func__));
6108 if (flags & XGMAC_MTU)
6109 mtu = if_getmtu(ifp);
6111 if (flags & XGMAC_PROMISC)
6112 promisc = if_getflags(ifp) & IFF_PROMISC ? 1 : 0;
6114 if (flags & XGMAC_ALLMULTI)
6115 allmulti = if_getflags(ifp) & IFF_ALLMULTI ? 1 : 0;
6117 if (flags & XGMAC_VLANEX)
6118 vlanex = if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING ? 1 : 0;
6120 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
6121 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
6122 allmulti, 1, vlanex, false);
6124 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
6130 if (flags & XGMAC_UCADDR) {
6131 uint8_t ucaddr[ETHER_ADDR_LEN];
6133 bcopy(if_getlladdr(ifp), ucaddr, sizeof(ucaddr));
6134 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
6135 ucaddr, true, &vi->smt_idx);
6138 if_printf(ifp, "change_mac failed: %d\n", rc);
6141 vi->xact_addr_filt = rc;
6146 if (flags & XGMAC_MCADDRS) {
6147 struct epoch_tracker et;
6148 struct mcaddr_ctx ctx;
6157 * Unlike other drivers, we accumulate a list of pointers into
6158 * interface address lists and we need to keep it safe even
6159 * after if_foreach_llmaddr() returns, thus we must enter the
6162 NET_EPOCH_ENTER(et);
6163 if_foreach_llmaddr(ifp, add_maddr, &ctx);
6170 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
6171 ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
6175 for (j = 0; j < ctx.i; j++) {
6177 "failed to add mcast address"
6179 "%02x:%02x:%02x rc=%d\n",
6180 ctx.mcaddr[j][0], ctx.mcaddr[j][1],
6181 ctx.mcaddr[j][2], ctx.mcaddr[j][3],
6182 ctx.mcaddr[j][4], ctx.mcaddr[j][5],
6191 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
6193 if_printf(ifp, "failed to set mcast address hash: %d\n",
6196 /* We clobbered the VXLAN entry if there was one. */
6197 pi->vxlan_tcam_entry = false;
6201 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 &&
6202 pi->vxlan_tcam_entry == false) {
6203 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac,
6204 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
6208 if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n",
6211 MPASS(rc == sc->rawf_base + pi->port_id);
6213 pi->vxlan_tcam_entry = true;
6221 * {begin|end}_synchronized_op must be called from the same thread.
6224 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
6230 /* the caller thinks it's ok to sleep, but is it really? */
6231 if (flags & SLEEP_OK)
6232 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
6233 "begin_synchronized_op");
6244 if (vi && IS_DETACHING(vi)) {
6254 if (!(flags & SLEEP_OK)) {
6259 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
6265 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
6268 sc->last_op = wmesg;
6269 sc->last_op_thr = curthread;
6270 sc->last_op_flags = flags;
6274 if (!(flags & HOLD_LOCK) || rc)
6281 * Tell if_ioctl and if_init that the VI is going away. This is
6282 * a special variant of begin_synchronized_op and must be paired with a
6283 * call to end_vi_detach.
6286 begin_vi_detach(struct adapter *sc, struct vi_info *vi)
6292 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
6295 sc->last_op = "t4detach";
6296 sc->last_op_thr = curthread;
6297 sc->last_op_flags = 0;
6303 end_vi_detach(struct adapter *sc, struct vi_info *vi)
6306 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
6314 * {begin|end}_synchronized_op must be called from the same thread.
6317 end_synchronized_op(struct adapter *sc, int flags)
6320 if (flags & LOCK_HELD)
6321 ADAPTER_LOCK_ASSERT_OWNED(sc);
6325 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
6332 cxgbe_init_synchronized(struct vi_info *vi)
6334 struct port_info *pi = vi->pi;
6335 struct adapter *sc = pi->adapter;
6338 struct sge_txq *txq;
6340 ASSERT_SYNCHRONIZED_OP(sc);
6342 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
6343 return (0); /* already running */
6345 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
6346 return (rc); /* error message displayed already */
6348 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
6349 return (rc); /* error message displayed already */
6351 rc = update_mac_settings(ifp, XGMAC_ALL);
6353 goto done; /* error message displayed already */
6356 if (pi->up_vis == 0) {
6357 t4_update_port_info(pi);
6358 fixup_link_config(pi);
6359 build_medialist(pi);
6360 apply_link_config(pi);
6363 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
6365 if_printf(ifp, "enable_vi failed: %d\n", rc);
6371 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
6375 for_each_txq(vi, i, txq) {
6377 txq->eq.flags |= EQ_ENABLED;
6382 * The first iq of the first port to come up is used for tracing.
6384 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
6385 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
6386 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
6387 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
6388 V_QUEUENUMBER(sc->traceq));
6389 pi->flags |= HAS_TRACEQ;
6394 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
6395 if (pi->link_cfg.link_ok)
6396 t4_os_link_changed(pi);
6399 mtx_lock(&vi->tick_mtx);
6400 if (vi->pi->nvi > 1 || sc->flags & IS_VF)
6401 callout_reset(&vi->tick, hz, vi_tick, vi);
6403 callout_reset(&vi->tick, hz, cxgbe_tick, vi);
6404 mtx_unlock(&vi->tick_mtx);
6407 cxgbe_uninit_synchronized(vi);
6416 cxgbe_uninit_synchronized(struct vi_info *vi)
6418 struct port_info *pi = vi->pi;
6419 struct adapter *sc = pi->adapter;
6422 struct sge_txq *txq;
6424 ASSERT_SYNCHRONIZED_OP(sc);
6426 if (!(vi->flags & VI_INIT_DONE)) {
6427 if (__predict_false(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
6428 KASSERT(0, ("uninited VI is running"));
6429 if_printf(ifp, "uninited VI with running ifnet. "
6430 "vi->flags 0x%016lx, if_flags 0x%08x, "
6431 "if_drv_flags 0x%08x\n", vi->flags, if_getflags(ifp),
6432 if_getdrvflags(ifp));
6438 * Disable the VI so that all its data in either direction is discarded
6439 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
6440 * tick) intact as the TP can deliver negative advice or data that it's
6441 * holding in its RAM (for an offloaded connection) even after the VI is
6444 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
6446 if_printf(ifp, "disable_vi failed: %d\n", rc);
6450 for_each_txq(vi, i, txq) {
6452 txq->eq.flags &= ~EQ_ENABLED;
6456 mtx_lock(&vi->tick_mtx);
6457 callout_stop(&vi->tick);
6458 mtx_unlock(&vi->tick_mtx);
6461 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
6465 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
6467 if (pi->up_vis > 0) {
6472 pi->link_cfg.link_ok = false;
6473 pi->link_cfg.speed = 0;
6474 pi->link_cfg.link_down_rc = 255;
6475 t4_os_link_changed(pi);
6482 * It is ok for this function to fail midway and return right away. t4_detach
6483 * will walk the entire sc->irq list and clean up whatever is valid.
6486 t4_setup_intr_handlers(struct adapter *sc)
6488 int rc, rid, p, q, v;
6491 struct port_info *pi;
6493 struct sge *sge = &sc->sge;
6494 struct sge_rxq *rxq;
6496 struct sge_ofld_rxq *ofld_rxq;
6499 struct sge_nm_rxq *nm_rxq;
6502 int nbuckets = rss_getnumbuckets();
6509 rid = sc->intr_type == INTR_INTX ? 0 : 1;
6510 if (forwarding_intr_to_fwq(sc))
6511 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
6513 /* Multiple interrupts. */
6514 if (sc->flags & IS_VF)
6515 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
6516 ("%s: too few intr.", __func__));
6518 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
6519 ("%s: too few intr.", __func__));
6521 /* The first one is always error intr on PFs */
6522 if (!(sc->flags & IS_VF)) {
6523 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
6530 /* The second one is always the firmware event queue (first on VFs) */
6531 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
6537 for_each_port(sc, p) {
6539 for_each_vi(pi, v, vi) {
6540 vi->first_intr = rid - 1;
6542 if (vi->nnmrxq > 0) {
6543 int n = max(vi->nrxq, vi->nnmrxq);
6545 rxq = &sge->rxq[vi->first_rxq];
6547 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
6549 for (q = 0; q < n; q++) {
6550 snprintf(s, sizeof(s), "%x%c%x", p,
6556 irq->nm_rxq = nm_rxq++;
6558 if (irq->nm_rxq != NULL &&
6560 /* Netmap rx only */
6561 rc = t4_alloc_irq(sc, irq, rid,
6562 t4_nm_intr, irq->nm_rxq, s);
6564 if (irq->nm_rxq != NULL &&
6566 /* NIC and Netmap rx */
6567 rc = t4_alloc_irq(sc, irq, rid,
6568 t4_vi_intr, irq, s);
6571 if (irq->rxq != NULL &&
6572 irq->nm_rxq == NULL) {
6574 rc = t4_alloc_irq(sc, irq, rid,
6575 t4_intr, irq->rxq, s);
6581 bus_bind_intr(sc->dev, irq->res,
6582 rss_getcpu(q % nbuckets));
6590 for_each_rxq(vi, q, rxq) {
6591 snprintf(s, sizeof(s), "%x%c%x", p,
6593 rc = t4_alloc_irq(sc, irq, rid,
6598 bus_bind_intr(sc->dev, irq->res,
6599 rss_getcpu(q % nbuckets));
6607 for_each_ofld_rxq(vi, q, ofld_rxq) {
6608 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
6609 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
6620 MPASS(irq == &sc->irq[sc->intr_count]);
6626 write_global_rss_key(struct adapter *sc)
6630 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
6631 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
6633 CTASSERT(RSS_KEYSIZE == 40);
6635 rss_getkey((void *)&raw_rss_key[0]);
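/*
 * The key words are written to the hardware in reverse order (and
 * byte-swapped) relative to what rss_getkey() hands back; that is what the
 * loop below does before calling t4_write_rss_key().
 */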
6636 for (i = 0; i < nitems(rss_key); i++) {
6637 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
6639 t4_write_rss_key(sc, &rss_key[0], -1, 1);
6647 adapter_full_init(struct adapter *sc)
6651 ASSERT_SYNCHRONIZED_OP(sc);
6654 * queues that belong to the adapter (not any particular port).
6656 rc = t4_setup_adapter_queues(sc);
6660 for (i = 0; i < nitems(sc->tq); i++) {
6661 if (sc->tq[i] != NULL)
6663 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
6664 taskqueue_thread_enqueue, &sc->tq[i]);
6665 if (sc->tq[i] == NULL) {
6666 CH_ERR(sc, "failed to allocate task queue %d\n", i);
6669 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
6670 device_get_nameunit(sc->dev), i);
6673 if (!(sc->flags & IS_VF)) {
6674 write_global_rss_key(sc);
6681 adapter_init(struct adapter *sc)
6685 ASSERT_SYNCHRONIZED_OP(sc);
6686 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
6687 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
6688 ("%s: FULL_INIT_DONE already", __func__));
6690 rc = adapter_full_init(sc);
6692 adapter_full_uninit(sc);
6694 sc->flags |= FULL_INIT_DONE;
6703 adapter_full_uninit(struct adapter *sc)
6707 t4_teardown_adapter_queues(sc);
6709 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
6710 taskqueue_free(sc->tq[i]);
6714 sc->flags &= ~FULL_INIT_DONE;
6718 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
6719 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
6720 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
6721 RSS_HASHTYPE_RSS_UDP_IPV6)
6723 /* Translates kernel hash types to hardware. */
6725 hashconfig_to_hashen(int hashconfig)
6729 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
6730 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
6731 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
6732 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
6733 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
6734 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
6735 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
6737 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
6738 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
6739 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
6741 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
6742 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
6743 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
6744 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
6749 /* Translates hardware hash types to kernel. */
6751 hashen_to_hashconfig(int hashen)
6755 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
6757 * If UDP hashing was enabled it must have been enabled for
6758 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
6759 * enabling any 4-tuple hash is a nonsensical configuration.
6761 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6762 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
6764 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
6765 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
6766 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
6767 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
6769 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
6770 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
6771 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
6772 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
6773 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
6774 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
6775 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
6776 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
6778 return (hashconfig);
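/*
 * Note that the two translations above are not exact inverses: enabling a UDP
 * hash forces the corresponding 4-tuple hash on as well, so the hardware
 * configuration may end up a superset of what the kernel requested.
 * vi_full_init() below detects and reports any such "extra" hashes.
 */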
6786 vi_full_init(struct vi_info *vi)
6788 struct adapter *sc = vi->adapter;
6789 struct sge_rxq *rxq;
6792 int nbuckets = rss_getnumbuckets();
6793 int hashconfig = rss_gethashconfig();
6797 ASSERT_SYNCHRONIZED_OP(sc);
6800 * Allocate tx/rx/fl queues for this VI.
6802 rc = t4_setup_vi_queues(vi);
6807 * Setup RSS for this VI. Save a copy of the RSS table for later use.
6809 if (vi->nrxq > vi->rss_size) {
6810 CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); "
6811 "some queues will never receive traffic.\n", vi->nrxq,
6813 } else if (vi->rss_size % vi->nrxq) {
6814 CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); "
6815 "expect uneven traffic distribution.\n", vi->nrxq,
6819 if (vi->nrxq != nbuckets) {
6820 CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d);"
6821 "performance will be impacted.\n", vi->nrxq, nbuckets);
6824 if (vi->rss == NULL)
6825 vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE,
6827 for (i = 0; i < vi->rss_size;) {
6829 j = rss_get_indirection_to_bucket(i);
6831 rxq = &sc->sge.rxq[vi->first_rxq + j];
6832 vi->rss[i++] = rxq->iq.abs_id;
6834 for_each_rxq(vi, j, rxq) {
6835 vi->rss[i++] = rxq->iq.abs_id;
6836 if (i == vi->rss_size)
6842 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
6843 vi->rss, vi->rss_size);
6845 CH_ERR(vi, "rss_config failed: %d\n", rc);
6850 vi->hashen = hashconfig_to_hashen(hashconfig);
6853 * We may have had to enable some hashes even though the global config
6854 * wants them disabled. This is a potential problem that must be
6855 * reported to the user.
6857 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
6860 * If we consider only the supported hash types, then the enabled hashes
6861 * are a superset of the requested hashes. In other words, there cannot
6862 * be any supported hash that was requested but not enabled, but there
6863 * can be hashes that were not requested but had to be enabled.
6865 extra &= SUPPORTED_RSS_HASHTYPES;
6866 MPASS((extra & hashconfig) == 0);
6870 "global RSS config (0x%x) cannot be accommodated.\n",
6873 if (extra & RSS_HASHTYPE_RSS_IPV4)
6874 CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n");
6875 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
6876 CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n");
6877 if (extra & RSS_HASHTYPE_RSS_IPV6)
6878 CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n");
6879 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
6880 CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n");
6881 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
6882 CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n");
6883 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
6884 CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n");
6886 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
6887 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
6888 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6889 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
6891 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0],
6894 CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc);
6902 vi_init(struct vi_info *vi)
6906 ASSERT_SYNCHRONIZED_OP(vi->adapter);
6907 KASSERT((vi->flags & VI_INIT_DONE) == 0,
6908 ("%s: VI_INIT_DONE already", __func__));
6910 rc = vi_full_init(vi);
6914 vi->flags |= VI_INIT_DONE;
6923 vi_full_uninit(struct vi_info *vi)
6926 if (vi->flags & VI_INIT_DONE) {
6928 free(vi->rss, M_CXGBE);
6929 free(vi->nm_rss, M_CXGBE);
6932 t4_teardown_vi_queues(vi);
6933 vi->flags &= ~VI_INIT_DONE;
6937 quiesce_txq(struct sge_txq *txq)
6939 struct sge_eq *eq = &txq->eq;
6940 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
6942 MPASS(eq->flags & EQ_SW_ALLOCATED);
6943 MPASS(!(eq->flags & EQ_ENABLED));
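/*
 * spg points at the status page that follows the last descriptor of the eq;
 * the hardware reports its consumer index there, which is what the
 * EQ_HW_ALLOCATED case below waits on.
 */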
6945 /* Wait for the mp_ring to empty. */
6946 while (!mp_ring_is_idle(txq->r)) {
6947 mp_ring_check_drainage(txq->r, 4096);
6948 pause("rquiesce", 1);
6950 MPASS(txq->txp.npkt == 0);
6952 if (eq->flags & EQ_HW_ALLOCATED) {
6954 * Hardware is alive and working normally. Wait for it to
6955 * finish and then wait for the driver to catch up and reclaim
6958 while (spg->cidx != htobe16(eq->pidx))
6959 pause("equiesce", 1);
6960 while (eq->cidx != eq->pidx)
6961 pause("dquiesce", 1);
6964 * Hardware is unavailable. Discard all pending tx and reclaim
6965 * descriptors directly.
6968 while (eq->cidx != eq->pidx) {
6969 struct mbuf *m, *nextpkt;
6970 struct tx_sdesc *txsd;
6972 txsd = &txq->sdesc[eq->cidx];
6973 for (m = txsd->m; m != NULL; m = nextpkt) {
6974 nextpkt = m->m_nextpkt;
6975 m->m_nextpkt = NULL;
6978 IDXINCR(eq->cidx, txsd->desc_used, eq->sidx);
6980 spg->pidx = spg->cidx = htobe16(eq->cidx);
6986 quiesce_wrq(struct sge_wrq *wrq)
6993 quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
6995 /* Synchronize with the interrupt handler */
6996 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
7000 MPASS(iq->flags & IQ_HAS_FL);
7002 mtx_lock(&sc->sfl_lock);
7004 fl->flags |= FL_DOOMED;
7006 callout_stop(&sc->sfl_callout);
7007 mtx_unlock(&sc->sfl_lock);
7009 KASSERT((fl->flags & FL_STARVING) == 0,
7010 ("%s: still starving", __func__));
7012 /* Release all buffers if hardware is no longer available. */
7013 if (!(iq->flags & IQ_HW_ALLOCATED))
7014 free_fl_buffers(sc, fl);
7019 * Wait for all activity on all the queues of the VI to complete. It is assumed
7020 * that no new work is being enqueued by the hardware or the driver. That part
7021 * should be arranged before calling this function.
7024 quiesce_vi(struct vi_info *vi)
7027 struct adapter *sc = vi->adapter;
7028 struct sge_rxq *rxq;
7029 struct sge_txq *txq;
7031 struct sge_ofld_rxq *ofld_rxq;
7033 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7034 struct sge_ofld_txq *ofld_txq;
7037 if (!(vi->flags & VI_INIT_DONE))
7040 for_each_txq(vi, i, txq) {
7044 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7045 for_each_ofld_txq(vi, i, ofld_txq) {
7046 quiesce_wrq(&ofld_txq->wrq);
7050 for_each_rxq(vi, i, rxq) {
7051 quiesce_iq_fl(sc, &rxq->iq, &rxq->fl);
7055 for_each_ofld_rxq(vi, i, ofld_rxq) {
7056 quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl);
7062 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
7063 driver_intr_t *handler, void *arg, char *name)
7068 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
7069 RF_SHAREABLE | RF_ACTIVE);
7070 if (irq->res == NULL) {
7071 device_printf(sc->dev,
7072 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
7076 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
7077 NULL, handler, arg, &irq->tag);
7079 device_printf(sc->dev,
7080 "failed to setup interrupt for rid %d, name %s: %d\n",
7083 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
7089 t4_free_irq(struct adapter *sc, struct irq *irq)
7092 bus_teardown_intr(sc->dev, irq->res, irq->tag);
7094 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
7096 bzero(irq, sizeof(*irq));
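/* Fill in a register dump: version stamp plus a full register read. */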
7102 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
7105 regs->version = chip_id(sc) | chip_rev(sc) << 10;
7106 t4_get_regs(sc, buf, regs->len);
7109 #define A_PL_INDIR_CMD 0x1f8
7111 #define S_PL_AUTOINC 31
7112 #define M_PL_AUTOINC 0x1U
7113 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
7114 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
7116 #define S_PL_VFID 20
7117 #define M_PL_VFID 0xffU
7118 #define V_PL_VFID(x) ((x) << S_PL_VFID)
7119 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
7122 #define M_PL_ADDR 0xfffffU
7123 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
7124 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
7126 #define A_PL_INDIR_DATA 0x1fc
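/*
 * Read one 64-bit MPS statistic for a VF.  A VF reads its own counters
 * directly; the PF goes through the PL indirect window (reg_lock held).
 */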
7129 read_vf_stat(struct adapter *sc, u_int vin, int reg)
7133 if (sc->flags & IS_VF) {
7134 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
7135 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
7137 mtx_assert(&sc->reg_lock, MA_OWNED);
7138 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
7139 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
7140 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
7141 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
7143 return (((uint64_t)stats[1]) << 32 | stats[0]);
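/* Read the complete set of MPS VF statistics into *stats. */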
7147 t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
7150 #define GET_STAT(name) \
7151 read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
7153 if (!(sc->flags & IS_VF))
7154 mtx_lock(&sc->reg_lock);
7155 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
7156 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
7157 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
7158 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
7159 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
7160 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
7161 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
7162 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
7163 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
7164 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
7165 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
7166 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
7167 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
7168 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
7169 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
7170 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
7171 if (!(sc->flags & IS_VF))
7172 mtx_unlock(&sc->reg_lock);
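/* Clear all MPS statistics for a VF via the PL indirect window. */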
7178 t4_clr_vi_stats(struct adapter *sc, u_int vin)
7182 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
7183 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
7184 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
7185 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
7186 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
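/* Refresh the VI's firmware statistics, at most once every 250ms. */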
7190 vi_refresh_stats(struct vi_info *vi)
7193 const struct timeval interval = {0, 250000}; /* 250ms */
7195 mtx_assert(&vi->tick_mtx, MA_OWNED);
7197 if (vi->flags & VI_SKIP_STATS)
7201 timevalsub(&tv, &interval);
7202 if (timevalcmp(&tv, &vi->last_refreshed, <))
7205 t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats);
7206 getmicrotime(&vi->last_refreshed);
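/*
 * Refresh the port's MAC statistics and accumulate the TP tunnel congestion
 * drops for its rx channels.  Rate limited to once every 250ms, like
 * vi_refresh_stats.
 */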
7210 cxgbe_refresh_stats(struct vi_info *vi)
7212 u_int i, v, tnl_cong_drops, chan_map;
7214 const struct timeval interval = {0, 250000}; /* 250ms */
7215 struct port_info *pi;
7218 mtx_assert(&vi->tick_mtx, MA_OWNED);
7220 if (vi->flags & VI_SKIP_STATS)
7224 timevalsub(&tv, &interval);
7225 if (timevalcmp(&tv, &vi->last_refreshed, <))
7231 t4_get_port_stats(sc, pi->port_id, &pi->stats);
7232 chan_map = pi->rx_e_chan_map;
7234 i = ffs(chan_map) - 1;
7235 mtx_lock(&sc->reg_lock);
7236 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
7237 A_TP_MIB_TNL_CNG_DROP_0 + i);
7238 mtx_unlock(&sc->reg_lock);
7239 tnl_cong_drops += v;
7240 chan_map &= ~(1 << i);
7242 pi->tnl_cong_drops = tnl_cong_drops;
7243 getmicrotime(&vi->last_refreshed);
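/* Periodic callout for the main VI; reschedules itself every second. */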
7247 cxgbe_tick(void *arg)
7249 struct vi_info *vi = arg;
7251 MPASS(IS_MAIN_VI(vi));
7252 mtx_assert(&vi->tick_mtx, MA_OWNED);
7254 cxgbe_refresh_stats(vi);
7255 callout_schedule(&vi->tick, hz);
7261 struct vi_info *vi = arg;
7263 mtx_assert(&vi->tick_mtx, MA_OWNED);
7265 vi_refresh_stats(vi);
7266 callout_schedule(&vi->tick, hz);
7270 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
7272 static char *caps_decoder[] = {
7273 "\20\001IPMI\002NCSI", /* 0: NBM */
7274 "\20\001PPP\002QFC\003DCBX", /* 1: link */
7275 "\20\001INGRESS\002EGRESS", /* 2: switch */
7276 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
7277 "\006HASHFILTER\007ETHOFLD",
7278 "\20\001TOE", /* 4: TOE */
7279 "\20\001RDDP\002RDMAC", /* 5: RDMA */
7280 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
7281 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
7282 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
7284 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
7285 "\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */
7287 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
7288 "\004PO_INITIATOR\005PO_TARGET",
7292 t4_sysctls(struct adapter *sc)
7294 struct sysctl_ctx_list *ctx = &sc->ctx;
7295 struct sysctl_oid *oid;
7296 struct sysctl_oid_list *children, *c0;
7297 static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
7302 oid = device_get_sysctl_tree(sc->dev);
7303 c0 = children = SYSCTL_CHILDREN(oid);
7305 sc->sc_do_rxcopy = 1;
7306 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
7307 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
7309 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
7310 sc->params.nports, "# of ports");
7312 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
7313 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells,
7314 (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A",
7315 "available doorbells");
7317 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
7318 sc->params.vpd.cclk, "core clock frequency (in kHz)");
7320 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
7321 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
7322 sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val),
7323 sysctl_int_array, "A", "interrupt holdoff timer values (us)");
7325 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
7326 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
7327 sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val),
7328 sysctl_int_array, "A", "interrupt holdoff packet counter values");
7330 t4_sge_sysctls(sc, ctx, children);
7332 sc->lro_timeout = 100;
7333 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
7334 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
7336 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
7337 &sc->debug_flags, 0, "flags to enable runtime debugging");
7339 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
7340 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
7342 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
7343 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
7345 if (sc->flags & IS_VF)
7348 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
7349 NULL, chip_rev(sc), "chip hardware revision");
7351 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
7352 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
7354 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
7355 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
7357 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
7358 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
7360 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
7361 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
7363 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
7364 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
7366 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
7367 sc->er_version, 0, "expansion ROM version");
7369 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
7370 sc->bs_version, 0, "bootstrap firmware version");
7372 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
7373 NULL, sc->params.scfg_vers, "serial config version");
7375 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
7376 NULL, sc->params.vpd_vers, "VPD version");
7378 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
7379 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
7381 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
7382 sc->cfcsum, "config file checksum");
7384 #define SYSCTL_CAP(name, n, text) \
7385 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
7386 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \
7387 (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \
7388 "available " text " capabilities")
7390 SYSCTL_CAP(nbmcaps, 0, "NBM");
7391 SYSCTL_CAP(linkcaps, 1, "link");
7392 SYSCTL_CAP(switchcaps, 2, "switch");
7393 SYSCTL_CAP(niccaps, 3, "NIC");
7394 SYSCTL_CAP(toecaps, 4, "TCP offload");
7395 SYSCTL_CAP(rdmacaps, 5, "RDMA");
7396 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
7397 SYSCTL_CAP(cryptocaps, 7, "crypto");
7398 SYSCTL_CAP(fcoecaps, 8, "FCoE");
7401 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
7402 NULL, sc->tids.nftids, "number of filters");
7404 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7405 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7406 sysctl_temperature, "I", "chip temperature (in Celsius)");
7407 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor",
7408 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7409 sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
7411 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
7412 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7413 sysctl_loadavg, "A",
7414 "microprocessor load averages (debug firmwares only)");
7416 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
7417 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
7418 "I", "core Vdd (in mV)");
7420 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
7421 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS,
7422 sysctl_cpus, "A", "local CPUs");
7424 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
7425 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS,
7426 sysctl_cpus, "A", "preferred CPUs for interrupts");
7428 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
7429 &sc->swintr, 0, "software triggered interrupts");
7431 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset",
7432 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I",
7433 "1 = reset adapter, 0 = zero reset counter");
7436 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
7438 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
7439 CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
7440 "logs and miscellaneous information");
7441 children = SYSCTL_CHILDREN(oid);
7443 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
7444 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7445 sysctl_cctrl, "A", "congestion control");
7447 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
7448 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7449 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
7451 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
7452 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
7453 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
7455 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
7456 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
7457 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
7459 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
7460 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
7461 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
7463 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
7464 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
7465 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
7467 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
7468 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
7469 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
7471 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
7472 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7473 sysctl_cim_la, "A", "CIM logic analyzer");
7475 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
7476 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7477 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
7479 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
7480 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7481 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
7483 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
7484 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7485 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
7487 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
7488 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7489 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
7491 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
7492 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7493 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
7495 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
7496 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7497 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
7499 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
7500 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7501 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
7503 if (chip_id(sc) > CHELSIO_T4) {
7504 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
7505 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7506 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
7507 "CIM OBQ 6 (SGE0-RX)");
7509 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
7510 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7511 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
7512 "CIM OBQ 7 (SGE1-RX)");
7515 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
7516 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7517 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
7519 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
7520 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7521 sysctl_cim_qcfg, "A", "CIM queue configuration");
7523 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
7524 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7525 sysctl_cpl_stats, "A", "CPL statistics");
7527 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
7528 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7529 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
7531 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats",
7532 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7533 sysctl_tid_stats, "A", "tid stats");
7535 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
7536 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7537 sysctl_devlog, "A", "firmware's device log");
7539 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
7540 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7541 sysctl_fcoe_stats, "A", "FCoE statistics");
7543 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
7544 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7545 sysctl_hw_sched, "A", "hardware scheduler");
7547 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
7548 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7549 sysctl_l2t, "A", "hardware L2 table");
7551 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
7552 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7553 sysctl_smt, "A", "hardware source MAC table");
7556 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
7557 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7558 sysctl_clip, "A", "active CLIP table entries");
7561 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
7562 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7563 sysctl_lb_stats, "A", "loopback statistics");
7565 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
7566 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7567 sysctl_meminfo, "A", "memory regions");
7569 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
7570 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7571 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
7572 "A", "MPS TCAM entries");
7574 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
7575 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7576 sysctl_path_mtus, "A", "path MTUs");
7578 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
7579 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7580 sysctl_pm_stats, "A", "PM statistics");
7582 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
7583 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7584 sysctl_rdma_stats, "A", "RDMA statistics");
7586 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
7587 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7588 sysctl_tcp_stats, "A", "TCP statistics");
7590 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
7591 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7592 sysctl_tids, "A", "TID information");
7594 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
7595 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7596 sysctl_tp_err_stats, "A", "TP error statistics");
7598 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats",
7599 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7600 sysctl_tnl_stats, "A", "TP tunnel statistics");
7602 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
7603 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
7604 sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask");
7606 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
7607 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7608 sysctl_tp_la, "A", "TP logic analyzer");
7610 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
7611 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7612 sysctl_tx_rate, "A", "Tx rate");
7614 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
7615 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7616 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
7618 if (chip_id(sc) >= CHELSIO_T5) {
7619 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
7620 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7621 sysctl_wcwr_stats, "A", "write combined work requests");
7629 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls",
7630 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters");
7631 children = SYSCTL_CHILDREN(oid);
7633 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys",
7634 CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS "
7635 "keys in work requests (1) or attempt to store TLS keys "
7639 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs",
7640 CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
7641 "combine TCB field updates with TLS record work "
7647 if (is_offload(sc)) {
7654 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe",
7655 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters");
7656 children = SYSCTL_CHILDREN(oid);
7658 sc->tt.cong_algorithm = -1;
7659 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
7660 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
7661 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
7665 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
7666 &sc->tt.sndbuf, 0, "hardware send buffer");
7669 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
7670 CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, "");
7671 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
7672 &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");
7674 sc->tt.rx_coalesce = -1;
7675 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
7676 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
7679 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT |
7680 CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
7681 "Inline TLS allowed");
7683 sc->tt.tx_align = -1;
7684 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
7685 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
7687 sc->tt.tx_zcopy = 0;
7688 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
7689 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
7690 "Enable zero-copy aio_write(2)");
7692 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
7693 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7694 "cop_managed_offloading", CTLFLAG_RW,
7695 &sc->tt.cop_managed_offloading, 0,
7696 "COP (Connection Offload Policy) controls all TOE offload");
7698 sc->tt.autorcvbuf_inc = 16 * 1024;
7699 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
7700 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
7701 "autorcvbuf increment");
7703 sc->tt.update_hc_on_pmtu_change = 1;
7704 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
7705 "update_hc_on_pmtu_change", CTLFLAG_RW,
7706 &sc->tt.update_hc_on_pmtu_change, 0,
7707 "Update hostcache entry if the PMTU changes");
7710 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "iso", CTLFLAG_RW,
7711 &sc->tt.iso, 0, "Enable iSCSI segmentation offload");
7713 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
7714 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7715 sysctl_tp_tick, "A", "TP timer tick (us)");
7717 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
7718 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
7719 sysctl_tp_tick, "A", "TCP timestamp tick (us)");
7721 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
7722 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
7723 sysctl_tp_tick, "A", "DACK tick (us)");
7725 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
7726 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
7727 sysctl_tp_dack_timer, "IU", "DACK timer (us)");
7729 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
7730 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7731 A_TP_RXT_MIN, sysctl_tp_timer, "LU",
7732 "Minimum retransmit interval (us)");
7734 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
7735 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7736 A_TP_RXT_MAX, sysctl_tp_timer, "LU",
7737 "Maximum retransmit interval (us)");
7739 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
7740 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7741 A_TP_PERS_MIN, sysctl_tp_timer, "LU",
7742 "Persist timer min (us)");
7744 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
7745 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7746 A_TP_PERS_MAX, sysctl_tp_timer, "LU",
7747 "Persist timer max (us)");
7749 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
7750 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7751 A_TP_KEEP_IDLE, sysctl_tp_timer, "LU",
7752 "Keepalive idle timer (us)");
7754 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
7755 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7756 A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
7757 "Keepalive interval timer (us)");
7759 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
7760 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7761 A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)");
7763 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
7764 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7765 A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
7766 "FINWAIT2 timer (us)");
7768 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
7769 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7770 S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
7771 "Number of SYN retransmissions before abort");
7773 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
7774 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7775 S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
7776 "Number of retransmissions before abort");
7778 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
7779 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7780 S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
7781 "Number of keepalive probes before abort");
7783 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
7784 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7785 "TOE retransmit backoffs");
7786 children = SYSCTL_CHILDREN(oid);
7787 for (i = 0; i < 16; i++) {
7788 snprintf(s, sizeof(s), "%u", i);
7789 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
7790 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7791 i, sysctl_tp_backoff, "IU",
7792 "TOE retransmit backoff");
7799 vi_sysctls(struct vi_info *vi)
7801 struct sysctl_ctx_list *ctx = &vi->ctx;
7802 struct sysctl_oid *oid;
7803 struct sysctl_oid_list *children;
7806 * dev.v?(cxgbe|cxl).X.
7808 oid = device_get_sysctl_tree(vi->dev);
7809 children = SYSCTL_CHILDREN(oid);
7811 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
7812 vi->viid, "VI identifier");
7813 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
7814 &vi->nrxq, 0, "# of rx queues");
7815 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
7816 &vi->ntxq, 0, "# of tx queues");
7817 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
7818 &vi->first_rxq, 0, "index of first rx queue");
7819 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
7820 &vi->first_txq, 0, "index of first tx queue");
7821 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
7822 vi->rss_base, "start of RSS indirection table");
7823 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
7824 vi->rss_size, "size of RSS indirection table");
7826 if (IS_MAIN_VI(vi)) {
7827 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
7828 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7829 sysctl_noflowq, "IU",
7830 "Reserve queue 0 for non-flowid packets");
7833 if (vi->adapter->flags & IS_VF) {
7834 MPASS(vi->flags & TX_USES_VM_WR);
7835 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD,
7836 NULL, 1, "use VM work requests for transmit");
7838 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
7839 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7840 sysctl_tx_vm_wr, "I", "use VM work requests for transmit");
7844 if (vi->nofldrxq != 0) {
7845 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
7847 "# of rx queues for offloaded TCP connections");
7848 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
7849 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
7850 "index of first TOE rx queue");
7851 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
7852 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7853 sysctl_holdoff_tmr_idx_ofld, "I",
7854 "holdoff timer index for TOE queues");
7855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
7856 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7857 sysctl_holdoff_pktc_idx_ofld, "I",
7858 "holdoff packet counter index for TOE queues");
7861 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
7862 if (vi->nofldtxq != 0) {
7863 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
7865 "# of tx queues for TOE/ETHOFLD");
7866 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
7867 CTLFLAG_RD, &vi->first_ofld_txq, 0,
7868 "index of first TOE/ETHOFLD tx queue");
7872 if (vi->nnmrxq != 0) {
7873 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
7874 &vi->nnmrxq, 0, "# of netmap rx queues");
7875 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
7876 &vi->nnmtxq, 0, "# of netmap tx queues");
7877 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
7878 CTLFLAG_RD, &vi->first_nm_rxq, 0,
7879 "index of first netmap rx queue");
7880 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
7881 CTLFLAG_RD, &vi->first_nm_txq, 0,
7882 "index of first netmap tx queue");
7886 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
7887 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7888 sysctl_holdoff_tmr_idx, "I", "holdoff timer index");
7889 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
7890 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7891 sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index");
7893 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
7894 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7895 sysctl_qsize_rxq, "I", "rx queue size");
7896 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
7897 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7898 sysctl_qsize_txq, "I", "tx queue size");
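/*
 * Per-port sysctls: link, PAUSE, and FEC settings, the tx scheduler traffic
 * classes under dev.(cxgbe|cxl).X.tc, and the MAC statistics under
 * dev.(cxgbe|cxl).X.stats.
 */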
7902 cxgbe_sysctls(struct port_info *pi)
7904 struct sysctl_ctx_list *ctx = &pi->ctx;
7905 struct sysctl_oid *oid;
7906 struct sysctl_oid_list *children, *children2;
7907 struct adapter *sc = pi->adapter;
7910 static char *tc_flags = "\20\1USER";
7915 oid = device_get_sysctl_tree(pi->dev);
7916 children = SYSCTL_CHILDREN(oid);
7918 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc",
7919 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7920 sysctl_linkdnrc, "A", "reason why link is down");
7921 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
7922 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7923 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7924 sysctl_btphy, "I", "PHY temperature (in Celsius)");
7925 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
7926 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1,
7927 sysctl_btphy, "I", "PHY firmware version");
7930 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
7931 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7932 sysctl_pause_settings, "A",
7933 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
7934 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_fec",
7935 CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_link_fec, "A",
7936 "FEC in use on the link");
7937 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "requested_fec",
7938 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7939 sysctl_requested_fec, "A",
7940 "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)");
7941 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec",
7942 CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A",
7943 "FEC recommended by the cable/transceiver");
7944 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
7945 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7946 sysctl_autoneg, "I",
7947 "autonegotiation (-1 = not supported)");
7948 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "force_fec",
7949 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7950 sysctl_force_fec, "I", "when to use FORCE_FEC bit for link config");
7952 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rcaps", CTLFLAG_RD,
7953 &pi->link_cfg.requested_caps, 0, "L1 config requested by driver");
7954 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD,
7955 &pi->link_cfg.pcaps, 0, "port capabilities");
7956 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD,
7957 &pi->link_cfg.acaps, 0, "advertised capabilities");
7958 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD,
7959 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities");
7961 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
7962 port_top_speed(pi), "max speed (in Gbps)");
7963 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
7964 pi->mps_bg_map, "MPS buffer group map");
7965 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
7966 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
7967 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_c_chan", CTLFLAG_RD, NULL,
7968 pi->rx_c_chan, "TP rx c-channel");
7970 if (sc->flags & IS_VF)
7974 * dev.(cxgbe|cxl).X.tc.
7976 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc",
7977 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7978 "Tx scheduler traffic classes (cl_rl)");
7979 children2 = SYSCTL_CHILDREN(oid);
7980 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
7981 CTLFLAG_RW, &pi->sched_params->pktsize, 0,
7982 "pktsize for per-flow cl-rl (0 means up to the driver)");
7983 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
7984 CTLFLAG_RW, &pi->sched_params->burstsize, 0,
7985 "burstsize for per-flow cl-rl (0 means up to the driver)");
7986 for (i = 0; i < sc->params.nsched_cls; i++) {
7987 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
7989 snprintf(name, sizeof(name), "%d", i);
7990 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
7991 SYSCTL_CHILDREN(oid), OID_AUTO, name,
7992 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class"));
7993 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "state",
7994 CTLFLAG_RD, &tc->state, 0, "current state");
7995 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
7996 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags,
7997 (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags");
7998 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
7999 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
8000 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
8001 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
8002 (pi->port_id << 16) | i, sysctl_tc_params, "A",
8003 "traffic class parameters");
8007 * dev.cxgbe.X.stats.
8009 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
8010 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics");
8011 children = SYSCTL_CHILDREN(oid);
8012 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
8013 &pi->tx_parse_error, 0,
8014 "# of tx packets with invalid length or # of segments");
8016 #define T4_REGSTAT(name, stat, desc) \
8017 SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
8018 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
8019 t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
8020 sysctl_handle_t4_reg64, "QU", desc)
8022 /* We get these from port_stats and they may be stale by up to 1s */
8023 #define T4_PORTSTAT(name, desc) \
8024 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
8025 &pi->stats.name, desc)
8027 T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
8028 T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
8029 T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
8030 T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
8031 T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
8032 T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
8033 T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
8034 T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
8035 T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
8036 T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
8037 T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
8038 T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
8039 T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
8040 T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
8041 T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
8042 T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
8043 T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
8044 T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
8045 T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
8046 T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
8047 T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
8048 T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
8049 T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
8051 T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
8052 T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
8053 T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
8054 T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
8055 T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
8056 T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
8057 T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
8059 T4_PORTSTAT(rx_fcs_err,
8060 "# of frames received with bad FCS since last link up");
8062 T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
8063 "# of frames received with bad FCS");
8065 T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
8066 T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
8067 T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
8068 T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
8069 T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
8070 T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
8071 T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
8072 T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
8073 T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
8074 T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
8075 T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
8076 T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
8077 T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
8078 T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
8079 T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
8080 T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
8081 T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
8082 T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
8083 T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
8085 T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
8086 T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
8087 T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
8088 T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
8089 T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
8090 T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
8091 T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
8092 T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
8099 sysctl_int_array(SYSCTL_HANDLER_ARGS)
8101 int rc, *i, space = 0;
8104 sbuf_new_for_sysctl(&sb, NULL, 64, req);
8105 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
8107 sbuf_printf(&sb, " ");
8108 sbuf_printf(&sb, "%d", *i);
8111 rc = sbuf_finish(&sb);
8117 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
8122 rc = sysctl_wire_old_buffer(req, 0);
8126 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8130 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
8131 rc = sbuf_finish(sb);
8138 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
8143 rc = sysctl_wire_old_buffer(req, 0);
8147 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8151 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
8152 rc = sbuf_finish(sb);
8159 sysctl_btphy(SYSCTL_HANDLER_ARGS)
8161 struct port_info *pi = arg1;
8163 struct adapter *sc = pi->adapter;
8167 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
8170 if (hw_off_limits(sc))
8173 /* XXX: magic numbers */
8174 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e,
8175 op ? 0x20 : 0xc820, &v);
8177 end_synchronized_op(sc, 0);
8183 rc = sysctl_handle_int(oidp, &v, 0, req);
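/* Enable or disable the reservation of txq 0 for packets without a flowid. */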
8188 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
8190 struct vi_info *vi = arg1;
8193 val = vi->rsrv_noflowq;
8194 rc = sysctl_handle_int(oidp, &val, 0, req);
8195 if (rc != 0 || req->newptr == NULL)
8198 if ((val >= 1) && (vi->ntxq > 1))
8199 vi->rsrv_noflowq = 1;
8201 vi->rsrv_noflowq = 0;
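/*
 * Switch the VI between VM and regular tx work requests.  Every tx queue gets
 * a new cpl_ctrl0, TSO segment limit, and packet-coalescing limit so that
 * parse_pkt and eth_tx never see a mixed setting.
 */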
8207 sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
8209 struct vi_info *vi = arg1;
8210 struct adapter *sc = vi->adapter;
8213 MPASS(!(sc->flags & IS_VF));
8215 val = vi->flags & TX_USES_VM_WR ? 1 : 0;
8216 rc = sysctl_handle_int(oidp, &val, 0, req);
8217 if (rc != 0 || req->newptr == NULL)
8220 if (val != 0 && val != 1)
8223 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8227 if (hw_off_limits(sc))
8229 else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) {
8231 * We don't want parse_pkt to run with one setting (VF or PF)
8232 * and then eth_tx to see a different setting but still use
8233 * stale information calculated by parse_pkt.
8237 struct port_info *pi = vi->pi;
8238 struct sge_txq *txq;
8240 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr;
8243 vi->flags |= TX_USES_VM_WR;
8244 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
8245 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
8246 V_TXPKT_INTF(pi->tx_chan));
8247 if (!(sc->flags & IS_VF))
8250 vi->flags &= ~TX_USES_VM_WR;
8251 if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
8252 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
8253 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
8254 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
8256 for_each_txq(vi, i, txq) {
8257 txq->cpl_ctrl0 = ctrl0;
8258 txq->txp.max_npkt = npkt;
8261 end_synchronized_op(sc, LOCK_HELD);
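/* Change the interrupt holdoff timer index on all of the VI's rx queues. */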
8266 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
8268 struct vi_info *vi = arg1;
8269 struct adapter *sc = vi->adapter;
8271 struct sge_rxq *rxq;
8276 rc = sysctl_handle_int(oidp, &idx, 0, req);
8277 if (rc != 0 || req->newptr == NULL)
8280 if (idx < 0 || idx >= SGE_NTIMERS)
8283 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8288 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
8289 for_each_rxq(vi, i, rxq) {
8290 #ifdef atomic_store_rel_8
8291 atomic_store_rel_8(&rxq->iq.intr_params, v);
8293 rxq->iq.intr_params = v;
8298 end_synchronized_op(sc, LOCK_HELD);
8303 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
8305 struct vi_info *vi = arg1;
8306 struct adapter *sc = vi->adapter;
8311 rc = sysctl_handle_int(oidp, &idx, 0, req);
8312 if (rc != 0 || req->newptr == NULL)
8315 if (idx < -1 || idx >= SGE_NCOUNTERS)
8318 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8323 if (vi->flags & VI_INIT_DONE)
8324 rc = EBUSY; /* cannot be changed once the queues are created */
8328 end_synchronized_op(sc, LOCK_HELD);
8333 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
8335 struct vi_info *vi = arg1;
8336 struct adapter *sc = vi->adapter;
8339 qsize = vi->qsize_rxq;
8341 rc = sysctl_handle_int(oidp, &qsize, 0, req);
8342 if (rc != 0 || req->newptr == NULL)
8345 if (qsize < 128 || (qsize & 7))
8348 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8353 if (vi->flags & VI_INIT_DONE)
8354 rc = EBUSY; /* cannot be changed once the queues are created */
8356 vi->qsize_rxq = qsize;
8358 end_synchronized_op(sc, LOCK_HELD);
8363 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
8365 struct vi_info *vi = arg1;
8366 struct adapter *sc = vi->adapter;
8369 qsize = vi->qsize_txq;
8371 rc = sysctl_handle_int(oidp, &qsize, 0, req);
8372 if (rc != 0 || req->newptr == NULL)
8375 if (qsize < 128 || qsize > 65536)
8378 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
8383 if (vi->flags & VI_INIT_DONE)
8384 rc = EBUSY; /* cannot be changed once the queues are created */
8386 vi->qsize_txq = qsize;
8388 end_synchronized_op(sc, LOCK_HELD);
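/*
 * Read or set the requested PAUSE settings (rx_pause, tx_pause, and
 * pause_autoneg bits) for the port.
 */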
8393 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
8395 struct port_info *pi = arg1;
8396 struct adapter *sc = pi->adapter;
8397 struct link_config *lc = &pi->link_cfg;
8400 if (req->newptr == NULL) {
8402 static char *bits = "\20\1RX\2TX\3AUTO";
8404 rc = sysctl_wire_old_buffer(req, 0);
8408 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8413 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
8414 (lc->requested_fc & PAUSE_AUTONEG), bits);
8416 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
8417 PAUSE_RX | PAUSE_AUTONEG), bits);
8419 rc = sbuf_finish(sb);
8425 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
8429 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
8435 if (s[0] < '0' || s[0] > '9')
8436 return (EINVAL); /* not a number */
8438 if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
8439 return (EINVAL); /* some other bit is set too */
8441 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8445 if (!hw_off_limits(sc)) {
8447 lc->requested_fc = n;
8448 fixup_link_config(pi);
8450 rc = apply_link_config(pi);
8451 set_current_media(pi);
8454 end_synchronized_op(sc, 0);
8461 sysctl_link_fec(SYSCTL_HANDLER_ARGS)
8463 struct port_info *pi = arg1;
8464 struct link_config *lc = &pi->link_cfg;
8467 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
8469 rc = sysctl_wire_old_buffer(req, 0);
8473 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8477 sbuf_printf(sb, "%b", lc->fec, bits);
8479 sbuf_printf(sb, "no link");
8480 rc = sbuf_finish(sb);
8487 sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
8489 struct port_info *pi = arg1;
8490 struct adapter *sc = pi->adapter;
8491 struct link_config *lc = &pi->link_cfg;
8495 if (req->newptr == NULL) {
8497 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
8498 "\5RSVD3\6auto\7module";
8500 rc = sysctl_wire_old_buffer(req, 0);
8504 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8508 sbuf_printf(sb, "%b", lc->requested_fec, bits);
8509 rc = sbuf_finish(sb);
8515 snprintf(s, sizeof(s), "%d",
8516 lc->requested_fec == FEC_AUTO ? -1 :
8517 lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE));
8519 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
8523 n = strtol(&s[0], NULL, 0);
8524 if (n < 0 || n & FEC_AUTO)
8526 else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE))
8527 return (EINVAL); /* some other bit is set too */
8529 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8534 old = lc->requested_fec;
8536 lc->requested_fec = FEC_AUTO;
8537 else if (n == 0 || n == FEC_NONE)
8538 lc->requested_fec = FEC_NONE;
8541 V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) !=
8546 lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
8549 if (!hw_off_limits(sc)) {
8550 fixup_link_config(pi);
8551 if (pi->up_vis > 0) {
8552 rc = apply_link_config(pi);
8554 lc->requested_fec = old;
8555 if (rc == FW_EPROTO)
8562 end_synchronized_op(sc, 0);
8569 sysctl_module_fec(SYSCTL_HANDLER_ARGS)
8571 struct port_info *pi = arg1;
8572 struct adapter *sc = pi->adapter;
8573 struct link_config *lc = &pi->link_cfg;
8577 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
8579 rc = sysctl_wire_old_buffer(req, 0);
8583 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
8587 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) {
8591 if (hw_off_limits(sc)) {
8596 if (pi->up_vis == 0) {
8598 * If all the interfaces are administratively down, the firmware
8599 * does not report transceiver changes. Refresh port info here.
8600 * This is the only reason we have a synchronized op in this
8601 * function. Just PORT_LOCK would have been enough otherwise.
8603 t4_update_port_info(pi);
8607 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
8608 !fec_supported(lc->pcaps)) {
8609 sbuf_printf(sb, "n/a");
8613 sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
8615 rc = sbuf_finish(sb);
8619 end_synchronized_op(sc, 0);
8625 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
8627 struct port_info *pi = arg1;
8628 struct adapter *sc = pi->adapter;
8629 struct link_config *lc = &pi->link_cfg;
8632 if (lc->pcaps & FW_PORT_CAP32_ANEG)
8633 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
8636 rc = sysctl_handle_int(oidp, &val, 0, req);
8637 if (rc != 0 || req->newptr == NULL)
8640 val = AUTONEG_DISABLE;
8642 val = AUTONEG_ENABLE;
8646 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
8651 if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
8655 lc->requested_aneg = val;
8656 if (!hw_off_limits(sc)) {
8657 fixup_link_config(pi);
8659 rc = apply_link_config(pi);
8660 set_current_media(pi);
8664 end_synchronized_op(sc, 0);
8669 sysctl_force_fec(SYSCTL_HANDLER_ARGS)
8671 struct port_info *pi = arg1;
8672 struct adapter *sc = pi->adapter;
8673 struct link_config *lc = &pi->link_cfg;
8676 val = lc->force_fec;
8677 MPASS(val >= -1 && val <= 1);
8678 rc = sysctl_handle_int(oidp, &val, 0, req);
8679 if (rc != 0 || req->newptr == NULL)
8681 if (!(lc->pcaps & FW_PORT_CAP32_FORCE_FEC))
8683 if (val < -1 || val > 1)
8686 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4ff");
8690 lc->force_fec = val;
8691 if (!hw_off_limits(sc)) {
8692 fixup_link_config(pi);
8694 rc = apply_link_config(pi);
8697 end_synchronized_op(sc, 0);
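/* Report the value of a 64-bit adapter register, read under reg_lock. */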
8702 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
8704 struct adapter *sc = arg1;
8708 mtx_lock(&sc->reg_lock);
8709 if (hw_off_limits(sc))
8713 val = t4_read_reg64(sc, reg);
8715 mtx_unlock(&sc->reg_lock);
8717 rc = sysctl_handle_64(oidp, &val, 0, req);
8722 sysctl_temperature(SYSCTL_HANDLER_ARGS)
8724 struct adapter *sc = arg1;
8726 uint32_t param, val;
8728 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
8731 if (hw_off_limits(sc))
8734 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8735 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8736 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
8737 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
8739 end_synchronized_op(sc, 0);
8743 /* unknown is returned as 0 but we display -1 in that case */
8744 t = val == 0 ? -1 : val;
8746 rc = sysctl_handle_int(oidp, &t, 0, req);
8751 sysctl_vdd(SYSCTL_HANDLER_ARGS)
8753 struct adapter *sc = arg1;
8755 uint32_t param, val;
8757 if (sc->params.core_vdd == 0) {
8758 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
8762 if (hw_off_limits(sc))
8765 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8766 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8767 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
8768 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
8771 end_synchronized_op(sc, 0);
8774 sc->params.core_vdd = val;
8777 return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
8781 sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
8783 struct adapter *sc = arg1;
8785 uint32_t param, val;
8787 v = sc->sensor_resets;
8788 rc = sysctl_handle_int(oidp, &v, 0, req);
8789 if (rc != 0 || req->newptr == NULL || v <= 0)
8792 if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
8793 chip_id(sc) < CHELSIO_T5)
8796 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
8799 if (hw_off_limits(sc))
8802 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8803 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
8804 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
8806 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
8808 end_synchronized_op(sc, 0);
8810 sc->sensor_resets++;
8815 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
8817 struct adapter *sc = arg1;
8820 uint32_t param, val;
8822 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
8825 if (hw_off_limits(sc))
8828 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8829 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
8830 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
8832 end_synchronized_op(sc, 0);
8836 rc = sysctl_wire_old_buffer(req, 0);
8840 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8844 if (val == 0xffffffff) {
8845 /* Only debug and custom firmwares report load averages. */
8846 sbuf_printf(sb, "not available");
8848 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
8849 (val >> 16) & 0xff);
8851 rc = sbuf_finish(sb);
8858 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
8860 struct adapter *sc = arg1;
8863 uint16_t incr[NMTUS][NCCTRL_WIN];
8864 static const char *dec_fac[] = {
8865 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
8869 rc = sysctl_wire_old_buffer(req, 0);
8873 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8877 mtx_lock(&sc->reg_lock);
8878 if (hw_off_limits(sc))
8881 t4_read_cong_tbl(sc, incr);
8882 mtx_unlock(&sc->reg_lock);
8886 for (i = 0; i < NCCTRL_WIN; ++i) {
8887 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
8888 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
8889 incr[5][i], incr[6][i], incr[7][i]);
8890 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
8891 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
8892 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
8893 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
8896 rc = sbuf_finish(sb);
8902 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
8903 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
8904 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
8905 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
8909 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
8911 struct adapter *sc = arg1;
8913 int rc, i, n, qid = arg2;
8916 u_int cim_num_obq = sc->chip_params->cim_num_obq;
8918 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
8919 ("%s: bad qid %d\n", __func__, qid));
8921 if (qid < CIM_NUM_IBQ) {
8924 n = 4 * CIM_IBQ_SIZE;
8925 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
8926 mtx_lock(&sc->reg_lock);
8927 if (hw_off_limits(sc))
8930 rc = t4_read_cim_ibq(sc, qid, buf, n);
8931 mtx_unlock(&sc->reg_lock);
8933 /* outbound queue */
8936 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
8937 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
8938 mtx_lock(&sc->reg_lock);
8939 if (hw_off_limits(sc))
8942 rc = t4_read_cim_obq(sc, qid, buf, n);
8943 mtx_unlock(&sc->reg_lock);
8950 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
8952 rc = sysctl_wire_old_buffer(req, 0);
8956 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
8962 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
8963 for (i = 0, p = buf; i < n; i += 16, p += 4)
8964 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
8967 rc = sbuf_finish(sb);
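/* Decode the CIM logic analyzer capture for T4/T5 (entries are 8 words wide). */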
8975 sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
8979 sbuf_printf(sb, "Status Data PC%s",
8980 cfg & F_UPDBGLACAPTPCONLY ? "" :
8981 " LS0Stat LS0Addr LS0Data");
8983 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
8984 if (cfg & F_UPDBGLACAPTPCONLY) {
8985 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
8987 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
8988 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
8989 p[4] & 0xff, p[5] >> 8);
8990 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
8991 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8992 p[1] & 0xf, p[2] >> 4);
8995 "\n %02x %x%07x %x%07x %08x %08x "
8997 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8998 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
9005 sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
9009 sbuf_printf(sb, "Status Inst Data PC%s",
9010 cfg & F_UPDBGLACAPTPCONLY ? "" :
9011 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
9013 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
9014 if (cfg & F_UPDBGLACAPTPCONLY) {
9015 sbuf_printf(sb, "\n %02x %08x %08x %08x",
9016 p[3] & 0xff, p[2], p[1], p[0]);
9017 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
9018 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
9019 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
9020 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
9021 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
9022 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
9025 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
9026 "%08x %08x %08x %08x %08x %08x",
9027 (p[9] >> 16) & 0xff,
9028 p[9] & 0xffff, p[8] >> 16,
9029 p[8] & 0xffff, p[7] >> 16,
9030 p[7] & 0xffff, p[6] >> 16,
9031 p[2], p[1], p[0], p[5], p[4], p[3]);
9037 sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
9042 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
9043 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
9048 mtx_lock(&sc->reg_lock);
9049 if (hw_off_limits(sc))
9052 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
9054 rc = -t4_cim_read_la(sc, buf, NULL);
9056 mtx_unlock(&sc->reg_lock);
9058 if (chip_id(sc) < CHELSIO_T6)
9059 sbuf_cim_la4(sc, sb, buf, cfg);
9061 sbuf_cim_la6(sc, sb, buf, cfg);
9068 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
9070 struct adapter *sc = arg1;
9074 rc = sysctl_wire_old_buffer(req, 0);
9077 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9081 rc = sbuf_cim_la(sc, sb, M_WAITOK);
9083 rc = sbuf_finish(sb);
9089 dump_cim_regs(struct adapter *sc)
9091 log(LOG_DEBUG, "%s: CIM debug regs1 %08x %08x %08x %08x %08x\n",
9092 device_get_nameunit(sc->dev),
9093 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
9094 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
9095 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA2),
9096 t4_read_reg(sc, A_EDC_H_BIST_DATA_PATTERN),
9097 t4_read_reg(sc, A_EDC_H_BIST_STATUS_RDATA));
9098 log(LOG_DEBUG, "%s: CIM debug regs2 %08x %08x %08x %08x %08x\n",
9099 device_get_nameunit(sc->dev),
9100 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0),
9101 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1),
9102 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA0 + 0x800),
9103 t4_read_reg(sc, A_EDC_H_BIST_USER_WDATA1 + 0x800),
9104 t4_read_reg(sc, A_EDC_H_BIST_CMD_LEN));
9108 dump_cimla(struct adapter *sc)
9113 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
9114 log(LOG_DEBUG, "%s: failed to generate CIM LA dump.\n",
9115 device_get_nameunit(sc->dev));
9118 rc = sbuf_cim_la(sc, &sb, M_WAITOK);
9120 rc = sbuf_finish(&sb);
9122 log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s\n",
9123 device_get_nameunit(sc->dev), sbuf_data(&sb));
9130 t4_os_cim_err(struct adapter *sc)
9132 atomic_set_int(&sc->error_flags, ADAP_CIM_ERR);
9136 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
9138 struct adapter *sc = arg1;
9144 rc = sysctl_wire_old_buffer(req, 0);
9148 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9152 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
9155 mtx_lock(&sc->reg_lock);
9156 if (hw_off_limits(sc))
9159 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
9160 mtx_unlock(&sc->reg_lock);
9165 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
9166 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
9170 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
9171 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
9172 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
9173 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
9174 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
9175 (p[1] >> 2) | ((p[2] & 3) << 30),
9176 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
9179 rc = sbuf_finish(sb);
9187 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
9189 struct adapter *sc = arg1;
9195 rc = sysctl_wire_old_buffer(req, 0);
9199 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9203 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
9206 mtx_lock(&sc->reg_lock);
9207 if (hw_off_limits(sc))
9210 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
9211 mtx_unlock(&sc->reg_lock);
9216 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
9217 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
9218 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
9219 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
9220 p[4], p[3], p[2], p[1], p[0]);
9223 sbuf_printf(sb, "\n\nCntl ID Data");
9224 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
9225 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
9226 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
9229 rc = sbuf_finish(sb);
9237 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
9239 struct adapter *sc = arg1;
9242 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
9243 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
9244 uint16_t thres[CIM_NUM_IBQ];
9245 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
9246 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
9247 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
9249 cim_num_obq = sc->chip_params->cim_num_obq;
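/*
 * T4 reads the CIM queue pointers from the registers directly; later chips
 * read the shadow copies instead.
 */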
9251 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
9252 obq_rdaddr = A_UP_OBQ_0_REALADDR;
9254 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
9255 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
9257 nq = CIM_NUM_IBQ + cim_num_obq;
9259 mtx_lock(&sc->reg_lock);
9260 if (hw_off_limits(sc))
9263 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
9265 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq,
9268 t4_read_cimq_cfg(sc, base, size, thres);
9271 mtx_unlock(&sc->reg_lock);
9275 rc = sysctl_wire_old_buffer(req, 0);
9279 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
9284 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
9286 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
9287 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
9288 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
9289 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
9290 G_QUEREMFLITS(p[2]) * 16);
9291 for ( ; i < nq; i++, p += 4, wr += 2)
9292 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
9293 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
9294 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
9295 G_QUEREMFLITS(p[2]) * 16);
9297 rc = sbuf_finish(sb);
9304 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
9306 struct adapter *sc = arg1;
9309 struct tp_cpl_stats stats;
9311 rc = sysctl_wire_old_buffer(req, 0);
9315 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9319 mtx_lock(&sc->reg_lock);
9320 if (hw_off_limits(sc))
9323 t4_tp_get_cpl_stats(sc, &stats, 0);
9324 mtx_unlock(&sc->reg_lock);
9328 if (sc->chip_params->nchan > 2) {
9329 sbuf_printf(sb, " channel 0 channel 1"
9330 " channel 2 channel 3");
9331 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
9332 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
9333 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
9334 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
9336 sbuf_printf(sb, " channel 0 channel 1");
9337 sbuf_printf(sb, "\nCPL requests: %10u %10u",
9338 stats.req[0], stats.req[1]);
9339 sbuf_printf(sb, "\nCPL responses: %10u %10u",
9340 stats.rsp[0], stats.rsp[1]);
9343 rc = sbuf_finish(sb);
9350 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
9352 struct adapter *sc = arg1;
9355 struct tp_usm_stats stats;
9357 rc = sysctl_wire_old_buffer(req, 0);
9361 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9365 mtx_lock(&sc->reg_lock);
9366 if (hw_off_limits(sc))
9369 t4_get_usm_stats(sc, &stats, 1);
9370 mtx_unlock(&sc->reg_lock);
9372 sbuf_printf(sb, "Frames: %u\n", stats.frames);
9373 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
9374 sbuf_printf(sb, "Drops: %u", stats.drops);
9375 rc = sbuf_finish(sb);
9383 sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
9385 struct adapter *sc = arg1;
9388 struct tp_tid_stats stats;
9390 rc = sysctl_wire_old_buffer(req, 0);
9394 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9398 mtx_lock(&sc->reg_lock);
9399 if (hw_off_limits(sc))
9402 t4_tp_get_tid_stats(sc, &stats, 1);
9403 mtx_unlock(&sc->reg_lock);
9405 sbuf_printf(sb, "Delete: %u\n", stats.del);
9406 sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
9407 sbuf_printf(sb, "Active: %u\n", stats.act);
9408 sbuf_printf(sb, "Passive: %u", stats.pas);
9409 rc = sbuf_finish(sb);
9416 static const char * const devlog_level_strings[] = {
9417 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
9418 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
9419 [FW_DEVLOG_LEVEL_ERR] = "ERR",
9420 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
9421 [FW_DEVLOG_LEVEL_INFO] = "INFO",
9422 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
9425 static const char * const devlog_facility_strings[] = {
9426 [FW_DEVLOG_FACILITY_CORE] = "CORE",
9427 [FW_DEVLOG_FACILITY_CF] = "CF",
9428 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
9429 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
9430 [FW_DEVLOG_FACILITY_RES] = "RES",
9431 [FW_DEVLOG_FACILITY_HW] = "HW",
9432 [FW_DEVLOG_FACILITY_FLR] = "FLR",
9433 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
9434 [FW_DEVLOG_FACILITY_PHY] = "PHY",
9435 [FW_DEVLOG_FACILITY_MAC] = "MAC",
9436 [FW_DEVLOG_FACILITY_PORT] = "PORT",
9437 [FW_DEVLOG_FACILITY_VI] = "VI",
9438 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
9439 [FW_DEVLOG_FACILITY_ACL] = "ACL",
9440 [FW_DEVLOG_FACILITY_TM] = "TM",
9441 [FW_DEVLOG_FACILITY_QFC] = "QFC",
9442 [FW_DEVLOG_FACILITY_DCB] = "DCB",
9443 [FW_DEVLOG_FACILITY_ETH] = "ETH",
9444 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
9445 [FW_DEVLOG_FACILITY_RI] = "RI",
9446 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
9447 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
9448 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
9449 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
9450 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
9454 sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
9456 int i, j, rc, nentries, first = 0;
9457 struct devlog_params *dparams = &sc->params.devlog;
9458 struct fw_devlog_e *buf, *e;
9459 uint64_t ftstamp = UINT64_MAX;
9461 if (dparams->addr == 0)
9464 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
9465 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
9469 mtx_lock(&sc->reg_lock);
9470 if (hw_off_limits(sc))
9473 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
9475 mtx_unlock(&sc->reg_lock);
9479 nentries = dparams->size / sizeof(struct fw_devlog_e);
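/*
 * The firmware devlog is a circular buffer.  Byte-swap every entry and keep
 * track of the lowest timestamp seen; that entry is the oldest one and marks
 * where the dump should begin.
 */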
9480 for (i = 0; i < nentries; i++) {
9483 if (e->timestamp == 0)
9486 e->timestamp = be64toh(e->timestamp);
9487 e->seqno = be32toh(e->seqno);
9488 for (j = 0; j < 8; j++)
9489 e->params[j] = be32toh(e->params[j]);
9491 if (e->timestamp < ftstamp) {
9492 ftstamp = e->timestamp;
9497 if (buf[first].timestamp == 0)
9498 goto done; /* nothing in the log */
9500 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
9501 "Seq#", "Tstamp", "Level", "Facility", "Message");
9506 if (e->timestamp == 0)
9509 sbuf_printf(sb, "%10d %15ju %8s %8s ",
9510 e->seqno, e->timestamp,
9511 (e->level < nitems(devlog_level_strings) ?
9512 devlog_level_strings[e->level] : "UNKNOWN"),
9513 (e->facility < nitems(devlog_facility_strings) ?
9514 devlog_facility_strings[e->facility] : "UNKNOWN"));
9515 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
9516 e->params[2], e->params[3], e->params[4],
9517 e->params[5], e->params[6], e->params[7]);
9519 if (++i == nentries)
9521 } while (i != first);
9528 sysctl_devlog(SYSCTL_HANDLER_ARGS)
9530 struct adapter *sc = arg1;
9534 rc = sysctl_wire_old_buffer(req, 0);
9537 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9541 rc = sbuf_devlog(sc, sb, M_WAITOK);
9543 rc = sbuf_finish(sb);
9549 dump_devlog(struct adapter *sc)
9554 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
9555 log(LOG_DEBUG, "%s: failed to generate devlog dump.\n",
9556 device_get_nameunit(sc->dev));
9559 rc = sbuf_devlog(sc, &sb, M_WAITOK);
9561 rc = sbuf_finish(&sb);
9563 log(LOG_DEBUG, "%s: device log follows.\n%s",
9564 device_get_nameunit(sc->dev), sbuf_data(&sb));
9571 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
9573 struct adapter *sc = arg1;
9576 struct tp_fcoe_stats stats[MAX_NCHAN];
9577 int i, nchan = sc->chip_params->nchan;
9579 rc = sysctl_wire_old_buffer(req, 0);
9583 mtx_lock(&sc->reg_lock);
9584 if (hw_off_limits(sc))
9587 for (i = 0; i < nchan; i++)
9588 t4_get_fcoe_stats(sc, i, &stats[i], 1);
9590 mtx_unlock(&sc->reg_lock);
9594 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9599 sbuf_printf(sb, " channel 0 channel 1"
9600 " channel 2 channel 3");
9601 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
9602 stats[0].octets_ddp, stats[1].octets_ddp,
9603 stats[2].octets_ddp, stats[3].octets_ddp);
9604 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
9605 stats[0].frames_ddp, stats[1].frames_ddp,
9606 stats[2].frames_ddp, stats[3].frames_ddp);
9607 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
9608 stats[0].frames_drop, stats[1].frames_drop,
9609 stats[2].frames_drop, stats[3].frames_drop);
9611 sbuf_printf(sb, " channel 0 channel 1");
9612 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
9613 stats[0].octets_ddp, stats[1].octets_ddp);
9614 sbuf_printf(sb, "\nframesDDP: %16u %16u",
9615 stats[0].frames_ddp, stats[1].frames_ddp);
9616 sbuf_printf(sb, "\nframesDrop: %16u %16u",
9617 stats[0].frames_drop, stats[1].frames_drop);
9620 rc = sbuf_finish(sb);
9627 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
9629 struct adapter *sc = arg1;
9632 unsigned int map, kbps, ipg, mode;
9633 unsigned int pace_tab[NTX_SCHED];
9635 rc = sysctl_wire_old_buffer(req, 0);
9639 sb = sbuf_new_for_sysctl(NULL, NULL, 512, req);
9643 mtx_lock(&sc->reg_lock);
9644 if (hw_off_limits(sc)) {
9649 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
9650 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
9651 t4_read_pace_tbl(sc, pace_tab);
9653 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
9654 "Class IPG (0.1 ns) Flow IPG (us)");
9656 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
9657 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
9658 sbuf_printf(sb, "\n %u %-5s %u ", i,
9659 (mode & (1 << i)) ? "flow" : "class", map & 3);
9661 sbuf_printf(sb, "%9u ", kbps);
9663 sbuf_printf(sb, " disabled ");
9666 sbuf_printf(sb, "%13u ", ipg);
9668 sbuf_printf(sb, " disabled ");
9671 sbuf_printf(sb, "%10u", pace_tab[i]);
9673 sbuf_printf(sb, " disabled");
9675 rc = sbuf_finish(sb);
9677 mtx_unlock(&sc->reg_lock);
9683 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
9685 struct adapter *sc = arg1;
9689 struct lb_port_stats s[2];
9690 static const char *stat_name[] = {
9691 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
9692 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
9693 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
9694 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
9695 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
9696 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
9697 "BG2FramesTrunc:", "BG3FramesTrunc:"
9700 rc = sysctl_wire_old_buffer(req, 0);
9704 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9708 memset(s, 0, sizeof(s));
9710 for (i = 0; i < sc->chip_params->nchan; i += 2) {
9711 mtx_lock(&sc->reg_lock);
9712 if (hw_off_limits(sc))
9715 t4_get_lb_stats(sc, i, &s[0]);
9716 t4_get_lb_stats(sc, i + 1, &s[1]);
9718 mtx_unlock(&sc->reg_lock);
9724 sbuf_printf(sb, "%s Loopback %u"
9725 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
9727 for (j = 0; j < nitems(stat_name); j++)
9728 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
9732 rc = sbuf_finish(sb);
9739 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
9742 struct port_info *pi = arg1;
9743 struct link_config *lc = &pi->link_cfg;
9746 rc = sysctl_wire_old_buffer(req, 0);
9749 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
9753 if (lc->link_ok || lc->link_down_rc == 255)
9754 sbuf_printf(sb, "n/a");
9756 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
9758 rc = sbuf_finish(sb);
9771 mem_desc_cmp(const void *a, const void *b)
9773 const u_int v1 = ((const struct mem_desc *)a)->base;
9774 const u_int v2 = ((const struct mem_desc *)b)->base;
9785 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
9793 size = to - from + 1;
9797 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
9798 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
9802 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
9804 struct adapter *sc = arg1;
9807 uint32_t lo, hi, used, free, alloc;
9808 static const char *memory[] = {
9809 "EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
9811 static const char *region[] = {
9812 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
9813 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
9814 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
9815 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
9816 "RQUDP region:", "PBL region:", "TXPBL region:",
9817 "TLSKey region:", "DBVFIFO region:", "ULPRX state:",
9818 "ULPTX state:", "On-chip queues:",
9820 struct mem_desc avail[4];
9821 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
9822 struct mem_desc *md = mem;
9824 rc = sysctl_wire_old_buffer(req, 0);
9828 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9832 for (i = 0; i < nitems(mem); i++) {
9837 mtx_lock(&sc->reg_lock);
9838 if (hw_off_limits(sc)) {
9843 /* Find and sort the populated memory ranges */
9845 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
9846 if (lo & F_EDRAM0_ENABLE) {
9847 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
9848 avail[i].base = G_EDRAM0_BASE(hi) << 20;
9849 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
9853 if (lo & F_EDRAM1_ENABLE) {
9854 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
9855 avail[i].base = G_EDRAM1_BASE(hi) << 20;
9856 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
9860 if (lo & F_EXT_MEM_ENABLE) {
9861 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
9862 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
9863 avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
9864 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
9867 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
9868 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
9869 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
9870 avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
9874 if (is_t6(sc) && lo & F_HMA_MUX) {
9875 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
9876 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
9877 avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
9881 MPASS(i <= nitems(avail));
9882 if (!i) /* no memory available */
9884 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
9886 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
9887 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
9888 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
9889 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
9890 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
9891 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
9892 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
9893 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
9894 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
9896 /* the next few have explicit upper bounds */
9897 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
9898 md->limit = md->base - 1 +
9899 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
9900 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
9903 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
9904 md->limit = md->base - 1 +
9905 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
9906 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
9909 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
9910 if (chip_id(sc) <= CHELSIO_T5)
9911 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
9913 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
9917 md->idx = nitems(region); /* hide it */
9921 #define ulp_region(reg) \
9922 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
9923 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
9925 ulp_region(RX_ISCSI);
9926 ulp_region(RX_TDDP);
9928 ulp_region(RX_STAG);
9930 ulp_region(RX_RQUDP);
9933 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
9934 ulp_region(RX_TLS_KEY);
9940 md->idx = nitems(region);
9943 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
9944 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
9947 if (sge_ctrl & F_VFIFO_ENABLE)
9948 size = fifo_size << 2;
9950 size = G_T6_DBVFIFO_SIZE(fifo_size) << 6;
9953 md->base = t4_read_reg(sc, A_SGE_DBVFIFO_BADDR);
9954 md->limit = md->base + size - 1;
9956 md->idx = nitems(region);
9960 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
9963 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
9967 md->base = sc->vres.ocq.start;
9968 if (sc->vres.ocq.size)
9969 md->limit = md->base + sc->vres.ocq.size - 1;
9971 md->idx = nitems(region); /* hide it */
9974 /* add any address-space holes, there can be up to 3 */
9975 for (n = 0; n < i - 1; n++)
9976 if (avail[n].limit < avail[n + 1].base)
9977 (md++)->base = avail[n].limit;
9979 (md++)->base = avail[n].limit;
9982 MPASS(n <= nitems(mem));
9983 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
9985 for (lo = 0; lo < i; lo++)
9986 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
9987 avail[lo].limit - 1);
9989 sbuf_printf(sb, "\n");
9990 for (i = 0; i < n; i++) {
9991 if (mem[i].idx >= nitems(region))
9992 continue; /* skip holes */
9994 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
9995 mem_region_show(sb, region[mem[i].idx], mem[i].base,
9999 sbuf_printf(sb, "\n");
10000 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
10001 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
10002 mem_region_show(sb, "uP RAM:", lo, hi);
10004 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
10005 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
10006 mem_region_show(sb, "uP Extmem2:", lo, hi);
10008 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
10009 for (i = 0, free = 0; i < 2; i++)
10010 free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
10011 sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
10012 G_PMRXMAXPAGE(lo), free,
10013 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
10014 (lo & F_PMRXNUMCHN) ? 2 : 1);
10016 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
10017 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
10018 for (i = 0, free = 0; i < 4; i++)
10019 free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
10020 sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
10021 G_PMTXMAXPAGE(lo), free,
10022 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
10023 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
10024 sbuf_printf(sb, "%u p-structs (%u free)\n",
10025 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
10026 G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));
10028 for (i = 0; i < 4; i++) {
10029 if (chip_id(sc) > CHELSIO_T5)
10030 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
10032 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
10034 used = G_T5_USED(lo);
10035 alloc = G_T5_ALLOC(lo);
10038 alloc = G_ALLOC(lo);
10040 /* For T6 these are MAC buffer groups */
10041 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
10044 for (i = 0; i < sc->chip_params->nchan; i++) {
10045 if (chip_id(sc) > CHELSIO_T5)
10046 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
10048 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
10050 used = G_T5_USED(lo);
10051 alloc = G_T5_ALLOC(lo);
10054 alloc = G_ALLOC(lo);
10056 /* For T6 these are MAC buffer groups */
10058 "\nLoopback %d using %u pages out of %u allocated",
10062 mtx_unlock(&sc->reg_lock);
10064 rc = sbuf_finish(sb);
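/*
 * Convert the MPS TCAM (x, y) bit-pair encoding into an Ethernet address and
 * match mask: y carries the value bits, x | y marks the bits that actually
 * participate in the match, and x == y == 0 is a don't-care bit.
 */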
10070 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
10074 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
10078 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
10080 struct adapter *sc = arg1;
10084 MPASS(chip_id(sc) <= CHELSIO_T5);
10086 rc = sysctl_wire_old_buffer(req, 0);
10090 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10095 "Idx Ethernet address Mask Vld Ports PF"
10096 " VF Replication P0 P1 P2 P3 ML");
10097 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
10098 uint64_t tcamx, tcamy, mask;
10099 uint32_t cls_lo, cls_hi;
10100 uint8_t addr[ETHER_ADDR_LEN];
10102 mtx_lock(&sc->reg_lock);
10103 if (hw_off_limits(sc))
10106 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
10107 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
10109 mtx_unlock(&sc->reg_lock);
10114 tcamxy2valmask(tcamx, tcamy, addr, &mask);
10115 mtx_lock(&sc->reg_lock);
10116 if (hw_off_limits(sc))
10119 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
10120 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
10122 mtx_unlock(&sc->reg_lock);
10125 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
10126 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
10127 addr[3], addr[4], addr[5], (uintmax_t)mask,
10128 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
10129 G_PORTMAP(cls_hi), G_PF(cls_lo),
10130 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
10132 if (cls_lo & F_REPLICATE) {
10133 struct fw_ldst_cmd ldst_cmd;
10135 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10136 ldst_cmd.op_to_addrspace =
10137 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
10138 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10139 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
10140 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
10141 ldst_cmd.u.mps.rplc.fid_idx =
10142 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
10143 V_FW_LDST_CMD_IDX(i));
10145 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
10149 if (hw_off_limits(sc))
10152 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
10153 sizeof(ldst_cmd), &ldst_cmd);
10154 end_synchronized_op(sc, 0);
10158 sbuf_printf(sb, " %08x %08x %08x %08x",
10159 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
10160 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
10161 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
10162 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
10165 sbuf_printf(sb, "%36s", "");
10167 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
10168 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
10169 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
10173 (void) sbuf_finish(sb);
10175 rc = sbuf_finish(sb);
10182 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
10184 struct adapter *sc = arg1;
10188 MPASS(chip_id(sc) > CHELSIO_T5);
10190 rc = sysctl_wire_old_buffer(req, 0);
10194 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
10198 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
10199 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
10201 " P0 P1 P2 P3 ML\n");
10203 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
10204 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
10206 uint64_t tcamx, tcamy, val, mask;
10207 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
10208 uint8_t addr[ETHER_ADDR_LEN];
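/*
 * T6 TCAM entries are read indirectly: write the index to
 * MPS_CLS_TCAM_DATA2_CTL and read the entry back through the RDATA
 * registers, once for the Y half and once (with CTLXYBITSEL set) for X.
 */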
10210 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
10212 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
10214 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
10215 mtx_lock(&sc->reg_lock);
10216 if (hw_off_limits(sc))
10219 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
10220 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
10221 tcamy = G_DMACH(val) << 32;
10222 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
10223 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
10225 mtx_unlock(&sc->reg_lock);
10229 lookup_type = G_DATALKPTYPE(data2);
10230 port_num = G_DATAPORTNUM(data2);
10231 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10232 /* Inner header VNI */
10233 vniy = ((data2 & F_DATAVIDH2) << 23) |
10234 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
10235 dip_hit = data2 & F_DATADIPHIT;
10240 vlan_vld = data2 & F_DATAVIDH2;
10241 ivlan = G_VIDL(val);
10244 ctl |= V_CTLXYBITSEL(1);
10245 mtx_lock(&sc->reg_lock);
10246 if (hw_off_limits(sc))
10249 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
10250 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
10251 tcamx = G_DMACH(val) << 32;
10252 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
10253 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
10255 mtx_unlock(&sc->reg_lock);
10259 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10260 /* Inner header VNI mask */
10261 vnix = ((data2 & F_DATAVIDH2) << 23) |
10262 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
10268 tcamxy2valmask(tcamx, tcamy, addr, &mask);
10270 mtx_lock(&sc->reg_lock);
10271 if (hw_off_limits(sc))
10274 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
10275 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
10277 mtx_unlock(&sc->reg_lock);
10281 if (lookup_type && lookup_type != M_DATALKPTYPE) {
10282 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
10283 "%012jx %06x %06x - - %3c"
10284 " I %4x %3c %#x%4u%4d", i, addr[0],
10285 addr[1], addr[2], addr[3], addr[4], addr[5],
10286 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
10287 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
10288 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
10289 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
10291 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
10292 "%012jx - - ", i, addr[0], addr[1],
10293 addr[2], addr[3], addr[4], addr[5],
10297 sbuf_printf(sb, "%4u Y ", ivlan);
10299 sbuf_printf(sb, " - N ");
10301 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
10302 lookup_type ? 'I' : 'O', port_num,
10303 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
10304 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
10305 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
10309 if (cls_lo & F_T6_REPLICATE) {
10310 struct fw_ldst_cmd ldst_cmd;
10312 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10313 ldst_cmd.op_to_addrspace =
10314 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
10315 F_FW_CMD_REQUEST | F_FW_CMD_READ |
10316 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
10317 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
10318 ldst_cmd.u.mps.rplc.fid_idx =
10319 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
10320 V_FW_LDST_CMD_IDX(i));
10322 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
10326 if (hw_off_limits(sc))
10329 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
10330 sizeof(ldst_cmd), &ldst_cmd);
10331 end_synchronized_op(sc, 0);
10335 sbuf_printf(sb, " %08x %08x %08x %08x"
10336 " %08x %08x %08x %08x",
10337 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
10338 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
10339 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
10340 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
10341 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
10342 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
10343 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
10344 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
10347 sbuf_printf(sb, "%72s", "");
10349 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
10350 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
10351 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
10352 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
10356 (void) sbuf_finish(sb);
10358 rc = sbuf_finish(sb);
10365 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
10367 struct adapter *sc = arg1;
10370 uint16_t mtus[NMTUS];
10372 rc = sysctl_wire_old_buffer(req, 0);
10376 mtx_lock(&sc->reg_lock);
10377 if (hw_off_limits(sc))
10380 t4_read_mtu_tbl(sc, mtus, NULL);
10381 mtx_unlock(&sc->reg_lock);
10385 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10389 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
10390 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
10391 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
10392 mtus[14], mtus[15]);
10394 rc = sbuf_finish(sb);
10401 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
10403 struct adapter *sc = arg1;
10406 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
10407 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
10408 static const char *tx_stats[MAX_PM_NSTATS] = {
10409 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
10410 "Tx FIFO wait", NULL, "Tx latency"
10412 static const char *rx_stats[MAX_PM_NSTATS] = {
10413 "Read:", "Write bypass:", "Write mem:", "Flush:",
10414 "Rx FIFO wait", NULL, "Rx latency"
10417 rc = sysctl_wire_old_buffer(req, 0);
10421 mtx_lock(&sc->reg_lock);
10422 if (hw_off_limits(sc))
10425 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
10426 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
10428 mtx_unlock(&sc->reg_lock);
10432 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10436 sbuf_printf(sb, " Tx pcmds Tx bytes");
10437 for (i = 0; i < 4; i++) {
10438 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10442 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
10443 for (i = 0; i < 4; i++) {
10444 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10448 if (chip_id(sc) > CHELSIO_T5) {
10450 "\n Total wait Total occupancy");
10451 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10453 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10457 MPASS(i < nitems(tx_stats));
10460 "\n Reads Total wait");
10461 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
10463 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
10467 rc = sbuf_finish(sb);
10474 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
10476 struct adapter *sc = arg1;
10479 struct tp_rdma_stats stats;
10481 rc = sysctl_wire_old_buffer(req, 0);
10485 mtx_lock(&sc->reg_lock);
10486 if (hw_off_limits(sc))
10489 t4_tp_get_rdma_stats(sc, &stats, 0);
10490 mtx_unlock(&sc->reg_lock);
10494 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10498 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
10499 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
10501 rc = sbuf_finish(sb);
10508 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
10510 struct adapter *sc = arg1;
10513 struct tp_tcp_stats v4, v6;
10515 rc = sysctl_wire_old_buffer(req, 0);
10519 mtx_lock(&sc->reg_lock);
10520 if (hw_off_limits(sc))
10523 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
10524 mtx_unlock(&sc->reg_lock);
10528 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10534 sbuf_printf(sb, "OutRsts: %20u %20u\n",
10535 v4.tcp_out_rsts, v6.tcp_out_rsts);
10536 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
10537 v4.tcp_in_segs, v6.tcp_in_segs);
10538 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
10539 v4.tcp_out_segs, v6.tcp_out_segs);
10540 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
10541 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
10543 rc = sbuf_finish(sb);
10550 sysctl_tids(SYSCTL_HANDLER_ARGS)
10552 struct adapter *sc = arg1;
10556 struct tid_info *t = &sc->tids;
10558 rc = sysctl_wire_old_buffer(req, 0);
10562 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10567 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
10572 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
10573 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
10577 bool hashen = false;
10579 mtx_lock(&sc->reg_lock);
10580 if (hw_off_limits(sc))
10582 else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
10584 if (chip_id(sc) <= CHELSIO_T5) {
10585 x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
10586 y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
10588 x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
10589 y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
10592 mtx_unlock(&sc->reg_lock);
10596 sbuf_printf(sb, "TID range: ");
10599 sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1);
10600 sbuf_printf(sb, "%u-%u", y, t->ntids - 1);
10602 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base +
10605 sbuf_printf(sb, ", in use: %u\n",
10606 atomic_load_acq_int(&t->tids_in_use));
10610 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
10611 t->stid_base + t->nstids - 1, t->stids_in_use);
10615 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
10616 t->ftid_end, t->ftids_in_use);
10620 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
10621 t->etid_base + t->netids - 1, t->etids_in_use);
10624 mtx_lock(&sc->reg_lock);
10625 if (hw_off_limits(sc))
10628 x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4);
10629 y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6);
10631 mtx_unlock(&sc->reg_lock);
10634 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y);
10637 rc = sbuf_finish(sb);
10639 (void)sbuf_finish(sb);
10646 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
10648 struct adapter *sc = arg1;
10651 struct tp_err_stats stats;
10653 rc = sysctl_wire_old_buffer(req, 0);
10657 mtx_lock(&sc->reg_lock);
10658 if (hw_off_limits(sc))
10661 t4_tp_get_err_stats(sc, &stats, 0);
10662 mtx_unlock(&sc->reg_lock);
10666 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10670 if (sc->chip_params->nchan > 2) {
10671 sbuf_printf(sb, " channel 0 channel 1"
10672 " channel 2 channel 3\n");
10673 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
10674 stats.mac_in_errs[0], stats.mac_in_errs[1],
10675 stats.mac_in_errs[2], stats.mac_in_errs[3]);
10676 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
10677 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
10678 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
10679 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
10680 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
10681 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
10682 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
10683 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
10684 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
10685 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
10686 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
10687 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
10688 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
10689 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
10690 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
10691 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
10692 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
10693 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
10694 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
10695 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
10696 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
10698 sbuf_printf(sb, " channel 0 channel 1\n");
10699 sbuf_printf(sb, "macInErrs: %10u %10u\n",
10700 stats.mac_in_errs[0], stats.mac_in_errs[1]);
10701 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
10702 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
10703 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
10704 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
10705 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
10706 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
10707 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
10708 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
10709 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
10710 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
10711 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
10712 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
10713 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
10714 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
10717 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
10718 stats.ofld_no_neigh, stats.ofld_cong_defer);
10720 rc = sbuf_finish(sb);
10727 sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
10729 struct adapter *sc = arg1;
10732 struct tp_tnl_stats stats;
10734 rc = sysctl_wire_old_buffer(req, 0);
10738 mtx_lock(&sc->reg_lock);
10739 if (hw_off_limits(sc))
10742 t4_tp_get_tnl_stats(sc, &stats, 1);
10743 mtx_unlock(&sc->reg_lock);
10747 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
10751 if (sc->chip_params->nchan > 2) {
10752 sbuf_printf(sb, " channel 0 channel 1"
10753 " channel 2 channel 3\n");
10754 sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n",
10755 stats.out_pkt[0], stats.out_pkt[1],
10756 stats.out_pkt[2], stats.out_pkt[3]);
10757 sbuf_printf(sb, "InPkts: %10u %10u %10u %10u",
10758 stats.in_pkt[0], stats.in_pkt[1],
10759 stats.in_pkt[2], stats.in_pkt[3]);
10761 sbuf_printf(sb, " channel 0 channel 1\n");
10762 sbuf_printf(sb, "OutPkts: %10u %10u\n",
10763 stats.out_pkt[0], stats.out_pkt[1]);
10764 sbuf_printf(sb, "InPkts: %10u %10u",
10765 stats.in_pkt[0], stats.in_pkt[1]);
10768 rc = sbuf_finish(sb);
10775 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
10777 struct adapter *sc = arg1;
10778 struct tp_params *tpp = &sc->params.tp;
10782 mask = tpp->la_mask >> 16;
10783 rc = sysctl_handle_int(oidp, &mask, 0, req);
10784 if (rc != 0 || req->newptr == NULL)
10788 mtx_lock(&sc->reg_lock);
10789 if (hw_off_limits(sc))
10792 tpp->la_mask = mask << 16;
10793 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U,
10796 mtx_unlock(&sc->reg_lock);
10801 struct field_desc {
10808 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
10814 uint64_t mask = (1ULL << f->width) - 1;
10815 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
10816 ((uintmax_t)v >> f->start) & mask);
10818 if (line_size + len >= 79) {
10820 sbuf_printf(sb, "\n ");
10822 sbuf_printf(sb, "%s ", buf);
10823 line_size += len + 1;
10826 sbuf_printf(sb, "\n");
10829 static const struct field_desc tp_la0[] = {
10830 { "RcfOpCodeOut", 60, 4 },
10831 { "State", 56, 4 },
10832 { "WcfState", 52, 4 },
10833 { "RcfOpcSrcOut", 50, 2 },
10834 { "CRxError", 49, 1 },
10835 { "ERxError", 48, 1 },
10836 { "SanityFailed", 47, 1 },
10837 { "SpuriousMsg", 46, 1 },
10838 { "FlushInputMsg", 45, 1 },
10839 { "FlushInputCpl", 44, 1 },
10840 { "RssUpBit", 43, 1 },
10841 { "RssFilterHit", 42, 1 },
10843 { "InitTcb", 31, 1 },
10844 { "LineNumber", 24, 7 },
10846 { "EdataOut", 22, 1 },
10848 { "CdataOut", 20, 1 },
10849 { "EreadPdu", 19, 1 },
10850 { "CreadPdu", 18, 1 },
10851 { "TunnelPkt", 17, 1 },
10852 { "RcfPeerFin", 16, 1 },
10853 { "RcfReasonOut", 12, 4 },
10854 { "TxCchannel", 10, 2 },
10855 { "RcfTxChannel", 8, 2 },
10856 { "RxEchannel", 6, 2 },
10857 { "RcfRxChannel", 5, 1 },
10858 { "RcfDataOutSrdy", 4, 1 },
10859 { "RxDvld", 3, 1 },
10860 { "RxOoDvld", 2, 1 },
10861 { "RxCongestion", 1, 1 },
10862 { "TxCongestion", 0, 1 },
10866 static const struct field_desc tp_la1[] = {
10867 { "CplCmdIn", 56, 8 },
10868 { "CplCmdOut", 48, 8 },
10869 { "ESynOut", 47, 1 },
10870 { "EAckOut", 46, 1 },
10871 { "EFinOut", 45, 1 },
10872 { "ERstOut", 44, 1 },
10873 { "SynIn", 43, 1 },
10874 { "AckIn", 42, 1 },
10875 { "FinIn", 41, 1 },
10876 { "RstIn", 40, 1 },
10877 { "DataIn", 39, 1 },
10878 { "DataInVld", 38, 1 },
10879 { "PadIn", 37, 1 },
10880 { "RxBufEmpty", 36, 1 },
10881 { "RxDdp", 35, 1 },
10882 { "RxFbCongestion", 34, 1 },
10883 { "TxFbCongestion", 33, 1 },
10884 { "TxPktSumSrdy", 32, 1 },
10885 { "RcfUlpType", 28, 4 },
10886 { "Eread", 27, 1 },
10887 { "Ebypass", 26, 1 },
10888 { "Esave", 25, 1 },
10889 { "Static0", 24, 1 },
10890 { "Cread", 23, 1 },
10891 { "Cbypass", 22, 1 },
10892 { "Csave", 21, 1 },
10893 { "CPktOut", 20, 1 },
10894 { "RxPagePoolFull", 18, 2 },
10895 { "RxLpbkPkt", 17, 1 },
10896 { "TxLpbkPkt", 16, 1 },
10897 { "RxVfValid", 15, 1 },
10898 { "SynLearned", 14, 1 },
10899 { "SetDelEntry", 13, 1 },
10900 { "SetInvEntry", 12, 1 },
10901 { "CpcmdDvld", 11, 1 },
10902 { "CpcmdSave", 10, 1 },
10903 { "RxPstructsFull", 8, 2 },
10904 { "EpcmdDvld", 7, 1 },
10905 { "EpcmdFlush", 6, 1 },
10906 { "EpcmdTrimPrefix", 5, 1 },
10907 { "EpcmdTrimPostfix", 4, 1 },
10908 { "ERssIp4Pkt", 3, 1 },
10909 { "ERssIp6Pkt", 2, 1 },
10910 { "ERssTcpUdpPkt", 1, 1 },
10911 { "ERssFceFipPkt", 0, 1 },
10915 static const struct field_desc tp_la2[] = {
10916 { "CplCmdIn", 56, 8 },
10917 { "MpsVfVld", 55, 1 },
10918 { "MpsPf", 52, 3 },
10919 { "MpsVf", 44, 8 },
10920 { "SynIn", 43, 1 },
10921 { "AckIn", 42, 1 },
10922 { "FinIn", 41, 1 },
10923 { "RstIn", 40, 1 },
10924 { "DataIn", 39, 1 },
10925 { "DataInVld", 38, 1 },
10926 { "PadIn", 37, 1 },
10927 { "RxBufEmpty", 36, 1 },
10928 { "RxDdp", 35, 1 },
10929 { "RxFbCongestion", 34, 1 },
10930 { "TxFbCongestion", 33, 1 },
10931 { "TxPktSumSrdy", 32, 1 },
10932 { "RcfUlpType", 28, 4 },
10933 { "Eread", 27, 1 },
10934 { "Ebypass", 26, 1 },
10935 { "Esave", 25, 1 },
10936 { "Static0", 24, 1 },
10937 { "Cread", 23, 1 },
10938 { "Cbypass", 22, 1 },
10939 { "Csave", 21, 1 },
10940 { "CPktOut", 20, 1 },
10941 { "RxPagePoolFull", 18, 2 },
10942 { "RxLpbkPkt", 17, 1 },
10943 { "TxLpbkPkt", 16, 1 },
10944 { "RxVfValid", 15, 1 },
10945 { "SynLearned", 14, 1 },
10946 { "SetDelEntry", 13, 1 },
10947 { "SetInvEntry", 12, 1 },
10948 { "CpcmdDvld", 11, 1 },
10949 { "CpcmdSave", 10, 1 },
10950 { "RxPstructsFull", 8, 2 },
10951 { "EpcmdDvld", 7, 1 },
10952 { "EpcmdFlush", 6, 1 },
10953 { "EpcmdTrimPrefix", 5, 1 },
10954 { "EpcmdTrimPostfix", 4, 1 },
10955 { "ERssIp4Pkt", 3, 1 },
10956 { "ERssIp6Pkt", 2, 1 },
10957 { "ERssTcpUdpPkt", 1, 1 },
10958 { "ERssFceFipPkt", 0, 1 },
10963 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
10966 field_desc_show(sb, *p, tp_la0);
10970 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
10974 sbuf_printf(sb, "\n");
10975 field_desc_show(sb, p[0], tp_la0);
10976 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
10977 field_desc_show(sb, p[1], tp_la0);
10981 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
10985 sbuf_printf(sb, "\n");
10986 field_desc_show(sb, p[0], tp_la0);
10987 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
10988 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
10992 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
10994 struct adapter *sc = arg1;
10999 void (*show_func)(struct sbuf *, uint64_t *, int);
11001 rc = sysctl_wire_old_buffer(req, 0);
11005 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11009 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
11011 mtx_lock(&sc->reg_lock);
11012 if (hw_off_limits(sc))
11015 t4_tp_read_la(sc, buf, NULL);
11016 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
11019 show_func = tp_la_show2;
11023 show_func = tp_la_show3;
11027 show_func = tp_la_show;
11030 mtx_unlock(&sc->reg_lock);
11035 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
11036 (*show_func)(sb, p, i);
11037 rc = sbuf_finish(sb);
11040 free(buf, M_CXGBE);
11045 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
11047 struct adapter *sc = arg1;
11050 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
11052 rc = sysctl_wire_old_buffer(req, 0);
11056 mtx_lock(&sc->reg_lock);
11057 if (hw_off_limits(sc))
11060 t4_get_chan_txrate(sc, nrate, orate);
11061 mtx_unlock(&sc->reg_lock);
11065 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
11069 if (sc->chip_params->nchan > 2) {
11070 sbuf_printf(sb, " channel 0 channel 1"
11071 " channel 2 channel 3\n");
11072 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
11073 nrate[0], nrate[1], nrate[2], nrate[3]);
11074 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
11075 orate[0], orate[1], orate[2], orate[3]);
11077 sbuf_printf(sb, " channel 0 channel 1\n");
11078 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
11079 nrate[0], nrate[1]);
11080 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
11081 orate[0], orate[1]);
11084 rc = sbuf_finish(sb);
11091 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
11093 struct adapter *sc = arg1;
11098 rc = sysctl_wire_old_buffer(req, 0);
11102 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11106 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
11107 M_ZERO | M_WAITOK);
11109 mtx_lock(&sc->reg_lock);
11110 if (hw_off_limits(sc))
11113 t4_ulprx_read_la(sc, buf);
11114 mtx_unlock(&sc->reg_lock);
11119 sbuf_printf(sb, " Pcmd Type Message"
11121 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
11122 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
11123 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
11125 rc = sbuf_finish(sb);
11128 free(buf, M_CXGBE);
11133 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
11135 struct adapter *sc = arg1;
11138 uint32_t cfg, s1, s2;
11140 MPASS(chip_id(sc) >= CHELSIO_T5);
11142 rc = sysctl_wire_old_buffer(req, 0);
11146 mtx_lock(&sc->reg_lock);
11147 if (hw_off_limits(sc))
11150 cfg = t4_read_reg(sc, A_SGE_STAT_CFG);
11151 s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL);
11152 s2 = t4_read_reg(sc, A_SGE_STAT_MATCH);
11154 mtx_unlock(&sc->reg_lock);
11158 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11162 if (G_STATSOURCE_T5(cfg) == 7) {
11165 mode = is_t5(sc) ? G_STATMODE(cfg) : G_T6_STATMODE(cfg);
11167 sbuf_printf(sb, "total %d, incomplete %d", s1, s2);
11168 else if (mode == 1)
11169 sbuf_printf(sb, "total %d, data overflow %d", s1, s2);
11171 sbuf_printf(sb, "unknown mode %d", mode);
11173 rc = sbuf_finish(sb);
11180 sysctl_cpus(SYSCTL_HANDLER_ARGS)
11182 struct adapter *sc = arg1;
11183 enum cpu_sets op = arg2;
11188 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
11191 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
11195 rc = sysctl_wire_old_buffer(req, 0);
11199 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
11204 sbuf_printf(sb, "%d ", i);
11205 rc = sbuf_finish(sb);
11212 sysctl_reset(SYSCTL_HANDLER_ARGS)
11214 struct adapter *sc = arg1;
11218 val = atomic_load_int(&sc->num_resets);
11219 rc = sysctl_handle_int(oidp, &val, 0, req);
11220 if (rc != 0 || req->newptr == NULL)
11224 /* Zero out the counter that tracks reset. */
11225 atomic_store_int(&sc->num_resets, 0);
11230 return (EINVAL); /* 0 or 1 are the only legal values */
11232 if (hw_off_limits(sc)) /* harmless race */
11235 taskqueue_enqueue(reset_tq, &sc->reset_task);
11241 sysctl_tls(SYSCTL_HANDLER_ARGS)
11243 struct adapter *sc = arg1;
11245 struct vi_info *vi;
11248 rc = sysctl_handle_int(oidp, &v, 0, req);
11249 if (rc != 0 || req->newptr == NULL)
11252 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
11255 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls");
11258 if (hw_off_limits(sc))
11262 for_each_port(sc, i) {
11263 for_each_vi(sc->port[i], j, vi) {
11264 if (vi->flags & VI_INIT_DONE)
11265 t4_update_fl_bufsize(vi->ifp);
11269 end_synchronized_op(sc, 0);
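/*
 * Format val/factor as a decimal string with trailing zeros stripped from the
 * fractional part, e.g. unit_conv(buf, len, 15625, 1000) yields "15.625".
 * sysctl_tp_tick uses this to report a tick measured in picoseconds as
 * microseconds (factor 1000000).
 */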
11276 unit_conv(char *buf, size_t len, u_int val, u_int factor)
11278 u_int rem = val % factor;
11281 snprintf(buf, len, "%u", val / factor);
11283 while (rem % 10 == 0)
11285 snprintf(buf, len, "%u.%u", val / factor, rem);
11290 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
11292 struct adapter *sc = arg1;
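/* vpd.cclk is the core clock in kHz, so cclk_ps is the clock period in ps. */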
11295 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11297 mtx_lock(&sc->reg_lock);
11298 if (hw_off_limits(sc))
11301 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
11302 mtx_unlock(&sc->reg_lock);
11303 if (res == (u_int)-1)
11309 re = G_TIMERRESOLUTION(res);
11312 /* TCP timestamp tick */
11313 re = G_TIMESTAMPRESOLUTION(res);
11317 re = G_DELAYEDACKRESOLUTION(res);
11323 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
11325 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
11329 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
11331 struct adapter *sc = arg1;
11333 u_int dack_tmr, dack_re, v;
11334 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11336 mtx_lock(&sc->reg_lock);
11337 if (hw_off_limits(sc))
11341 dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc,
11342 A_TP_TIMER_RESOLUTION));
11343 dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER);
11345 mtx_unlock(&sc->reg_lock);
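/*
 * One delayed-ACK tick is the core clock period scaled by the dack
 * resolution; convert it from picoseconds to microseconds and multiply by the
 * programmed tick count to get the delayed-ACK timer in microseconds.
 */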
11349 v = ((cclk_ps << dack_re) / 1000000) * dack_tmr;
11351 return (sysctl_handle_int(oidp, &v, 0, req));
11355 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
11357 struct adapter *sc = arg1;
11358 int rc, reg = arg2;
11360 u_long tp_tick_us, v;
11361 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
11363 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
11364 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
11365 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
11366 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
11368 mtx_lock(&sc->reg_lock);
11369 if (hw_off_limits(sc))
11373 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
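/*
 * tp_tick_us is the TP timer tick in microseconds.  The timer registers hold
 * tick counts; A_TP_INIT_SRTT keeps its count in the INITSRTT field only.
 */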
11374 tp_tick_us = (cclk_ps << tre) / 1000000;
11375 if (reg == A_TP_INIT_SRTT)
11376 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
11378 v = tp_tick_us * t4_read_reg(sc, reg);
11380 mtx_unlock(&sc->reg_lock);
11384 return (sysctl_handle_long(oidp, &v, 0, req));
11388 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
11389 * passed to this function.
11392 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
11394 struct adapter *sc = arg1;
11395 int rc, idx = arg2;
11398 MPASS(idx >= 0 && idx <= 24);
11400 mtx_lock(&sc->reg_lock);
11401 if (hw_off_limits(sc))
11405 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
11407 mtx_unlock(&sc->reg_lock);
11411 return (sysctl_handle_int(oidp, &v, 0, req));
11415 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
11417 struct adapter *sc = arg1;
11418 int rc, idx = arg2;
11421 MPASS(idx >= 0 && idx < 16);
11423 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
11424 shift = (idx & 3) << 3;
11425 mtx_lock(&sc->reg_lock);
11426 if (hw_off_limits(sc))
11430 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
11432 mtx_unlock(&sc->reg_lock);
11436 return (sysctl_handle_int(oidp, &v, 0, req));
11440 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
11442 struct vi_info *vi = arg1;
11443 struct adapter *sc = vi->adapter;
11445 struct sge_ofld_rxq *ofld_rxq;
11448 idx = vi->ofld_tmr_idx;
11450 rc = sysctl_handle_int(oidp, &idx, 0, req);
11451 if (rc != 0 || req->newptr == NULL)
11454 if (idx < 0 || idx >= SGE_NTIMERS)
11457 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
11462 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
11463 for_each_ofld_rxq(vi, i, ofld_rxq) {
11464 #ifdef atomic_store_rel_8
11465 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
11467 ofld_rxq->iq.intr_params = v;
11470 vi->ofld_tmr_idx = idx;
11472 end_synchronized_op(sc, LOCK_HELD);
11477 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
11479 struct vi_info *vi = arg1;
11480 struct adapter *sc = vi->adapter;
11483 idx = vi->ofld_pktc_idx;
11485 rc = sysctl_handle_int(oidp, &idx, 0, req);
11486 if (rc != 0 || req->newptr == NULL)
11489 if (idx < -1 || idx >= SGE_NCOUNTERS)
11492 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
11497 if (vi->flags & VI_INIT_DONE)
11498 rc = EBUSY; /* cannot be changed once the queues are created */
11500 vi->ofld_pktc_idx = idx;
11502 end_synchronized_op(sc, LOCK_HELD);
11508 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
11512 if (cntxt->cid > M_CTXTQID)
11515 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
11516 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
11519 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
11523 if (hw_off_limits(sc)) {
11528 if (sc->flags & FW_OK) {
11529 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
11536 * Read via firmware failed or wasn't even attempted. Read directly via
11539 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
11541 end_synchronized_op(sc, 0);
11546 load_fw(struct adapter *sc, struct t4_data *fw)
11551 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
11555 if (hw_off_limits(sc)) {
11561 * The firmware, with the sole exception of the memory parity error
11562 * handler, runs from memory and not flash. It is almost always safe to
11563 * install a new firmware on a running system. Just set bit 1 in
11564 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
11566 if (sc->flags & FULL_INIT_DONE &&
11567 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
11572 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
11574 rc = copyin(fw->data, fw_data, fw->len);
11576 rc = -t4_load_fw(sc, fw_data, fw->len);
11578 free(fw_data, M_CXGBE);
11580 end_synchronized_op(sc, 0);
11585 load_cfg(struct adapter *sc, struct t4_data *cfg)
11588 uint8_t *cfg_data = NULL;
11590 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
11594 if (hw_off_limits(sc)) {
11599 if (cfg->len == 0) {
11601 rc = -t4_load_cfg(sc, NULL, 0);
11605 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
11607 rc = copyin(cfg->data, cfg_data, cfg->len);
11609 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
11611 free(cfg_data, M_CXGBE);
11613 end_synchronized_op(sc, 0);
11618 load_boot(struct adapter *sc, struct t4_bootrom *br)
11621 uint8_t *br_data = NULL;
11624 if (br->len > 1024 * 1024)
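/*
 * pf_offset selects how the flash offset for the boot image is obtained:
 * 0 means pfidx_addr is a PF number (0-7) and the offset is taken from that
 * PF's expansion-ROM offset register; 1 means pfidx_addr is the offset itself.
 */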
11627 if (br->pf_offset == 0) {
11629 if (br->pfidx_addr > 7)
11631 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
11632 A_PCIE_PF_EXPROM_OFST)));
11633 } else if (br->pf_offset == 1) {
11635 offset = G_OFFSET(br->pfidx_addr);
11640 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
11644 if (hw_off_limits(sc)) {
11649 if (br->len == 0) {
11651 rc = -t4_load_boot(sc, NULL, offset, 0);
11655 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
11657 rc = copyin(br->data, br_data, br->len);
11659 rc = -t4_load_boot(sc, br_data, offset, br->len);
11661 free(br_data, M_CXGBE);
11663 end_synchronized_op(sc, 0);
11668 load_bootcfg(struct adapter *sc, struct t4_data *bc)
11671 uint8_t *bc_data = NULL;
11673 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
11677 if (hw_off_limits(sc)) {
11682 if (bc->len == 0) {
11684 rc = -t4_load_bootcfg(sc, NULL, 0);
11688 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
11690 rc = copyin(bc->data, bc_data, bc->len);
11692 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
11694 free(bc_data, M_CXGBE);
11696 end_synchronized_op(sc, 0);
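/*
 * Collect a Chelsio unified debug (cudbg) dump of the regions selected in
 * dump->bitmap into a temporary buffer and copy it out to the caller;
 * wr_flash additionally asks the cudbg library to write the dump to flash.
 */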
11701 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
11704 struct cudbg_init *cudbg;
11705 void *handle, *buf;
11707 /* buf is large, don't block if no memory is available */
11708 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
11712 handle = cudbg_alloc_handle();
11713 if (handle == NULL) {
11718 cudbg = cudbg_get_init(handle);
11720 cudbg->print = (cudbg_print_cb)printf;
11723 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
11724 __func__, dump->wr_flash, dump->len, dump->data);
11727 if (dump->wr_flash)
11728 cudbg->use_flash = 1;
11729 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
11730 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
11732 rc = cudbg_collect(handle, buf, &dump->len);
11736 rc = copyout(buf, dump->data, dump->len);
11738 cudbg_free_handle(handle);
11739 free(buf, M_CXGBE);
11744 free_offload_policy(struct t4_offload_policy *op)
11746 struct offload_rule *r;
11753 for (i = 0; i < op->nrules; i++, r++) {
11754 free(r->bpf_prog.bf_insns, M_CXGBE);
11756 free(op->rule, M_CXGBE);
11761 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
11764 struct t4_offload_policy *op, *old;
11765 struct bpf_program *bf;
11766 const struct offload_settings *s;
11767 struct offload_rule *r;
11770 if (!is_offload(sc))
11773 if (uop->nrules == 0) {
11774 /* Delete installed policies. */
11777 } else if (uop->nrules > 256) { /* arbitrary */
11781 /* Copy userspace offload policy to kernel */
11782 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
11783 op->nrules = uop->nrules;
11784 len = op->nrules * sizeof(struct offload_rule);
11785 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
11786 rc = copyin(uop->rule, op->rule, len);
11788 free(op->rule, M_CXGBE);
11794 for (i = 0; i < op->nrules; i++, r++) {
11796 /* Validate open_type */
11797 if (r->open_type != OPEN_TYPE_LISTEN &&
11798 r->open_type != OPEN_TYPE_ACTIVE &&
11799 r->open_type != OPEN_TYPE_PASSIVE &&
11800 r->open_type != OPEN_TYPE_DONTCARE) {
11803 * Rules 0 to i have malloc'd filters that need to be
11804 * freed. Rules i+1 to nrules have userspace pointers
11805 * and should be left alone.
11808 free_offload_policy(op);
11812 /* Validate settings */
11814 if ((s->offload != 0 && s->offload != 1) ||
11815 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
11816 s->sched_class < -1 ||
11817 s->sched_class >= sc->params.nsched_cls) {
11823 u = bf->bf_insns; /* userspace ptr */
11824 bf->bf_insns = NULL;
11825 if (bf->bf_len == 0) {
11826 /* legal, matches everything */
11829 len = bf->bf_len * sizeof(*bf->bf_insns);
11830 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
11831 rc = copyin(u, bf->bf_insns, len);
11835 if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
11841 rw_wlock(&sc->policy_lock);
11844 rw_wunlock(&sc->policy_lock);
11845 free_offload_policy(old);
11850 #define MAX_READ_BUF_SIZE (128 * 1024)
11852 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
11854 uint32_t addr, remaining, n;
11859 mtx_lock(&sc->reg_lock);
11860 if (hw_off_limits(sc))
11863 rc = validate_mem_range(sc, mr->addr, mr->len);
11864 mtx_unlock(&sc->reg_lock);
11868 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
11870 remaining = mr->len;
11871 dst = (void *)mr->data;
11873 while (remaining) {
11874 n = min(remaining, MAX_READ_BUF_SIZE);
11875 mtx_lock(&sc->reg_lock);
11876 if (hw_off_limits(sc))
11879 read_via_memwin(sc, 2, addr, buf, n);
11880 mtx_unlock(&sc->reg_lock);
11884 rc = copyout(buf, dst, n);
11893 free(buf, M_CXGBE);
11896 #undef MAX_READ_BUF_SIZE
11899 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
11903 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
11906 if (i2cd->len > sizeof(i2cd->data))
11909 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
11912 if (hw_off_limits(sc))
11915 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
11916 i2cd->offset, i2cd->len, &i2cd->data[0]);
11917 end_synchronized_op(sc, 0);
11923 clear_stats(struct adapter *sc, u_int port_id)
11925 int i, v, chan_map;
11926 struct port_info *pi;
11927 struct vi_info *vi;
11928 struct sge_rxq *rxq;
11929 struct sge_txq *txq;
11930 struct sge_wrq *wrq;
11931 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
11932 struct sge_ofld_txq *ofld_txq;
11935 struct sge_ofld_rxq *ofld_rxq;
11938 if (port_id >= sc->params.nports)
11940 pi = sc->port[port_id];
11944 mtx_lock(&sc->reg_lock);
11945 if (!hw_off_limits(sc)) {
11947 t4_clr_port_stats(sc, pi->tx_chan);
11949 if (pi->fcs_reg != -1)
11950 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
11952 pi->stats.rx_fcs_err = 0;
11954 for_each_vi(pi, v, vi) {
11955 if (vi->flags & VI_INIT_DONE)
11956 t4_clr_vi_stats(sc, vi->vin);
11958 chan_map = pi->rx_e_chan_map;
11961 i = ffs(chan_map) - 1;
11962 t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
11963 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
11964 chan_map &= ~(1 << i);
11967 mtx_unlock(&sc->reg_lock);
11968 pi->tx_parse_error = 0;
11969 pi->tnl_cong_drops = 0;
11972 * Since this command accepts a port, clear stats for
11973 * all VIs on this port.
11975 for_each_vi(pi, v, vi) {
11976 if (vi->flags & VI_INIT_DONE) {
11978 for_each_rxq(vi, i, rxq) {
11979 #if defined(INET) || defined(INET6)
11980 rxq->lro.lro_queued = 0;
11981 rxq->lro.lro_flushed = 0;
11984 rxq->vlan_extraction = 0;
11985 rxq->vxlan_rxcsum = 0;
11987 rxq->fl.cl_allocated = 0;
11988 rxq->fl.cl_recycled = 0;
11989 rxq->fl.cl_fast_recycled = 0;
11992 for_each_txq(vi, i, txq) {
11995 txq->vlan_insertion = 0;
11998 txq->txpkt_wrs = 0;
11999 txq->txpkts0_wrs = 0;
12000 txq->txpkts1_wrs = 0;
12001 txq->txpkts0_pkts = 0;
12002 txq->txpkts1_pkts = 0;
12003 txq->txpkts_flush = 0;
12005 txq->vxlan_tso_wrs = 0;
12006 txq->vxlan_txcsum = 0;
12007 txq->kern_tls_records = 0;
12008 txq->kern_tls_short = 0;
12009 txq->kern_tls_partial = 0;
12010 txq->kern_tls_full = 0;
12011 txq->kern_tls_octets = 0;
12012 txq->kern_tls_waste = 0;
12013 txq->kern_tls_options = 0;
12014 txq->kern_tls_header = 0;
12015 txq->kern_tls_fin = 0;
12016 txq->kern_tls_fin_short = 0;
12017 txq->kern_tls_cbc = 0;
12018 txq->kern_tls_gcm = 0;
12019 mp_ring_reset_stats(txq->r);
12022 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
12023 for_each_ofld_txq(vi, i, ofld_txq) {
12024 ofld_txq->wrq.tx_wrs_direct = 0;
12025 ofld_txq->wrq.tx_wrs_copied = 0;
12026 counter_u64_zero(ofld_txq->tx_iscsi_pdus);
12027 counter_u64_zero(ofld_txq->tx_iscsi_octets);
12028 counter_u64_zero(ofld_txq->tx_iscsi_iso_wrs);
12029 counter_u64_zero(ofld_txq->tx_aio_jobs);
12030 counter_u64_zero(ofld_txq->tx_aio_octets);
12031 counter_u64_zero(ofld_txq->tx_toe_tls_records);
12032 counter_u64_zero(ofld_txq->tx_toe_tls_octets);
12036 for_each_ofld_rxq(vi, i, ofld_rxq) {
12037 ofld_rxq->fl.cl_allocated = 0;
12038 ofld_rxq->fl.cl_recycled = 0;
12039 ofld_rxq->fl.cl_fast_recycled = 0;
12041 ofld_rxq->rx_iscsi_ddp_setup_ok);
12043 ofld_rxq->rx_iscsi_ddp_setup_error);
12044 ofld_rxq->rx_iscsi_ddp_pdus = 0;
12045 ofld_rxq->rx_iscsi_ddp_octets = 0;
12046 ofld_rxq->rx_iscsi_fl_pdus = 0;
12047 ofld_rxq->rx_iscsi_fl_octets = 0;
12048 ofld_rxq->rx_aio_ddp_jobs = 0;
12049 ofld_rxq->rx_aio_ddp_octets = 0;
12050 ofld_rxq->rx_toe_tls_records = 0;
12051 ofld_rxq->rx_toe_tls_octets = 0;
12052 ofld_rxq->rx_toe_ddp_octets = 0;
12053 counter_u64_zero(ofld_rxq->ddp_buffer_alloc);
12054 counter_u64_zero(ofld_rxq->ddp_buffer_reuse);
12055 counter_u64_zero(ofld_rxq->ddp_buffer_free);
12059 if (IS_MAIN_VI(vi)) {
12060 wrq = &sc->sge.ctrlq[pi->port_id];
12061 wrq->tx_wrs_direct = 0;
12062 wrq->tx_wrs_copied = 0;
12071 hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)
12074 struct in6_addr in6;
12076 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr));
12077 if (t4_get_clip_entry(sc, &in6, true) != NULL)
12087 release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca)
12090 struct in6_addr in6;
12092 bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr));
12093 return (t4_release_clip_addr(sc, &in6));
12100 t4_os_find_pci_capability(struct adapter *sc, int cap)
12104 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
12108 t4_os_pci_save_state(struct adapter *sc)
12111 struct pci_devinfo *dinfo;
12114 dinfo = device_get_ivars(dev);
12116 pci_cfg_save(dev, dinfo, 0);
12121 t4_os_pci_restore_state(struct adapter *sc)
12124 struct pci_devinfo *dinfo;
12127 dinfo = device_get_ivars(dev);
12129 pci_cfg_restore(dev, dinfo);
12134 t4_os_portmod_changed(struct port_info *pi)
12136 struct adapter *sc = pi->adapter;
12137 struct vi_info *vi;
12139 static const char *mod_str[] = {
12140 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
12143 KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
12144 ("%s: port_type %u", __func__, pi->port_type));
12147 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
12149 build_medialist(pi);
12150 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
12151 fixup_link_config(pi);
12152 apply_link_config(pi);
12155 end_synchronized_op(sc, LOCK_HELD);
12159 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
12160 if_printf(ifp, "transceiver unplugged.\n");
12161 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
12162 if_printf(ifp, "unknown transceiver inserted.\n");
12163 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
12164 if_printf(ifp, "unsupported transceiver inserted.\n");
12165 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
12166 if_printf(ifp, "%dGbps %s transceiver inserted.\n",
12167 port_top_speed(pi), mod_str[pi->mod_type]);
12169 if_printf(ifp, "transceiver (type %d) inserted.\n",
12175 t4_os_link_changed(struct port_info *pi)
12177 struct vi_info *vi;
12179 struct link_config *lc = &pi->link_cfg;
12180 struct adapter *sc = pi->adapter;
12183 PORT_LOCK_ASSERT_OWNED(pi);
12187 if (lc->speed > 25000 ||
12188 (lc->speed == 25000 && lc->fec == FEC_RS)) {
12189 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
12190 A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
12192 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
12193 A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
12195 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
12196 pi->stats.rx_fcs_err = 0;
12201 MPASS(pi->fcs_reg != -1);
12202 MPASS(pi->fcs_base == 0);
12205 for_each_vi(pi, v, vi) {
12211 if_setbaudrate(ifp, IF_Mbps(lc->speed));
12212 if_link_state_change(ifp, LINK_STATE_UP);
12214 if_link_state_change(ifp, LINK_STATE_DOWN);
12220 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
12222 struct adapter *sc;
12224 sx_slock(&t4_list_lock);
12225 SLIST_FOREACH(sc, &t4_list, link) {
12227 * func should not make any assumptions about what state sc is
12228 * in - the only guarantee is that sc->sc_lock is a valid lock.
12232 sx_sunlock(&t4_list_lock);
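/*
 * A minimal sketch of a t4_iterate() callback (illustrative only; the
 * function and variable names below are hypothetical and not part of the
 * driver).  Real callers in this file, such as t4_vxlan_start(), follow
 * the same pattern.
 */
#if 0
static void
example_count_adapter(struct adapter *sc, void *arg)
{
	int *nadapters = arg;

	/* Per the contract above, assume nothing about sc's state. */
	(*nadapters)++;
}

/* Usage: int n = 0; t4_iterate(example_count_adapter, &n); */
#endif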
12236 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
12240 struct adapter *sc = dev->si_drv1;
12242 rc = priv_check(td, PRIV_DRIVER);
12247 case CHELSIO_T4_GETREG: {
12248 struct t4_reg *edata = (struct t4_reg *)data;
12250 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
12253 mtx_lock(&sc->reg_lock);
12254 if (hw_off_limits(sc))
12256 else if (edata->size == 4)
12257 edata->val = t4_read_reg(sc, edata->addr);
12258 else if (edata->size == 8)
12259 edata->val = t4_read_reg64(sc, edata->addr);
12262 mtx_unlock(&sc->reg_lock);
12266 case CHELSIO_T4_SETREG: {
12267 struct t4_reg *edata = (struct t4_reg *)data;
12269 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
12272 mtx_lock(&sc->reg_lock);
12273 if (hw_off_limits(sc))
12275 else if (edata->size == 4) {
12276 if (edata->val & 0xffffffff00000000)
12278 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
12279 } else if (edata->size == 8)
12280 t4_write_reg64(sc, edata->addr, edata->val);
12283 mtx_unlock(&sc->reg_lock);
12287 case CHELSIO_T4_REGDUMP: {
12288 struct t4_regdump *regs = (struct t4_regdump *)data;
12289 int reglen = t4_get_regs_len(sc);
12292 if (regs->len < reglen) {
12293 regs->len = reglen; /* hint to the caller */
12297 regs->len = reglen;
12298 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
12299 mtx_lock(&sc->reg_lock);
12300 if (hw_off_limits(sc))
12303 get_regs(sc, regs, buf);
12304 mtx_unlock(&sc->reg_lock);
12306 rc = copyout(buf, regs->data, reglen);
12307 free(buf, M_CXGBE);
12310 case CHELSIO_T4_GET_FILTER_MODE:
12311 rc = get_filter_mode(sc, (uint32_t *)data);
12313 case CHELSIO_T4_SET_FILTER_MODE:
12314 rc = set_filter_mode(sc, *(uint32_t *)data);
12316 case CHELSIO_T4_SET_FILTER_MASK:
12317 rc = set_filter_mask(sc, *(uint32_t *)data);
12319 case CHELSIO_T4_GET_FILTER:
12320 rc = get_filter(sc, (struct t4_filter *)data);
12322 case CHELSIO_T4_SET_FILTER:
12323 rc = set_filter(sc, (struct t4_filter *)data);
12325 case CHELSIO_T4_DEL_FILTER:
12326 rc = del_filter(sc, (struct t4_filter *)data);
12328 case CHELSIO_T4_GET_SGE_CONTEXT:
12329 rc = get_sge_context(sc, (struct t4_sge_context *)data);
12331 case CHELSIO_T4_LOAD_FW:
12332 rc = load_fw(sc, (struct t4_data *)data);
12334 case CHELSIO_T4_GET_MEM:
12335 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
12337 case CHELSIO_T4_GET_I2C:
12338 rc = read_i2c(sc, (struct t4_i2c_data *)data);
12340 case CHELSIO_T4_CLEAR_STATS:
12341 rc = clear_stats(sc, *(uint32_t *)data);
12343 case CHELSIO_T4_SCHED_CLASS:
12344 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
12346 case CHELSIO_T4_SCHED_QUEUE:
12347 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
12349 case CHELSIO_T4_GET_TRACER:
12350 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
12352 case CHELSIO_T4_SET_TRACER:
12353 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
12355 case CHELSIO_T4_LOAD_CFG:
12356 rc = load_cfg(sc, (struct t4_data *)data);
12358 case CHELSIO_T4_LOAD_BOOT:
12359 rc = load_boot(sc, (struct t4_bootrom *)data);
12361 case CHELSIO_T4_LOAD_BOOTCFG:
12362 rc = load_bootcfg(sc, (struct t4_data *)data);
12364 case CHELSIO_T4_CUDBG_DUMP:
12365 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
12367 case CHELSIO_T4_SET_OFLD_POLICY:
12368 rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
12370 case CHELSIO_T4_HOLD_CLIP_ADDR:
12371 rc = hold_clip_addr(sc, (struct t4_clip_addr *)data);
12373 case CHELSIO_T4_RELEASE_CLIP_ADDR:
12374 rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
12385 toe_capability(struct vi_info *vi, bool enable)
12388 struct port_info *pi = vi->pi;
12389 struct adapter *sc = pi->adapter;
12391 ASSERT_SYNCHRONIZED_OP(sc);
12393 if (!is_offload(sc))
12395 if (hw_off_limits(sc))
12400 if (sc->flags & KERN_TLS_ON && is_t6(sc)) {
12402 struct port_info *p;
12406 * Reconfigure hardware for TOE if TXTLS is not enabled
12410 for_each_port(sc, i) {
12412 for_each_vi(p, j, v) {
12413 if (if_getcapenable(v->ifp) & IFCAP_TXTLS) {
12415 "%s has NIC TLS enabled.\n",
12416 device_get_nameunit(v->dev));
12422 CH_WARN(sc, "Disable NIC TLS on all interfaces "
12423 "associated with this adapter before "
12424 "trying to enable TOE.\n");
12427 rc = t6_config_kern_tls(sc, false);
12432 if ((if_getcapenable(vi->ifp) & IFCAP_TOE) != 0) {
12433 /* TOE is already enabled. */
12438 * We need the port's queues around so that we're able to send
12439 * and receive CPLs to/from the TOE even if the ifnet for this
12440 * port has never been UP'd administratively.
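/*
 * (Illustrative note: this path is normally reached from userland by
 * loading the TOE module and toggling the interface capability, e.g.
 * "kldload t4_tom" followed by "ifconfig cxgbe0 toe"; the interface
 * name is only an example.)
 */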
12442 if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0))
12444 if (!(pi->vi[0].flags & VI_INIT_DONE) &&
12445 ((rc = vi_init(&pi->vi[0])) != 0))
12448 if (isset(&sc->offload_map, pi->port_id)) {
12449 /* TOE is enabled on another VI of this port. */
12454 if (!uld_active(sc, ULD_TOM)) {
12455 rc = t4_activate_uld(sc, ULD_TOM);
12456 if (rc == EAGAIN) {
12458 "You must kldload t4_tom.ko before trying "
12459 "to enable TOE on a cxgbe interface.\n");
12463 KASSERT(sc->tom_softc != NULL,
12464 ("%s: TOM activated but softc NULL", __func__));
12465 KASSERT(uld_active(sc, ULD_TOM),
12466 ("%s: TOM activated but flag not set", __func__));
12469 /* Activate iWARP and iSCSI too, if the modules are loaded. */
12470 if (!uld_active(sc, ULD_IWARP))
12471 (void) t4_activate_uld(sc, ULD_IWARP);
12472 if (!uld_active(sc, ULD_ISCSI))
12473 (void) t4_activate_uld(sc, ULD_ISCSI);
12476 setbit(&sc->offload_map, pi->port_id);
12480 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
12483 KASSERT(uld_active(sc, ULD_TOM),
12484 ("%s: TOM never initialized?", __func__));
12485 clrbit(&sc->offload_map, pi->port_id);
12492 * Add an upper layer driver to the global list.
12495 t4_register_uld(struct uld_info *ui)
12498 struct uld_info *u;
12500 sx_xlock(&t4_uld_list_lock);
12501 SLIST_FOREACH(u, &t4_uld_list, link) {
12502 if (u->uld_id == ui->uld_id) {
12508 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
12511 sx_xunlock(&t4_uld_list_lock);
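/*
 * A minimal sketch of how an upper layer driver might register itself
 * (illustrative only; the names below are hypothetical, and only the
 * uld_id/activate/deactivate members used elsewhere in this file are
 * shown, not the full struct uld_info):
 */
#if 0
static int example_activate(struct adapter *sc) { return (0); }
static int example_deactivate(struct adapter *sc) { return (0); }

static struct uld_info example_uld_info = {
	.uld_id = ULD_TOM,
	.activate = example_activate,
	.deactivate = example_deactivate,
};

/* Typically called from the ULD module's MOD_LOAD handler: */
/* rc = t4_register_uld(&example_uld_info); */
#endif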
12516 t4_unregister_uld(struct uld_info *ui)
12519 struct uld_info *u;
12521 sx_xlock(&t4_uld_list_lock);
12523 SLIST_FOREACH(u, &t4_uld_list, link) {
12525 if (ui->refcount > 0) {
12530 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
12536 sx_xunlock(&t4_uld_list_lock);
12541 t4_activate_uld(struct adapter *sc, int id)
12544 struct uld_info *ui;
12546 ASSERT_SYNCHRONIZED_OP(sc);
12548 if (id < 0 || id > ULD_MAX)
12550 rc = EAGAIN; /* kldload the module with this ULD and try again. */
12552 sx_slock(&t4_uld_list_lock);
12554 SLIST_FOREACH(ui, &t4_uld_list, link) {
12555 if (ui->uld_id == id) {
12556 if (!(sc->flags & FULL_INIT_DONE)) {
12557 rc = adapter_init(sc);
12562 rc = ui->activate(sc);
12564 setbit(&sc->active_ulds, id);
12571 sx_sunlock(&t4_uld_list_lock);
12577 t4_deactivate_uld(struct adapter *sc, int id)
12580 struct uld_info *ui;
12582 ASSERT_SYNCHRONIZED_OP(sc);
12584 if (id < 0 || id > ULD_MAX)
12588 sx_slock(&t4_uld_list_lock);
12590 SLIST_FOREACH(ui, &t4_uld_list, link) {
12591 if (ui->uld_id == id) {
12592 rc = ui->deactivate(sc);
12594 clrbit(&sc->active_ulds, id);
12601 sx_sunlock(&t4_uld_list_lock);
12607 t4_deactivate_all_uld(struct adapter *sc)
12610 struct uld_info *ui;
12612 rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld");
12616 sx_slock(&t4_uld_list_lock);
12618 SLIST_FOREACH(ui, &t4_uld_list, link) {
12619 if (isset(&sc->active_ulds, ui->uld_id)) {
12620 rc = ui->deactivate(sc);
12623 clrbit(&sc->active_ulds, ui->uld_id);
12628 sx_sunlock(&t4_uld_list_lock);
12629 end_synchronized_op(sc, 0);
12635 t4_async_event(struct adapter *sc)
12637 struct uld_info *ui;
12639 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
12641 sx_slock(&t4_uld_list_lock);
12642 SLIST_FOREACH(ui, &t4_uld_list, link) {
12643 if (ui->uld_id == ULD_IWARP) {
12644 ui->async_event(sc);
12648 sx_sunlock(&t4_uld_list_lock);
12649 end_synchronized_op(sc, 0);
12653 uld_active(struct adapter *sc, int uld_id)
12656 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
12658 return (isset(&sc->active_ulds, uld_id));
12664 ktls_capability(struct adapter *sc, bool enable)
12666 ASSERT_SYNCHRONIZED_OP(sc);
12672 if (hw_off_limits(sc))
12676 if (sc->flags & KERN_TLS_ON)
12677 return (0); /* already on */
12678 if (sc->offload_map != 0) {
12680 "Disable TOE on all interfaces associated with "
12681 "this adapter before trying to enable NIC TLS.\n");
12684 return (t6_config_kern_tls(sc, true));
12687 * Nothing to do for disable. If TOE is enabled sometime later
12688 * then toe_capability will reconfigure the hardware.
12696 * t = ptr to tunable.
12697 * nc = number of CPUs.
12698 * c = compiled-in default for that tunable.
12701 calculate_nqueues(int *t, int nc, const int c)
12707 nq = *t < 0 ? -*t : c;
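/*
 * For example, with c = 16: a tunable left at 0 starts nq at the
 * compiled-in default (16), while a tunable set to -4 starts nq at 4.
 */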
12712 * Come up with reasonable defaults for some of the tunables, provided they're
12713 * not set by the user (in which case we'll use the values as is).
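/*
 * Illustrative example (not driver code): these knobs are exposed as
 * hw.cxgbe.* loader tunables, so a system with few CPUs might pin the
 * queue counts in /boot/loader.conf with something like
 *
 *   hw.cxgbe.ntxq="4"
 *   hw.cxgbe.nrxq="4"
 *
 * See cxgbe(4) for the full list; the values above are examples only.
 */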
12716 tweak_tunables(void)
12718 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
12722 t4_ntxq = rss_getnumbuckets();
12724 calculate_nqueues(&t4_ntxq, nc, NTXQ);
12728 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
12732 t4_nrxq = rss_getnumbuckets();
12734 calculate_nqueues(&t4_nrxq, nc, NRXQ);
12738 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
12740 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
12741 calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
12742 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
12745 calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
12746 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
12749 #if defined(TCP_OFFLOAD) || defined(KERN_TLS)
12750 if (t4_toecaps_allowed == -1)
12751 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
12753 if (t4_toecaps_allowed == -1)
12754 t4_toecaps_allowed = 0;
12758 if (t4_rdmacaps_allowed == -1) {
12759 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
12760 FW_CAPS_CONFIG_RDMA_RDMAC;
12763 if (t4_iscsicaps_allowed == -1) {
12764 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
12765 FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
12766 FW_CAPS_CONFIG_ISCSI_T10DIF;
12769 if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
12770 t4_tmr_idx_ofld = TMR_IDX_OFLD;
12772 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
12773 t4_pktc_idx_ofld = PKTC_IDX_OFLD;
12775 if (t4_rdmacaps_allowed == -1)
12776 t4_rdmacaps_allowed = 0;
12778 if (t4_iscsicaps_allowed == -1)
12779 t4_iscsicaps_allowed = 0;
12783 calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ);
12784 calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ);
12785 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
12786 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
12789 if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
12790 t4_tmr_idx = TMR_IDX;
12792 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
12793 t4_pktc_idx = PKTC_IDX;
12795 if (t4_qsize_txq < 128)
12796 t4_qsize_txq = 128;
12798 if (t4_qsize_rxq < 128)
12799 t4_qsize_rxq = 128;
12800 while (t4_qsize_rxq & 7)
12803 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
12806 * Number of VIs to create per-port. The first VI is the "main" regular
12807 * VI for the port. The rest are additional virtual interfaces on the
12808 * same physical port. Note that the main VI does not have native
12809 * netmap support but the extra VIs do.
12811 * Limit the number of VIs per port to the number of available
12812 * MAC addresses per port.
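/*
 * Illustrative example: requesting one extra VI per port would be done
 * with the loader tunable hw.cxgbe.num_vis="2" in /boot/loader.conf;
 * anything beyond the per-port MAC-address budget is trimmed below.
 */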
12814 if (t4_num_vis < 1)
12816 if (t4_num_vis > nitems(vi_mac_funcs)) {
12817 t4_num_vis = nitems(vi_mac_funcs);
12818 printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
12821 if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
12822 pcie_relaxed_ordering = 1;
12823 #if defined(__i386__) || defined(__amd64__)
12824 if (cpu_vendor_id == CPU_VENDOR_INTEL)
12825 pcie_relaxed_ordering = 0;
12832 t4_dump_tcb(struct adapter *sc, int tid)
12834 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
12836 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
12837 save = t4_read_reg(sc, reg);
12838 base = sc->memwin[2].mw_base;
12840 /* Dump TCB for the tid */
12841 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
12842 tcb_addr += tid * TCB_SIZE;
12846 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
12848 pf = V_PFNUM(sc->pf);
12849 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
12851 t4_write_reg(sc, reg, win_pos | pf);
12852 t4_read_reg(sc, reg);
12854 off = tcb_addr - win_pos;
12855 for (i = 0; i < 4; i++) {
12857 for (j = 0; j < 8; j++, off += 4)
12858 buf[j] = htonl(t4_read_reg(sc, base + off));
12860 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
12861 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
12865 t4_write_reg(sc, reg, save);
12866 t4_read_reg(sc, reg);
12870 t4_dump_devlog(struct adapter *sc)
12872 struct devlog_params *dparams = &sc->params.devlog;
12873 struct fw_devlog_e e;
12874 int i, first, j, m, nentries, rc;
12875 uint64_t ftstamp = UINT64_MAX;
12877 if (dparams->start == 0) {
12878 db_printf("devlog params not valid\n");
12882 nentries = dparams->size / sizeof(struct fw_devlog_e);
12883 m = fwmtype_to_hwmtype(dparams->memtype);
12885 /* Find the first entry. */
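/*
 * The devlog is a circular buffer: the oldest entry is the one with the
 * smallest timestamp, so scan once to locate it and then print entries
 * from there, wrapping around until the scan returns to that first entry.
 */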
12887 for (i = 0; i < nentries && !db_pager_quit; i++) {
12888 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
12889 sizeof(e), (void *)&e);
12893 if (e.timestamp == 0)
12896 e.timestamp = be64toh(e.timestamp);
12897 if (e.timestamp < ftstamp) {
12898 ftstamp = e.timestamp;
12908 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
12909 sizeof(e), (void *)&e);
12913 if (e.timestamp == 0)
12916 e.timestamp = be64toh(e.timestamp);
12917 e.seqno = be32toh(e.seqno);
12918 for (j = 0; j < 8; j++)
12919 e.params[j] = be32toh(e.params[j]);
12921 db_printf("%10d %15ju %8s %8s ",
12922 e.seqno, e.timestamp,
12923 (e.level < nitems(devlog_level_strings) ?
12924 devlog_level_strings[e.level] : "UNKNOWN"),
12925 (e.facility < nitems(devlog_facility_strings) ?
12926 devlog_facility_strings[e.facility] : "UNKNOWN"));
12927 db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
12928 e.params[3], e.params[4], e.params[5], e.params[6],
12931 if (++i == nentries)
12933 } while (i != first && !db_pager_quit);
12936 static DB_DEFINE_TABLE(show, t4, show_t4);
12938 DB_TABLE_COMMAND_FLAGS(show_t4, devlog, db_show_devlog, CS_OWN)
12945 t = db_read_token();
12947 dev = device_lookup_by_name(db_tok_string);
12952 db_printf("usage: show t4 devlog <nexus>\n");
12957 db_printf("device not found\n");
12961 t4_dump_devlog(device_get_softc(dev));
12964 DB_TABLE_COMMAND_FLAGS(show_t4, tcb, db_show_t4tcb, CS_OWN)
12973 t = db_read_token();
12975 dev = device_lookup_by_name(db_tok_string);
12976 t = db_read_token();
12977 if (t == tNUMBER) {
12978 tid = db_tok_number;
12985 db_printf("usage: show t4 tcb <nexus> <tid>\n");
12990 db_printf("device not found\n");
12994 db_printf("invalid tid\n");
12998 t4_dump_tcb(device_get_softc(dev), tid);
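/*
 * Illustrative DDB usage for the two commands above (nexus name and tid
 * are examples only):
 *
 *   db> show t4 devlog t4nex0
 *   db> show t4 tcb t4nex0 1234
 */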
13002 static eventhandler_tag vxlan_start_evtag;
13003 static eventhandler_tag vxlan_stop_evtag;
13005 struct vxlan_evargs {
13011 enable_vxlan_rx(struct adapter *sc)
13014 struct port_info *pi;
13015 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
13017 ASSERT_SYNCHRONIZED_OP(sc);
13019 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) |
13021 for_each_port(sc, i) {
13023 if (pi->vxlan_tcam_entry == true)
13025 rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac,
13026 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
13031 "failed to add VXLAN TCAM entry: %d.\n", rc);
13033 MPASS(rc == sc->rawf_base + pi->port_id);
13034 pi->vxlan_tcam_entry = true;
13040 t4_vxlan_start(struct adapter *sc, void *arg)
13042 struct vxlan_evargs *v = arg;
13044 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
13046 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0)
13049 if (sc->vxlan_refcount == 0) {
13050 sc->vxlan_port = v->port;
13051 sc->vxlan_refcount = 1;
13052 if (!hw_off_limits(sc))
13053 enable_vxlan_rx(sc);
13054 } else if (sc->vxlan_port == v->port) {
13055 sc->vxlan_refcount++;
13057 CH_ERR(sc, "VXLAN already configured on port %d; "
13058 "ignoring attempt to configure it on port %d\n",
13059 sc->vxlan_port, v->port);
13061 end_synchronized_op(sc, 0);
13065 t4_vxlan_stop(struct adapter *sc, void *arg)
13067 struct vxlan_evargs *v = arg;
13069 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
13071 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0)
13075 * VXLANs may have been configured before the driver was loaded so we
13076 * may see more stops than starts. This is not handled cleanly but at
13077 * least we keep the refcount sane.
13079 if (sc->vxlan_port != v->port)
13081 if (sc->vxlan_refcount == 0) {
13082 CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; "
13083 "ignoring attempt to stop it again.\n", sc->vxlan_port);
13084 } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc))
13085 t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0);
13087 end_synchronized_op(sc, 0);
13091 t4_vxlan_start_handler(void *arg __unused, if_t ifp,
13092 sa_family_t family, u_int port)
13094 struct vxlan_evargs v;
13096 MPASS(family == AF_INET || family == AF_INET6);
13100 t4_iterate(t4_vxlan_start, &v);
13104 t4_vxlan_stop_handler(void *arg __unused, if_t ifp, sa_family_t family,
13107 struct vxlan_evargs v;
13109 MPASS(family == AF_INET || family == AF_INET6);
13113 t4_iterate(t4_vxlan_stop, &v);
13117 static struct sx mlu; /* mod load unload */
13118 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
13121 mod_event(module_t mod, int cmd, void *arg)
13124 static int loaded = 0;
13129 if (loaded++ == 0) {
13131 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
13132 t4_filter_rpl, CPL_COOKIE_FILTER);
13133 t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
13134 do_l2t_write_rpl, CPL_COOKIE_FILTER);
13135 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
13136 t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
13137 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
13138 t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
13139 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
13140 t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
13141 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
13142 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
13143 t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
13145 sx_init(&t4_list_lock, "T4/T5 adapters");
13146 SLIST_INIT(&t4_list);
13147 callout_init(&fatal_callout, 1);
13149 sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
13150 SLIST_INIT(&t4_uld_list);
13158 t4_tracer_modload();
13160 vxlan_start_evtag =
13161 EVENTHANDLER_REGISTER(vxlan_start,
13162 t4_vxlan_start_handler, NULL,
13163 EVENTHANDLER_PRI_ANY);
13165 EVENTHANDLER_REGISTER(vxlan_stop,
13166 t4_vxlan_stop_handler, NULL,
13167 EVENTHANDLER_PRI_ANY);
13168 reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK,
13169 taskqueue_thread_enqueue, &reset_tq);
13170 taskqueue_start_threads(&reset_tq, 1, PI_SOFT,
13178 if (--loaded == 0) {
13181 taskqueue_free(reset_tq);
13182 sx_slock(&t4_list_lock);
13183 if (!SLIST_EMPTY(&t4_list)) {
13185 sx_sunlock(&t4_list_lock);
13189 sx_slock(&t4_uld_list_lock);
13190 if (!SLIST_EMPTY(&t4_uld_list)) {
13192 sx_sunlock(&t4_uld_list_lock);
13193 sx_sunlock(&t4_list_lock);
13198 while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
13199 uprintf("%ju clusters with custom free routine "
13200 "still is use.\n", t4_sge_extfree_refs());
13201 pause("t4unload", 2 * hz);
13204 sx_sunlock(&t4_uld_list_lock);
13206 sx_sunlock(&t4_list_lock);
13208 if (t4_sge_extfree_refs() == 0) {
13209 EVENTHANDLER_DEREGISTER(vxlan_start,
13210 vxlan_start_evtag);
13211 EVENTHANDLER_DEREGISTER(vxlan_stop,
13213 t4_tracer_modunload();
13215 t6_ktls_modunload();
13218 t4_clip_modunload();
13221 sx_destroy(&t4_uld_list_lock);
13223 sx_destroy(&t4_list_lock);
13224 t4_sge_modunload();
13228 loaded++; /* undo earlier decrement */
13239 DRIVER_MODULE(t4nex, pci, t4_driver, mod_event, 0);
13240 MODULE_VERSION(t4nex, 1);
13241 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
13243 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
13244 #endif /* DEV_NETMAP */
13246 DRIVER_MODULE(t5nex, pci, t5_driver, mod_event, 0);
13247 MODULE_VERSION(t5nex, 1);
13248 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
13250 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
13251 #endif /* DEV_NETMAP */
13253 DRIVER_MODULE(t6nex, pci, t6_driver, mod_event, 0);
13254 MODULE_VERSION(t6nex, 1);
13255 MODULE_DEPEND(t6nex, crypto, 1, 1, 1);
13256 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
13258 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
13259 #endif /* DEV_NETMAP */
13261 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
13262 MODULE_VERSION(cxgbe, 1);
13264 DRIVER_MODULE(cxl, t5nex, cxl_driver, 0, 0);
13265 MODULE_VERSION(cxl, 1);
13267 DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
13268 MODULE_VERSION(cc, 1);
13270 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
13271 MODULE_VERSION(vcxgbe, 1);
13273 DRIVER_MODULE(vcxl, cxl, vcxl_driver, 0, 0);
13274 MODULE_VERSION(vcxl, 1);
13276 DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
13277 MODULE_VERSION(vcc, 1);