2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011 Chelsio Communications, Inc.
6 * Written by: Navdeep Parhar <np@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include "opt_inet6.h"
36 #include "opt_kern_tls.h"
37 #include "opt_ratelimit.h"
40 #include <sys/param.h>
43 #include <sys/kernel.h>
45 #include <sys/eventhandler.h>
46 #include <sys/module.h>
47 #include <sys/malloc.h>
48 #include <sys/queue.h>
49 #include <sys/taskqueue.h>
50 #include <sys/pciio.h>
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/pci_private.h>
54 #include <sys/firmware.h>
57 #include <sys/socket.h>
58 #include <sys/sockio.h>
59 #include <sys/sysctl.h>
60 #include <net/ethernet.h>
62 #include <net/if_types.h>
63 #include <net/if_dl.h>
64 #include <net/if_vlan_var.h>
66 #include <net/rss_config.h>
68 #include <netinet/in.h>
69 #include <netinet/ip.h>
71 #include <netinet/tcp_seq.h>
73 #if defined(__i386__) || defined(__amd64__)
74 #include <machine/md_var.h>
75 #include <machine/cputypes.h>
81 #include <ddb/db_lex.h>
84 #include "common/common.h"
85 #include "common/t4_msg.h"
86 #include "common/t4_regs.h"
87 #include "common/t4_regs_values.h"
88 #include "cudbg/cudbg.h"
92 #include "t4_mp_ring.h"
96 /* T4 bus driver interface */
97 static int t4_probe(device_t);
98 static int t4_attach(device_t);
99 static int t4_detach(device_t);
100 static int t4_child_location_str(device_t, device_t, char *, size_t);
101 static int t4_ready(device_t);
102 static int t4_read_port_device(device_t, int, device_t *);
103 static device_method_t t4_methods[] = {
104 DEVMETHOD(device_probe, t4_probe),
105 DEVMETHOD(device_attach, t4_attach),
106 DEVMETHOD(device_detach, t4_detach),
108 DEVMETHOD(bus_child_location_str, t4_child_location_str),
110 DEVMETHOD(t4_is_main_ready, t4_ready),
111 DEVMETHOD(t4_read_port_device, t4_read_port_device),
115 static driver_t t4_driver = {
118 sizeof(struct adapter)
122 /* T4 port (cxgbe) interface */
123 static int cxgbe_probe(device_t);
124 static int cxgbe_attach(device_t);
125 static int cxgbe_detach(device_t);
126 device_method_t cxgbe_methods[] = {
127 DEVMETHOD(device_probe, cxgbe_probe),
128 DEVMETHOD(device_attach, cxgbe_attach),
129 DEVMETHOD(device_detach, cxgbe_detach),
132 static driver_t cxgbe_driver = {
135 sizeof(struct port_info)
138 /* T4 VI (vcxgbe) interface */
139 static int vcxgbe_probe(device_t);
140 static int vcxgbe_attach(device_t);
141 static int vcxgbe_detach(device_t);
142 static device_method_t vcxgbe_methods[] = {
143 DEVMETHOD(device_probe, vcxgbe_probe),
144 DEVMETHOD(device_attach, vcxgbe_attach),
145 DEVMETHOD(device_detach, vcxgbe_detach),
148 static driver_t vcxgbe_driver = {
151 sizeof(struct vi_info)
154 static d_ioctl_t t4_ioctl;
156 static struct cdevsw t4_cdevsw = {
157 .d_version = D_VERSION,
162 /* T5 bus driver interface */
163 static int t5_probe(device_t);
164 static device_method_t t5_methods[] = {
165 DEVMETHOD(device_probe, t5_probe),
166 DEVMETHOD(device_attach, t4_attach),
167 DEVMETHOD(device_detach, t4_detach),
169 DEVMETHOD(bus_child_location_str, t4_child_location_str),
171 DEVMETHOD(t4_is_main_ready, t4_ready),
172 DEVMETHOD(t4_read_port_device, t4_read_port_device),
176 static driver_t t5_driver = {
179 sizeof(struct adapter)
183 /* T5 port (cxl) interface */
184 static driver_t cxl_driver = {
187 sizeof(struct port_info)
190 /* T5 VI (vcxl) interface */
191 static driver_t vcxl_driver = {
194 sizeof(struct vi_info)
197 /* T6 bus driver interface */
198 static int t6_probe(device_t);
199 static device_method_t t6_methods[] = {
200 DEVMETHOD(device_probe, t6_probe),
201 DEVMETHOD(device_attach, t4_attach),
202 DEVMETHOD(device_detach, t4_detach),
204 DEVMETHOD(bus_child_location_str, t4_child_location_str),
206 DEVMETHOD(t4_is_main_ready, t4_ready),
207 DEVMETHOD(t4_read_port_device, t4_read_port_device),
211 static driver_t t6_driver = {
214 sizeof(struct adapter)
218 /* T6 port (cc) interface */
219 static driver_t cc_driver = {
222 sizeof(struct port_info)
225 /* T6 VI (vcc) interface */
226 static driver_t vcc_driver = {
229 sizeof(struct vi_info)
232 /* ifnet interface */
233 static void cxgbe_init(void *);
234 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
235 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
236 static void cxgbe_qflush(struct ifnet *);
237 #if defined(KERN_TLS) || defined(RATELIMIT)
238 static int cxgbe_snd_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
239 struct m_snd_tag **);
240 static int cxgbe_snd_tag_modify(struct m_snd_tag *,
241 union if_snd_tag_modify_params *);
242 static int cxgbe_snd_tag_query(struct m_snd_tag *,
243 union if_snd_tag_query_params *);
244 static void cxgbe_snd_tag_free(struct m_snd_tag *);
247 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
250 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
251 * then ADAPTER_LOCK, then t4_uld_list_lock.
253 static struct sx t4_list_lock;
254 SLIST_HEAD(, adapter) t4_list;
256 static struct sx t4_uld_list_lock;
257 SLIST_HEAD(, uld_info) t4_uld_list;
261 * Tunables. See tweak_tunables() too.
263 * Each tunable is set to a default value here if it's known at compile-time.
264 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
265 * provide a reasonable default (up to n) when the driver is loaded.
267 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
268 * T5 are under hw.cxl.
270 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
271 "cxgbe(4) parameters");
272 SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
273 "cxgbe(4) T5+ parameters");
274 SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
275 "cxgbe(4) TOE parameters");
278 * Number of queues for tx and rx, NIC and offload.
282 SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
283 "Number of TX queues per port");
284 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */
288 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
289 "Number of RX queues per port");
290 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */
293 static int t4_ntxq_vi = -NTXQ_VI;
294 SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
295 "Number of TX queues per VI");
298 static int t4_nrxq_vi = -NRXQ_VI;
299 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
300 "Number of RX queues per VI");
302 static int t4_rsrv_noflowq = 0;
303 SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
304 0, "Reserve TX queue 0 of each VI for non-flowid packets");
306 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
308 static int t4_nofldtxq = -NOFLDTXQ;
309 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
310 "Number of offload TX queues per port");
313 static int t4_nofldrxq = -NOFLDRXQ;
314 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
315 "Number of offload RX queues per port");
317 #define NOFLDTXQ_VI 1
318 static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
319 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
320 "Number of offload TX queues per VI");
322 #define NOFLDRXQ_VI 1
323 static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
324 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
325 "Number of offload RX queues per VI");
327 #define TMR_IDX_OFLD 1
328 int t4_tmr_idx_ofld = TMR_IDX_OFLD;
329 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
330 &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");
332 #define PKTC_IDX_OFLD (-1)
333 int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
334 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
335 &t4_pktc_idx_ofld, 0, "Holdoff packet counter index for offload queues");
337 /* 0 means chip/fw default, non-zero number is value in microseconds */
338 static u_long t4_toe_keepalive_idle = 0;
339 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
340 &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");
342 /* 0 means chip/fw default, non-zero number is value in microseconds */
343 static u_long t4_toe_keepalive_interval = 0;
344 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
345 &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");
347 /* 0 means chip/fw default, non-zero number is # of keepalives before abort */
348 static int t4_toe_keepalive_count = 0;
349 SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
350 &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");
352 /* 0 means chip/fw default, non-zero number is value in microseconds */
353 static u_long t4_toe_rexmt_min = 0;
354 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
355 &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");
357 /* 0 means chip/fw default, non-zero number is value in microseconds */
358 static u_long t4_toe_rexmt_max = 0;
359 SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
360 &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");
362 /* 0 means chip/fw default, non-zero number is # of rexmt before abort */
363 static int t4_toe_rexmt_count = 0;
364 SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
365 &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");
367 /* -1 means chip/fw default, other values are raw backoff values to use */
368 static int t4_toe_rexmt_backoff[16] = {
369 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
371 SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff,
372 CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
373 "cxgbe(4) TOE retransmit backoff values");
374 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
375 &t4_toe_rexmt_backoff[0], 0, "");
376 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
377 &t4_toe_rexmt_backoff[1], 0, "");
378 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
379 &t4_toe_rexmt_backoff[2], 0, "");
380 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
381 &t4_toe_rexmt_backoff[3], 0, "");
382 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
383 &t4_toe_rexmt_backoff[4], 0, "");
384 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
385 &t4_toe_rexmt_backoff[5], 0, "");
386 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
387 &t4_toe_rexmt_backoff[6], 0, "");
388 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
389 &t4_toe_rexmt_backoff[7], 0, "");
390 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
391 &t4_toe_rexmt_backoff[8], 0, "");
392 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
393 &t4_toe_rexmt_backoff[9], 0, "");
394 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
395 &t4_toe_rexmt_backoff[10], 0, "");
396 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
397 &t4_toe_rexmt_backoff[11], 0, "");
398 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
399 &t4_toe_rexmt_backoff[12], 0, "");
400 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
401 &t4_toe_rexmt_backoff[13], 0, "");
402 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
403 &t4_toe_rexmt_backoff[14], 0, "");
404 SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
405 &t4_toe_rexmt_backoff[15], 0, "");
407 static int t4_toe_tls_rx_timeout = 5;
408 SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, tls_rx_timeout, CTLFLAG_RDTUN,
409 &t4_toe_tls_rx_timeout, 0,
410 "Timeout in seconds to downgrade TLS sockets to plain TOE");
414 #define NN_MAIN_VI (1 << 0) /* Native netmap on the main VI */
415 #define NN_EXTRA_VI (1 << 1) /* Native netmap on the extra VI(s) */
416 static int t4_native_netmap = NN_EXTRA_VI;
417 SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
418 0, "Native netmap support. bit 0 = main VI, bit 1 = extra VIs");
421 static int t4_nnmtxq = -NNMTXQ;
422 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
423 "Number of netmap TX queues");
426 static int t4_nnmrxq = -NNMRXQ;
427 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
428 "Number of netmap RX queues");
431 static int t4_nnmtxq_vi = -NNMTXQ_VI;
432 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
433 "Number of netmap TX queues per VI");
436 static int t4_nnmrxq_vi = -NNMRXQ_VI;
437 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
438 "Number of netmap RX queues per VI");
442 * Holdoff parameters for ports.
445 int t4_tmr_idx = TMR_IDX;
446 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
447 0, "Holdoff timer index");
448 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */
450 #define PKTC_IDX (-1)
451 int t4_pktc_idx = PKTC_IDX;
452 SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
453 0, "Holdoff packet counter index");
454 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */
457 * Size (# of entries) of each tx and rx queue.
459 unsigned int t4_qsize_txq = TX_EQ_QSIZE;
460 SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
461 "Number of descriptors in each TX queue");
463 unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
464 SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
465 "Number of descriptors in each RX queue");
468 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
470 int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
471 SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
472 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");
475 * Configuration file. All the _CF names here are special.
477 #define DEFAULT_CF "default"
478 #define BUILTIN_CF "built-in"
479 #define FLASH_CF "flash"
480 #define UWIRE_CF "uwire"
481 #define FPGA_CF "fpga"
482 static char t4_cfg_file[32] = DEFAULT_CF;
483 SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
484 sizeof(t4_cfg_file), "Firmware configuration file");
487 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively).
488 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
489 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
490 * mark or when signalled to do so, 0 to never emit PAUSE.
491 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
492 * negotiated settings will override rx_pause/tx_pause.
493 * Otherwise rx_pause/tx_pause are applied forcibly.
495 static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
496 SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
497 &t4_pause_settings, 0,
498 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
501 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
502 * -1 to run with the firmware default. Same as FEC_AUTO (bit 5)
505 static int t4_fec = -1;
506 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
507 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
510 * Link autonegotiation.
511 * -1 to run with the firmware default.
515 static int t4_autoneg = -1;
516 SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
517 "Link autonegotiation");
520 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
521 * encouraged respectively). '-n' is the same as 'n' except the firmware
522 * version used in the checks is read from the firmware bundled with the driver.
524 static int t4_fw_install = 1;
525 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
526 "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");
529 * ASIC features that will be used. Disable the ones you don't want so that the
530 * chip resources aren't wasted on features that will not be used.
532 static int t4_nbmcaps_allowed = 0;
533 SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
534 &t4_nbmcaps_allowed, 0, "Default NBM capabilities");
536 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
537 SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
538 &t4_linkcaps_allowed, 0, "Default link capabilities");
540 static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
541 FW_CAPS_CONFIG_SWITCH_EGRESS;
542 SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
543 &t4_switchcaps_allowed, 0, "Default switch capabilities");
546 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
547 FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
549 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
550 FW_CAPS_CONFIG_NIC_HASHFILTER;
552 SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
553 &t4_niccaps_allowed, 0, "Default NIC capabilities");
555 static int t4_toecaps_allowed = -1;
556 SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
557 &t4_toecaps_allowed, 0, "Default TCP offload capabilities");
559 static int t4_rdmacaps_allowed = -1;
560 SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
561 &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");
563 static int t4_cryptocaps_allowed = -1;
564 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
565 &t4_cryptocaps_allowed, 0, "Default crypto capabilities");
567 static int t4_iscsicaps_allowed = -1;
568 SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
569 &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");
571 static int t4_fcoecaps_allowed = 0;
572 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
573 &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");
575 static int t5_write_combine = 0;
576 SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
577 0, "Use WC instead of UC for BAR2");
579 static int t4_num_vis = 1;
580 SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
581 "Number of VIs per port");
584 * PCIe Relaxed Ordering.
585 * -1: driver should figure out a good value.
590 static int pcie_relaxed_ordering = -1;
591 SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
592 &pcie_relaxed_ordering, 0,
593 "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");
595 static int t4_panic_on_fatal_err = 0;
596 SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RDTUN,
597 &t4_panic_on_fatal_err, 0, "Panic on fatal errors");
599 static int t4_tx_vm_wr = 0;
600 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
601 "Use VM work requests to transmit packets.");
604 * Set to non-zero to enable the attack filter. A packet that matches any of
605 * these conditions will get dropped on ingress:
606 * 1) IP && source address == destination address.
607 * 2) TCP/IP && source address is not a unicast address.
608 * 3) TCP/IP && destination address is not a unicast address.
609 * 4) IP && source address is loopback (127.x.y.z).
610 * 5) IP && destination address is loopback (127.x.y.z).
611 * 6) IPv6 && source address == destination address.
612 * 7) IPv6 && source address is not a unicast address.
613 * 8) IPv6 && source address is loopback (::1/128).
614 * 9) IPv6 && destination address is loopback (::1/128).
615 * 10) IPv6 && source address is unspecified (::/128).
616 * 11) IPv6 && destination address is unspecified (::/128).
617 * 12) TCP/IPv6 && source address is multicast (ff00::/8).
618 * 13) TCP/IPv6 && destination address is multicast (ff00::/8).
620 static int t4_attack_filter = 0;
621 SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN,
622 &t4_attack_filter, 0, "Drop suspicious traffic");
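/*
 * For example (illustrative): hw.cxgbe.attack_filter="1" in loader.conf
 * enables the ingress checks listed above.
 */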
624 static int t4_drop_ip_fragments = 0;
625 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN,
626 &t4_drop_ip_fragments, 0, "Drop IP fragments");
628 static int t4_drop_pkts_with_l2_errors = 1;
629 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN,
630 &t4_drop_pkts_with_l2_errors, 0,
631 "Drop all frames with Layer 2 length or checksum errors");
633 static int t4_drop_pkts_with_l3_errors = 0;
634 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN,
635 &t4_drop_pkts_with_l3_errors, 0,
636 "Drop all frames with IP version, length, or checksum errors");
638 static int t4_drop_pkts_with_l4_errors = 0;
639 SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
640 &t4_drop_pkts_with_l4_errors, 0,
641 "Drop all frames with Layer 4 length, checksum, or other errors");
647 static int t4_cop_managed_offloading = 0;
648 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
649 &t4_cop_managed_offloading, 0,
650 "COP (Connection Offload Policy) controls all TOE offload");
655 * This enables KERN_TLS for all adapters if set.
657 static int t4_kern_tls = 0;
658 SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0,
659 "Enable KERN_TLS mode for all supported adapters");
661 SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
662 "cxgbe(4) KERN_TLS parameters");
664 static int t4_tls_inline_keys = 0;
665 SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
666 &t4_tls_inline_keys, 0,
667 "Always pass TLS keys in work requests (1) or attempt to store TLS keys "
670 static int t4_tls_combo_wrs = 0;
671 SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
672 0, "Attempt to combine TCB field updates with TLS record work requests.");
675 /* Functions used by VIs to obtain unique MAC addresses for each VI. */
676 static int vi_mac_funcs[] = {
680 FW_VI_FUNC_OPENISCSI,
686 struct intrs_and_queues {
687 uint16_t intr_type; /* INTx, MSI, or MSI-X */
688 uint16_t num_vis; /* number of VIs for each port */
689 uint16_t nirq; /* Total # of vectors */
690 uint16_t ntxq; /* # of NIC txq's for each port */
691 uint16_t nrxq; /* # of NIC rxq's for each port */
692 uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */
693 uint16_t nofldrxq; /* # of TOE rxq's for each port */
694 uint16_t nnmtxq; /* # of netmap txq's */
695 uint16_t nnmrxq; /* # of netmap rxq's */
697 /* The vcxgbe/vcxl interfaces use these and not the ones above. */
698 uint16_t ntxq_vi; /* # of NIC txq's */
699 uint16_t nrxq_vi; /* # of NIC rxq's */
700 uint16_t nofldtxq_vi; /* # of TOE txq's */
701 uint16_t nofldrxq_vi; /* # of TOE rxq's */
702 uint16_t nnmtxq_vi; /* # of netmap txq's */
703 uint16_t nnmrxq_vi; /* # of netmap rxq's */
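/*
 * cfg_itype_and_nqueues() (declared below) is expected to fill in one of
 * these during attach; t4_attach() then uses the result to size the sge
 * queue arrays and set the per-VI queue counts.
 */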
706 static void setup_memwin(struct adapter *);
707 static void position_memwin(struct adapter *, int, uint32_t);
708 static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
709 static int fwmtype_to_hwmtype(int);
710 static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
712 static int fixup_devlog_params(struct adapter *);
713 static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
714 static int contact_firmware(struct adapter *);
715 static int partition_resources(struct adapter *);
716 static int get_params__pre_init(struct adapter *);
717 static int set_params__pre_init(struct adapter *);
718 static int get_params__post_init(struct adapter *);
719 static int set_params__post_init(struct adapter *);
720 static void t4_set_desc(struct adapter *);
721 static bool fixed_ifmedia(struct port_info *);
722 static void build_medialist(struct port_info *);
723 static void init_link_config(struct port_info *);
724 static int fixup_link_config(struct port_info *);
725 static int apply_link_config(struct port_info *);
726 static int cxgbe_init_synchronized(struct vi_info *);
727 static int cxgbe_uninit_synchronized(struct vi_info *);
728 static void quiesce_txq(struct adapter *, struct sge_txq *);
729 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
730 static void quiesce_iq(struct adapter *, struct sge_iq *);
731 static void quiesce_fl(struct adapter *, struct sge_fl *);
732 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
733 driver_intr_t *, void *, char *);
734 static int t4_free_irq(struct adapter *, struct irq *);
735 static void t4_init_atid_table(struct adapter *);
736 static void t4_free_atid_table(struct adapter *);
737 static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
738 static void vi_refresh_stats(struct adapter *, struct vi_info *);
739 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
740 static void cxgbe_tick(void *);
741 static void cxgbe_sysctls(struct port_info *);
742 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
743 static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
744 static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
745 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
746 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
747 static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS);
748 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
749 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
750 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
751 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
752 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
753 static int sysctl_fec(SYSCTL_HANDLER_ARGS);
754 static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
755 static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
756 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
757 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
758 static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
759 static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
760 static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
761 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
762 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
763 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
764 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
765 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
766 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
767 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
768 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
769 static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
770 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
771 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
772 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
773 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
774 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
775 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
776 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
777 static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
778 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
779 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
780 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
781 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
782 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
783 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
784 static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS);
785 static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
786 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
787 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
788 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
789 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
790 static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
792 static int sysctl_tls(SYSCTL_HANDLER_ARGS);
793 static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
794 static int sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS);
795 static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
796 static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
797 static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
798 static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
799 static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
800 static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
801 static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
803 static int get_sge_context(struct adapter *, struct t4_sge_context *);
804 static int load_fw(struct adapter *, struct t4_data *);
805 static int load_cfg(struct adapter *, struct t4_data *);
806 static int load_boot(struct adapter *, struct t4_bootrom *);
807 static int load_bootcfg(struct adapter *, struct t4_data *);
808 static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
809 static void free_offload_policy(struct t4_offload_policy *);
810 static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
811 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
812 static int read_i2c(struct adapter *, struct t4_i2c_data *);
813 static int clear_stats(struct adapter *, u_int);
815 static int toe_capability(struct vi_info *, int);
816 static void t4_async_event(void *, int);
818 static int mod_event(module_t, int, void *);
819 static int notify_siblings(device_t, int);
825 {0xa000, "Chelsio Terminator 4 FPGA"},
826 {0x4400, "Chelsio T440-dbg"},
827 {0x4401, "Chelsio T420-CR"},
828 {0x4402, "Chelsio T422-CR"},
829 {0x4403, "Chelsio T440-CR"},
830 {0x4404, "Chelsio T420-BCH"},
831 {0x4405, "Chelsio T440-BCH"},
832 {0x4406, "Chelsio T440-CH"},
833 {0x4407, "Chelsio T420-SO"},
834 {0x4408, "Chelsio T420-CX"},
835 {0x4409, "Chelsio T420-BT"},
836 {0x440a, "Chelsio T404-BT"},
837 {0x440e, "Chelsio T440-LP-CR"},
839 {0xb000, "Chelsio Terminator 5 FPGA"},
840 {0x5400, "Chelsio T580-dbg"},
841 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
842 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
843 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
844 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
845 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
846 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
847 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
848 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
849 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
850 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
851 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
852 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
853 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
854 {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */
855 {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */
856 {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */
857 {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */
860 {0x5483, "Custom T540-CR"},
861 {0x5484, "Custom T540-BT"},
863 {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */
864 {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */
865 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */
866 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
867 {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
868 {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
869 {0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */
870 {0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */
871 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
872 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
873 {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
874 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
875 {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
876 {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
877 {0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */
878 {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
881 {0x6480, "Custom T6225-CR"},
882 {0x6481, "Custom T62100-CR"},
883 {0x6482, "Custom T6225-CR"},
884 {0x6483, "Custom T62100-CR"},
885 {0x6484, "Custom T64100-CR"},
886 {0x6485, "Custom T6240-SO"},
887 {0x6486, "Custom T6225-SO-CR"},
888 {0x6487, "Custom T6225-CR"},
893 * service_iq_fl() has an iq and needs the fl.  The offset of the fl from the
894 * iq must be exactly the same for both rxq and ofld_rxq; the CTASSERTs enforce it.
896 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
897 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
899 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
902 t4_probe(device_t dev)
905 uint16_t v = pci_get_vendor(dev);
906 uint16_t d = pci_get_device(dev);
907 uint8_t f = pci_get_function(dev);
909 if (v != PCI_VENDOR_ID_CHELSIO)
912 /* Attach only to PF0 of the FPGA */
913 if (d == 0xa000 && f != 0)
916 for (i = 0; i < nitems(t4_pciids); i++) {
917 if (d == t4_pciids[i].device) {
918 device_set_desc(dev, t4_pciids[i].desc);
919 return (BUS_PROBE_DEFAULT);
927 t5_probe(device_t dev)
930 uint16_t v = pci_get_vendor(dev);
931 uint16_t d = pci_get_device(dev);
932 uint8_t f = pci_get_function(dev);
934 if (v != PCI_VENDOR_ID_CHELSIO)
937 /* Attach only to PF0 of the FPGA */
938 if (d == 0xb000 && f != 0)
941 for (i = 0; i < nitems(t5_pciids); i++) {
942 if (d == t5_pciids[i].device) {
943 device_set_desc(dev, t5_pciids[i].desc);
944 return (BUS_PROBE_DEFAULT);
952 t6_probe(device_t dev)
955 uint16_t v = pci_get_vendor(dev);
956 uint16_t d = pci_get_device(dev);
958 if (v != PCI_VENDOR_ID_CHELSIO)
961 for (i = 0; i < nitems(t6_pciids); i++) {
962 if (d == t6_pciids[i].device) {
963 device_set_desc(dev, t6_pciids[i].desc);
964 return (BUS_PROBE_DEFAULT);
972 t5_attribute_workaround(device_t dev)
978 * The T5 chips do not properly echo the No Snoop and Relaxed
979 * Ordering attributes when replying to a TLP from a Root
980 * Port. As a workaround, find the parent Root Port and
981 * disable No Snoop and Relaxed Ordering. Note that this
982 * affects all devices under this root port.
984 root_port = pci_find_pcie_root_port(dev);
985 if (root_port == NULL) {
986 device_printf(dev, "Unable to find parent root port\n");
990 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
991 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
992 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
994 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
995 device_get_nameunit(root_port));
998 static const struct devnames devnames[] = {
1000 .nexus_name = "t4nex",
1001 .ifnet_name = "cxgbe",
1002 .vi_ifnet_name = "vcxgbe",
1003 .pf03_drv_name = "t4iov",
1004 .vf_nexus_name = "t4vf",
1005 .vf_ifnet_name = "cxgbev"
1007 .nexus_name = "t5nex",
1008 .ifnet_name = "cxl",
1009 .vi_ifnet_name = "vcxl",
1010 .pf03_drv_name = "t5iov",
1011 .vf_nexus_name = "t5vf",
1012 .vf_ifnet_name = "cxlv"
1014 .nexus_name = "t6nex",
1016 .vi_ifnet_name = "vcc",
1017 .pf03_drv_name = "t6iov",
1018 .vf_nexus_name = "t6vf",
1019 .vf_ifnet_name = "ccv"
1024 t4_init_devnames(struct adapter *sc)
1029 if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
1030 sc->names = &devnames[id - CHELSIO_T4];
1032 device_printf(sc->dev, "chip id %d is not supported.\n", id);
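/*
 * t4_ifnet_unit() below walks the kernel environment/device hints looking for
 * a wired unit number for this port.  An illustrative hint pair (assumed, not
 * from this file) that would pin cxgbe4 to port 2 of t4nex0:
 *	hint.cxgbe.4.at="t4nex0"
 *	hint.cxgbe.4.port="2"
 */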
1038 t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
1040 const char *parent, *name;
1045 parent = device_get_nameunit(sc->dev);
1046 name = sc->names->ifnet_name;
1047 while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
1048 if (resource_long_value(name, unit, "port", &value) == 0 &&
1049 value == pi->port_id)
1056 t4_attach(device_t dev)
1059 int rc = 0, i, j, rqidx, tqidx, nports;
1060 struct make_dev_args mda;
1061 struct intrs_and_queues iaq;
1064 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1071 int nm_rqidx, nm_tqidx;
1075 sc = device_get_softc(dev);
1077 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
1079 if ((pci_get_device(dev) & 0xff00) == 0x5400)
1080 t5_attribute_workaround(dev);
1081 pci_enable_busmaster(dev);
1082 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
1085 pci_set_max_read_req(dev, 4096);
1086 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
1087 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
1088 if (pcie_relaxed_ordering == 0 &&
1089 (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
1090 v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
1091 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
1092 } else if (pcie_relaxed_ordering == 1 &&
1093 (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
1094 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
1095 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
1099 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
1100 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
1102 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
1103 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
1104 device_get_nameunit(dev));
1106 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
1107 device_get_nameunit(dev));
1108 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
1111 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
1112 TAILQ_INIT(&sc->sfl);
1113 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
1115 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
1118 rw_init(&sc->policy_lock, "connection offload policy");
1120 callout_init(&sc->ktls_tick, 1);
1123 TASK_INIT(&sc->async_event_task, 0, t4_async_event, sc);
1126 refcount_init(&sc->vxlan_refcount, 0);
1128 rc = t4_map_bars_0_and_4(sc);
1130 goto done; /* error message displayed already */
1132 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
1134 /* Prepare the adapter for operation. */
1135 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
1136 rc = -t4_prep_adapter(sc, buf);
1139 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
1144 * This is the real PF# to which we're attaching. Works from within PCI
1145 * passthrough environments too, where pci_get_function() could return a
1146 * different PF# depending on the passthrough configuration. We need to
1147 * use the real PF# in all our communication with the firmware.
1149 j = t4_read_reg(sc, A_PL_WHOAMI);
1150 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
1153 t4_init_devnames(sc);
1154 if (sc->names == NULL) {
1156 goto done; /* error message displayed already */
1160 * Do this really early, with the memory windows set up even before the
1161 * character device. The userland tool's register i/o and mem read
1162 * will work even in "recovery mode".
1165 if (t4_init_devlog_params(sc, 0) == 0)
1166 fixup_devlog_params(sc);
1167 make_dev_args_init(&mda);
1168 mda.mda_devsw = &t4_cdevsw;
1169 mda.mda_uid = UID_ROOT;
1170 mda.mda_gid = GID_WHEEL;
1171 mda.mda_mode = 0600;
1172 mda.mda_si_drv1 = sc;
1173 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
1175 device_printf(dev, "failed to create nexus char device: %d.\n",
1178 /* Go no further if recovery mode has been requested. */
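/*
 * hw.cxgbe.sos is read with TUNABLE_INT_FETCH, i.e. it is a loader tunable;
 * e.g. hw.cxgbe.sos="1" in loader.conf (illustrative) requests recovery mode.
 */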
1179 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
1180 device_printf(dev, "recovery mode.\n");
1184 #if defined(__i386__)
1185 if ((cpu_feature & CPUID_CX8) == 0) {
1186 device_printf(dev, "64 bit atomics not available.\n");
1192 /* Contact the firmware and try to become the master driver. */
1193 rc = contact_firmware(sc);
1195 goto done; /* error message displayed already */
1196 MPASS(sc->flags & FW_OK);
1198 rc = get_params__pre_init(sc);
1200 goto done; /* error message displayed already */
1202 if (sc->flags & MASTER_PF) {
1203 rc = partition_resources(sc);
1205 goto done; /* error message displayed already */
1209 rc = get_params__post_init(sc);
1211 goto done; /* error message displayed already */
1213 rc = set_params__post_init(sc);
1215 goto done; /* error message displayed already */
1217 rc = t4_map_bar_2(sc);
1219 goto done; /* error message displayed already */
1221 rc = t4_create_dma_tag(sc);
1223 goto done; /* error message displayed already */
1226 * First pass over all the ports - allocate VIs and initialize some
1227 * basic parameters like mac address, port type, etc.
1229 for_each_port(sc, i) {
1230 struct port_info *pi;
1232 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
1235 /* These must be set before t4_port_init */
1239 * XXX: vi[0] is special so we can't delay this allocation until
1240 * pi->nvi's final value is known.
1242 pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
1246 * Allocate the "main" VI and initialize parameters
1249 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
1251 device_printf(dev, "unable to initialize port %d: %d\n",
1253 free(pi->vi, M_CXGBE);
1259 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
1260 device_get_nameunit(dev), i);
1261 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
1262 sc->chan_map[pi->tx_chan] = i;
1265 * The MPS counter for FCS errors doesn't work correctly on the
1266 * T6 so we use the MAC counter here. Which MAC is in use
1267 * depends on the link settings which will be known when the link comes up.
1272 } else if (is_t4(sc)) {
1273 pi->fcs_reg = PORT_REG(pi->tx_chan,
1274 A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
1276 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
1277 A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
1281 /* All VIs on this port share this media. */
1282 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
1283 cxgbe_media_status);
1286 init_link_config(pi);
1287 fixup_link_config(pi);
1288 build_medialist(pi);
1289 if (fixed_ifmedia(pi))
1290 pi->flags |= FIXED_IFMEDIA;
1293 pi->dev = device_add_child(dev, sc->names->ifnet_name,
1294 t4_ifnet_unit(sc, pi));
1295 if (pi->dev == NULL) {
1297 "failed to add device for port %d.\n", i);
1301 pi->vi[0].dev = pi->dev;
1302 device_set_softc(pi->dev, pi);
1306 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
1308 nports = sc->params.nports;
1309 rc = cfg_itype_and_nqueues(sc, &iaq);
1311 goto done; /* error message displayed already */
1313 num_vis = iaq.num_vis;
1314 sc->intr_type = iaq.intr_type;
1315 sc->intr_count = iaq.nirq;
1318 s->nrxq = nports * iaq.nrxq;
1319 s->ntxq = nports * iaq.ntxq;
1321 s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
1322 s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
1324 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
1325 s->neq += nports; /* ctrl queues: 1 per port */
1326 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
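/*
 * Worked example (illustrative numbers): a 2-port adapter with num_vis = 1 and
 * iaq.ntxq = iaq.nrxq = 4 ends up with s->ntxq = s->nrxq = 8,
 * s->neq = 8 + 8 + 2 (ctrl) = 18, and s->niq = 8 + 1 (fwq) = 9, before any
 * offload or netmap queues are added below.
 */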
1327 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1328 if (is_offload(sc) || is_ethoffload(sc)) {
1329 s->nofldtxq = nports * iaq.nofldtxq;
1331 s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
1332 s->neq += s->nofldtxq;
1334 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
1335 M_CXGBE, M_ZERO | M_WAITOK);
1339 if (is_offload(sc)) {
1340 s->nofldrxq = nports * iaq.nofldrxq;
1342 s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
1343 s->neq += s->nofldrxq; /* free list */
1344 s->niq += s->nofldrxq;
1346 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1347 M_CXGBE, M_ZERO | M_WAITOK);
1353 if (t4_native_netmap & NN_MAIN_VI) {
1354 s->nnmrxq += nports * iaq.nnmrxq;
1355 s->nnmtxq += nports * iaq.nnmtxq;
1357 if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
1358 s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
1359 s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
1361 s->neq += s->nnmtxq + s->nnmrxq;
1362 s->niq += s->nnmrxq;
1364 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1365 M_CXGBE, M_ZERO | M_WAITOK);
1366 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1367 M_CXGBE, M_ZERO | M_WAITOK);
1369 MPASS(s->niq <= s->iqmap_sz);
1370 MPASS(s->neq <= s->eqmap_sz);
1372 s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
1374 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1376 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1378 s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
1380 s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
1383 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1386 t4_init_l2t(sc, M_WAITOK);
1387 t4_init_smt(sc, M_WAITOK);
1388 t4_init_tx_sched(sc);
1389 t4_init_atid_table(sc);
1391 t4_init_etid_table(sc);
1394 t4_init_clip_table(sc);
1396 if (sc->vres.key.size != 0)
1397 sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
1398 sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
1401 * Second pass over the ports. This time we know the number of rx and
1402 * tx queues that each port should get.
1405 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1412 nm_rqidx = nm_tqidx = 0;
1414 for_each_port(sc, i) {
1415 struct port_info *pi = sc->port[i];
1422 for_each_vi(pi, j, vi) {
1425 vi->qsize_rxq = t4_qsize_rxq;
1426 vi->qsize_txq = t4_qsize_txq;
1428 vi->first_rxq = rqidx;
1429 vi->first_txq = tqidx;
1430 vi->tmr_idx = t4_tmr_idx;
1431 vi->pktc_idx = t4_pktc_idx;
1432 vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
1433 vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;
1438 if (j == 0 && vi->ntxq > 1)
1439 vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
1441 vi->rsrv_noflowq = 0;
1443 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1444 vi->first_ofld_txq = ofld_tqidx;
1445 vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
1446 ofld_tqidx += vi->nofldtxq;
1449 vi->ofld_tmr_idx = t4_tmr_idx_ofld;
1450 vi->ofld_pktc_idx = t4_pktc_idx_ofld;
1451 vi->first_ofld_rxq = ofld_rqidx;
1452 vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
1454 ofld_rqidx += vi->nofldrxq;
1457 vi->first_nm_rxq = nm_rqidx;
1458 vi->first_nm_txq = nm_tqidx;
1460 vi->nnmrxq = iaq.nnmrxq;
1461 vi->nnmtxq = iaq.nnmtxq;
1463 vi->nnmrxq = iaq.nnmrxq_vi;
1464 vi->nnmtxq = iaq.nnmtxq_vi;
1466 nm_rqidx += vi->nnmrxq;
1467 nm_tqidx += vi->nnmtxq;
1472 rc = t4_setup_intr_handlers(sc);
1475 "failed to setup interrupt handlers: %d\n", rc);
1479 rc = bus_generic_probe(dev);
1481 device_printf(dev, "failed to probe child drivers: %d\n", rc);
1486 * Ensure thread-safe mailbox access (in debug builds).
1488 * So far this was the only thread accessing the mailbox but various
1489 * ifnets and sysctls are about to be created and their handlers/ioctls
1490 * will access the mailbox from different threads.
1492 sc->flags |= CHK_MBOX_ACCESS;
1494 rc = bus_generic_attach(dev);
1497 "failed to attach all child ports: %d\n", rc);
1502 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1503 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1504 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1505 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1506 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1510 notify_siblings(dev, 0);
1513 if (rc != 0 && sc->cdev) {
1514 /* cdev was created and so cxgbetool works; recover that way. */
1516 "error during attach, adapter is now in recovery mode.\n");
1521 t4_detach_common(dev);
1529 t4_child_location_str(device_t bus, device_t dev, char *buf, size_t buflen)
1532 struct port_info *pi;
1535 sc = device_get_softc(bus);
1537 for_each_port(sc, i) {
1539 if (pi != NULL && pi->dev == dev) {
1540 snprintf(buf, buflen, "port=%d", pi->port_id);
1548 t4_ready(device_t dev)
1552 sc = device_get_softc(dev);
1553 if (sc->flags & FW_OK)
1559 t4_read_port_device(device_t dev, int port, device_t *child)
1562 struct port_info *pi;
1564 sc = device_get_softc(dev);
1565 if (port < 0 || port >= MAX_NPORTS)
1567 pi = sc->port[port];
1568 if (pi == NULL || pi->dev == NULL)
1575 notify_siblings(device_t dev, int detaching)
1581 for (i = 0; i < PCI_FUNCMAX; i++) {
1582 if (i == pci_get_function(dev))
1584 sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
1585 pci_get_slot(dev), i);
1586 if (sibling == NULL || !device_is_attached(sibling))
1589 error = T4_DETACH_CHILD(sibling);
1591 (void)T4_ATTACH_CHILD(sibling);
1602 t4_detach(device_t dev)
1607 sc = device_get_softc(dev);
1609 rc = notify_siblings(dev, 1);
1612 "failed to detach sibling devices: %d\n", rc);
1616 return (t4_detach_common(dev));
1620 t4_detach_common(device_t dev)
1623 struct port_info *pi;
1626 sc = device_get_softc(dev);
1629 destroy_dev(sc->cdev);
1633 sx_xlock(&t4_list_lock);
1634 SLIST_REMOVE(&t4_list, sc, adapter, link);
1635 sx_xunlock(&t4_list_lock);
1637 sc->flags &= ~CHK_MBOX_ACCESS;
1638 if (sc->flags & FULL_INIT_DONE) {
1639 if (!(sc->flags & IS_VF))
1640 t4_intr_disable(sc);
1643 if (device_is_attached(dev)) {
1644 rc = bus_generic_detach(dev);
1647 "failed to detach child devices: %d\n", rc);
1653 taskqueue_drain(taskqueue_thread, &sc->async_event_task);
1656 for (i = 0; i < sc->intr_count; i++)
1657 t4_free_irq(sc, &sc->irq[i]);
1659 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1660 t4_free_tx_sched(sc);
1662 for (i = 0; i < MAX_NPORTS; i++) {
1665 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
1667 device_delete_child(dev, pi->dev);
1669 mtx_destroy(&pi->pi_lock);
1670 free(pi->vi, M_CXGBE);
1675 device_delete_children(dev);
1677 if (sc->flags & FULL_INIT_DONE)
1678 adapter_full_uninit(sc);
1680 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
1681 t4_fw_bye(sc, sc->mbox);
1683 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1684 pci_release_msi(dev);
1687 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1691 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1695 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1699 t4_free_l2t(sc->l2t);
1701 t4_free_smt(sc->smt);
1702 t4_free_atid_table(sc);
1704 t4_free_etid_table(sc);
1707 vmem_destroy(sc->key_map);
1709 t4_destroy_clip_table(sc);
1712 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1713 free(sc->sge.ofld_txq, M_CXGBE);
1716 free(sc->sge.ofld_rxq, M_CXGBE);
1719 free(sc->sge.nm_rxq, M_CXGBE);
1720 free(sc->sge.nm_txq, M_CXGBE);
1722 free(sc->irq, M_CXGBE);
1723 free(sc->sge.rxq, M_CXGBE);
1724 free(sc->sge.txq, M_CXGBE);
1725 free(sc->sge.ctrlq, M_CXGBE);
1726 free(sc->sge.iqmap, M_CXGBE);
1727 free(sc->sge.eqmap, M_CXGBE);
1728 free(sc->tids.ftid_tab, M_CXGBE);
1729 free(sc->tids.hpftid_tab, M_CXGBE);
1730 free_hftid_hash(&sc->tids);
1731 free(sc->tids.tid_tab, M_CXGBE);
1732 free(sc->tt.tls_rx_ports, M_CXGBE);
1733 t4_destroy_dma_tag(sc);
1735 callout_drain(&sc->ktls_tick);
1736 callout_drain(&sc->sfl_callout);
1737 if (mtx_initialized(&sc->tids.ftid_lock)) {
1738 mtx_destroy(&sc->tids.ftid_lock);
1739 cv_destroy(&sc->tids.ftid_cv);
1741 if (mtx_initialized(&sc->tids.atid_lock))
1742 mtx_destroy(&sc->tids.atid_lock);
1743 if (mtx_initialized(&sc->ifp_lock))
1744 mtx_destroy(&sc->ifp_lock);
1746 if (rw_initialized(&sc->policy_lock)) {
1747 rw_destroy(&sc->policy_lock);
1749 if (sc->policy != NULL)
1750 free_offload_policy(sc->policy);
1754 for (i = 0; i < NUM_MEMWIN; i++) {
1755 struct memwin *mw = &sc->memwin[i];
1757 if (rw_initialized(&mw->mw_lock))
1758 rw_destroy(&mw->mw_lock);
1761 mtx_destroy(&sc->sfl_lock);
1762 mtx_destroy(&sc->reg_lock);
1763 mtx_destroy(&sc->sc_lock);
1765 bzero(sc, sizeof(*sc));
1771 cxgbe_probe(device_t dev)
1774 struct port_info *pi = device_get_softc(dev);
1776 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1777 device_set_desc_copy(dev, buf);
1779 return (BUS_PROBE_DEFAULT);
1782 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1783 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1784 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
1785 IFCAP_HWRXTSTMP | IFCAP_NOMAP)
1786 #define T4_CAP_ENABLE (T4_CAP)
1789 cxgbe_vi_attach(device_t dev, struct vi_info *vi)
1793 struct pfil_head_args pa;
1794 struct adapter *sc = vi->adapter;
1796 vi->xact_addr_filt = -1;
1797 callout_init(&vi->tick, 1);
1798 if (sc->flags & IS_VF || t4_tx_vm_wr != 0)
1799 vi->flags |= TX_USES_VM_WR;
1801 /* Allocate an ifnet and set it up */
1802 ifp = if_alloc_dev(IFT_ETHER, dev);
1804 device_printf(dev, "Cannot allocate ifnet\n");
1810 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1811 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1813 ifp->if_init = cxgbe_init;
1814 ifp->if_ioctl = cxgbe_ioctl;
1815 ifp->if_transmit = cxgbe_transmit;
1816 ifp->if_qflush = cxgbe_qflush;
1817 ifp->if_get_counter = cxgbe_get_counter;
1818 #if defined(KERN_TLS) || defined(RATELIMIT)
1819 ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
1820 ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
1821 ifp->if_snd_tag_query = cxgbe_snd_tag_query;
1822 ifp->if_snd_tag_free = cxgbe_snd_tag_free;
1825 ifp->if_ratelimit_query = cxgbe_ratelimit_query;
1828 ifp->if_capabilities = T4_CAP;
1829 ifp->if_capenable = T4_CAP_ENABLE;
1830 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1831 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1832 if (chip_id(sc) >= CHELSIO_T6) {
1833 ifp->if_capabilities |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO;
1834 ifp->if_capenable |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO;
1835 ifp->if_hwassist |= CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
1836 CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
1837 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN;
1841 if (vi->nofldrxq != 0 && (sc->flags & KERN_TLS_OK) == 0)
1842 ifp->if_capabilities |= IFCAP_TOE;
1845 if (is_ethoffload(sc) && vi->nofldtxq != 0) {
1846 ifp->if_capabilities |= IFCAP_TXRTLMT;
1847 ifp->if_capenable |= IFCAP_TXRTLMT;
1851 ifp->if_hw_tsomax = IP_MAXPACKET;
1852 if (vi->flags & TX_USES_VM_WR)
1853 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO;
1855 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO;
1857 if (is_ethoffload(sc) && vi->nofldtxq != 0)
1858 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO;
1860 ifp->if_hw_tsomaxsegsize = 65536;
1862 if (sc->flags & KERN_TLS_OK) {
1863 ifp->if_capabilities |= IFCAP_TXTLS;
1864 ifp->if_capenable |= IFCAP_TXTLS;
1868 ether_ifattach(ifp, vi->hw_addr);
1870 if (vi->nnmrxq != 0)
1871 cxgbe_nm_attach(vi);
1873 sb = sbuf_new_auto();
1874 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
1875 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1876 switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) {
1878 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq);
1880 case IFCAP_TOE | IFCAP_TXRTLMT:
1881 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq);
1884 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq);
1889 if (ifp->if_capabilities & IFCAP_TOE)
1890 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq);
1893 if (ifp->if_capabilities & IFCAP_NETMAP)
1894 sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
1895 vi->nnmtxq, vi->nnmrxq);
1898 device_printf(dev, "%s\n", sbuf_data(sb));
1903 pa.pa_version = PFIL_VERSION;
1904 pa.pa_flags = PFIL_IN;
1905 pa.pa_type = PFIL_TYPE_ETHERNET;
1906 pa.pa_headname = ifp->if_xname;
1907 vi->pfil = pfil_head_register(&pa);
1913 cxgbe_attach(device_t dev)
1915 struct port_info *pi = device_get_softc(dev);
1916 struct adapter *sc = pi->adapter;
1920 callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
1922 rc = cxgbe_vi_attach(dev, &pi->vi[0]);
1926 for_each_vi(pi, i, vi) {
1929 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
1930 if (vi->dev == NULL) {
1931 device_printf(dev, "failed to add VI %d\n", i);
1934 device_set_softc(vi->dev, vi);
1939 bus_generic_attach(dev);
1945 cxgbe_vi_detach(struct vi_info *vi)
1947 struct ifnet *ifp = vi->ifp;
1949 if (vi->pfil != NULL) {
1950 pfil_head_unregister(vi->pfil);
1954 ether_ifdetach(ifp);
1956 /* Let detach proceed even if these fail. */
1958 if (ifp->if_capabilities & IFCAP_NETMAP)
1959 cxgbe_nm_detach(vi);
1961 cxgbe_uninit_synchronized(vi);
1962 callout_drain(&vi->tick);
1970 cxgbe_detach(device_t dev)
1972 struct port_info *pi = device_get_softc(dev);
1973 struct adapter *sc = pi->adapter;
1976 /* Detach the extra VIs first. */
1977 rc = bus_generic_detach(dev);
1980 device_delete_children(dev);
1982 doom_vi(sc, &pi->vi[0]);
1984 if (pi->flags & HAS_TRACEQ) {
1985 sc->traceq = -1; /* cloner should not create ifnet */
1986 t4_tracer_port_detach(sc);
1989 cxgbe_vi_detach(&pi->vi[0]);
1990 callout_drain(&pi->tick);
1991 ifmedia_removeall(&pi->media);
1993 end_synchronized_op(sc, 0);
1999 cxgbe_init(void *arg)
2001 struct vi_info *vi = arg;
2002 struct adapter *sc = vi->adapter;
2004 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
2006 cxgbe_init_synchronized(vi);
2007 end_synchronized_op(sc, 0);
2011 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
2013 int rc = 0, mtu, flags;
2014 struct vi_info *vi = ifp->if_softc;
2015 struct port_info *pi = vi->pi;
2016 struct adapter *sc = pi->adapter;
2017 struct ifreq *ifr = (struct ifreq *)data;
2023 if (mtu < ETHERMIN || mtu > MAX_MTU)
2026 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
2030 if (vi->flags & VI_INIT_DONE) {
2031 t4_update_fl_bufsize(ifp);
2032 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2033 rc = update_mac_settings(ifp, XGMAC_MTU);
2035 end_synchronized_op(sc, 0);
2039 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg");
2043 if (ifp->if_flags & IFF_UP) {
2044 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2045 flags = vi->if_flags;
2046 if ((ifp->if_flags ^ flags) &
2047 (IFF_PROMISC | IFF_ALLMULTI)) {
2048 rc = update_mac_settings(ifp,
2049 XGMAC_PROMISC | XGMAC_ALLMULTI);
2052 rc = cxgbe_init_synchronized(vi);
2054 vi->if_flags = ifp->if_flags;
2055 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2056 rc = cxgbe_uninit_synchronized(vi);
2058 end_synchronized_op(sc, 0);
2063 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
2066 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2067 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
2068 end_synchronized_op(sc, 0);
2072 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
2076 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2077 if (mask & IFCAP_TXCSUM) {
2078 ifp->if_capenable ^= IFCAP_TXCSUM;
2079 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2081 if (IFCAP_TSO4 & ifp->if_capenable &&
2082 !(IFCAP_TXCSUM & ifp->if_capenable)) {
2083 mask &= ~IFCAP_TSO4;
2084 ifp->if_capenable &= ~IFCAP_TSO4;
2086 "tso4 disabled due to -txcsum.\n");
2089 if (mask & IFCAP_TXCSUM_IPV6) {
2090 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2091 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2093 if (IFCAP_TSO6 & ifp->if_capenable &&
2094 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2095 mask &= ~IFCAP_TSO6;
2096 ifp->if_capenable &= ~IFCAP_TSO6;
2098 "tso6 disabled due to -txcsum6.\n");
2101 if (mask & IFCAP_RXCSUM)
2102 ifp->if_capenable ^= IFCAP_RXCSUM;
2103 if (mask & IFCAP_RXCSUM_IPV6)
2104 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2107 * Note that we leave CSUM_TSO alone (it is always set). The
2108 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
2109 * sending a TSO request our way, so it's sufficient to toggle IFCAP_TSOx only.
2112 if (mask & IFCAP_TSO4) {
2113 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2114 !(IFCAP_TXCSUM & ifp->if_capenable)) {
2115 if_printf(ifp, "enable txcsum first.\n");
2119 ifp->if_capenable ^= IFCAP_TSO4;
2121 if (mask & IFCAP_TSO6) {
2122 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2123 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2124 if_printf(ifp, "enable txcsum6 first.\n");
2128 ifp->if_capenable ^= IFCAP_TSO6;
2130 if (mask & IFCAP_LRO) {
2131 #if defined(INET) || defined(INET6)
2133 struct sge_rxq *rxq;
2135 ifp->if_capenable ^= IFCAP_LRO;
2136 for_each_rxq(vi, i, rxq) {
2137 if (ifp->if_capenable & IFCAP_LRO)
2138 rxq->iq.flags |= IQ_LRO_ENABLED;
2140 rxq->iq.flags &= ~IQ_LRO_ENABLED;
2145 if (mask & IFCAP_TOE) {
2146 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
2148 rc = toe_capability(vi, enable);
2152 ifp->if_capenable ^= mask;
2155 if (mask & IFCAP_VLAN_HWTAGGING) {
2156 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2157 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2158 rc = update_mac_settings(ifp, XGMAC_VLANEX);
2160 if (mask & IFCAP_VLAN_MTU) {
2161 ifp->if_capenable ^= IFCAP_VLAN_MTU;
2163 /* Need to find out how to disable auto-mtu-inflation */
2165 if (mask & IFCAP_VLAN_HWTSO)
2166 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2167 if (mask & IFCAP_VLAN_HWCSUM)
2168 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2170 if (mask & IFCAP_TXRTLMT)
2171 ifp->if_capenable ^= IFCAP_TXRTLMT;
2173 if (mask & IFCAP_HWRXTSTMP) {
2175 struct sge_rxq *rxq;
2177 ifp->if_capenable ^= IFCAP_HWRXTSTMP;
2178 for_each_rxq(vi, i, rxq) {
2179 if (ifp->if_capenable & IFCAP_HWRXTSTMP)
2180 rxq->iq.flags |= IQ_RX_TIMESTAMP;
2182 rxq->iq.flags &= ~IQ_RX_TIMESTAMP;
2185 if (mask & IFCAP_NOMAP)
2186 ifp->if_capenable ^= IFCAP_NOMAP;
2189 if (mask & IFCAP_TXTLS)
2190 ifp->if_capenable ^= (mask & IFCAP_TXTLS);
2192 if (mask & IFCAP_VXLAN_HWCSUM) {
2193 ifp->if_capenable ^= IFCAP_VXLAN_HWCSUM;
2194 ifp->if_hwassist ^= CSUM_INNER_IP6_UDP |
2195 CSUM_INNER_IP6_TCP | CSUM_INNER_IP |
2196 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP;
2198 if (mask & IFCAP_VXLAN_HWTSO) {
2199 ifp->if_capenable ^= IFCAP_VXLAN_HWTSO;
2200 ifp->if_hwassist ^= CSUM_INNER_IP6_TSO |
2204 #ifdef VLAN_CAPABILITIES
2205 VLAN_CAPABILITIES(ifp);
2208 end_synchronized_op(sc, 0);
2214 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
2218 struct ifi2creq i2c;
2220 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2223 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
2227 if (i2c.len > sizeof(i2c.data)) {
2231 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
2234 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
2235 i2c.offset, i2c.len, &i2c.data[0]);
2236 end_synchronized_op(sc, 0);
2238 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2243 rc = ether_ioctl(ifp, cmd, data);
2250 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
2252 struct vi_info *vi = ifp->if_softc;
2253 struct port_info *pi = vi->pi;
2255 struct sge_txq *txq;
2260 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
2261 #if defined(KERN_TLS) || defined(RATELIMIT)
2262 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
2263 MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
2266 if (__predict_false(pi->link_cfg.link_ok == false)) {
2271 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR);
2272 if (__predict_false(rc != 0)) {
2273 MPASS(m == NULL); /* was freed already */
2274 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
2278 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
2279 if (m->m_pkthdr.snd_tag->type == IF_SND_TAG_TYPE_RATE_LIMIT)
2280 return (ethofld_transmit(ifp, m));
2286 txq = &sc->sge.txq[vi->first_txq];
2287 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2288 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
2292 rc = mp_ring_enqueue(txq->r, items, 1, 256);
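/*
 * A note on the queue selection above: mbufs without a flowid go to the
 * VI's first tx queue, while hashed traffic is spread across the remaining
 * (ntxq - rsrv_noflowq) queues using flowid modulo arithmetic.  The chosen
 * queue's mp_ring then takes the packet via mp_ring_enqueue(); the final
 * argument is presumably a drain budget rather than a queue depth.
 */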
2293 if (__predict_false(rc != 0))
2300 cxgbe_qflush(struct ifnet *ifp)
2302 struct vi_info *vi = ifp->if_softc;
2303 struct sge_txq *txq;
2306 /* queues do not exist if !VI_INIT_DONE. */
2307 if (vi->flags & VI_INIT_DONE) {
2308 for_each_txq(vi, i, txq) {
2310 txq->eq.flags |= EQ_QFLUSH;
2312 while (!mp_ring_is_idle(txq->r)) {
2313 mp_ring_check_drainage(txq->r, 4096);
2317 txq->eq.flags &= ~EQ_QFLUSH;
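/*
 * EQ_QFLUSH, set for the duration of the loop above, signals the tx path
 * to discard new work while mp_ring_check_drainage() drains whatever is
 * already queued; the flag is cleared again once the ring is idle.
 */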
2325 vi_get_counter(struct ifnet *ifp, ift_counter c)
2327 struct vi_info *vi = ifp->if_softc;
2328 struct fw_vi_stats_vf *s = &vi->stats;
2330 vi_refresh_stats(vi->adapter, vi);
2333 case IFCOUNTER_IPACKETS:
2334 return (s->rx_bcast_frames + s->rx_mcast_frames +
2335 s->rx_ucast_frames);
2336 case IFCOUNTER_IERRORS:
2337 return (s->rx_err_frames);
2338 case IFCOUNTER_OPACKETS:
2339 return (s->tx_bcast_frames + s->tx_mcast_frames +
2340 s->tx_ucast_frames + s->tx_offload_frames);
2341 case IFCOUNTER_OERRORS:
2342 return (s->tx_drop_frames);
2343 case IFCOUNTER_IBYTES:
2344 return (s->rx_bcast_bytes + s->rx_mcast_bytes +
2346 case IFCOUNTER_OBYTES:
2347 return (s->tx_bcast_bytes + s->tx_mcast_bytes +
2348 s->tx_ucast_bytes + s->tx_offload_bytes);
2349 case IFCOUNTER_IMCASTS:
2350 return (s->rx_mcast_frames);
2351 case IFCOUNTER_OMCASTS:
2352 return (s->tx_mcast_frames);
2353 case IFCOUNTER_OQDROPS: {
2357 if (vi->flags & VI_INIT_DONE) {
2359 struct sge_txq *txq;
2361 for_each_txq(vi, i, txq)
2362 drops += counter_u64_fetch(txq->r->dropped);
2370 return (if_get_counter_default(ifp, c));
2375 cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
2377 struct vi_info *vi = ifp->if_softc;
2378 struct port_info *pi = vi->pi;
2379 struct adapter *sc = pi->adapter;
2380 struct port_stats *s = &pi->stats;
2382 if (pi->nvi > 1 || sc->flags & IS_VF)
2383 return (vi_get_counter(ifp, c));
2385 cxgbe_refresh_stats(sc, pi);
2388 case IFCOUNTER_IPACKETS:
2389 return (s->rx_frames);
2391 case IFCOUNTER_IERRORS:
2392 return (s->rx_jabber + s->rx_runt + s->rx_too_long +
2393 s->rx_fcs_err + s->rx_len_err);
2395 case IFCOUNTER_OPACKETS:
2396 return (s->tx_frames);
2398 case IFCOUNTER_OERRORS:
2399 return (s->tx_error_frames);
2401 case IFCOUNTER_IBYTES:
2402 return (s->rx_octets);
2404 case IFCOUNTER_OBYTES:
2405 return (s->tx_octets);
2407 case IFCOUNTER_IMCASTS:
2408 return (s->rx_mcast_frames);
2410 case IFCOUNTER_OMCASTS:
2411 return (s->tx_mcast_frames);
2413 case IFCOUNTER_IQDROPS:
2414 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
2415 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
2416 s->rx_trunc3 + pi->tnl_cong_drops);
2418 case IFCOUNTER_OQDROPS: {
2422 if (vi->flags & VI_INIT_DONE) {
2424 struct sge_txq *txq;
2426 for_each_txq(vi, i, txq)
2427 drops += counter_u64_fetch(txq->r->dropped);
2435 return (if_get_counter_default(ifp, c));
2439 #if defined(KERN_TLS) || defined(RATELIMIT)
2441 cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
2442 struct m_snd_tag **pt)
2446 switch (params->hdr.type) {
2448 case IF_SND_TAG_TYPE_RATE_LIMIT:
2449 error = cxgbe_rate_tag_alloc(ifp, params, pt);
2453 case IF_SND_TAG_TYPE_TLS:
2454 error = cxgbe_tls_tag_alloc(ifp, params, pt);
2464 cxgbe_snd_tag_modify(struct m_snd_tag *mst,
2465 union if_snd_tag_modify_params *params)
2468 switch (mst->type) {
2470 case IF_SND_TAG_TYPE_RATE_LIMIT:
2471 return (cxgbe_rate_tag_modify(mst, params));
2474 return (EOPNOTSUPP);
2479 cxgbe_snd_tag_query(struct m_snd_tag *mst,
2480 union if_snd_tag_query_params *params)
2483 switch (mst->type) {
2485 case IF_SND_TAG_TYPE_RATE_LIMIT:
2486 return (cxgbe_rate_tag_query(mst, params));
2489 return (EOPNOTSUPP);
2494 cxgbe_snd_tag_free(struct m_snd_tag *mst)
2497 switch (mst->type) {
2499 case IF_SND_TAG_TYPE_RATE_LIMIT:
2500 cxgbe_rate_tag_free(mst);
2504 case IF_SND_TAG_TYPE_TLS:
2505 cxgbe_tls_tag_free(mst);
2509 panic("shouldn't get here");
2515 * The kernel picks a media from the list we had provided but we still validate the request.
2519 cxgbe_media_change(struct ifnet *ifp)
2521 struct vi_info *vi = ifp->if_softc;
2522 struct port_info *pi = vi->pi;
2523 struct ifmedia *ifm = &pi->media;
2524 struct link_config *lc = &pi->link_cfg;
2525 struct adapter *sc = pi->adapter;
2528 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
2532 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2533 /* ifconfig .. media autoselect */
2534 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
2535 rc = ENOTSUP; /* AN not supported by transceiver */
2538 lc->requested_aneg = AUTONEG_ENABLE;
2539 lc->requested_speed = 0;
2540 lc->requested_fc |= PAUSE_AUTONEG;
2542 lc->requested_aneg = AUTONEG_DISABLE;
2543 lc->requested_speed =
2544 ifmedia_baudrate(ifm->ifm_media) / 1000000;
2545 lc->requested_fc = 0;
2546 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
2547 lc->requested_fc |= PAUSE_RX;
2548 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
2549 lc->requested_fc |= PAUSE_TX;
2551 if (pi->up_vis > 0) {
2552 fixup_link_config(pi);
2553 rc = apply_link_config(pi);
2557 end_synchronized_op(sc, 0);
2562 * Base media word (without ETHER, pause, link active, etc.) for the port at the given speed.
2566 port_mword(struct port_info *pi, uint32_t speed)
2569 MPASS(speed & M_FW_PORT_CAP32_SPEED);
2570 MPASS(powerof2(speed));
2572 switch(pi->port_type) {
2573 case FW_PORT_TYPE_BT_SGMII:
2574 case FW_PORT_TYPE_BT_XFI:
2575 case FW_PORT_TYPE_BT_XAUI:
2578 case FW_PORT_CAP32_SPEED_100M:
2580 case FW_PORT_CAP32_SPEED_1G:
2581 return (IFM_1000_T);
2582 case FW_PORT_CAP32_SPEED_10G:
2586 case FW_PORT_TYPE_KX4:
2587 if (speed == FW_PORT_CAP32_SPEED_10G)
2588 return (IFM_10G_KX4);
2590 case FW_PORT_TYPE_CX4:
2591 if (speed == FW_PORT_CAP32_SPEED_10G)
2592 return (IFM_10G_CX4);
2594 case FW_PORT_TYPE_KX:
2595 if (speed == FW_PORT_CAP32_SPEED_1G)
2596 return (IFM_1000_KX);
2598 case FW_PORT_TYPE_KR:
2599 case FW_PORT_TYPE_BP_AP:
2600 case FW_PORT_TYPE_BP4_AP:
2601 case FW_PORT_TYPE_BP40_BA:
2602 case FW_PORT_TYPE_KR4_100G:
2603 case FW_PORT_TYPE_KR_SFP28:
2604 case FW_PORT_TYPE_KR_XLAUI:
2606 case FW_PORT_CAP32_SPEED_1G:
2607 return (IFM_1000_KX);
2608 case FW_PORT_CAP32_SPEED_10G:
2609 return (IFM_10G_KR);
2610 case FW_PORT_CAP32_SPEED_25G:
2611 return (IFM_25G_KR);
2612 case FW_PORT_CAP32_SPEED_40G:
2613 return (IFM_40G_KR4);
2614 case FW_PORT_CAP32_SPEED_50G:
2615 return (IFM_50G_KR2);
2616 case FW_PORT_CAP32_SPEED_100G:
2617 return (IFM_100G_KR4);
2620 case FW_PORT_TYPE_FIBER_XFI:
2621 case FW_PORT_TYPE_FIBER_XAUI:
2622 case FW_PORT_TYPE_SFP:
2623 case FW_PORT_TYPE_QSFP_10G:
2624 case FW_PORT_TYPE_QSA:
2625 case FW_PORT_TYPE_QSFP:
2626 case FW_PORT_TYPE_CR4_QSFP:
2627 case FW_PORT_TYPE_CR_QSFP:
2628 case FW_PORT_TYPE_CR2_QSFP:
2629 case FW_PORT_TYPE_SFP28:
2630 /* Pluggable transceiver */
2631 switch (pi->mod_type) {
2632 case FW_PORT_MOD_TYPE_LR:
2634 case FW_PORT_CAP32_SPEED_1G:
2635 return (IFM_1000_LX);
2636 case FW_PORT_CAP32_SPEED_10G:
2637 return (IFM_10G_LR);
2638 case FW_PORT_CAP32_SPEED_25G:
2639 return (IFM_25G_LR);
2640 case FW_PORT_CAP32_SPEED_40G:
2641 return (IFM_40G_LR4);
2642 case FW_PORT_CAP32_SPEED_50G:
2643 return (IFM_50G_LR2);
2644 case FW_PORT_CAP32_SPEED_100G:
2645 return (IFM_100G_LR4);
2648 case FW_PORT_MOD_TYPE_SR:
2650 case FW_PORT_CAP32_SPEED_1G:
2651 return (IFM_1000_SX);
2652 case FW_PORT_CAP32_SPEED_10G:
2653 return (IFM_10G_SR);
2654 case FW_PORT_CAP32_SPEED_25G:
2655 return (IFM_25G_SR);
2656 case FW_PORT_CAP32_SPEED_40G:
2657 return (IFM_40G_SR4);
2658 case FW_PORT_CAP32_SPEED_50G:
2659 return (IFM_50G_SR2);
2660 case FW_PORT_CAP32_SPEED_100G:
2661 return (IFM_100G_SR4);
2664 case FW_PORT_MOD_TYPE_ER:
2665 if (speed == FW_PORT_CAP32_SPEED_10G)
2666 return (IFM_10G_ER);
2668 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2669 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2671 case FW_PORT_CAP32_SPEED_1G:
2672 return (IFM_1000_CX);
2673 case FW_PORT_CAP32_SPEED_10G:
2674 return (IFM_10G_TWINAX);
2675 case FW_PORT_CAP32_SPEED_25G:
2676 return (IFM_25G_CR);
2677 case FW_PORT_CAP32_SPEED_40G:
2678 return (IFM_40G_CR4);
2679 case FW_PORT_CAP32_SPEED_50G:
2680 return (IFM_50G_CR2);
2681 case FW_PORT_CAP32_SPEED_100G:
2682 return (IFM_100G_CR4);
2685 case FW_PORT_MOD_TYPE_LRM:
2686 if (speed == FW_PORT_CAP32_SPEED_10G)
2687 return (IFM_10G_LRM);
2689 case FW_PORT_MOD_TYPE_NA:
2690 MPASS(0); /* Not pluggable? */
2692 case FW_PORT_MOD_TYPE_ERROR:
2693 case FW_PORT_MOD_TYPE_UNKNOWN:
2694 case FW_PORT_MOD_TYPE_NOTSUPPORTED:
2696 case FW_PORT_MOD_TYPE_NONE:
2700 case FW_PORT_TYPE_NONE:
2704 return (IFM_UNKNOWN);
2708 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2710 struct vi_info *vi = ifp->if_softc;
2711 struct port_info *pi = vi->pi;
2712 struct adapter *sc = pi->adapter;
2713 struct link_config *lc = &pi->link_cfg;
2715 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2719 if (pi->up_vis == 0) {
2721 * If all the interfaces are administratively down the firmware
2722 * does not report transceiver changes. Refresh port info here
2723 * so that ifconfig displays accurate ifmedia at all times.
2724 * This is the only reason we have a synchronized op in this
2725 * function. Just PORT_LOCK would have been enough otherwise.
2727 t4_update_port_info(pi);
2728 build_medialist(pi);
2732 ifmr->ifm_status = IFM_AVALID;
2733 if (lc->link_ok == false)
2735 ifmr->ifm_status |= IFM_ACTIVE;
2738 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2739 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2740 if (lc->fc & PAUSE_RX)
2741 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2742 if (lc->fc & PAUSE_TX)
2743 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2744 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed));
2747 end_synchronized_op(sc, 0);
2751 vcxgbe_probe(device_t dev)
2754 struct vi_info *vi = device_get_softc(dev);
2756 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2758 device_set_desc_copy(dev, buf);
2760 return (BUS_PROBE_DEFAULT);
2764 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
2766 int func, index, rc;
2767 uint32_t param, val;
2769 ASSERT_SYNCHRONIZED_OP(sc);
2771 index = vi - pi->vi;
2772 MPASS(index > 0); /* This function deals with _extra_ VIs only */
2773 KASSERT(index < nitems(vi_mac_funcs),
2774 ("%s: VI %s doesn't have a MAC func", __func__,
2775 device_get_nameunit(vi->dev)));
2776 func = vi_mac_funcs[index];
2777 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
2778 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
2780 device_printf(vi->dev, "failed to allocate virtual interface %d"
2781 "for port %d: %d\n", index, pi->port_id, -rc);
2786 if (vi->rss_size == 1) {
2788 * This VI didn't get a slice of the RSS table. Reduce the
2789 * number of VIs being created (hw.cxgbe.num_vis) or modify the
2790 * configuration file (nvi, rssnvi for this PF) if this is a problem.
2793 device_printf(vi->dev, "RSS table not available.\n");
2794 vi->rss_base = 0xffff;
2799 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2800 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
2801 V_FW_PARAMS_PARAM_YZ(vi->viid);
2802 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2804 vi->rss_base = 0xffff;
2806 MPASS((val >> 16) == vi->rss_size);
2807 vi->rss_base = val & 0xffff;
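/*
 * The RSSINFO reply appears to pack the VI's slice of the RSS table into a
 * single word: the slice size in the upper 16 bits (cross-checked against
 * rss_size above) and the base index in the lower 16 bits.
 */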
2814 vcxgbe_attach(device_t dev)
2817 struct port_info *pi;
2821 vi = device_get_softc(dev);
2825 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via");
2828 rc = alloc_extra_vi(sc, pi, vi);
2829 end_synchronized_op(sc, 0);
2833 rc = cxgbe_vi_attach(dev, vi);
2835 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2842 vcxgbe_detach(device_t dev)
2847 vi = device_get_softc(dev);
2852 cxgbe_vi_detach(vi);
2853 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
2855 end_synchronized_op(sc, 0);
2860 static struct callout fatal_callout;
2863 delayed_panic(void *arg)
2865 struct adapter *sc = arg;
2867 panic("%s: panic on fatal error", device_get_nameunit(sc->dev));
2871 t4_fatal_err(struct adapter *sc, bool fw_error)
2874 t4_shutdown_adapter(sc);
2875 log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n",
2876 device_get_nameunit(sc->dev));
2878 ASSERT_SYNCHRONIZED_OP(sc);
2879 sc->flags |= ADAP_ERR;
2882 sc->flags |= ADAP_ERR;
2886 taskqueue_enqueue(taskqueue_thread, &sc->async_event_task);
2889 if (t4_panic_on_fatal_err) {
2890 log(LOG_ALERT, "%s: panic on fatal error after 30s",
2891 device_get_nameunit(sc->dev));
2892 callout_reset(&fatal_callout, hz * 30, delayed_panic, sc);
2897 t4_add_adapter(struct adapter *sc)
2899 sx_xlock(&t4_list_lock);
2900 SLIST_INSERT_HEAD(&t4_list, sc, link);
2901 sx_xunlock(&t4_list_lock);
2905 t4_map_bars_0_and_4(struct adapter *sc)
2907 sc->regs_rid = PCIR_BAR(0);
2908 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2909 &sc->regs_rid, RF_ACTIVE);
2910 if (sc->regs_res == NULL) {
2911 device_printf(sc->dev, "cannot map registers.\n");
2914 sc->bt = rman_get_bustag(sc->regs_res);
2915 sc->bh = rman_get_bushandle(sc->regs_res);
2916 sc->mmio_len = rman_get_size(sc->regs_res);
2917 setbit(&sc->doorbells, DOORBELL_KDB);
2919 sc->msix_rid = PCIR_BAR(4);
2920 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2921 &sc->msix_rid, RF_ACTIVE);
2922 if (sc->msix_res == NULL) {
2923 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
2931 t4_map_bar_2(struct adapter *sc)
2935 * T4: only the iWARP driver uses the userspace doorbells. There is no need
2936 * to map it if RDMA is disabled.
2938 if (is_t4(sc) && sc->rdmacaps == 0)
2941 sc->udbs_rid = PCIR_BAR(2);
2942 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
2943 &sc->udbs_rid, RF_ACTIVE);
2944 if (sc->udbs_res == NULL) {
2945 device_printf(sc->dev, "cannot map doorbell BAR.\n");
2948 sc->udbs_base = rman_get_virtual(sc->udbs_res);
2950 if (chip_id(sc) >= CHELSIO_T5) {
2951 setbit(&sc->doorbells, DOORBELL_UDB);
2952 #if defined(__i386__) || defined(__amd64__)
2953 if (t5_write_combine) {
2957 * Enable write combining on BAR2. This is the
2958 * userspace doorbell BAR and is split into 128B
2959 * (UDBS_SEG_SIZE) doorbell regions, each associated
2960 * with an egress queue. The first 64B has the doorbell
2961 * and the second 64B can be used to submit a tx work
2962 * request with an implicit doorbell.
2965 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
2966 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
2968 clrbit(&sc->doorbells, DOORBELL_UDB);
2969 setbit(&sc->doorbells, DOORBELL_WCWR);
2970 setbit(&sc->doorbells, DOORBELL_UDBWC);
2972 device_printf(sc->dev,
2973 "couldn't enable write combining: %d\n",
2977 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
2978 t4_write_reg(sc, A_SGE_STAT_CFG,
2979 V_STATSOURCE_T5(7) | mode);
2983 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0;
2988 struct memwin_init {
2993 static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
2994 { MEMWIN0_BASE, MEMWIN0_APERTURE },
2995 { MEMWIN1_BASE, MEMWIN1_APERTURE },
2996 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
2999 static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
3000 { MEMWIN0_BASE, MEMWIN0_APERTURE },
3001 { MEMWIN1_BASE, MEMWIN1_APERTURE },
3002 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
3006 setup_memwin(struct adapter *sc)
3008 const struct memwin_init *mw_init;
3015 * Read low 32b of bar0 indirectly via the hardware backdoor
3016 * mechanism. Works from within PCI passthrough environments
3017 * too, where rman_get_start() can return a different value. We
3018 * need to program the T4 memory window decoders with the actual
3019 * addresses that will be coming across the PCIe link.
3021 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
3022 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
3024 mw_init = &t4_memwin[0];
3026 /* T5+ use the relative offset inside the PCIe BAR */
3029 mw_init = &t5_memwin[0];
3032 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
3033 rw_init(&mw->mw_lock, "memory window access");
3034 mw->mw_base = mw_init->base;
3035 mw->mw_aperture = mw_init->aperture;
3038 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
3039 (mw->mw_base + bar0) | V_BIR(0) |
3040 V_WINDOW(ilog2(mw->mw_aperture) - 10));
3041 rw_wlock(&mw->mw_lock);
3042 position_memwin(sc, i, 0);
3043 rw_wunlock(&mw->mw_lock);
3047 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
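/*
 * A worked example of the window programming above, assuming a 64KB
 * aperture: ilog2(65536) - 10 = 6, so V_WINDOW(6) is written, which
 * suggests the WINDOW field encodes log2(aperture) in 1KB units.  The base
 * address written is the window offset plus the real bar0 address read
 * from config space on T4; per the comment above, T5+ program the offset
 * relative to the BAR instead.
 */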
3051 * Positions the memory window at the given address in the card's address space.
3052 * There are some alignment requirements and the actual position may be at an
3053 * address prior to the requested address. mw->mw_curpos always has the actual
3054 * position of the window.
3057 position_memwin(struct adapter *sc, int idx, uint32_t addr)
3063 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3064 mw = &sc->memwin[idx];
3065 rw_assert(&mw->mw_lock, RA_WLOCKED);
3069 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */
3071 pf = V_PFNUM(sc->pf);
3072 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
3074 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
3075 t4_write_reg(sc, reg, mw->mw_curpos | pf);
3076 t4_read_reg(sc, reg); /* flush */
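/*
 * Example of the alignment handling above: asking for address 0x12345
 * leaves mw_curpos at 0x12340 on T4 (16B alignment) and at 0x12300 on T5+
 * (128B alignment).  Callers must therefore access (addr - mw_curpos)
 * within the window rather than assume the window starts exactly where
 * they asked, which is what rw_via_memwin() below does.
 */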
3080 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
3086 MPASS(idx >= 0 && idx < NUM_MEMWIN);
3088 /* Memory can only be accessed in naturally aligned 4 byte units */
3089 if (addr & 3 || len & 3 || len <= 0)
3092 mw = &sc->memwin[idx];
3094 rw_rlock(&mw->mw_lock);
3095 mw_end = mw->mw_curpos + mw->mw_aperture;
3096 if (addr >= mw_end || addr < mw->mw_curpos) {
3097 /* Will need to reposition the window */
3098 if (!rw_try_upgrade(&mw->mw_lock)) {
3099 rw_runlock(&mw->mw_lock);
3100 rw_wlock(&mw->mw_lock);
3102 rw_assert(&mw->mw_lock, RA_WLOCKED);
3103 position_memwin(sc, idx, addr);
3104 rw_downgrade(&mw->mw_lock);
3105 mw_end = mw->mw_curpos + mw->mw_aperture;
3107 rw_assert(&mw->mw_lock, RA_RLOCKED);
3108 while (addr < mw_end && len > 0) {
3110 v = t4_read_reg(sc, mw->mw_base + addr -
3112 *val++ = le32toh(v);
3115 t4_write_reg(sc, mw->mw_base + addr -
3116 mw->mw_curpos, htole32(v));
3121 rw_runlock(&mw->mw_lock);
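/*
 * Locking note for the accessor above: readers share the rlock as long as
 * the requested range is covered by the window's current position.
 * Repositioning requires the wlock (upgraded in place, or re-acquired if
 * the upgrade fails), and the lock is downgraded again afterwards so other
 * readers of the same window position can proceed concurrently.
 */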
3128 t4_init_atid_table(struct adapter *sc)
3137 MPASS(t->atid_tab == NULL);
3139 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE,
3141 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
3142 t->afree = t->atid_tab;
3143 t->atids_in_use = 0;
3144 for (i = 1; i < t->natids; i++)
3145 t->atid_tab[i - 1].next = &t->atid_tab[i];
3146 t->atid_tab[t->natids - 1].next = NULL;
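/*
 * The atid table doubles as its own free list: every free entry's 'next'
 * member points at the next free entry, afree tracks the head, and an
 * atid is simply the entry's index (p - t->atid_tab).  alloc_atid() below
 * pops the head of this list and free_atid() pushes an entry back.
 */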
3150 t4_free_atid_table(struct adapter *sc)
3156 KASSERT(t->atids_in_use == 0,
3157 ("%s: %d atids still in use.", __func__, t->atids_in_use));
3159 if (mtx_initialized(&t->atid_lock))
3160 mtx_destroy(&t->atid_lock);
3161 free(t->atid_tab, M_CXGBE);
3166 alloc_atid(struct adapter *sc, void *ctx)
3168 struct tid_info *t = &sc->tids;
3171 mtx_lock(&t->atid_lock);
3173 union aopen_entry *p = t->afree;
3175 atid = p - t->atid_tab;
3176 MPASS(atid <= M_TID_TID);
3181 mtx_unlock(&t->atid_lock);
3186 lookup_atid(struct adapter *sc, int atid)
3188 struct tid_info *t = &sc->tids;
3190 return (t->atid_tab[atid].data);
3194 free_atid(struct adapter *sc, int atid)
3196 struct tid_info *t = &sc->tids;
3197 union aopen_entry *p = &t->atid_tab[atid];
3199 mtx_lock(&t->atid_lock);
3203 mtx_unlock(&t->atid_lock);
3207 queue_tid_release(struct adapter *sc, int tid)
3210 CXGBE_UNIMPLEMENTED("deferred tid release");
3214 release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
3217 struct cpl_tid_release *req;
3219 wr = alloc_wrqe(sizeof(*req), ctrlq);
3221 queue_tid_release(sc, tid); /* defer */
3226 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
3232 t4_range_cmp(const void *a, const void *b)
3234 return ((const struct t4_range *)a)->start -
3235 ((const struct t4_range *)b)->start;
3239 * Verify that the memory range specified by the addr/len pair is valid within
3240 * the card's address space.
3243 validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len)
3245 struct t4_range mem_ranges[4], *r, *next;
3246 uint32_t em, addr_len;
3247 int i, n, remaining;
3249 /* Memory can only be accessed in naturally aligned 4 byte units */
3250 if (addr & 3 || len & 3 || len == 0)
3253 /* Enabled memories */
3254 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3258 bzero(r, sizeof(mem_ranges));
3259 if (em & F_EDRAM0_ENABLE) {
3260 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3261 r->size = G_EDRAM0_SIZE(addr_len) << 20;
3263 r->start = G_EDRAM0_BASE(addr_len) << 20;
3264 if (addr >= r->start &&
3265 addr + len <= r->start + r->size)
3271 if (em & F_EDRAM1_ENABLE) {
3272 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3273 r->size = G_EDRAM1_SIZE(addr_len) << 20;
3275 r->start = G_EDRAM1_BASE(addr_len) << 20;
3276 if (addr >= r->start &&
3277 addr + len <= r->start + r->size)
3283 if (em & F_EXT_MEM_ENABLE) {
3284 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3285 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
3287 r->start = G_EXT_MEM_BASE(addr_len) << 20;
3288 if (addr >= r->start &&
3289 addr + len <= r->start + r->size)
3295 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
3296 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
3297 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
3299 r->start = G_EXT_MEM1_BASE(addr_len) << 20;
3300 if (addr >= r->start &&
3301 addr + len <= r->start + r->size)
3307 MPASS(n <= nitems(mem_ranges));
3310 /* Sort and merge the ranges. */
3311 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
3313 /* Start from index 0 and examine the next n - 1 entries. */
3315 for (remaining = n - 1; remaining > 0; remaining--, r++) {
3317 MPASS(r->size > 0); /* r is a valid entry. */
3319 MPASS(next->size > 0); /* and so is the next one. */
3321 while (r->start + r->size >= next->start) {
3322 /* Merge the next one into the current entry. */
3323 r->size = max(r->start + r->size,
3324 next->start + next->size) - r->start;
3325 n--; /* One fewer entry in total. */
3326 if (--remaining == 0)
3327 goto done; /* short circuit */
3330 if (next != r + 1) {
3332 * Some entries were merged into r and next
3333 * points to the first valid entry that couldn't be merged.
3336 MPASS(next->size > 0); /* must be valid */
3337 memcpy(r + 1, next, remaining * sizeof(*r));
3340 * This is so that the foo->size assertions in the
3341 * next iteration of the loop do the right
3342 * thing for entries that were pulled up and are no longer valid.
3345 MPASS(n < nitems(mem_ranges));
3346 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
3347 sizeof(struct t4_range));
3352 /* Done merging the ranges. */
3355 for (i = 0; i < n; i++, r++) {
3356 if (addr >= r->start &&
3357 addr + len <= r->start + r->size)
3366 fwmtype_to_hwmtype(int mtype)
3370 case FW_MEMTYPE_EDC0:
3372 case FW_MEMTYPE_EDC1:
3374 case FW_MEMTYPE_EXTMEM:
3376 case FW_MEMTYPE_EXTMEM1:
3379 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
3384 * Verify that the memory range specified by the memtype/offset/len pair is
3385 * valid and lies entirely within the memtype specified. The global address of
3386 * the start of the range is returned in addr.
3389 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len,
3392 uint32_t em, addr_len, maddr;
3394 /* Memory can only be accessed in naturally aligned 4 byte units */
3395 if (off & 3 || len & 3 || len == 0)
3398 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
3399 switch (fwmtype_to_hwmtype(mtype)) {
3401 if (!(em & F_EDRAM0_ENABLE))
3403 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
3404 maddr = G_EDRAM0_BASE(addr_len) << 20;
3407 if (!(em & F_EDRAM1_ENABLE))
3409 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
3410 maddr = G_EDRAM1_BASE(addr_len) << 20;
3413 if (!(em & F_EXT_MEM_ENABLE))
3415 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
3416 maddr = G_EXT_MEM_BASE(addr_len) << 20;
3419 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
3421 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
3422 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
3428 *addr = maddr + off; /* global address */
3429 return (validate_mem_range(sc, *addr, len));
3433 fixup_devlog_params(struct adapter *sc)
3435 struct devlog_params *dparams = &sc->params.devlog;
3438 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
3439 dparams->size, &dparams->addr);
3445 update_nirq(struct intrs_and_queues *iaq, int nports)
3448 iaq->nirq = T4_EXTRA_INTR;
3449 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq);
3450 iaq->nirq += nports * iaq->nofldrxq;
3451 iaq->nirq += nports * (iaq->num_vis - 1) *
3452 max(iaq->nrxq_vi, iaq->nnmrxq_vi);
3453 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi;
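/*
 * In other words:
 *   nirq = T4_EXTRA_INTR
 *        + nports * max(nrxq, nnmrxq)                  (main VI NIC/netmap rx)
 *        + nports * nofldrxq                           (main VI offload rx)
 *        + nports * (num_vis - 1) * max(nrxq_vi, nnmrxq_vi)
 *        + nports * (num_vis - 1) * nofldrxq_vi
 * For example, a hypothetical 2-port adapter with nrxq = 8, nofldrxq = 2,
 * no netmap queues and a single VI per port needs T4_EXTRA_INTR + 20
 * vectors.
 */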
3457 * Adjust requirements to fit the number of interrupts available.
3460 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
3464 const int nports = sc->params.nports;
3469 bzero(iaq, sizeof(*iaq));
3470 iaq->intr_type = itype;
3471 iaq->num_vis = t4_num_vis;
3472 iaq->ntxq = t4_ntxq;
3473 iaq->ntxq_vi = t4_ntxq_vi;
3474 iaq->nrxq = t4_nrxq;
3475 iaq->nrxq_vi = t4_nrxq_vi;
3476 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
3477 if (is_offload(sc) || is_ethoffload(sc)) {
3478 iaq->nofldtxq = t4_nofldtxq;
3479 iaq->nofldtxq_vi = t4_nofldtxq_vi;
3483 if (is_offload(sc)) {
3484 iaq->nofldrxq = t4_nofldrxq;
3485 iaq->nofldrxq_vi = t4_nofldrxq_vi;
3489 if (t4_native_netmap & NN_MAIN_VI) {
3490 iaq->nnmtxq = t4_nnmtxq;
3491 iaq->nnmrxq = t4_nnmrxq;
3493 if (t4_native_netmap & NN_EXTRA_VI) {
3494 iaq->nnmtxq_vi = t4_nnmtxq_vi;
3495 iaq->nnmrxq_vi = t4_nnmrxq_vi;
3499 update_nirq(iaq, nports);
3500 if (iaq->nirq <= navail &&
3501 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3503 * This is the normal case -- there are enough interrupts for everything.
3510 * If extra VIs have been configured, try reducing their count and see if that works.
3513 while (iaq->num_vis > 1) {
3515 update_nirq(iaq, nports);
3516 if (iaq->nirq <= navail &&
3517 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3518 device_printf(sc->dev, "virtual interfaces per port "
3519 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, "
3520 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. "
3521 "itype %d, navail %u, nirq %d.\n",
3522 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq,
3523 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi,
3524 itype, navail, iaq->nirq);
3530 * Extra VIs will not be created. Log a message if they were requested.
3532 MPASS(iaq->num_vis == 1);
3533 iaq->ntxq_vi = iaq->nrxq_vi = 0;
3534 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
3535 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
3536 if (iaq->num_vis != t4_num_vis) {
3537 device_printf(sc->dev, "extra virtual interfaces disabled. "
3538 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, "
3539 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n",
3540 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi,
3541 iaq->nnmrxq_vi, itype, navail, iaq->nirq);
3545 * Keep reducing the number of NIC rx queues to the next lower power of
3546 * 2 (for even RSS distribution) and halving the TOE rx queues, and see if that works.
3550 if (iaq->nrxq > 1) {
3553 } while (!powerof2(iaq->nrxq));
3554 if (iaq->nnmrxq > iaq->nrxq)
3555 iaq->nnmrxq = iaq->nrxq;
3557 if (iaq->nofldrxq > 1)
3558 iaq->nofldrxq >>= 1;
3560 old_nirq = iaq->nirq;
3561 update_nirq(iaq, nports);
3562 if (iaq->nirq <= navail &&
3563 (itype != INTR_MSI || powerof2(iaq->nirq))) {
3564 device_printf(sc->dev, "running with reduced number of "
3565 "rx queues because of shortage of interrupts. "
3566 "nrxq=%u, nofldrxq=%u. "
3567 "itype %d, navail %u, nirq %d.\n", iaq->nrxq,
3568 iaq->nofldrxq, itype, navail, iaq->nirq);
3571 } while (old_nirq != iaq->nirq);
3573 /* One interrupt for everything. Ugh. */
3574 device_printf(sc->dev, "running with minimal number of queues. "
3575 "itype %d, navail %u.\n", itype, navail);
3579 if (iaq->nofldrxq > 0) {
3586 MPASS(iaq->num_vis > 0);
3587 if (iaq->num_vis > 1) {
3588 MPASS(iaq->nrxq_vi > 0);
3589 MPASS(iaq->ntxq_vi > 0);
3591 MPASS(iaq->nirq > 0);
3592 MPASS(iaq->nrxq > 0);
3593 MPASS(iaq->ntxq > 0);
3594 if (itype == INTR_MSI) {
3595 MPASS(powerof2(iaq->nirq));
3600 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq)
3602 int rc, itype, navail, nalloc;
3604 for (itype = INTR_MSIX; itype; itype >>= 1) {
3606 if ((itype & t4_intr_types) == 0)
3607 continue; /* not allowed */
3609 if (itype == INTR_MSIX)
3610 navail = pci_msix_count(sc->dev);
3611 else if (itype == INTR_MSI)
3612 navail = pci_msi_count(sc->dev);
3619 calculate_iaq(sc, iaq, itype, navail);
3622 if (itype == INTR_MSIX)
3623 rc = pci_alloc_msix(sc->dev, &nalloc);
3624 else if (itype == INTR_MSI)
3625 rc = pci_alloc_msi(sc->dev, &nalloc);
3627 if (rc == 0 && nalloc > 0) {
3628 if (nalloc == iaq->nirq)
3632 * Didn't get the number requested. Use whatever number
3633 * the kernel is willing to allocate.
3635 device_printf(sc->dev, "fewer vectors than requested, "
3636 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
3637 itype, iaq->nirq, nalloc);
3638 pci_release_msi(sc->dev);
3643 device_printf(sc->dev,
3644 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
3645 itype, rc, iaq->nirq, nalloc);
3648 device_printf(sc->dev,
3649 "failed to find a usable interrupt type. "
3650 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
3651 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
3656 #define FW_VERSION(chip) ( \
3657 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
3658 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
3659 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
3660 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
3661 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
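/*
 * For example, FW_VERSION(T4) expands to the bundled T4 firmware's
 * major/minor/micro/build numbers packed into one 32-bit word via the
 * V_FW_HDR_FW_VER_* field macros, and FW_INTFVER(T4, NIC) pastes together
 * T4FW_HDR_INTFVER_NIC.  These are the values the driver compares against
 * the version information read from the card.
 */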
3663 /* Just enough of fw_hdr to cover all version info. */
3669 __be32 tp_microcode_ver;
3674 __u8 intfver_iscsipdu;
3676 __u8 intfver_fcoepdu;
3679 /* Spot check a couple of fields. */
3680 CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver));
3681 CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic));
3682 CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe));
3692 .kld_name = "t4fw_cfg",
3693 .fw_mod_name = "t4fw",
3695 .chip = FW_HDR_CHIP_T4,
3696 .fw_ver = htobe32(FW_VERSION(T4)),
3697 .intfver_nic = FW_INTFVER(T4, NIC),
3698 .intfver_vnic = FW_INTFVER(T4, VNIC),
3699 .intfver_ofld = FW_INTFVER(T4, OFLD),
3700 .intfver_ri = FW_INTFVER(T4, RI),
3701 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
3702 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3703 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
3704 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3708 .kld_name = "t5fw_cfg",
3709 .fw_mod_name = "t5fw",
3711 .chip = FW_HDR_CHIP_T5,
3712 .fw_ver = htobe32(FW_VERSION(T5)),
3713 .intfver_nic = FW_INTFVER(T5, NIC),
3714 .intfver_vnic = FW_INTFVER(T5, VNIC),
3715 .intfver_ofld = FW_INTFVER(T5, OFLD),
3716 .intfver_ri = FW_INTFVER(T5, RI),
3717 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
3718 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3719 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
3720 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3724 .kld_name = "t6fw_cfg",
3725 .fw_mod_name = "t6fw",
3727 .chip = FW_HDR_CHIP_T6,
3728 .fw_ver = htobe32(FW_VERSION(T6)),
3729 .intfver_nic = FW_INTFVER(T6, NIC),
3730 .intfver_vnic = FW_INTFVER(T6, VNIC),
3731 .intfver_ofld = FW_INTFVER(T6, OFLD),
3732 .intfver_ri = FW_INTFVER(T6, RI),
3733 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3734 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3735 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3736 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3741 static struct fw_info *
3742 find_fw_info(int chip)
3746 for (i = 0; i < nitems(fw_info); i++) {
3747 if (fw_info[i].chip == chip)
3748 return (&fw_info[i]);
3754 * Is the given firmware API compatible with the one the driver was compiled with?
3758 fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2)
3761 /* short circuit if it's the exact same firmware version */
3762 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3766 * XXX: Is this too conservative? Perhaps I should limit this to the
3767 * features that are supported in the driver.
3769 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3770 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3771 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3772 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3780 load_fw_module(struct adapter *sc, const struct firmware **dcfg,
3781 const struct firmware **fw)
3783 struct fw_info *fw_info;
3789 fw_info = find_fw_info(chip_id(sc));
3790 if (fw_info == NULL) {
3791 device_printf(sc->dev,
3792 "unable to look up firmware information for chip %d.\n",
3797 *dcfg = firmware_get(fw_info->kld_name);
3798 if (*dcfg != NULL) {
3800 *fw = firmware_get(fw_info->fw_mod_name);
3808 unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
3809 const struct firmware *fw)
3813 firmware_put(fw, FIRMWARE_UNLOAD);
3815 firmware_put(dcfg, FIRMWARE_UNLOAD);
3820 * 0 means no firmware install attempted.
3821 * ERESTART means a firmware install was attempted and was successful.
3822 * +ve errno means a firmware install was attempted but failed.
3825 install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
3826 const struct fw_h *drv_fw, const char *reason, int *already)
3828 const struct firmware *cfg, *fw;
3829 const uint32_t c = be32toh(card_fw->fw_ver);
3832 struct fw_h bundled_fw;
3833 bool load_attempted;
3836 load_attempted = false;
3837 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;
3839 memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
3840 if (t4_fw_install < 0) {
3841 rc = load_fw_module(sc, &cfg, &fw);
3842 if (rc != 0 || fw == NULL) {
3843 device_printf(sc->dev,
3844 "failed to load firmware module: %d. cfg %p, fw %p;"
3845 " will use compiled-in firmware version for"
3846 "hw.cxgbe.fw_install checks.\n",
3849 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
3851 load_attempted = true;
3853 d = be32toh(bundled_fw.fw_ver);
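/*
 * The checks below appear to implement the hw.cxgbe.fw_install policy
 * against the "bundled" version chosen above: 0 never installs, 1 installs
 * only if the firmware on the card is missing, incompatible, or older than
 * the bundled version, and 2 also installs whenever the two versions
 * merely differ.  A negative setting, handled above, makes the comparison
 * against the firmware KLD instead of the compiled-in header.
 */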
3858 if ((sc->flags & FW_OK) == 0) {
3860 if (c == 0xffffffff) {
3869 if (!fw_compatible(card_fw, &bundled_fw)) {
3870 reason = "incompatible or unusable";
3875 reason = "older than the version bundled with this driver";
3879 if (fw_install == 2 && d != c) {
3880 reason = "different than the version bundled with this driver";
3884 /* No reason to do anything to the firmware already on the card. */
3893 if (fw_install == 0) {
3894 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3895 "but the driver is prohibited from installing a firmware "
3897 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3898 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3904 * We'll attempt to install a firmware. Load the module first (if it
3905 * hasn't been loaded already).
3907 if (!load_attempted) {
3908 rc = load_fw_module(sc, &cfg, &fw);
3909 if (rc != 0 || fw == NULL) {
3910 device_printf(sc->dev,
3911 "failed to load firmware module: %d. cfg %p, fw %p\n",
3917 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3918 "but the driver cannot take corrective action because it "
3919 "is unable to load the firmware module.\n",
3920 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3921 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
3922 rc = sc->flags & FW_OK ? 0 : ENOENT;
3925 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver);
3927 MPASS(t4_fw_install > 0);
3928 device_printf(sc->dev,
3929 "firmware in KLD (%u.%u.%u.%u) is not what the driver was "
3930 "expecting (%u.%u.%u.%u) and will not be used.\n",
3931 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3932 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k),
3933 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3934 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3935 rc = sc->flags & FW_OK ? 0 : EINVAL;
3939 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
3940 "installing firmware %u.%u.%u.%u on card.\n",
3941 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3942 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3943 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3944 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d));
3946 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3948 device_printf(sc->dev, "failed to install firmware: %d\n", rc);
3950 /* Installed successfully, update the cached header too. */
3952 memcpy(card_fw, fw->data, sizeof(*card_fw));
3955 unload_fw_module(sc, cfg, fw);
3961 * Establish contact with the firmware and attempt to become the master driver.
3963 * A firmware will be installed to the card if needed (if the driver is allowed to do so).
3967 contact_firmware(struct adapter *sc)
3969 int rc, already = 0;
3970 enum dev_state state;
3971 struct fw_info *fw_info;
3972 struct fw_hdr *card_fw; /* fw on the card */
3973 const struct fw_h *drv_fw;
3975 fw_info = find_fw_info(chip_id(sc));
3976 if (fw_info == NULL) {
3977 device_printf(sc->dev,
3978 "unable to look up firmware information for chip %d.\n",
3982 drv_fw = &fw_info->fw_h;
3984 /* Read the header of the firmware on the card */
3985 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3987 rc = -t4_get_fw_hdr(sc, card_fw);
3989 device_printf(sc->dev,
3990 "unable to read firmware header from card's flash: %d\n",
3995 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL,
4002 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
4003 if (rc < 0 || state == DEV_STATE_ERR) {
4005 device_printf(sc->dev,
4006 "failed to connect to the firmware: %d, %d. "
4007 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4009 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4010 "not responding properly to HELLO", &already) == ERESTART)
4015 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT);
4016 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */
4019 sc->flags |= MASTER_PF;
4020 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw,
4026 } else if (state == DEV_STATE_UNINIT) {
4028 * We didn't get to be the master so we definitely won't be
4029 * configuring the chip. It's a bug if someone else hasn't
4030 * configured it already.
4032 device_printf(sc->dev, "couldn't be master(%d), "
4033 "device not already initialized either(%d). "
4034 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4039 * Some other PF is the master and has configured the chip.
4040 * This is allowed but untested.
4042 device_printf(sc->dev, "PF%d is master, device state %d. "
4043 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW));
4044 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc);
4049 if (rc != 0 && sc->flags & FW_OK) {
4050 t4_fw_bye(sc, sc->mbox);
4051 sc->flags &= ~FW_OK;
4053 free(card_fw, M_CXGBE);
4058 copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
4059 uint32_t mtype, uint32_t moff)
4061 struct fw_info *fw_info;
4062 const struct firmware *dcfg, *rcfg = NULL;
4063 const uint32_t *cfdata;
4064 uint32_t cflen, addr;
4067 load_fw_module(sc, &dcfg, NULL);
4069 /* Card specific interpretation of "default". */
4070 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4071 if (pci_get_device(sc->dev) == 0x440a)
4072 snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF);
4074 snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF);
4077 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
4079 device_printf(sc->dev,
4080 "KLD with default config is not available.\n");
4084 cfdata = dcfg->data;
4085 cflen = dcfg->datasize & ~3;
4089 fw_info = find_fw_info(chip_id(sc));
4090 if (fw_info == NULL) {
4091 device_printf(sc->dev,
4092 "unable to look up firmware information for chip %d.\n",
4097 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file);
4099 rcfg = firmware_get(s);
4101 device_printf(sc->dev,
4102 "unable to load module \"%s\" for configuration "
4103 "profile \"%s\".\n", s, cfg_file);
4107 cfdata = rcfg->data;
4108 cflen = rcfg->datasize & ~3;
4111 if (cflen > FLASH_CFG_MAX_SIZE) {
4112 device_printf(sc->dev,
4113 "config file too long (%d, max allowed is %d).\n",
4114 cflen, FLASH_CFG_MAX_SIZE);
4119 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
4121 device_printf(sc->dev,
4122 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n",
4123 __func__, mtype, moff, cflen, rc);
4127 write_via_memwin(sc, 2, addr, cfdata, cflen);
4130 firmware_put(rcfg, FIRMWARE_UNLOAD);
4131 unload_fw_module(sc, dcfg, NULL);
4135 struct caps_allowed {
4138 uint16_t switchcaps;
4142 uint16_t cryptocaps;
4147 #define FW_PARAM_DEV(param) \
4148 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4149 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4150 #define FW_PARAM_PFVF(param) \
4151 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4152 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
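/*
 * For example, FW_PARAM_DEV(CF) builds the 32-bit identifier for the
 * device-wide "CF" (config file) parameter and FW_PARAM_PFVF(L2T_START)
 * the per-PF/VF L2T start index; both kinds are handed to
 * t4_query_params()/t4_set_params() throughout this file.
 */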
4155 * Provide a configuration profile to the firmware and have it initialize the
4156 * chip accordingly. This may involve uploading a configuration file to the
4160 apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
4161 const struct caps_allowed *caps_allowed)
4164 struct fw_caps_config_cmd caps;
4165 uint32_t mtype, moff, finicsum, cfcsum, param, val;
4167 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
4169 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
4173 bzero(&caps, sizeof(caps));
4174 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4175 F_FW_CMD_REQUEST | F_FW_CMD_READ);
4176 if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
4179 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4180 } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
4181 mtype = FW_MEMTYPE_FLASH;
4182 moff = t4_flash_cfg_addr(sc);
4183 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4184 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4185 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4189 * Ask the firmware where it wants us to upload the config file.
4191 param = FW_PARAM_DEV(CF);
4192 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4194 /* No support for config file? Shouldn't happen. */
4195 device_printf(sc->dev,
4196 "failed to query config file location: %d.\n", rc);
4199 mtype = G_FW_PARAMS_PARAM_Y(val);
4200 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
4201 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
4202 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4203 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
4206 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
4208 device_printf(sc->dev,
4209 "failed to upload config file to card: %d.\n", rc);
4213 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4215 device_printf(sc->dev, "failed to pre-process config file: %d "
4216 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
4220 finicsum = be32toh(caps.finicsum);
4221 cfcsum = be32toh(caps.cfcsum); /* actual */
4222 if (finicsum != cfcsum) {
4223 device_printf(sc->dev,
4224 "WARNING: config file checksum mismatch: %08x %08x\n",
4227 sc->cfcsum = cfcsum;
4228 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);
4231 * Let the firmware know what features will (not) be used so it can tune
4232 * things accordingly.
4234 #define LIMIT_CAPS(x) do { \
4235 caps.x##caps &= htobe16(caps_allowed->x##caps); \
4247 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4249 * TOE and hashfilters are mutually exclusive. It is a config
4250 * file or firmware bug if both are reported as available. Try
4251 * to cope with the situation in non-debug builds by disabling TOE.
4254 MPASS(caps.toecaps == 0);
4261 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4262 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4263 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4264 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
4266 device_printf(sc->dev,
4267 "failed to process config file: %d.\n", rc);
4271 t4_tweak_chip_settings(sc);
4272 set_params__pre_init(sc);
4274 /* get basic stuff going */
4275 rc = -t4_fw_initialize(sc, sc->mbox);
4277 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
4285 * Partition chip resources for use between various PFs, VFs, etc.
4288 partition_resources(struct adapter *sc)
4290 char cfg_file[sizeof(t4_cfg_file)];
4291 struct caps_allowed caps_allowed;
4295 /* Only the master driver gets to configure the chip resources. */
4296 MPASS(sc->flags & MASTER_PF);
4298 #define COPY_CAPS(x) do { \
4299 caps_allowed.x##caps = t4_##x##caps_allowed; \
4301 bzero(&caps_allowed, sizeof(caps_allowed));
4311 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
4312 snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
4314 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
4315 if (rc != 0 && fallback) {
4316 device_printf(sc->dev,
4317 "failed (%d) to configure card with \"%s\" profile, "
4318 "will fall back to a basic configuration and retry.\n",
4320 snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
4321 bzero(&caps_allowed, sizeof(caps_allowed));
4323 caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
4332 * Retrieve parameters that are needed (or nice to have) very early.
4335 get_params__pre_init(struct adapter *sc)
4338 uint32_t param[2], val[2];
4340 t4_get_version_info(sc);
4342 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
4343 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
4344 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
4345 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
4346 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
4348 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
4349 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
4350 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
4351 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
4352 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
4354 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
4355 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
4356 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
4357 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
4358 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
4360 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
4361 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
4362 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
4363 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
4364 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
4366 param[0] = FW_PARAM_DEV(PORTVEC);
4367 param[1] = FW_PARAM_DEV(CCLK);
4368 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4370 device_printf(sc->dev,
4371 "failed to query parameters (pre_init): %d.\n", rc);
4375 sc->params.portvec = val[0];
4376 sc->params.nports = bitcount32(val[0]);
4377 sc->params.vpd.cclk = val[1];
4379 /* Read device log parameters. */
4380 rc = -t4_init_devlog_params(sc, 1);
4382 fixup_devlog_params(sc);
4384 device_printf(sc->dev,
4385 "failed to get devlog parameters: %d.\n", rc);
4386 rc = 0; /* devlog isn't critical for device operation */
4393 * Any params that need to be set before FW_INITIALIZE.
4396 set_params__pre_init(struct adapter *sc)
4399 uint32_t param, val;
4401 if (chip_id(sc) >= CHELSIO_T6) {
4402 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
4404 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4405 /* firmwares < 1.20.1.0 do not have this param. */
4406 if (rc == FW_EINVAL &&
4407 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
4411 device_printf(sc->dev,
4412 "failed to enable high priority filters :%d.\n",
4417 /* Enable opaque VIIDs with firmwares that support it. */
4418 param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
4420 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4421 if (rc == 0 && val == 1)
4422 sc->params.viid_smt_extn_support = true;
4424 sc->params.viid_smt_extn_support = false;
4430 * Retrieve various parameters that are of interest to the driver. The device
4431 * has been initialized by the firmware at this point.
4434 get_params__post_init(struct adapter *sc)
4437 uint32_t param[7], val[7];
4438 struct fw_caps_config_cmd caps;
4440 param[0] = FW_PARAM_PFVF(IQFLINT_START);
4441 param[1] = FW_PARAM_PFVF(EQ_START);
4442 param[2] = FW_PARAM_PFVF(FILTER_START);
4443 param[3] = FW_PARAM_PFVF(FILTER_END);
4444 param[4] = FW_PARAM_PFVF(L2T_START);
4445 param[5] = FW_PARAM_PFVF(L2T_END);
4446 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4447 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4448 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
4449 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
4451 device_printf(sc->dev,
4452 "failed to query parameters (post_init): %d.\n", rc);
4456 sc->sge.iq_start = val[0];
4457 sc->sge.eq_start = val[1];
4458 if ((int)val[3] > (int)val[2]) {
4459 sc->tids.ftid_base = val[2];
4460 sc->tids.ftid_end = val[3];
4461 sc->tids.nftids = val[3] - val[2] + 1;
4463 sc->vres.l2t.start = val[4];
4464 sc->vres.l2t.size = val[5] - val[4] + 1;
4465 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
4466 ("%s: L2 table size (%u) larger than expected (%u)",
4467 __func__, sc->vres.l2t.size, L2T_SIZE));
4468 sc->params.core_vdd = val[6];
4470 param[0] = FW_PARAM_PFVF(IQFLINT_END);
4471 param[1] = FW_PARAM_PFVF(EQ_END);
4472 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4474 device_printf(sc->dev,
4475 "failed to query parameters (post_init2): %d.\n", rc);
4478 MPASS((int)val[0] >= sc->sge.iq_start);
4479 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
4480 MPASS((int)val[1] >= sc->sge.eq_start);
4481 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;
4483 if (chip_id(sc) >= CHELSIO_T6) {
4485 sc->tids.tid_base = t4_read_reg(sc,
4486 A_LE_DB_ACTIVE_TABLE_START_INDEX);
4488 param[0] = FW_PARAM_PFVF(HPFILTER_START);
4489 param[1] = FW_PARAM_PFVF(HPFILTER_END);
4490 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4492 device_printf(sc->dev,
4493 "failed to query hpfilter parameters: %d.\n", rc);
4496 if ((int)val[1] > (int)val[0]) {
4497 sc->tids.hpftid_base = val[0];
4498 sc->tids.hpftid_end = val[1];
4499 sc->tids.nhpftids = val[1] - val[0] + 1;
4502 * These should go off if the layout changes and the
4503 * driver needs to catch up.
4505 MPASS(sc->tids.hpftid_base == 0);
4506 MPASS(sc->tids.tid_base == sc->tids.nhpftids);
4509 param[0] = FW_PARAM_PFVF(RAWF_START);
4510 param[1] = FW_PARAM_PFVF(RAWF_END);
4511 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4513 device_printf(sc->dev,
4514 "failed to query rawf parameters: %d.\n", rc);
4517 if ((int)val[1] > (int)val[0]) {
4518 sc->rawf_base = val[0];
4519 sc->nrawf = val[1] - val[0] + 1;
4524 * MPSBGMAP is queried separately because only recent firmwares support
4525 * it as a parameter and we don't want the compound query above to fail
4526 * on older firmwares.
4528 param[0] = FW_PARAM_DEV(MPSBGMAP);
4530 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4532 sc->params.mps_bg_map = val[0];
4534 sc->params.mps_bg_map = 0;
4537 * Determine whether the firmware supports the filter2 work request.
4538 * This is queried separately for the same reason as MPSBGMAP above.
4540 param[0] = FW_PARAM_DEV(FILTER2_WR);
4542 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4543 if (rc == 0)
4544 sc->params.filter2_wr_support = val[0] != 0;
4545 else
4546 sc->params.filter2_wr_support = 0;
4549 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
4550 * This is queried separately for the same reason as other params above.
4552 param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4554 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4555 if (rc == 0)
4556 sc->params.ulptx_memwrite_dsgl = val[0] != 0;
4557 else
4558 sc->params.ulptx_memwrite_dsgl = false;
4560 /* FW_RI_FR_NSMR_TPTE_WR support */
4561 param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4562 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4563 if (rc == 0)
4564 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
4565 else
4566 sc->params.fr_nsmr_tpte_wr_support = false;
4568 param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
4569 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4570 if (rc == 0)
4571 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0];
4572 else
4573 sc->params.max_pkts_per_eth_tx_pkts_wr = 15;
4575 /* get capabilities */
4576 bzero(&caps, sizeof(caps));
4577 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4578 F_FW_CMD_REQUEST | F_FW_CMD_READ);
4579 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
4580 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
4582 device_printf(sc->dev,
4583 "failed to get card capabilities: %d.\n", rc);
4587 #define READ_CAPS(x) do { \
4588 sc->x = htobe16(caps.x); \
4589 } while (0)
4590 READ_CAPS(nbmcaps);
4591 READ_CAPS(linkcaps);
4592 READ_CAPS(switchcaps);
4593 READ_CAPS(niccaps);
4594 READ_CAPS(toecaps);
4595 READ_CAPS(rdmacaps);
4596 READ_CAPS(cryptocaps);
4597 READ_CAPS(iscsicaps);
4598 READ_CAPS(fcoecaps);
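/*
 * The capability words in the mailbox reply are big-endian. For a 16-bit
 * field htobe16() is the same byte swap as be16toh(), so READ_CAPS(linkcaps)
 * is effectively sc->linkcaps = be16toh(caps.linkcaps).
 */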
4600 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
4601 MPASS(chip_id(sc) > CHELSIO_T4);
4602 MPASS(sc->toecaps == 0);
4605 param[0] = FW_PARAM_DEV(NTID);
4606 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
4608 device_printf(sc->dev,
4609 "failed to query HASHFILTER parameters: %d.\n", rc);
4612 sc->tids.ntids = val[0];
4613 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
4614 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4615 sc->tids.ntids -= sc->tids.nhpftids;
4617 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4618 sc->params.hash_filter = 1;
4620 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
4621 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
4622 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
4623 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4624 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
4626 device_printf(sc->dev,
4627 "failed to query NIC parameters: %d.\n", rc);
4630 if ((int)val[1] > (int)val[0]) {
4631 sc->tids.etid_base = val[0];
4632 sc->tids.etid_end = val[1];
4633 sc->tids.netids = val[1] - val[0] + 1;
4634 sc->params.eo_wr_cred = val[2];
4635 sc->params.ethoffload = 1;
4639 /* query offload-related parameters */
4640 param[0] = FW_PARAM_DEV(NTID);
4641 param[1] = FW_PARAM_PFVF(SERVER_START);
4642 param[2] = FW_PARAM_PFVF(SERVER_END);
4643 param[3] = FW_PARAM_PFVF(TDDP_START);
4644 param[4] = FW_PARAM_PFVF(TDDP_END);
4645 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4646 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4648 device_printf(sc->dev,
4649 "failed to query TOE parameters: %d.\n", rc);
4652 sc->tids.ntids = val[0];
4653 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
4654 MPASS(sc->tids.ntids >= sc->tids.nhpftids);
4655 sc->tids.ntids -= sc->tids.nhpftids;
4657 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
4658 if ((int)val[2] > (int)val[1]) {
4659 sc->tids.stid_base = val[1];
4660 sc->tids.nstids = val[2] - val[1] + 1;
4662 sc->vres.ddp.start = val[3];
4663 sc->vres.ddp.size = val[4] - val[3] + 1;
4664 sc->params.ofldq_wr_cred = val[5];
4665 sc->params.offload = 1;
4668 * The firmware attempts memfree TOE configuration for -SO cards
4669 * and will report toecaps=0 if it runs out of resources (this
4670 * depends on the config file). It may not report 0 for other
4671 * capabilities dependent on the TOE in this case. Set them to
4672 * 0 here so that the driver doesn't bother tracking resources
4673 * that will never be used.
4679 param[0] = FW_PARAM_PFVF(STAG_START);
4680 param[1] = FW_PARAM_PFVF(STAG_END);
4681 param[2] = FW_PARAM_PFVF(RQ_START);
4682 param[3] = FW_PARAM_PFVF(RQ_END);
4683 param[4] = FW_PARAM_PFVF(PBL_START);
4684 param[5] = FW_PARAM_PFVF(PBL_END);
4685 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4687 device_printf(sc->dev,
4688 "failed to query RDMA parameters(1): %d.\n", rc);
4691 sc->vres.stag.start = val[0];
4692 sc->vres.stag.size = val[1] - val[0] + 1;
4693 sc->vres.rq.start = val[2];
4694 sc->vres.rq.size = val[3] - val[2] + 1;
4695 sc->vres.pbl.start = val[4];
4696 sc->vres.pbl.size = val[5] - val[4] + 1;
4698 param[0] = FW_PARAM_PFVF(SQRQ_START);
4699 param[1] = FW_PARAM_PFVF(SQRQ_END);
4700 param[2] = FW_PARAM_PFVF(CQ_START);
4701 param[3] = FW_PARAM_PFVF(CQ_END);
4702 param[4] = FW_PARAM_PFVF(OCQ_START);
4703 param[5] = FW_PARAM_PFVF(OCQ_END);
4704 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
4706 device_printf(sc->dev,
4707 "failed to query RDMA parameters(2): %d.\n", rc);
4710 sc->vres.qp.start = val[0];
4711 sc->vres.qp.size = val[1] - val[0] + 1;
4712 sc->vres.cq.start = val[2];
4713 sc->vres.cq.size = val[3] - val[2] + 1;
4714 sc->vres.ocq.start = val[4];
4715 sc->vres.ocq.size = val[5] - val[4] + 1;
4717 param[0] = FW_PARAM_PFVF(SRQ_START);
4718 param[1] = FW_PARAM_PFVF(SRQ_END);
4719 param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
4720 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4721 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
4723 device_printf(sc->dev,
4724 "failed to query RDMA parameters(3): %d.\n", rc);
4727 sc->vres.srq.start = val[0];
4728 sc->vres.srq.size = val[1] - val[0] + 1;
4729 sc->params.max_ordird_qp = val[2];
4730 sc->params.max_ird_adapter = val[3];
4732 if (sc->iscsicaps) {
4733 param[0] = FW_PARAM_PFVF(ISCSI_START);
4734 param[1] = FW_PARAM_PFVF(ISCSI_END);
4735 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4737 device_printf(sc->dev,
4738 "failed to query iSCSI parameters: %d.\n", rc);
4741 sc->vres.iscsi.start = val[0];
4742 sc->vres.iscsi.size = val[1] - val[0] + 1;
4744 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
4745 param[0] = FW_PARAM_PFVF(TLS_START);
4746 param[1] = FW_PARAM_PFVF(TLS_END);
4747 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
4749 device_printf(sc->dev,
4750 "failed to query TLS parameters: %d.\n", rc);
4753 sc->vres.key.start = val[0];
4754 sc->vres.key.size = val[1] - val[0] + 1;
4757 t4_init_sge_params(sc);
4760 * We've got the params we wanted to query via the firmware. Now grab
4761 * some others directly from the chip.
4763 rc = t4_read_chip_settings(sc);
4770 ktls_tick(void *arg)
4777 tstamp = tcp_ts_getticks();
4778 t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
4779 t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
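/*
 * The two writes above store the 64-bit value (uint64_t)tstamp << 31 in the
 * HI/LO register pair: HI gets bits 63:32 (tstamp >> 1) and LO gets bits
 * 31:0 (tstamp << 31, which keeps only bit 0 of tstamp).
 */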
4781 callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
4785 t4_enable_kern_tls(struct adapter *sc)
4791 t4_set_reg_field(sc, A_TP_PARA_REG6, m, v);
4793 m = F_CPL_FLAGS_UPDATE_EN | F_SEQ_UPDATE_EN;
4794 v = F_CPL_FLAGS_UPDATE_EN | F_SEQ_UPDATE_EN;
4795 t4_set_reg_field(sc, A_ULP_TX_CONFIG, m, v);
4799 t4_set_reg_field(sc, A_TP_IN_CONFIG, m, v);
4801 m = F_LOOKUPEVERYPKT;
4803 t4_set_reg_field(sc, A_TP_INGRESS_CONFIG, m, v);
4805 m = F_TXDEFERENABLE | F_DISABLEWINDOWPSH | F_DISABLESEPPSHFLAG;
4806 v = F_DISABLEWINDOWPSH;
4807 t4_set_reg_field(sc, A_TP_PC_CONFIG, m, v);
4809 m = V_TIMESTAMPRESOLUTION(M_TIMESTAMPRESOLUTION);
4810 v = V_TIMESTAMPRESOLUTION(0x1f);
4811 t4_set_reg_field(sc, A_TP_TIMER_RESOLUTION, m, v);
4813 sc->flags |= KERN_TLS_OK;
4815 sc->tlst.inline_keys = t4_tls_inline_keys;
4816 sc->tlst.combo_wrs = t4_tls_combo_wrs;
4821 set_params__post_init(struct adapter *sc)
4823 uint32_t mask, param, val;
4828 /* ask for encapsulated CPLs */
4829 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4830 val = 1;
4831 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4833 /* Enable 32b port caps if the firmware supports it. */
4834 param = FW_PARAM_PFVF(PORT_CAPS32);
4835 val = 1;
4836 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
4837 sc->params.port_caps32 = 1;
4839 /* Let filter + maskhash steer to a part of the VI's RSS region. */
4840 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
4841 t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
4842 V_MASKFILTER(val - 1));
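/*
 * Worked example of the arithmetic above: a MASKSIZE of 6 gives
 * val = 1 << 5 = 0x20, so the MASKFILTER field is programmed with
 * val - 1 = 0x1f.
 */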
4844 mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER |
4845 F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN |
4846 F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
4847 F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM;
4848 val = 0;
4849 if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) {
4850 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE,
4851 F_ATTACKFILTERENABLE);
4852 val |= F_DROPERRORATTACK;
4854 if (t4_drop_ip_fragments != 0) {
4855 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP,
4856 F_FRAGMENTDROP);
4857 val |= F_DROPERRORFRAG;
4859 if (t4_drop_pkts_with_l2_errors != 0)
4860 val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN;
4861 if (t4_drop_pkts_with_l3_errors != 0) {
4862 val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN |
4863 F_DROPERRORCSUMIP;
4865 if (t4_drop_pkts_with_l4_errors != 0) {
4866 val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
4867 F_DROPERRORTCPOPT | F_DROPERRORCSUM;
4869 t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val);
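/*
 * t4_set_reg_field(sc, reg, m, v) is a read-modify-write helper: the bits
 * set in 'm' are cleared in the register and replaced with the corresponding
 * bits of 'v'. Any DROPERROR* bit left out of val above is therefore turned
 * off explicitly.
 */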
4873 * Override the TOE timers with user-provided tunables. This is not the
4874 * recommended way to change the timers (the firmware config file is) so
4875 * these tunables are not documented.
4877 * All the timer tunables are in microseconds.
4879 if (t4_toe_keepalive_idle != 0) {
4880 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle);
4881 v &= M_KEEPALIVEIDLE;
4882 t4_set_reg_field(sc, A_TP_KEEP_IDLE,
4883 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v));
4885 if (t4_toe_keepalive_interval != 0) {
4886 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval);
4887 v &= M_KEEPALIVEINTVL;
4888 t4_set_reg_field(sc, A_TP_KEEP_INTVL,
4889 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v));
4891 if (t4_toe_keepalive_count != 0) {
4892 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2;
4893 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4894 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) |
4895 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2),
4896 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v));
4898 if (t4_toe_rexmt_min != 0) {
4899 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min);
4900 v &= M_RXTMIN;
4901 t4_set_reg_field(sc, A_TP_RXT_MIN,
4902 V_RXTMIN(M_RXTMIN), V_RXTMIN(v));
4904 if (t4_toe_rexmt_max != 0) {
4905 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max);
4906 v &= M_RXTMAX;
4907 t4_set_reg_field(sc, A_TP_RXT_MAX,
4908 V_RXTMAX(M_RXTMAX), V_RXTMAX(v));
4910 if (t4_toe_rexmt_count != 0) {
4911 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2;
4912 t4_set_reg_field(sc, A_TP_SHIFT_CNT,
4913 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) |
4914 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2),
4915 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v));
4917 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) {
4918 if (t4_toe_rexmt_backoff[i] != -1) {
4919 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0;
4920 shift = (i & 3) << 3;
4921 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3),
4922 M_TIMERBACKOFFINDEX0 << shift, v << shift);
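/*
 * The 16 backoff entries are packed four per 32-bit register, 8 bits each:
 * (i & ~3) selects the register offset and (i & 3) << 3 the byte lane within
 * it. E.g. i = 5 lands in A_TP_TCP_BACKOFF_REG0 + 4 at shift 8.
 */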
4928 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
4929 sc->toecaps & FW_CAPS_CONFIG_TOE) {
4930 if (t4_kern_tls != 0)
4931 t4_enable_kern_tls(sc);
4934 * Limit TOE connections to 2 reassembly
4935 * "islands". This is required for TOE TLS
4936 * connections to downgrade to plain TOE
4937 * connections if an unsupported TLS version
4938 * or ciphersuite is used.
4940 t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG,
4941 V_PASSMODE(M_PASSMODE), V_PASSMODE(2));
4948 #undef FW_PARAM_PFVF
4952 t4_set_desc(struct adapter *sc)
4955 struct adapter_params *p = &sc->params;
4957 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
4959 device_set_desc_copy(sc->dev, buf);
4963 ifmedia_add4(struct ifmedia *ifm, int m)
4966 ifmedia_add(ifm, m, 0, NULL);
4967 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
4968 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
4969 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
4973 * This is the selected media, which is not quite the same as the active media.
4974 * The media line in ifconfig is "media: Ethernet selected (active)" if selected
4975 * and active are not the same, and "media: Ethernet selected" otherwise.
4978 set_current_media(struct port_info *pi)
4980 struct link_config *lc;
4981 struct ifmedia *ifm;
4985 PORT_LOCK_ASSERT_OWNED(pi);
4987 /* Leave current media alone if it's already set to IFM_NONE. */
4989 if (ifm->ifm_cur != NULL &&
4990 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
4994 if (lc->requested_aneg != AUTONEG_DISABLE &&
4995 lc->pcaps & FW_PORT_CAP32_ANEG) {
4996 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
4999 mword = IFM_ETHER | IFM_FDX;
5000 if (lc->requested_fc & PAUSE_TX)
5001 mword |= IFM_ETH_TXPAUSE;
5002 if (lc->requested_fc & PAUSE_RX)
5003 mword |= IFM_ETH_RXPAUSE;
5004 if (lc->requested_speed == 0)
5005 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */
5006 else
5007 speed = lc->requested_speed;
5008 mword |= port_mword(pi, speed_to_fwcap(speed));
5009 ifmedia_set(ifm, mword);
5013 * Returns true if the ifmedia list for the port cannot change.
5016 fixed_ifmedia(struct port_info *pi)
5019 return (pi->port_type == FW_PORT_TYPE_BT_SGMII ||
5020 pi->port_type == FW_PORT_TYPE_BT_XFI ||
5021 pi->port_type == FW_PORT_TYPE_BT_XAUI ||
5022 pi->port_type == FW_PORT_TYPE_KX4 ||
5023 pi->port_type == FW_PORT_TYPE_KX ||
5024 pi->port_type == FW_PORT_TYPE_KR ||
5025 pi->port_type == FW_PORT_TYPE_BP_AP ||
5026 pi->port_type == FW_PORT_TYPE_BP4_AP ||
5027 pi->port_type == FW_PORT_TYPE_BP40_BA ||
5028 pi->port_type == FW_PORT_TYPE_KR4_100G ||
5029 pi->port_type == FW_PORT_TYPE_KR_SFP28 ||
5030 pi->port_type == FW_PORT_TYPE_KR_XLAUI);
5034 build_medialist(struct port_info *pi)
5037 int unknown, mword, bit;
5038 struct link_config *lc;
5039 struct ifmedia *ifm;
5041 PORT_LOCK_ASSERT_OWNED(pi);
5043 if (pi->flags & FIXED_IFMEDIA)
5047 * Rebuild the ifmedia list.
5050 ifmedia_removeall(ifm);
5052 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */
5053 if (__predict_false(ss == 0)) { /* not supposed to happen. */
5056 MPASS(LIST_EMPTY(&ifm->ifm_list));
5057 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
5058 ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
5063 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) {
5064 speed = 1 << bit;
5065 MPASS(speed & M_FW_PORT_CAP32_SPEED);
5067 mword = port_mword(pi, speed);
5068 if (mword == IFM_NONE) {
5070 } else if (mword == IFM_UNKNOWN)
5073 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
5076 if (unknown > 0) /* Add one unknown for all unknown media types. */
5077 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
5078 if (lc->pcaps & FW_PORT_CAP32_ANEG)
5079 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
5081 set_current_media(pi);
5085 * Initialize the requested fields in the link config based on driver tunables.
5088 init_link_config(struct port_info *pi)
5090 struct link_config *lc = &pi->link_cfg;
5092 PORT_LOCK_ASSERT_OWNED(pi);
5094 lc->requested_speed = 0;
5096 if (t4_autoneg == 0)
5097 lc->requested_aneg = AUTONEG_DISABLE;
5098 else if (t4_autoneg == 1)
5099 lc->requested_aneg = AUTONEG_ENABLE;
5100 else
5101 lc->requested_aneg = AUTONEG_AUTO;
5103 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX |
5106 if (t4_fec & FEC_AUTO)
5107 lc->requested_fec = FEC_AUTO;
5108 else if (t4_fec == 0)
5109 lc->requested_fec = FEC_NONE;
5110 else {
5111 /* -1 is handled by the FEC_AUTO block above and not here. */
5112 lc->requested_fec = t4_fec &
5113 (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE);
5114 if (lc->requested_fec == 0)
5115 lc->requested_fec = FEC_AUTO;
5120 * Makes sure that all requested settings comply with what's supported by the
5121 * port. Returns the number of settings that were invalid and had to be fixed.
5124 fixup_link_config(struct port_info *pi)
5127 struct link_config *lc = &pi->link_cfg;
5130 PORT_LOCK_ASSERT_OWNED(pi);
5132 /* Speed (when not autonegotiating) */
5133 if (lc->requested_speed != 0) {
5134 fwspeed = speed_to_fwcap(lc->requested_speed);
5135 if ((fwspeed & lc->pcaps) == 0) {
5137 lc->requested_speed = 0;
5141 /* Link autonegotiation */
5142 MPASS(lc->requested_aneg == AUTONEG_ENABLE ||
5143 lc->requested_aneg == AUTONEG_DISABLE ||
5144 lc->requested_aneg == AUTONEG_AUTO);
5145 if (lc->requested_aneg == AUTONEG_ENABLE &&
5146 !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
5148 lc->requested_aneg = AUTONEG_AUTO;
5152 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0);
5153 if (lc->requested_fc & PAUSE_TX &&
5154 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) {
5156 lc->requested_fc &= ~PAUSE_TX;
5158 if (lc->requested_fc & PAUSE_RX &&
5159 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) {
5161 lc->requested_fc &= ~PAUSE_RX;
5163 if (!(lc->requested_fc & PAUSE_AUTONEG) &&
5164 !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) {
5166 lc->requested_fc |= PAUSE_AUTONEG;
5170 if ((lc->requested_fec & FEC_RS &&
5171 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) ||
5172 (lc->requested_fec & FEC_BASER_RS &&
5173 !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) {
5175 lc->requested_fec = FEC_AUTO;
5182 * Apply the requested L1 settings, which are expected to be valid, to the
5183 * hardware.
5186 apply_link_config(struct port_info *pi)
5188 struct adapter *sc = pi->adapter;
5189 struct link_config *lc = &pi->link_cfg;
5193 ASSERT_SYNCHRONIZED_OP(sc);
5194 PORT_LOCK_ASSERT_OWNED(pi);
5196 if (lc->requested_aneg == AUTONEG_ENABLE)
5197 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG);
5198 if (!(lc->requested_fc & PAUSE_AUTONEG))
5199 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE);
5200 if (lc->requested_fc & PAUSE_TX)
5201 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX);
5202 if (lc->requested_fc & PAUSE_RX)
5203 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX);
5204 if (lc->requested_fec & FEC_RS)
5205 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS);
5206 if (lc->requested_fec & FEC_BASER_RS)
5207 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
5209 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
5211 /* Don't complain if the VF driver gets back an EPERM. */
5212 if (!(sc->flags & IS_VF) || rc != FW_EPERM)
5213 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
5216 * An L1_CFG will almost always result in a link-change event if
5217 * the link is up, and the driver will refresh the actual
5218 * fec/fc/etc. when the notification is processed. If the link
5219 * is down then the actual settings are meaningless.
5221 * This takes care of the case where a change in the L1 settings
5222 * may not result in a notification.
5224 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
5225 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
5230 #define FW_MAC_EXACT_CHUNK 7
5233 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
5241 add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
5243 struct mcaddr_ctx *ctx = arg;
5244 struct vi_info *vi = ctx->ifp->if_softc;
5245 struct port_info *pi = vi->pi;
5246 struct adapter *sc = pi->adapter;
5251 ctx->mcaddr[ctx->i] = LLADDR(sdl);
5252 MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i]));
5255 if (ctx->i == FW_MAC_EXACT_CHUNK) {
5256 ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del,
5257 ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0);
5261 for (j = 0; j < ctx->i; j++) {
5263 "failed to add mc address"
5265 "%02x:%02x:%02x rc=%d\n",
5266 ctx->mcaddr[j][0], ctx->mcaddr[j][1],
5267 ctx->mcaddr[j][2], ctx->mcaddr[j][3],
5268 ctx->mcaddr[j][4], ctx->mcaddr[j][5],
5281 * Program the port's XGMAC based on parameters in ifnet. The caller also
5282 * indicates which parameters should be programmed (the rest are left alone).
5285 update_mac_settings(struct ifnet *ifp, int flags)
5288 struct vi_info *vi = ifp->if_softc;
5289 struct port_info *pi = vi->pi;
5290 struct adapter *sc = pi->adapter;
5291 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
5292 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
5294 ASSERT_SYNCHRONIZED_OP(sc);
5295 KASSERT(flags, ("%s: not told what to update.", __func__));
5297 if (flags & XGMAC_MTU)
5300 if (flags & XGMAC_PROMISC)
5301 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
5303 if (flags & XGMAC_ALLMULTI)
5304 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
5306 if (flags & XGMAC_VLANEX)
5307 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
5309 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
5310 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
5311 allmulti, 1, vlanex, false);
5313 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
5319 if (flags & XGMAC_UCADDR) {
5320 uint8_t ucaddr[ETHER_ADDR_LEN];
5322 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
5323 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
5324 ucaddr, true, &vi->smt_idx);
5327 if_printf(ifp, "change_mac failed: %d\n", rc);
5330 vi->xact_addr_filt = rc;
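/*
 * On success t4_change_mac() returns the index of the exact-match filter
 * that now holds the address; it is saved here so the old entry can be
 * replaced on the next change.
 */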
5335 if (flags & XGMAC_MCADDRS) {
5336 struct epoch_tracker et;
5337 struct mcaddr_ctx ctx;
5346 * Unlike other drivers, we accumulate a list of pointers into
5347 * the interface address lists and we need to keep it safe even
5348 * after if_foreach_llmaddr() returns, thus we must enter the
5349 * network epoch.
5351 NET_EPOCH_ENTER(et);
5352 if_foreach_llmaddr(ifp, add_maddr, &ctx);
5359 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
5360 ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
5364 for (j = 0; j < ctx.i; j++) {
5366 "failed to add mcast address"
5368 "%02x:%02x:%02x rc=%d\n",
5369 ctx.mcaddr[j][0], ctx.mcaddr[j][1],
5370 ctx.mcaddr[j][2], ctx.mcaddr[j][3],
5371 ctx.mcaddr[j][4], ctx.mcaddr[j][5],
5380 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
5382 if_printf(ifp, "failed to set mcast address hash: %d\n",
5385 /* We clobbered the VXLAN entry if there was one. */
5386 pi->vxlan_tcam_entry = false;
5390 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 &&
5391 pi->vxlan_tcam_entry == false) {
5392 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac,
5393 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
5397 if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n",
5400 MPASS(rc == sc->rawf_base + pi->port_id);
5402 pi->vxlan_tcam_entry = true;
5410 * {begin|end}_synchronized_op must be called from the same thread.
5413 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
5419 /* the caller thinks it's ok to sleep, but is it really? */
5420 if (flags & SLEEP_OK)
5421 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
5422 "begin_synchronized_op");
5433 if (vi && IS_DOOMED(vi)) {
5443 if (!(flags & SLEEP_OK)) {
5448 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
5454 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
5457 sc->last_op = wmesg;
5458 sc->last_op_thr = curthread;
5459 sc->last_op_flags = flags;
5463 if (!(flags & HOLD_LOCK) || rc)
5470 * Tell if_ioctl and if_init that the VI is going away. This is a
5471 * special variant of begin_synchronized_op and must be paired with a
5472 * call to end_synchronized_op.
5475 doom_vi(struct adapter *sc, struct vi_info *vi)
5482 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
5485 sc->last_op = "t4detach";
5486 sc->last_op_thr = curthread;
5487 sc->last_op_flags = 0;
5493 * {begin|end}_synchronized_op must be called from the same thread.
5496 end_synchronized_op(struct adapter *sc, int flags)
5499 if (flags & LOCK_HELD)
5500 ADAPTER_LOCK_ASSERT_OWNED(sc);
5504 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
5511 cxgbe_init_synchronized(struct vi_info *vi)
5513 struct port_info *pi = vi->pi;
5514 struct adapter *sc = pi->adapter;
5515 struct ifnet *ifp = vi->ifp;
5517 struct sge_txq *txq;
5519 ASSERT_SYNCHRONIZED_OP(sc);
5521 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5522 return (0); /* already running */
5524 if (!(sc->flags & FULL_INIT_DONE) &&
5525 ((rc = adapter_full_init(sc)) != 0))
5526 return (rc); /* error message displayed already */
5528 if (!(vi->flags & VI_INIT_DONE) &&
5529 ((rc = vi_full_init(vi)) != 0))
5530 return (rc); /* error message displayed already */
5532 rc = update_mac_settings(ifp, XGMAC_ALL);
5534 goto done; /* error message displayed already */
5537 if (pi->up_vis == 0) {
5538 t4_update_port_info(pi);
5539 fixup_link_config(pi);
5540 build_medialist(pi);
5541 apply_link_config(pi);
5544 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
5546 if_printf(ifp, "enable_vi failed: %d\n", rc);
5552 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
5553 * if this changes.
5556 for_each_txq(vi, i, txq) {
5558 txq->eq.flags |= EQ_ENABLED;
5563 * The first iq of the first port to come up is used for tracing.
5565 if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
5566 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
5567 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
5568 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
5569 V_QUEUENUMBER(sc->traceq));
5570 pi->flags |= HAS_TRACEQ;
5575 ifp->if_drv_flags |= IFF_DRV_RUNNING;
5577 if (pi->nvi > 1 || sc->flags & IS_VF)
5578 callout_reset(&vi->tick, hz, vi_tick, vi);
5579 else
5580 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
5581 if (pi->link_cfg.link_ok)
5582 t4_os_link_changed(pi);
5586 cxgbe_uninit_synchronized(vi);
5595 cxgbe_uninit_synchronized(struct vi_info *vi)
5597 struct port_info *pi = vi->pi;
5598 struct adapter *sc = pi->adapter;
5599 struct ifnet *ifp = vi->ifp;
5601 struct sge_txq *txq;
5603 ASSERT_SYNCHRONIZED_OP(sc);
5605 if (!(vi->flags & VI_INIT_DONE)) {
5606 if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5607 KASSERT(0, ("uninited VI is running"));
5608 if_printf(ifp, "uninited VI with running ifnet. "
5609 "vi->flags 0x%016lx, if_flags 0x%08x, "
5610 "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags,
5617 * Disable the VI so that all its data in either direction is discarded
5618 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
5619 * tick) intact as the TP can deliver negative advice or data that it's
5620 * holding in its RAM (for an offloaded connection) even after the VI is
5623 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
5625 if_printf(ifp, "disable_vi failed: %d\n", rc);
5629 for_each_txq(vi, i, txq) {
5631 txq->eq.flags &= ~EQ_ENABLED;
5636 if (pi->nvi > 1 || sc->flags & IS_VF)
5637 callout_stop(&vi->tick);
5638 else
5639 callout_stop(&pi->tick);
5640 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5644 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
5646 if (pi->up_vis > 0) {
5651 pi->link_cfg.link_ok = false;
5652 pi->link_cfg.speed = 0;
5653 pi->link_cfg.link_down_rc = 255;
5654 t4_os_link_changed(pi);
5661 * It is ok for this function to fail midway and return right away. t4_detach
5662 * will walk the entire sc->irq list and clean up whatever is valid.
5665 t4_setup_intr_handlers(struct adapter *sc)
5667 int rc, rid, p, q, v;
5670 struct port_info *pi;
5672 struct sge *sge = &sc->sge;
5673 struct sge_rxq *rxq;
5675 struct sge_ofld_rxq *ofld_rxq;
5678 struct sge_nm_rxq *nm_rxq;
5681 int nbuckets = rss_getnumbuckets();
5688 rid = sc->intr_type == INTR_INTX ? 0 : 1;
5689 if (forwarding_intr_to_fwq(sc))
5690 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
5692 /* Multiple interrupts. */
5693 if (sc->flags & IS_VF)
5694 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
5695 ("%s: too few intr.", __func__));
5697 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
5698 ("%s: too few intr.", __func__));
5700 /* The first one is always error intr on PFs */
5701 if (!(sc->flags & IS_VF)) {
5702 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
5709 /* The second one is always the firmware event queue (first on VFs) */
5710 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
5716 for_each_port(sc, p) {
5718 for_each_vi(pi, v, vi) {
5719 vi->first_intr = rid - 1;
5721 if (vi->nnmrxq > 0) {
5722 int n = max(vi->nrxq, vi->nnmrxq);
5724 rxq = &sge->rxq[vi->first_rxq];
5726 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
5728 for (q = 0; q < n; q++) {
5729 snprintf(s, sizeof(s), "%x%c%x", p,
5735 irq->nm_rxq = nm_rxq++;
5737 if (irq->nm_rxq != NULL &&
5739 /* Netmap rx only */
5740 rc = t4_alloc_irq(sc, irq, rid,
5741 t4_nm_intr, irq->nm_rxq, s);
5743 if (irq->nm_rxq != NULL &&
5745 /* NIC and Netmap rx */
5746 rc = t4_alloc_irq(sc, irq, rid,
5747 t4_vi_intr, irq, s);
5750 if (irq->rxq != NULL &&
5751 irq->nm_rxq == NULL) {
5753 rc = t4_alloc_irq(sc, irq, rid,
5754 t4_intr, irq->rxq, s);
5760 bus_bind_intr(sc->dev, irq->res,
5761 rss_getcpu(q % nbuckets));
5769 for_each_rxq(vi, q, rxq) {
5770 snprintf(s, sizeof(s), "%x%c%x", p,
5772 rc = t4_alloc_irq(sc, irq, rid,
5777 bus_bind_intr(sc->dev, irq->res,
5778 rss_getcpu(q % nbuckets));
5786 for_each_ofld_rxq(vi, q, ofld_rxq) {
5787 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q);
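/*
 * Interrupt names encode port (hex), VI ('A' + VI index), and queue (hex),
 * so e.g. "0A2" is rx queue 2 of the first VI on port 0.
 */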
5788 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
5799 MPASS(irq == &sc->irq[sc->intr_count]);
5805 adapter_full_init(struct adapter *sc)
5809 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5810 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
5813 ASSERT_SYNCHRONIZED_OP(sc);
5814 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5815 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
5816 ("%s: FULL_INIT_DONE already", __func__));
5819 * queues that belong to the adapter (not any particular port).
5821 rc = t4_setup_adapter_queues(sc);
5825 for (i = 0; i < nitems(sc->tq); i++) {
5826 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
5827 taskqueue_thread_enqueue, &sc->tq[i]);
5828 if (sc->tq[i] == NULL) {
5829 device_printf(sc->dev,
5830 "failed to allocate task queue %d\n", i);
5834 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
5835 device_get_nameunit(sc->dev), i);
5838 MPASS(RSS_KEYSIZE == 40);
5839 rss_getkey((void *)&raw_rss_key[0]);
5840 for (i = 0; i < nitems(rss_key); i++) {
5841 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
5843 t4_write_rss_key(sc, &rss_key[0], -1, 1);
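/*
 * On a little-endian host the swizzle above reverses the whole 40-byte key:
 * the ten 32-bit words are taken in reverse order and htobe32() reverses the
 * bytes within each word, so raw key byte 0 ends up as key byte 39.
 */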
5846 if (!(sc->flags & IS_VF))
5849 if (sc->flags & KERN_TLS_OK)
5850 callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc,
5853 sc->flags |= FULL_INIT_DONE;
5856 adapter_full_uninit(sc);
5862 adapter_full_uninit(struct adapter *sc)
5866 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
5868 t4_teardown_adapter_queues(sc);
5870 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
5871 taskqueue_free(sc->tq[i]);
5875 sc->flags &= ~FULL_INIT_DONE;
5881 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
5882 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
5883 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
5884 RSS_HASHTYPE_RSS_UDP_IPV6)
5886 /* Translates kernel hash types to hardware. */
5888 hashconfig_to_hashen(int hashconfig)
5892 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
5893 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
5894 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
5895 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
5896 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
5897 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5898 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5900 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
5901 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
5902 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5904 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
5905 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
5906 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
5907 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
5912 /* Translates hardware hash types to kernel. */
5914 hashen_to_hashconfig(int hashen)
5918 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
5920 * If UDP hashing was enabled it must have been enabled for
5921 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
5922 * enabling any 4-tuple hash is a nonsensical configuration.
5924 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
5925 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
5927 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5928 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
5929 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5930 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
5932 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
5933 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
5934 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
5935 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
5936 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
5937 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
5938 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
5939 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
5941 return (hashconfig);
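/*
 * Example round trip: a hashen of IP4FOURTUPEN | UDPEN decodes to
 * RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_UDP_IPV4 because the
 * hardware's 4-tuple enable covers TCP and, with UDPEN, UDP together.
 */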
5946 vi_full_init(struct vi_info *vi)
5948 struct adapter *sc = vi->adapter;
5949 struct ifnet *ifp = vi->ifp;
5951 struct sge_rxq *rxq;
5954 int nbuckets = rss_getnumbuckets();
5955 int hashconfig = rss_gethashconfig();
5959 ASSERT_SYNCHRONIZED_OP(sc);
5960 KASSERT((vi->flags & VI_INIT_DONE) == 0,
5961 ("%s: VI_INIT_DONE already", __func__));
5963 sysctl_ctx_init(&vi->ctx);
5964 vi->flags |= VI_SYSCTL_CTX;
5967 * Allocate tx/rx/fl queues for this VI.
5969 rc = t4_setup_vi_queues(vi);
5971 goto done; /* error message displayed already */
5974 * Setup RSS for this VI. Save a copy of the RSS table for later use.
5976 if (vi->nrxq > vi->rss_size) {
5977 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
5978 "some queues will never receive traffic.\n", vi->nrxq,
5980 } else if (vi->rss_size % vi->nrxq) {
5981 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
5982 "expect uneven traffic distribution.\n", vi->nrxq,
5986 if (vi->nrxq != nbuckets) {
5987 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
5988 "performance will be impacted.\n", vi->nrxq, nbuckets);
5991 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
5992 for (i = 0; i < vi->rss_size;) {
5994 j = rss_get_indirection_to_bucket(i);
5996 rxq = &sc->sge.rxq[vi->first_rxq + j];
5997 rss[i++] = rxq->iq.abs_id;
5999 for_each_rxq(vi, j, rxq) {
6000 rss[i++] = rxq->iq.abs_id;
6001 if (i == vi->rss_size)
6007 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
6011 if_printf(ifp, "rss_config failed: %d\n", rc);
6016 vi->hashen = hashconfig_to_hashen(hashconfig);
6019 * We may have had to enable some hashes even though the global config
6020 * wants them disabled. This is a potential problem that must be
6021 * reported to the user.
6023 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;
6026 * If we consider only the supported hash types, then the enabled hashes
6027 * are a superset of the requested hashes. In other words, there cannot
6028 * be any supported hash that was requested but not enabled, but there
6029 * can be hashes that were not requested but had to be enabled.
6031 extra &= SUPPORTED_RSS_HASHTYPES;
6032 MPASS((extra & hashconfig) == 0);
6036 "global RSS config (0x%x) cannot be accommodated.\n",
6039 if (extra & RSS_HASHTYPE_RSS_IPV4)
6040 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
6041 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
6042 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
6043 if (extra & RSS_HASHTYPE_RSS_IPV6)
6044 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
6045 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
6046 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
6047 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
6048 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
6049 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
6050 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
6052 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
6053 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
6054 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
6055 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
6057 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, rss[0], 0, 0);
6060 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
6065 vi->flags |= VI_INIT_DONE;
6077 vi_full_uninit(struct vi_info *vi)
6079 struct port_info *pi = vi->pi;
6080 struct adapter *sc = pi->adapter;
6082 struct sge_rxq *rxq;
6083 struct sge_txq *txq;
6085 struct sge_ofld_rxq *ofld_rxq;
6087 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6088 struct sge_wrq *ofld_txq;
6091 if (vi->flags & VI_INIT_DONE) {
6093 /* Need to quiesce queues. */
6095 /* XXX: Only for the first VI? */
6096 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
6097 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
6099 for_each_txq(vi, i, txq) {
6100 quiesce_txq(sc, txq);
6103 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6104 for_each_ofld_txq(vi, i, ofld_txq) {
6105 quiesce_wrq(sc, ofld_txq);
6109 for_each_rxq(vi, i, rxq) {
6110 quiesce_iq(sc, &rxq->iq);
6111 quiesce_fl(sc, &rxq->fl);
6115 for_each_ofld_rxq(vi, i, ofld_rxq) {
6116 quiesce_iq(sc, &ofld_rxq->iq);
6117 quiesce_fl(sc, &ofld_rxq->fl);
6120 free(vi->rss, M_CXGBE);
6121 free(vi->nm_rss, M_CXGBE);
6124 t4_teardown_vi_queues(vi);
6125 vi->flags &= ~VI_INIT_DONE;
6131 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
6133 struct sge_eq *eq = &txq->eq;
6134 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
6136 (void) sc; /* unused */
6140 MPASS((eq->flags & EQ_ENABLED) == 0);
6144 /* Wait for the mp_ring to empty. */
6145 while (!mp_ring_is_idle(txq->r)) {
6146 mp_ring_check_drainage(txq->r, 4096);
6147 pause("rquiesce", 1);
6150 /* Then wait for the hardware to finish. */
6151 while (spg->cidx != htobe16(eq->pidx))
6152 pause("equiesce", 1);
6154 /* Finally, wait for the driver to reclaim all descriptors. */
6155 while (eq->cidx != eq->pidx)
6156 pause("dquiesce", 1);
6160 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
6167 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
6169 (void) sc; /* unused */
6171 /* Synchronize with the interrupt handler */
6172 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
6177 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
6179 mtx_lock(&sc->sfl_lock);
6181 fl->flags |= FL_DOOMED;
6183 callout_stop(&sc->sfl_callout);
6184 mtx_unlock(&sc->sfl_lock);
6186 KASSERT((fl->flags & FL_STARVING) == 0,
6187 ("%s: still starving", __func__));
6191 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
6192 driver_intr_t *handler, void *arg, char *name)
6197 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
6198 RF_SHAREABLE | RF_ACTIVE);
6199 if (irq->res == NULL) {
6200 device_printf(sc->dev,
6201 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
6205 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
6206 NULL, handler, arg, &irq->tag);
6208 device_printf(sc->dev,
6209 "failed to setup interrupt for rid %d, name %s: %d\n",
6212 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
6218 t4_free_irq(struct adapter *sc, struct irq *irq)
6221 bus_teardown_intr(sc->dev, irq->res, irq->tag);
6223 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
6225 bzero(irq, sizeof(*irq));
6231 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
6234 regs->version = chip_id(sc) | chip_rev(sc) << 10;
6235 t4_get_regs(sc, buf, regs->len);
6238 #define A_PL_INDIR_CMD 0x1f8
6240 #define S_PL_AUTOINC 31
6241 #define M_PL_AUTOINC 0x1U
6242 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
6243 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
6245 #define S_PL_VFID 20
6246 #define M_PL_VFID 0xffU
6247 #define V_PL_VFID(x) ((x) << S_PL_VFID)
6248 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID)
6250 #define S_PL_ADDR 0
6251 #define M_PL_ADDR 0xfffffU
6252 #define V_PL_ADDR(x) ((x) << S_PL_ADDR)
6253 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR)
6255 #define A_PL_INDIR_DATA 0x1fc
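/*
 * These follow the register-field macro convention used throughout the
 * driver: S_<F> is the field's bit offset, M_<F> its unshifted mask,
 * V_<F>(x) places a value in the field, and G_<F>(x) extracts it, e.g.:
 *
 *	cmd = V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(addr);
 *	vin = G_PL_VFID(cmd);
 */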
6258 read_vf_stat(struct adapter *sc, u_int vin, int reg)
6262 mtx_assert(&sc->reg_lock, MA_OWNED);
6263 if (sc->flags & IS_VF) {
6264 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
6265 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
6267 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
6268 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg)));
6269 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
6270 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
6272 return (((uint64_t)stats[1]) << 32 | stats[0]);
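/*
 * Each VF statistic is a 64-bit counter read as a LO/HI pair of 32-bit
 * words. With AUTOINC set, the indirect address advances after every access
 * to A_PL_INDIR_DATA, so the two back-to-back reads return the LO word and
 * then the HI word.
 */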
6276 t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats)
6279 #define GET_STAT(name) \
6280 read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L)
6282 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES);
6283 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
6284 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES);
6285 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
6286 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES);
6287 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
6288 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES);
6289 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES);
6290 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
6291 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES);
6292 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
6293 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES);
6294 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
6295 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES);
6296 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
6297 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES);
6303 t4_clr_vi_stats(struct adapter *sc, u_int vin)
6307 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) |
6308 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
6309 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
6310 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
6311 t4_write_reg(sc, A_PL_INDIR_DATA, 0);
6315 vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
6318 const struct timeval interval = {0, 250000}; /* 250ms */
6320 if (!(vi->flags & VI_INIT_DONE))
6323 getmicrotime(&tv);
6324 timevalsub(&tv, &interval);
6325 if (timevalcmp(&tv, &vi->last_refreshed, <))
6328 mtx_lock(&sc->reg_lock);
6329 t4_get_vi_stats(sc, vi->vin, &vi->stats);
6330 getmicrotime(&vi->last_refreshed);
6331 mtx_unlock(&sc->reg_lock);
6335 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
6337 u_int i, v, tnl_cong_drops, chan_map;
6339 const struct timeval interval = {0, 250000}; /* 250ms */
6341 getmicrotime(&tv);
6342 timevalsub(&tv, &interval);
6343 if (timevalcmp(&tv, &pi->last_refreshed, <))
6347 t4_get_port_stats(sc, pi->tx_chan, &pi->stats);
6348 chan_map = pi->rx_e_chan_map;
6349 while (chan_map) {
6350 i = ffs(chan_map) - 1;
6351 mtx_lock(&sc->reg_lock);
6352 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1,
6353 A_TP_MIB_TNL_CNG_DROP_0 + i);
6354 mtx_unlock(&sc->reg_lock);
6355 tnl_cong_drops += v;
6356 chan_map &= ~(1 << i);
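/*
 * The loop above visits every rx channel set in chan_map: ffs() yields the
 * lowest set bit (1-based, hence the -1), the per-channel TP MIB congestion
 * drop counter is accumulated, and the bit is cleared. E.g. chan_map = 0x5
 * reads the counters for channels 0 and 2.
 */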
6358 pi->tnl_cong_drops = tnl_cong_drops;
6359 getmicrotime(&pi->last_refreshed);
6363 cxgbe_tick(void *arg)
6365 struct port_info *pi = arg;
6366 struct adapter *sc = pi->adapter;
6368 PORT_LOCK_ASSERT_OWNED(pi);
6369 cxgbe_refresh_stats(sc, pi);
6371 callout_schedule(&pi->tick, hz);
6377 struct vi_info *vi = arg;
6378 struct adapter *sc = vi->adapter;
6380 vi_refresh_stats(sc, vi);
6382 callout_schedule(&vi->tick, hz);
6386 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
6388 static char *caps_decoder[] = {
6389 "\20\001IPMI\002NCSI", /* 0: NBM */
6390 "\20\001PPP\002QFC\003DCBX", /* 1: link */
6391 "\20\001INGRESS\002EGRESS", /* 2: switch */
6392 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
6393 "\006HASHFILTER\007ETHOFLD",
6394 "\20\001TOE", /* 4: TOE */
6395 "\20\001RDDP\002RDMAC", /* 5: RDMA */
6396 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
6397 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
6398 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
6400 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
6401 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */
6402 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
6403 "\004PO_INITIATOR\005PO_TARGET",
6407 t4_sysctls(struct adapter *sc)
6409 struct sysctl_ctx_list *ctx;
6410 struct sysctl_oid *oid;
6411 struct sysctl_oid_list *children, *c0;
6412 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
6414 ctx = device_get_sysctl_ctx(sc->dev);
6419 oid = device_get_sysctl_tree(sc->dev);
6420 c0 = children = SYSCTL_CHILDREN(oid);
6422 sc->sc_do_rxcopy = 1;
6423 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
6424 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
6426 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
6427 sc->params.nports, "# of ports");
6429 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
6430 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells,
6431 (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A",
6432 "available doorbells");
6434 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
6435 sc->params.vpd.cclk, "core clock frequency (in kHz)");
6437 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
6438 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
6439 sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val),
6440 sysctl_int_array, "A", "interrupt holdoff timer values (us)");
6442 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
6443 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
6444 sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val),
6445 sysctl_int_array, "A", "interrupt holdoff packet counter values");
6447 t4_sge_sysctls(sc, ctx, children);
6449 sc->lro_timeout = 100;
6450 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
6451 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
6453 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
6454 &sc->debug_flags, 0, "flags to enable runtime debugging");
6456 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
6457 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
6459 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
6460 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
6462 if (sc->flags & IS_VF)
6465 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
6466 NULL, chip_rev(sc), "chip hardware revision");
6468 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
6469 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
6471 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
6472 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
6474 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
6475 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
6477 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version",
6478 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version");
6480 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
6481 CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
6483 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
6484 sc->er_version, 0, "expansion ROM version");
6486 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
6487 sc->bs_version, 0, "bootstrap firmware version");
6489 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
6490 NULL, sc->params.scfg_vers, "serial config version");
6492 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
6493 NULL, sc->params.vpd_vers, "VPD version");
6495 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
6496 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
6498 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
6499 sc->cfcsum, "config file checksum");
6501 #define SYSCTL_CAP(name, n, text) \
6502 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
6503 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \
6504 (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \
6505 "available " text " capabilities")
6507 SYSCTL_CAP(nbmcaps, 0, "NBM");
6508 SYSCTL_CAP(linkcaps, 1, "link");
6509 SYSCTL_CAP(switchcaps, 2, "switch");
6510 SYSCTL_CAP(niccaps, 3, "NIC");
6511 SYSCTL_CAP(toecaps, 4, "TCP offload");
6512 SYSCTL_CAP(rdmacaps, 5, "RDMA");
6513 SYSCTL_CAP(iscsicaps, 6, "iSCSI");
6514 SYSCTL_CAP(cryptocaps, 7, "crypto");
6515 SYSCTL_CAP(fcoecaps, 8, "FCoE");
6518 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
6519 NULL, sc->tids.nftids, "number of filters");
6521 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
6522 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6523 sysctl_temperature, "I", "chip temperature (in Celsius)");
6524 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor",
6525 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
6526 sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
6528 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
6529 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6530 sysctl_loadavg, "A",
6531 "microprocessor load averages (debug firmwares only)");
6533 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
6534 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
6535 "I", "core Vdd (in mV)");
6537 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus",
6538 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS,
6539 sysctl_cpus, "A", "local CPUs");
6541 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus",
6542 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS,
6543 sysctl_cpus, "A", "preferred CPUs for interrupts");
6545 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW,
6546 &sc->swintr, 0, "software triggered interrupts");
6549 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
6551 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
6552 CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
6553 "logs and miscellaneous information");
6554 children = SYSCTL_CHILDREN(oid);
6556 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
6557 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6558 sysctl_cctrl, "A", "congestion control");
6560 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
6561 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6562 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
6564 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
6565 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
6566 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
6568 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
6569 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
6570 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
6572 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
6573 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
6574 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
6576 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
6577 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
6578 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
6580 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
6581 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
6582 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
6584 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
6585 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6586 sysctl_cim_la, "A", "CIM logic analyzer");
6588 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
6589 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6590 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
6592 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
6593 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6594 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
6596 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
6597 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6598 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
6600 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
6601 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6602 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
6604 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
6605 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6606 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
6608 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
6609 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6610 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
6612 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
6613 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6614 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
6616 if (chip_id(sc) > CHELSIO_T4) {
6617 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
6618 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6619 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
6620 "CIM OBQ 6 (SGE0-RX)");
6622 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
6623 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6624 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
6625 "CIM OBQ 7 (SGE1-RX)");
6628 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
6629 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6630 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
6632 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
6633 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6634 sysctl_cim_qcfg, "A", "CIM queue configuration");
6636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
6637 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6638 sysctl_cpl_stats, "A", "CPL statistics");
6640 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
6641 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6642 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
6644 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats",
6645 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6646 sysctl_tid_stats, "A", "tid stats");
6648 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
6649 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6650 sysctl_devlog, "A", "firmware's device log");
6652 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
6653 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6654 sysctl_fcoe_stats, "A", "FCoE statistics");
6656 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
6657 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6658 sysctl_hw_sched, "A", "hardware scheduler");
6660 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
6661 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6662 sysctl_l2t, "A", "hardware L2 table");
6664 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt",
6665 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6666 sysctl_smt, "A", "hardware source MAC table");
6669 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip",
6670 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6671 sysctl_clip, "A", "active CLIP table entries");
6674 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
6675 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6676 sysctl_lb_stats, "A", "loopback statistics");
6678 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
6679 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6680 sysctl_meminfo, "A", "memory regions");
6682 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
6683 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6684 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
6685 "A", "MPS TCAM entries");
6687 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
6688 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6689 sysctl_path_mtus, "A", "path MTUs");
6691 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
6692 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6693 sysctl_pm_stats, "A", "PM statistics");
6695 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
6696 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6697 sysctl_rdma_stats, "A", "RDMA statistics");
6699 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
6700 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6701 sysctl_tcp_stats, "A", "TCP statistics");
6703 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
6704 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6705 sysctl_tids, "A", "TID information");
6707 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
6708 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6709 sysctl_tp_err_stats, "A", "TP error statistics");
6711 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats",
6712 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6713 sysctl_tnl_stats, "A", "TP tunnel statistics");
6715 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
6716 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
6717 sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask");
6719 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
6720 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6721 sysctl_tp_la, "A", "TP logic analyzer");
6723 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
6724 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6725 sysctl_tx_rate, "A", "Tx rate");
6727 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
6728 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6729 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
6731 if (chip_id(sc) >= CHELSIO_T5) {
6732 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
6733 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6734 sysctl_wcwr_stats, "A", "write combined work requests");
6738 if (sc->flags & KERN_TLS_OK) {
6742 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls",
6743 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters");
6744 children = SYSCTL_CHILDREN(oid);
6746 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys",
6747 CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS "
6748 "keys in work requests (1) or attempt to store TLS keys "
6750 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs",
6751 CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to combine "
6752 "TCB field updates with TLS record work requests.");
6757 if (is_offload(sc)) {
6764 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe",
6765 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters");
6766 children = SYSCTL_CHILDREN(oid);
6768 sc->tt.cong_algorithm = -1;
6769 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm",
6770 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control "
6771 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, "
6775 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
6776 &sc->tt.sndbuf, 0, "hardware send buffer");
6779 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp",
6780 CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, "");
6781 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW,
6782 &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)");
6784 sc->tt.rx_coalesce = -1;
6785 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
6786 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
6789 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT |
6790 CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
6791 "Inline TLS allowed");
6793 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports",
6794 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
6795 sysctl_tls_rx_ports, "I",
6796 "TCP ports that use inline TLS+TOE RX");
6798 sc->tt.tls_rx_timeout = t4_toe_tls_rx_timeout;
6799 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_timeout",
6800 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
6801 sysctl_tls_rx_timeout, "I",
6802 "Timeout in seconds to downgrade TLS sockets to plain TOE");
6804 sc->tt.tx_align = -1;
6805 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
6806 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
6808 sc->tt.tx_zcopy = 0;
6809 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
6810 CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
6811 "Enable zero-copy aio_write(2)");
6813 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading;
6814 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
6815 "cop_managed_offloading", CTLFLAG_RW,
6816 &sc->tt.cop_managed_offloading, 0,
6817 "COP (Connection Offload Policy) controls all TOE offload");
6819 sc->tt.autorcvbuf_inc = 16 * 1024;
6820 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc",
6821 CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0,
6822 "autorcvbuf increment");
6824 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
6825 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6826 sysctl_tp_tick, "A", "TP timer tick (us)");
6828 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
6829 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
6830 sysctl_tp_tick, "A", "TCP timestamp tick (us)");
6832 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
6833 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
6834 sysctl_tp_tick, "A", "DACK tick (us)");
6836 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
6837 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
6838 sysctl_tp_dack_timer, "IU", "DACK timer (us)");
6840 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
6841 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6842 A_TP_RXT_MIN, sysctl_tp_timer, "LU",
6843 "Minimum retransmit interval (us)");
6845 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
6846 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6847 A_TP_RXT_MAX, sysctl_tp_timer, "LU",
6848 "Maximum retransmit interval (us)");
6850 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
6851 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6852 A_TP_PERS_MIN, sysctl_tp_timer, "LU",
6853 "Persist timer min (us)");
6855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
6856 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6857 A_TP_PERS_MAX, sysctl_tp_timer, "LU",
6858 "Persist timer max (us)");
6860 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
6861 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6862 A_TP_KEEP_IDLE, sysctl_tp_timer, "LU",
6863 "Keepalive idle timer (us)");
6865 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval",
6866 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6867 A_TP_KEEP_INTVL, sysctl_tp_timer, "LU",
6868 "Keepalive interval timer (us)");
6870 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
6871 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6872 A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)");
6874 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
6875 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6876 A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU",
6877 "FINWAIT2 timer (us)");
6879 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
6880 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6881 S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU",
6882 "Number of SYN retransmissions before abort");
6884 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
6885 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6886 S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU",
6887 "Number of retransmissions before abort");
6889 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
6890 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6891 S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU",
6892 "Number of keepalive probes before abort");
6894 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
6895 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
6896 "TOE retransmit backoffs");
6897 children = SYSCTL_CHILDREN(oid);
6898 for (i = 0; i < 16; i++) {
6899 snprintf(s, sizeof(s), "%u", i);
6900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
6901 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
6902 i, sysctl_tp_backoff, "IU",
6903 "TOE retransmit backoff");
6910 vi_sysctls(struct vi_info *vi)
6912 struct sysctl_ctx_list *ctx;
6913 struct sysctl_oid *oid;
6914 struct sysctl_oid_list *children;
6916 ctx = device_get_sysctl_ctx(vi->dev);
6919 * dev.v?(cxgbe|cxl).X.
6921 oid = device_get_sysctl_tree(vi->dev);
6922 children = SYSCTL_CHILDREN(oid);
6924 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
6925 vi->viid, "VI identifier");
6926 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
6927 &vi->nrxq, 0, "# of rx queues");
6928 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
6929 &vi->ntxq, 0, "# of tx queues");
6930 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
6931 &vi->first_rxq, 0, "index of first rx queue");
6932 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
6933 &vi->first_txq, 0, "index of first tx queue");
6934 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
6935 vi->rss_base, "start of RSS indirection table");
6936 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
6937 vi->rss_size, "size of RSS indirection table");
6939 if (IS_MAIN_VI(vi)) {
6940 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
6941 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
6942 sysctl_noflowq, "IU",
6943 "Reserve queue 0 for non-flowid packets");
6946 if (vi->adapter->flags & IS_VF) {
6947 MPASS(vi->flags & TX_USES_VM_WR);
6948 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD,
6949 NULL, 1, "use VM work requests for transmit");
6951 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
6952 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
6953 sysctl_tx_vm_wr, "I", "use VM work requests for transmit");
6957 if (vi->nofldrxq != 0) {
6958 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
6960 "# of rx queues for offloaded TCP connections");
6961 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
6962 CTLFLAG_RD, &vi->first_ofld_rxq, 0,
6963 "index of first TOE rx queue");
6964 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
6965 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
6966 sysctl_holdoff_tmr_idx_ofld, "I",
6967 "holdoff timer index for TOE queues");
6968 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld",
6969 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
6970 sysctl_holdoff_pktc_idx_ofld, "I",
6971 "holdoff packet counter index for TOE queues");
6974 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
6975 if (vi->nofldtxq != 0) {
6976 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
6978 "# of tx queues for TOE/ETHOFLD");
6979 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
6980 CTLFLAG_RD, &vi->first_ofld_txq, 0,
6981 "index of first TOE/ETHOFLD tx queue");
6985 if (vi->nnmrxq != 0) {
6986 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
6987 &vi->nnmrxq, 0, "# of netmap rx queues");
6988 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
6989 &vi->nnmtxq, 0, "# of netmap tx queues");
6990 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
6991 CTLFLAG_RD, &vi->first_nm_rxq, 0,
6992 "index of first netmap rx queue");
6993 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
6994 CTLFLAG_RD, &vi->first_nm_txq, 0,
6995 "index of first netmap tx queue");
6999 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
7000 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7001 sysctl_holdoff_tmr_idx, "I", "holdoff timer index");
7002 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
7003 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7004 sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index");
7006 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
7007 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7008 sysctl_qsize_rxq, "I", "rx queue size");
7009 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
7010 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0,
7011 sysctl_qsize_txq, "I", "tx queue size");
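/*
 * Note: the nodes registered above live under the VI's device sysctl tree,
 * so a command like "sysctl dev.cxgbe.0.qsize_txq=2048" (illustrative node
 * path; the leaf name depends on the device) ends up in sysctl_qsize_txq().
 */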
7015 cxgbe_sysctls(struct port_info *pi)
7017 struct sysctl_ctx_list *ctx;
7018 struct sysctl_oid *oid;
7019 struct sysctl_oid_list *children, *children2;
7020 struct adapter *sc = pi->adapter;
7023 static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"};
7025 ctx = device_get_sysctl_ctx(pi->dev);
7030 oid = device_get_sysctl_tree(pi->dev);
7031 children = SYSCTL_CHILDREN(oid);
7033 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc",
7034 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7035 sysctl_linkdnrc, "A", "reason why link is down");
7036 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
7037 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
7038 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0,
7039 sysctl_btphy, "I", "PHY temperature (in Celsius)");
7040 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
7041 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1,
7042 sysctl_btphy, "I", "PHY firmware version");
7045 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
7046 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7047 sysctl_pause_settings, "A",
7048 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
7049 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
7050 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7052 "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)");
7053 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec",
7054 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A",
7055 "FEC recommended by the cable/transceiver");
7056 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
7057 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0,
7058 sysctl_autoneg, "I",
7059 "autonegotiation (-1 = not supported)");
7061 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD,
7062 &pi->link_cfg.pcaps, 0, "port capabilities");
7063 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD,
7064 &pi->link_cfg.acaps, 0, "advertised capabilities");
7065 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD,
7066 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities");
7068 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
7069 port_top_speed(pi), "max speed (in Gbps)");
7070 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL,
7071 pi->mps_bg_map, "MPS buffer group map");
7072 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD,
7073 NULL, pi->rx_e_chan_map, "TP rx e-channel map");
7075 if (sc->flags & IS_VF)
7079 * dev.(cxgbe|cxl).X.tc.
7081 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc",
7082 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
7083 "Tx scheduler traffic classes (cl_rl)");
7084 children2 = SYSCTL_CHILDREN(oid);
7085 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize",
7086 CTLFLAG_RW, &pi->sched_params->pktsize, 0,
7087 "pktsize for per-flow cl-rl (0 means up to the driver )");
7088 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize",
7089 CTLFLAG_RW, &pi->sched_params->burstsize, 0,
7090 "burstsize for per-flow cl-rl (0 means up to the driver)");
7091 for (i = 0; i < sc->chip_params->nsched_cls; i++) {
7092 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
7094 snprintf(name, sizeof(name), "%d", i);
7095 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
7096 SYSCTL_CHILDREN(oid), OID_AUTO, name,
7097 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class"));
7098 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags",
7099 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags,
7100 (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags");
7101 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
7102 CTLFLAG_RD, &tc->refcount, 0, "references to this class");
7103 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
7104 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
7105 (pi->port_id << 16) | i, sysctl_tc_params, "A",
7106 "traffic class parameters");
7110 * dev.cxgbe.X.stats.
7112 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
7113 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics");
7114 children = SYSCTL_CHILDREN(oid);
7115 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
7116 &pi->tx_parse_error, 0,
7117 "# of tx packets with invalid length or # of segments");
7119 #define T4_REGSTAT(name, stat, desc) \
7120 SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
7121 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
7122 (is_t4(sc) ? PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L) : \
7123 T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L)), \
7124 sysctl_handle_t4_reg64, "QU", desc)
7126 /* We get these from port_stats and they may be stale by up to 1s */
7127 #define T4_PORTSTAT(name, desc) \
7128 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
7129 &pi->stats.name, desc)
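/*
 * Illustration: "T4_REGSTAT(tx_octets, TX_PORT_BYTES, ...)" below registers
 * a read-only 64-bit node backed by sysctl_handle_t4_reg64() that reads the
 * per-port A_MPS_PORT_STAT_TX_PORT_BYTES_L register pair on every access,
 * whereas "T4_PORTSTAT(rx_ovflow0, ...)" exports &pi->stats.rx_ovflow0
 * directly and is only as fresh as the last port_stats refresh.
 */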
7131 T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
7132 T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
7133 T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
7134 T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
7135 T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
7136 T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
7137 T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
7138 T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
7139 T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
7140 T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
7141 T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
7142 T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
7143 T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
7144 T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
7145 T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
7146 T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
7147 T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
7148 T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
7149 T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
7150 T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
7151 T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
7152 T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
7153 T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
7155 T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
7156 T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
7157 T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
7158 T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
7159 T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
7160 T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
7161 T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
7163 T4_PORTSTAT(rx_fcs_err,
7164 "# of frames received with bad FCS since last link up");
7166 T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
7167 "# of frames received with bad FCS");
7169 T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
7170 T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
7171 T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
7172 T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
7173 T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
7174 T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
7175 T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
7176 T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
7177 T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
7178 T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
7179 T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
7180 T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
7181 T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
7182 T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
7183 T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
7184 T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
7185 T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
7186 T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
7187 T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
7189 T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
7190 T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
7191 T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
7192 T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
7193 T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
7194 T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
7195 T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
7196 T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
7201 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_toe_tls_records",
7202 CTLFLAG_RD, &pi->tx_toe_tls_records,
7203 "# of TOE TLS records transmitted");
7204 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_toe_tls_octets",
7205 CTLFLAG_RD, &pi->tx_toe_tls_octets,
7206 "# of payload octets in transmitted TOE TLS records");
7207 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_toe_tls_records",
7208 CTLFLAG_RD, &pi->rx_toe_tls_records,
7209 "# of TOE TLS records received");
7210 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_toe_tls_octets",
7211 CTLFLAG_RD, &pi->rx_toe_tls_octets,
7212 "# of payload octets in received TOE TLS records");
7216 sysctl_int_array(SYSCTL_HANDLER_ARGS)
7218 int rc, *i, space = 0;
7221 sbuf_new_for_sysctl(&sb, NULL, 64, req);
7222 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
7224 sbuf_printf(&sb, " ");
7225 sbuf_printf(&sb, "%d", *i);
7228 rc = sbuf_finish(&sb);
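/*
 * Note: sysctl_int_array() treats arg1 as an array of ints spanning arg2
 * bytes and emits the elements as a single space-separated line.
 */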
7234 sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
7239 rc = sysctl_wire_old_buffer(req, 0);
7243 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7247 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1);
7248 rc = sbuf_finish(sb);
7255 sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
7260 rc = sysctl_wire_old_buffer(req, 0);
7264 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7268 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1);
7269 rc = sbuf_finish(sb);
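/*
 * Note: the "%b" conversion used by the bitfield handlers decodes a value
 * against a "\<base><bit#>NAME..." string, e.g. the value 5 formatted with
 * "\20\1RX\2TX\3AUTO" comes out as "5<RX,AUTO>" (illustrative value).
 */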
7276 sysctl_btphy(SYSCTL_HANDLER_ARGS)
7278 struct port_info *pi = arg1;
7280 struct adapter *sc = pi->adapter;
7284 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
7287 /* XXX: magic numbers */
7288 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
7290 end_synchronized_op(sc, 0);
7296 rc = sysctl_handle_int(oidp, &v, 0, req);
7301 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
7303 struct vi_info *vi = arg1;
7306 val = vi->rsrv_noflowq;
7307 rc = sysctl_handle_int(oidp, &val, 0, req);
7308 if (rc != 0 || req->newptr == NULL)
7311 if ((val >= 1) && (vi->ntxq > 1))
7312 vi->rsrv_noflowq = 1;
7314 vi->rsrv_noflowq = 0;
7320 sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
7322 struct vi_info *vi = arg1;
7323 struct adapter *sc = vi->adapter;
7326 MPASS(!(sc->flags & IS_VF));
7328 val = vi->flags & TX_USES_VM_WR ? 1 : 0;
7329 rc = sysctl_handle_int(oidp, &val, 0, req);
7330 if (rc != 0 || req->newptr == NULL)
7333 if (val != 0 && val != 1)
7336 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7340 if (vi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
7342 * We don't want parse_pkt to run with one setting (VF or PF)
7343 * and then eth_tx to see a different setting but still use
7344 * stale information calculated by parse_pkt.
7348 struct port_info *pi = vi->pi;
7349 struct sge_txq *txq;
7351 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr;
7354 vi->flags |= TX_USES_VM_WR;
7355 vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO;
7356 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
7357 V_TXPKT_INTF(pi->tx_chan));
7358 if (!(sc->flags & IS_VF))
7361 vi->flags &= ~TX_USES_VM_WR;
7362 vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO;
7363 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
7364 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
7365 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
7367 for_each_txq(vi, i, txq) {
7368 txq->cpl_ctrl0 = ctrl0;
7369 txq->txp.max_npkt = npkt;
7372 end_synchronized_op(sc, LOCK_HELD);
7377 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
7379 struct vi_info *vi = arg1;
7380 struct adapter *sc = vi->adapter;
7382 struct sge_rxq *rxq;
7387 rc = sysctl_handle_int(oidp, &idx, 0, req);
7388 if (rc != 0 || req->newptr == NULL)
7391 if (idx < 0 || idx >= SGE_NTIMERS)
7394 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7399 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
7400 for_each_rxq(vi, i, rxq) {
7401 #ifdef atomic_store_rel_8
7402 atomic_store_rel_8(&rxq->iq.intr_params, v);
7404 rxq->iq.intr_params = v;
7409 end_synchronized_op(sc, LOCK_HELD);
7414 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
7416 struct vi_info *vi = arg1;
7417 struct adapter *sc = vi->adapter;
7422 rc = sysctl_handle_int(oidp, &idx, 0, req);
7423 if (rc != 0 || req->newptr == NULL)
7426 if (idx < -1 || idx >= SGE_NCOUNTERS)
7429 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7434 if (vi->flags & VI_INIT_DONE)
7435 rc = EBUSY; /* cannot be changed once the queues are created */
7439 end_synchronized_op(sc, LOCK_HELD);
7444 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
7446 struct vi_info *vi = arg1;
7447 struct adapter *sc = vi->adapter;
7450 qsize = vi->qsize_rxq;
7452 rc = sysctl_handle_int(oidp, &qsize, 0, req);
7453 if (rc != 0 || req->newptr == NULL)
7456 if (qsize < 128 || (qsize & 7))
7459 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7464 if (vi->flags & VI_INIT_DONE)
7465 rc = EBUSY; /* cannot be changed once the queues are created */
7467 vi->qsize_rxq = qsize;
7469 end_synchronized_op(sc, LOCK_HELD);
7474 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
7476 struct vi_info *vi = arg1;
7477 struct adapter *sc = vi->adapter;
7480 qsize = vi->qsize_txq;
7482 rc = sysctl_handle_int(oidp, &qsize, 0, req);
7483 if (rc != 0 || req->newptr == NULL)
7486 if (qsize < 128 || qsize > 65536)
7489 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
7494 if (vi->flags & VI_INIT_DONE)
7495 rc = EBUSY; /* cannot be changed once the queues are created */
7497 vi->qsize_txq = qsize;
7499 end_synchronized_op(sc, LOCK_HELD);
7504 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
7506 struct port_info *pi = arg1;
7507 struct adapter *sc = pi->adapter;
7508 struct link_config *lc = &pi->link_cfg;
7511 if (req->newptr == NULL) {
7513 static char *bits = "\20\1RX\2TX\3AUTO";
7515 rc = sysctl_wire_old_buffer(req, 0);
7519 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7524 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) |
7525 (lc->requested_fc & PAUSE_AUTONEG), bits);
7527 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX |
7528 PAUSE_RX | PAUSE_AUTONEG), bits);
7530 rc = sbuf_finish(sb);
7536 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX |
7540 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
7546 if (s[0] < '0' || s[0] > '9')
7547 return (EINVAL); /* not a number */
7549 if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG))
7550 return (EINVAL); /* some other bit is set too */
7552 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7557 lc->requested_fc = n;
7558 fixup_link_config(pi);
7560 rc = apply_link_config(pi);
7561 set_current_media(pi);
7563 end_synchronized_op(sc, 0);
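/*
 * Example (illustrative): per the description on the "pause_settings" node,
 * writing "3" requests rx_pause|tx_pause with PAUSE autonegotiation off,
 * while writing "4" asks for PAUSE to be autonegotiated instead of forced.
 */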
7570 sysctl_fec(SYSCTL_HANDLER_ARGS)
7572 struct port_info *pi = arg1;
7573 struct adapter *sc = pi->adapter;
7574 struct link_config *lc = &pi->link_cfg;
7578 if (req->newptr == NULL) {
7580 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
7581 "\5RSVD3\6auto\7module";
7583 rc = sysctl_wire_old_buffer(req, 0);
7587 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7592 * Display the requested_fec when the link is down -- the actual
7593 * FEC makes sense only when the link is up.
7596 sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) |
7597 (lc->requested_fec & (FEC_AUTO | FEC_MODULE)),
7600 sbuf_printf(sb, "%b", lc->requested_fec, bits);
7602 rc = sbuf_finish(sb);
7608 snprintf(s, sizeof(s), "%d",
7609 lc->requested_fec == FEC_AUTO ? -1 :
7610 lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE));
7612 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
7616 n = strtol(&s[0], NULL, 0);
7617 if (n < 0 || n & FEC_AUTO)
7619 else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE))
7620 return (EINVAL);/* some other bit is set too */
7622 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7627 old = lc->requested_fec;
7629 lc->requested_fec = FEC_AUTO;
7630 else if (n == 0 || n == FEC_NONE)
7631 lc->requested_fec = FEC_NONE;
7634 V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) !=
7639 lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
7642 fixup_link_config(pi);
7643 if (pi->up_vis > 0) {
7644 rc = apply_link_config(pi);
7646 lc->requested_fec = old;
7647 if (rc == FW_EPROTO)
7653 end_synchronized_op(sc, 0);
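/*
 * Example (illustrative): following the bit layout in the "fec" node's
 * description, writing "1" requests RS-FEC, "4" requests no FEC, and "-1"
 * selects automatic FEC (the same value the handler reports when
 * requested_fec is FEC_AUTO).
 */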
7660 sysctl_module_fec(SYSCTL_HANDLER_ARGS)
7662 struct port_info *pi = arg1;
7663 struct adapter *sc = pi->adapter;
7664 struct link_config *lc = &pi->link_cfg;
7668 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
7670 rc = sysctl_wire_old_buffer(req, 0);
7674 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
7678 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0)
7681 if (pi->up_vis == 0) {
7683 * If all the interfaces are administratively down the firmware
7684 * does not report transceiver changes. Refresh port info here.
7685 * This is the only reason we have a synchronized op in this
7686 * function. Just PORT_LOCK would have been enough otherwise.
7688 t4_update_port_info(pi);
7692 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
7693 !fec_supported(lc->pcaps)) {
7694 sbuf_printf(sb, "n/a");
7698 sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
7700 rc = sbuf_finish(sb);
7704 end_synchronized_op(sc, 0);
7710 sysctl_autoneg(SYSCTL_HANDLER_ARGS)
7712 struct port_info *pi = arg1;
7713 struct adapter *sc = pi->adapter;
7714 struct link_config *lc = &pi->link_cfg;
7717 if (lc->pcaps & FW_PORT_CAP32_ANEG)
7718 val = lc->requested_aneg == AUTONEG_DISABLE ? 0 : 1;
7721 rc = sysctl_handle_int(oidp, &val, 0, req);
7722 if (rc != 0 || req->newptr == NULL)
7725 val = AUTONEG_DISABLE;
7727 val = AUTONEG_ENABLE;
7731 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7736 if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
7740 lc->requested_aneg = val;
7741 fixup_link_config(pi);
7743 rc = apply_link_config(pi);
7744 set_current_media(pi);
7747 end_synchronized_op(sc, 0);
7752 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
7754 struct adapter *sc = arg1;
7758 val = t4_read_reg64(sc, reg);
7760 return (sysctl_handle_64(oidp, &val, 0, req));
7764 sysctl_temperature(SYSCTL_HANDLER_ARGS)
7766 struct adapter *sc = arg1;
7768 uint32_t param, val;
7770 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
7773 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7774 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7775 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
7776 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7777 end_synchronized_op(sc, 0);
7781 /* unknown is returned as 0 but we display -1 in that case */
7782 t = val == 0 ? -1 : val;
7784 rc = sysctl_handle_int(oidp, &t, 0, req);
7789 sysctl_vdd(SYSCTL_HANDLER_ARGS)
7791 struct adapter *sc = arg1;
7793 uint32_t param, val;
7795 if (sc->params.core_vdd == 0) {
7796 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7800 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7801 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7802 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
7803 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7804 end_synchronized_op(sc, 0);
7807 sc->params.core_vdd = val;
7810 return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
7814 sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
7816 struct adapter *sc = arg1;
7818 uint32_t param, val;
7820 v = sc->sensor_resets;
7821 rc = sysctl_handle_int(oidp, &v, 0, req);
7822 if (rc != 0 || req->newptr == NULL || v <= 0)
7825 if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
7826 chip_id(sc) < CHELSIO_T5)
7829 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
7832 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7833 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7834 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
7836 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7837 end_synchronized_op(sc, 0);
7839 sc->sensor_resets++;
7844 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
7846 struct adapter *sc = arg1;
7849 uint32_t param, val;
7851 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
7854 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7855 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
7856 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7857 end_synchronized_op(sc, 0);
7861 rc = sysctl_wire_old_buffer(req, 0);
7865 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7869 if (val == 0xffffffff) {
7870 /* Only debug and custom firmwares report load averages. */
7871 sbuf_printf(sb, "not available");
7873 sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
7874 (val >> 16) & 0xff);
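/*
 * Note: the reply packs three load-average samples into its low three
 * bytes, so a value of 0x00030201 (illustrative) is shown as "1 2 3".
 */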
7876 rc = sbuf_finish(sb);
7883 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
7885 struct adapter *sc = arg1;
7888 uint16_t incr[NMTUS][NCCTRL_WIN];
7889 static const char *dec_fac[] = {
7890 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
7894 rc = sysctl_wire_old_buffer(req, 0);
7898 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7902 t4_read_cong_tbl(sc, incr);
7904 for (i = 0; i < NCCTRL_WIN; ++i) {
7905 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
7906 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
7907 incr[5][i], incr[6][i], incr[7][i]);
7908 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
7909 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
7910 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
7911 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
7914 rc = sbuf_finish(sb);
7920 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
7921 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
7922 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
7923 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
7927 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
7929 struct adapter *sc = arg1;
7931 int rc, i, n, qid = arg2;
7934 u_int cim_num_obq = sc->chip_params->cim_num_obq;
7936 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
7937 ("%s: bad qid %d\n", __func__, qid));
7939 if (qid < CIM_NUM_IBQ) {
7942 n = 4 * CIM_IBQ_SIZE;
7943 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7944 rc = t4_read_cim_ibq(sc, qid, buf, n);
7946 /* outbound queue */
7949 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
7950 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7951 rc = t4_read_cim_obq(sc, qid, buf, n);
7958 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
7960 rc = sysctl_wire_old_buffer(req, 0);
7964 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7970 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
7971 for (i = 0, p = buf; i < n; i += 16, p += 4)
7972 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
7975 rc = sbuf_finish(sb);
7983 sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
7987 sbuf_printf(sb, "Status Data PC%s",
7988 cfg & F_UPDBGLACAPTPCONLY ? "" :
7989 " LS0Stat LS0Addr LS0Data");
7991 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
7992 if (cfg & F_UPDBGLACAPTPCONLY) {
7993 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
7995 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
7996 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
7997 p[4] & 0xff, p[5] >> 8);
7998 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
7999 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8000 p[1] & 0xf, p[2] >> 4);
8003 "\n %02x %x%07x %x%07x %08x %08x "
8005 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
8006 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
8013 sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
8017 sbuf_printf(sb, "Status Inst Data PC%s",
8018 cfg & F_UPDBGLACAPTPCONLY ? "" :
8019 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");
8021 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
8022 if (cfg & F_UPDBGLACAPTPCONLY) {
8023 sbuf_printf(sb, "\n %02x %08x %08x %08x",
8024 p[3] & 0xff, p[2], p[1], p[0]);
8025 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
8026 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
8027 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
8028 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
8029 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
8030 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
8033 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
8034 "%08x %08x %08x %08x %08x %08x",
8035 (p[9] >> 16) & 0xff,
8036 p[9] & 0xffff, p[8] >> 16,
8037 p[8] & 0xffff, p[7] >> 16,
8038 p[7] & 0xffff, p[6] >> 16,
8039 p[2], p[1], p[0], p[5], p[4], p[3]);
8045 sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
8050 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
8054 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
8055 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
8060 rc = -t4_cim_read_la(sc, buf, NULL);
8063 if (chip_id(sc) < CHELSIO_T6)
8064 sbuf_cim_la4(sc, sb, buf, cfg);
8066 sbuf_cim_la6(sc, sb, buf, cfg);
8074 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
8076 struct adapter *sc = arg1;
8080 rc = sysctl_wire_old_buffer(req, 0);
8083 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8087 rc = sbuf_cim_la(sc, sb, M_WAITOK);
8089 rc = sbuf_finish(sb);
8095 t4_os_dump_cimla(struct adapter *sc, int arg, bool verbose)
8100 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb)
8102 rc = sbuf_cim_la(sc, &sb, M_NOWAIT);
8104 rc = sbuf_finish(&sb);
8106 log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s",
8107 device_get_nameunit(sc->dev), sbuf_data(&sb));
8115 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
8117 struct adapter *sc = arg1;
8123 rc = sysctl_wire_old_buffer(req, 0);
8127 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8131 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
8134 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
8137 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
8138 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
8142 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
8143 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
8144 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
8145 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
8146 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
8147 (p[1] >> 2) | ((p[2] & 3) << 30),
8148 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
8152 rc = sbuf_finish(sb);
8159 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
8161 struct adapter *sc = arg1;
8167 rc = sysctl_wire_old_buffer(req, 0);
8171 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8175 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
8178 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
8181 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
8182 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
8183 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
8184 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
8185 p[4], p[3], p[2], p[1], p[0]);
8188 sbuf_printf(sb, "\n\nCntl ID Data");
8189 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
8190 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
8191 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
8194 rc = sbuf_finish(sb);
8201 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
8203 struct adapter *sc = arg1;
8206 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
8207 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
8208 uint16_t thres[CIM_NUM_IBQ];
8209 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
8210 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
8211 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
8213 cim_num_obq = sc->chip_params->cim_num_obq;
8215 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
8216 obq_rdaddr = A_UP_OBQ_0_REALADDR;
8218 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
8219 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
8221 nq = CIM_NUM_IBQ + cim_num_obq;
8223 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
8225 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
8229 t4_read_cimq_cfg(sc, base, size, thres);
8231 rc = sysctl_wire_old_buffer(req, 0);
8235 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
8240 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
8242 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
8243 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
8244 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
8245 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
8246 G_QUEREMFLITS(p[2]) * 16);
8247 for ( ; i < nq; i++, p += 4, wr += 2)
8248 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
8249 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
8250 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
8251 G_QUEREMFLITS(p[2]) * 16);
8253 rc = sbuf_finish(sb);
8260 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
8262 struct adapter *sc = arg1;
8265 struct tp_cpl_stats stats;
8267 rc = sysctl_wire_old_buffer(req, 0);
8271 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8275 mtx_lock(&sc->reg_lock);
8276 t4_tp_get_cpl_stats(sc, &stats, 0);
8277 mtx_unlock(&sc->reg_lock);
8279 if (sc->chip_params->nchan > 2) {
8280 sbuf_printf(sb, " channel 0 channel 1"
8281 " channel 2 channel 3");
8282 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
8283 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
8284 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
8285 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
8287 sbuf_printf(sb, " channel 0 channel 1");
8288 sbuf_printf(sb, "\nCPL requests: %10u %10u",
8289 stats.req[0], stats.req[1]);
8290 sbuf_printf(sb, "\nCPL responses: %10u %10u",
8291 stats.rsp[0], stats.rsp[1]);
8294 rc = sbuf_finish(sb);
8301 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
8303 struct adapter *sc = arg1;
8306 struct tp_usm_stats stats;
8308 rc = sysctl_wire_old_buffer(req, 0);
8312 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8316 mtx_lock(&sc->reg_lock);
8317 t4_get_usm_stats(sc, &stats, 1);
8318 mtx_unlock(&sc->reg_lock);
8320 sbuf_printf(sb, "Frames: %u\n", stats.frames);
8321 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
8322 sbuf_printf(sb, "Drops: %u", stats.drops);
8324 rc = sbuf_finish(sb);
8331 sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
8333 struct adapter *sc = arg1;
8336 struct tp_tid_stats stats;
8338 rc = sysctl_wire_old_buffer(req, 0);
8342 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8346 mtx_lock(&sc->reg_lock);
8347 t4_tp_get_tid_stats(sc, &stats, 1);
8348 mtx_unlock(&sc->reg_lock);
8350 sbuf_printf(sb, "Delete: %u\n", stats.del);
8351 sbuf_printf(sb, "Invalidate: %u\n", stats.inv);
8352 sbuf_printf(sb, "Active: %u\n", stats.act);
8353 sbuf_printf(sb, "Passive: %u", stats.pas);
8355 rc = sbuf_finish(sb);
8361 static const char * const devlog_level_strings[] = {
8362 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
8363 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
8364 [FW_DEVLOG_LEVEL_ERR] = "ERR",
8365 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
8366 [FW_DEVLOG_LEVEL_INFO] = "INFO",
8367 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
8370 static const char * const devlog_facility_strings[] = {
8371 [FW_DEVLOG_FACILITY_CORE] = "CORE",
8372 [FW_DEVLOG_FACILITY_CF] = "CF",
8373 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
8374 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
8375 [FW_DEVLOG_FACILITY_RES] = "RES",
8376 [FW_DEVLOG_FACILITY_HW] = "HW",
8377 [FW_DEVLOG_FACILITY_FLR] = "FLR",
8378 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
8379 [FW_DEVLOG_FACILITY_PHY] = "PHY",
8380 [FW_DEVLOG_FACILITY_MAC] = "MAC",
8381 [FW_DEVLOG_FACILITY_PORT] = "PORT",
8382 [FW_DEVLOG_FACILITY_VI] = "VI",
8383 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
8384 [FW_DEVLOG_FACILITY_ACL] = "ACL",
8385 [FW_DEVLOG_FACILITY_TM] = "TM",
8386 [FW_DEVLOG_FACILITY_QFC] = "QFC",
8387 [FW_DEVLOG_FACILITY_DCB] = "DCB",
8388 [FW_DEVLOG_FACILITY_ETH] = "ETH",
8389 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
8390 [FW_DEVLOG_FACILITY_RI] = "RI",
8391 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
8392 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
8393 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
8394 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE",
8395 [FW_DEVLOG_FACILITY_CHNET] = "CHNET",
8399 sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
8401 int i, j, rc, nentries, first = 0;
8402 struct devlog_params *dparams = &sc->params.devlog;
8403 struct fw_devlog_e *buf, *e;
8404 uint64_t ftstamp = UINT64_MAX;
8406 if (dparams->addr == 0)
8409 MPASS(flags == M_WAITOK || flags == M_NOWAIT);
8410 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
8414 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
8418 nentries = dparams->size / sizeof(struct fw_devlog_e);
8419 for (i = 0; i < nentries; i++) {
8422 if (e->timestamp == 0)
8425 e->timestamp = be64toh(e->timestamp);
8426 e->seqno = be32toh(e->seqno);
8427 for (j = 0; j < 8; j++)
8428 e->params[j] = be32toh(e->params[j]);
8430 if (e->timestamp < ftstamp) {
8431 ftstamp = e->timestamp;
8436 if (buf[first].timestamp == 0)
8437 goto done; /* nothing in the log */
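/*
 * Note: "first" now indexes the entry with the smallest timestamp, i.e. the
 * oldest record in the circular log; the loop below starts there and wraps
 * around until it returns to "first".
 */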
8439 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
8440 "Seq#", "Tstamp", "Level", "Facility", "Message");
8445 if (e->timestamp == 0)
8448 sbuf_printf(sb, "%10d %15ju %8s %8s ",
8449 e->seqno, e->timestamp,
8450 (e->level < nitems(devlog_level_strings) ?
8451 devlog_level_strings[e->level] : "UNKNOWN"),
8452 (e->facility < nitems(devlog_facility_strings) ?
8453 devlog_facility_strings[e->facility] : "UNKNOWN"));
8454 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
8455 e->params[2], e->params[3], e->params[4],
8456 e->params[5], e->params[6], e->params[7]);
8458 if (++i == nentries)
8460 } while (i != first);
8467 sysctl_devlog(SYSCTL_HANDLER_ARGS)
8469 struct adapter *sc = arg1;
8473 rc = sysctl_wire_old_buffer(req, 0);
8476 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8480 rc = sbuf_devlog(sc, sb, M_WAITOK);
8482 rc = sbuf_finish(sb);
8488 t4_os_dump_devlog(struct adapter *sc)
8493 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb)
8495 rc = sbuf_devlog(sc, &sb, M_NOWAIT);
8497 rc = sbuf_finish(&sb);
8499 log(LOG_DEBUG, "%s: device log follows.\n%s",
8500 device_get_nameunit(sc->dev), sbuf_data(&sb));
8507 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
8509 struct adapter *sc = arg1;
8512 struct tp_fcoe_stats stats[MAX_NCHAN];
8513 int i, nchan = sc->chip_params->nchan;
8515 rc = sysctl_wire_old_buffer(req, 0);
8519 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8523 mtx_lock(&sc->reg_lock);
8524 for (i = 0; i < nchan; i++)
8525 t4_get_fcoe_stats(sc, i, &stats[i], 1);
8526 mtx_unlock(&sc->reg_lock);
8529 sbuf_printf(sb, " channel 0 channel 1"
8530 " channel 2 channel 3");
8531 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
8532 stats[0].octets_ddp, stats[1].octets_ddp,
8533 stats[2].octets_ddp, stats[3].octets_ddp);
8534 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
8535 stats[0].frames_ddp, stats[1].frames_ddp,
8536 stats[2].frames_ddp, stats[3].frames_ddp);
8537 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
8538 stats[0].frames_drop, stats[1].frames_drop,
8539 stats[2].frames_drop, stats[3].frames_drop);
8541 sbuf_printf(sb, " channel 0 channel 1");
8542 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
8543 stats[0].octets_ddp, stats[1].octets_ddp);
8544 sbuf_printf(sb, "\nframesDDP: %16u %16u",
8545 stats[0].frames_ddp, stats[1].frames_ddp);
8546 sbuf_printf(sb, "\nframesDrop: %16u %16u",
8547 stats[0].frames_drop, stats[1].frames_drop);
8550 rc = sbuf_finish(sb);
8557 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
8559 struct adapter *sc = arg1;
8562 unsigned int map, kbps, ipg, mode;
8563 unsigned int pace_tab[NTX_SCHED];
8565 rc = sysctl_wire_old_buffer(req, 0);
8569 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
8573 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
8574 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
8575 t4_read_pace_tbl(sc, pace_tab);
8577 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
8578 "Class IPG (0.1 ns) Flow IPG (us)");
8580 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
8581 t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
8582 sbuf_printf(sb, "\n %u %-5s %u ", i,
8583 (mode & (1 << i)) ? "flow" : "class", map & 3);
8585 sbuf_printf(sb, "%9u ", kbps);
8587 sbuf_printf(sb, " disabled ");
8590 sbuf_printf(sb, "%13u ", ipg);
8592 sbuf_printf(sb, " disabled ");
8595 sbuf_printf(sb, "%10u", pace_tab[i]);
8597 sbuf_printf(sb, " disabled");
8600 rc = sbuf_finish(sb);
8607 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
8609 struct adapter *sc = arg1;
8613 struct lb_port_stats s[2];
8614 static const char *stat_name[] = {
8615 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
8616 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
8617 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
8618 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
8619 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
8620 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
8621 "BG2FramesTrunc:", "BG3FramesTrunc:"
8624 rc = sysctl_wire_old_buffer(req, 0);
8628 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8632 memset(s, 0, sizeof(s));
8634 for (i = 0; i < sc->chip_params->nchan; i += 2) {
8635 t4_get_lb_stats(sc, i, &s[0]);
8636 t4_get_lb_stats(sc, i + 1, &s[1]);
8640 sbuf_printf(sb, "%s Loopback %u"
8641 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
8643 for (j = 0; j < nitems(stat_name); j++)
8644 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
8648 rc = sbuf_finish(sb);
8655 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
8658 struct port_info *pi = arg1;
8659 struct link_config *lc = &pi->link_cfg;
8662 rc = sysctl_wire_old_buffer(req, 0);
8665 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
8669 if (lc->link_ok || lc->link_down_rc == 255)
8670 sbuf_printf(sb, "n/a");
8672 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
8674 rc = sbuf_finish(sb);
8687 mem_desc_cmp(const void *a, const void *b)
8689 return ((const struct mem_desc *)a)->base -
8690 ((const struct mem_desc *)b)->base;
8694 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
8702 size = to - from + 1;
8706 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
8707 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
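/*
 * Note: each region is printed as "<name> <from>-<to> [<size>]" with the
 * addresses in hex and the size in decimal bytes.
 */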
8711 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
8713 struct adapter *sc = arg1;
8716 uint32_t lo, hi, used, alloc;
8717 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
8718 static const char *region[] = {
8719 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
8720 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
8721 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
8722 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
8723 "RQUDP region:", "PBL region:", "TXPBL region:",
8724 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
8725 "On-chip queues:", "TLS keys:",
8727 struct mem_desc avail[4];
8728 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
8729 struct mem_desc *md = mem;
8731 rc = sysctl_wire_old_buffer(req, 0);
8735 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8739 for (i = 0; i < nitems(mem); i++) {
8744 /* Find and sort the populated memory ranges */
8746 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
8747 if (lo & F_EDRAM0_ENABLE) {
8748 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
8749 avail[i].base = G_EDRAM0_BASE(hi) << 20;
8750 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
8754 if (lo & F_EDRAM1_ENABLE) {
8755 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
8756 avail[i].base = G_EDRAM1_BASE(hi) << 20;
8757 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
8761 if (lo & F_EXT_MEM_ENABLE) {
8762 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
8763 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
8764 avail[i].limit = avail[i].base +
8765 (G_EXT_MEM_SIZE(hi) << 20);
8766 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
8769 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
8770 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
8771 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
8772 avail[i].limit = avail[i].base +
8773 (G_EXT_MEM1_SIZE(hi) << 20);
8777 if (!i) /* no memory available */
8779 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
8781 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
8782 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
8783 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
8784 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
8785 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
8786 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
8787 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
8788 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
8789 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
8791 /* the next few have explicit upper bounds */
8792 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
8793 md->limit = md->base - 1 +
8794 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
8795 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
8798 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
8799 md->limit = md->base - 1 +
8800 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
8801 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
8804 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
8805 if (chip_id(sc) <= CHELSIO_T5)
8806 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
8808 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
8812 md->idx = nitems(region); /* hide it */
8816 #define ulp_region(reg) \
8817 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
8818 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
8820 ulp_region(RX_ISCSI);
8821 ulp_region(RX_TDDP);
8823 ulp_region(RX_STAG);
8825 ulp_region(RX_RQUDP);
8831 md->idx = nitems(region);
8834 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
8835 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
8838 if (sge_ctrl & F_VFIFO_ENABLE)
8839 size = G_DBVFIFO_SIZE(fifo_size);
8841 size = G_T6_DBVFIFO_SIZE(fifo_size);
8844 md->base = G_BASEADDR(t4_read_reg(sc,
8845 A_SGE_DBVFIFO_BADDR));
8846 md->limit = md->base + (size << 2) - 1;
8851 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
8854 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
8858 md->base = sc->vres.ocq.start;
8859 if (sc->vres.ocq.size)
8860 md->limit = md->base + sc->vres.ocq.size - 1;
8862 md->idx = nitems(region); /* hide it */
8865 md->base = sc->vres.key.start;
8866 if (sc->vres.key.size)
8867 md->limit = md->base + sc->vres.key.size - 1;
8869 md->idx = nitems(region); /* hide it */
8872 /* add any address-space holes, there can be up to 3 */
8873 for (n = 0; n < i - 1; n++)
8874 if (avail[n].limit < avail[n + 1].base)
8875 (md++)->base = avail[n].limit;
8877 (md++)->base = avail[n].limit;
8880 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
8882 for (lo = 0; lo < i; lo++)
8883 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
8884 avail[lo].limit - 1);
8886 sbuf_printf(sb, "\n");
8887 for (i = 0; i < n; i++) {
8888 if (mem[i].idx >= nitems(region))
8889 continue; /* skip holes */
8891 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
8892 mem_region_show(sb, region[mem[i].idx], mem[i].base,
8896 sbuf_printf(sb, "\n");
8897 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
8898 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
8899 mem_region_show(sb, "uP RAM:", lo, hi);
8901 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
8902 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
8903 mem_region_show(sb, "uP Extmem2:", lo, hi);
8905 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
8906 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
8908 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
8909 (lo & F_PMRXNUMCHN) ? 2 : 1);
8911 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
8912 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
8913 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
8915 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
8916 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
8917 sbuf_printf(sb, "%u p-structs\n",
8918 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
8920 for (i = 0; i < 4; i++) {
8921 if (chip_id(sc) > CHELSIO_T5)
8922 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
8924 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
8926 used = G_T5_USED(lo);
8927 alloc = G_T5_ALLOC(lo);
8930 alloc = G_ALLOC(lo);
8932 /* For T6 these are MAC buffer groups */
8933 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
8936 for (i = 0; i < sc->chip_params->nchan; i++) {
8937 if (chip_id(sc) > CHELSIO_T5)
8938 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
8940 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
8942 used = G_T5_USED(lo);
8943 alloc = G_T5_ALLOC(lo);
8946 alloc = G_ALLOC(lo);
8948 /* For T6 these are MAC buffer groups */
8950 "\nLoopback %d using %u pages out of %u allocated",
8954 rc = sbuf_finish(sb);
8961 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
8965 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
8969 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
8971 struct adapter *sc = arg1;
8975 MPASS(chip_id(sc) <= CHELSIO_T5);
8977 rc = sysctl_wire_old_buffer(req, 0);
8981 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
8986 "Idx Ethernet address Mask Vld Ports PF"
8987 " VF Replication P0 P1 P2 P3 ML");
8988 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
8989 uint64_t tcamx, tcamy, mask;
8990 uint32_t cls_lo, cls_hi;
8991 uint8_t addr[ETHER_ADDR_LEN];
8993 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
8994 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
8997 tcamxy2valmask(tcamx, tcamy, addr, &mask);
8998 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
8999 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
9000 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
9001 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
9002 addr[3], addr[4], addr[5], (uintmax_t)mask,
9003 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
9004 G_PORTMAP(cls_hi), G_PF(cls_lo),
9005 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
9007 if (cls_lo & F_REPLICATE) {
9008 struct fw_ldst_cmd ldst_cmd;
9010 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
9011 ldst_cmd.op_to_addrspace =
9012 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
9013 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9014 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
9015 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
9016 ldst_cmd.u.mps.rplc.fid_idx =
9017 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
9018 V_FW_LDST_CMD_IDX(i));
9020 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
9024 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
9025 sizeof(ldst_cmd), &ldst_cmd);
9026 end_synchronized_op(sc, 0);
9029 sbuf_printf(sb, "%36d", rc);
9032 sbuf_printf(sb, " %08x %08x %08x %08x",
9033 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
9034 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
9035 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
9036 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
9039 sbuf_printf(sb, "%36s", "");
9041 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
9042 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
9043 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
9047 (void) sbuf_finish(sb);
9049 rc = sbuf_finish(sb);
9056 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
9058 struct adapter *sc = arg1;
9062 MPASS(chip_id(sc) > CHELSIO_T5);
9064 rc = sysctl_wire_old_buffer(req, 0);
9068 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9072 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
9073 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
9075 " P0 P1 P2 P3 ML\n");
9077 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
9078 uint8_t dip_hit, vlan_vld, lookup_type, port_num;
9080 uint64_t tcamx, tcamy, val, mask;
9081 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
9082 uint8_t addr[ETHER_ADDR_LEN];
9084 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
9086 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
9088 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
9089 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
9090 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
9091 tcamy = G_DMACH(val) << 32;
9092 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
9093 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
9094 lookup_type = G_DATALKPTYPE(data2);
9095 port_num = G_DATAPORTNUM(data2);
9096 if (lookup_type && lookup_type != M_DATALKPTYPE) {
9097 /* Inner header VNI */
9098 vniy = ((data2 & F_DATAVIDH2) << 23) |
9099 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
9100 dip_hit = data2 & F_DATADIPHIT;
9105 vlan_vld = data2 & F_DATAVIDH2;
9106 ivlan = G_VIDL(val);
9109 ctl |= V_CTLXYBITSEL(1);
9110 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
9111 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
9112 tcamx = G_DMACH(val) << 32;
9113 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
9114 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
9115 if (lookup_type && lookup_type != M_DATALKPTYPE) {
9116 /* Inner header VNI mask */
9117 vnix = ((data2 & F_DATAVIDH2) << 23) |
9118 (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
9124 tcamxy2valmask(tcamx, tcamy, addr, &mask);
9126 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
9127 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
9129 if (lookup_type && lookup_type != M_DATALKPTYPE) {
9130 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
9131 "%012jx %06x %06x - - %3c"
9132 " 'I' %4x %3c %#x%4u%4d", i, addr[0],
9133 addr[1], addr[2], addr[3], addr[4], addr[5],
9134 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
9135 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
9136 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
9137 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
9139 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
9140 "%012jx - - ", i, addr[0], addr[1],
9141 addr[2], addr[3], addr[4], addr[5],
9145 sbuf_printf(sb, "%4u Y ", ivlan);
9147 sbuf_printf(sb, " - N ");
9149 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
9150 lookup_type ? 'I' : 'O', port_num,
9151 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
9152 G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
9153 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
9157 if (cls_lo & F_T6_REPLICATE) {
9158 struct fw_ldst_cmd ldst_cmd;
9160 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
9161 ldst_cmd.op_to_addrspace =
9162 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
9163 F_FW_CMD_REQUEST | F_FW_CMD_READ |
9164 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
9165 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
9166 ldst_cmd.u.mps.rplc.fid_idx =
9167 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
9168 V_FW_LDST_CMD_IDX(i));
9170 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
9174 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
9175 sizeof(ldst_cmd), &ldst_cmd);
9176 end_synchronized_op(sc, 0);
9179 sbuf_printf(sb, "%72d", rc);
9182 sbuf_printf(sb, " %08x %08x %08x %08x"
9183 " %08x %08x %08x %08x",
9184 be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
9185 be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
9186 be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
9187 be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
9188 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
9189 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
9190 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
9191 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
9194 sbuf_printf(sb, "%72s", "");
9196 sbuf_printf(sb, "%4u%3u%3u%3u %#x",
9197 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
9198 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
9199 (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
9203 (void) sbuf_finish(sb);
9205 rc = sbuf_finish(sb);
9212 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
9214 struct adapter *sc = arg1;
9217 uint16_t mtus[NMTUS];
9219 rc = sysctl_wire_old_buffer(req, 0);
9223 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9227 t4_read_mtu_tbl(sc, mtus, NULL);
9229 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
9230 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
9231 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
9232 mtus[14], mtus[15]);
9234 rc = sbuf_finish(sb);
9241 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
9243 struct adapter *sc = arg1;
9246 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
9247 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
9248 static const char *tx_stats[MAX_PM_NSTATS] = {
9249 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
9250 "Tx FIFO wait", NULL, "Tx latency"
9252 static const char *rx_stats[MAX_PM_NSTATS] = {
9253 "Read:", "Write bypass:", "Write mem:", "Flush:",
9254 "Rx FIFO wait", NULL, "Rx latency"
9257 rc = sysctl_wire_old_buffer(req, 0);
9261 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9265 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
9266 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
9268 sbuf_printf(sb, " Tx pcmds Tx bytes");
9269 for (i = 0; i < 4; i++) {
9270 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
9274 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
9275 for (i = 0; i < 4; i++) {
9276 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
9280 if (chip_id(sc) > CHELSIO_T5) {
9282 "\n Total wait Total occupancy");
9283 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
9285 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
9289 MPASS(i < nitems(tx_stats));
9292 "\n Reads Total wait");
9293 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
9295 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
9299 rc = sbuf_finish(sb);
9306 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
9308 struct adapter *sc = arg1;
9311 struct tp_rdma_stats stats;
9313 rc = sysctl_wire_old_buffer(req, 0);
9317 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9321 mtx_lock(&sc->reg_lock);
9322 t4_tp_get_rdma_stats(sc, &stats, 0);
9323 mtx_unlock(&sc->reg_lock);
9325 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
9326 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
9328 rc = sbuf_finish(sb);
9335 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
9337 struct adapter *sc = arg1;
9340 struct tp_tcp_stats v4, v6;
9342 rc = sysctl_wire_old_buffer(req, 0);
9346 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9350 mtx_lock(&sc->reg_lock);
9351 t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
9352 mtx_unlock(&sc->reg_lock);
9356 sbuf_printf(sb, "OutRsts: %20u %20u\n",
9357 v4.tcp_out_rsts, v6.tcp_out_rsts);
9358 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
9359 v4.tcp_in_segs, v6.tcp_in_segs);
9360 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
9361 v4.tcp_out_segs, v6.tcp_out_segs);
9362 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
9363 v4.tcp_retrans_segs, v6.tcp_retrans_segs);
9365 rc = sbuf_finish(sb);
9372 sysctl_tids(SYSCTL_HANDLER_ARGS)
9374 struct adapter *sc = arg1;
9377 struct tid_info *t = &sc->tids;
9379 rc = sysctl_wire_old_buffer(req, 0);
9383 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9388 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
9393 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n",
9394 t->hpftid_base, t->hpftid_end, t->hpftids_in_use);
9398 sbuf_printf(sb, "TID range: ");
9399 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
9402 if (chip_id(sc) <= CHELSIO_T5) {
9403 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
9404 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
9406 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
9407 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
9411 sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1);
9412 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
9414 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base +
9417 sbuf_printf(sb, ", in use: %u\n",
9418 atomic_load_acq_int(&t->tids_in_use));
9422 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
9423 t->stid_base + t->nstids - 1, t->stids_in_use);
9427 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base,
9428 t->ftid_end, t->ftids_in_use);
9432 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
9433 t->etid_base + t->netids - 1, t->etids_in_use);
9436 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
9437 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
9438 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
9440 rc = sbuf_finish(sb);
9447 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
9449 struct adapter *sc = arg1;
9452 struct tp_err_stats stats;
9454 rc = sysctl_wire_old_buffer(req, 0);
9458 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9462 mtx_lock(&sc->reg_lock);
9463 t4_tp_get_err_stats(sc, &stats, 0);
9464 mtx_unlock(&sc->reg_lock);
9466 if (sc->chip_params->nchan > 2) {
9467 sbuf_printf(sb, " channel 0 channel 1"
9468 " channel 2 channel 3\n");
9469 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
9470 stats.mac_in_errs[0], stats.mac_in_errs[1],
9471 stats.mac_in_errs[2], stats.mac_in_errs[3]);
9472 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
9473 stats.hdr_in_errs[0], stats.hdr_in_errs[1],
9474 stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
9475 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
9476 stats.tcp_in_errs[0], stats.tcp_in_errs[1],
9477 stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
9478 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
9479 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
9480 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
9481 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
9482 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
9483 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
9484 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
9485 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
9486 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
9487 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
9488 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
9489 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
9490 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
9491 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
9492 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
9494 sbuf_printf(sb, " channel 0 channel 1\n");
9495 sbuf_printf(sb, "macInErrs: %10u %10u\n",
9496 stats.mac_in_errs[0], stats.mac_in_errs[1]);
9497 sbuf_printf(sb, "hdrInErrs: %10u %10u\n",
9498 stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
9499 sbuf_printf(sb, "tcpInErrs: %10u %10u\n",
9500 stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
9501 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n",
9502 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
9503 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n",
9504 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
9505 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n",
9506 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
9507 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n",
9508 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
9509 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n",
9510 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
9513 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
9514 stats.ofld_no_neigh, stats.ofld_cong_defer);
9516 rc = sbuf_finish(sb);
9523 sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
9525 struct adapter *sc = arg1;
9528 struct tp_tnl_stats stats;
9530 rc = sysctl_wire_old_buffer(req, 0);
9534 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9538 mtx_lock(&sc->reg_lock);
9539 t4_tp_get_tnl_stats(sc, &stats, 1);
9540 mtx_unlock(&sc->reg_lock);
9542 if (sc->chip_params->nchan > 2) {
9543 sbuf_printf(sb, " channel 0 channel 1"
9544 " channel 2 channel 3\n");
9545 sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n",
9546 stats.out_pkt[0], stats.out_pkt[1],
9547 stats.out_pkt[2], stats.out_pkt[3]);
9548 sbuf_printf(sb, "InPkts: %10u %10u %10u %10u",
9549 stats.in_pkt[0], stats.in_pkt[1],
9550 stats.in_pkt[2], stats.in_pkt[3]);
9552 sbuf_printf(sb, " channel 0 channel 1\n");
9553 sbuf_printf(sb, "OutPkts: %10u %10u\n",
9554 stats.out_pkt[0], stats.out_pkt[1]);
9555 sbuf_printf(sb, "InPkts: %10u %10u",
9556 stats.in_pkt[0], stats.in_pkt[1]);
9559 rc = sbuf_finish(sb);
9566 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
9568 struct adapter *sc = arg1;
9569 struct tp_params *tpp = &sc->params.tp;
9573 mask = tpp->la_mask >> 16;
9574 rc = sysctl_handle_int(oidp, &mask, 0, req);
9575 if (rc != 0 || req->newptr == NULL)
9579 tpp->la_mask = mask << 16;
9580 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
9592 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
9598 uint64_t mask = (1ULL << f->width) - 1;
9599 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
9600 ((uintmax_t)v >> f->start) & mask);
9602 if (line_size + len >= 79) {
9604 sbuf_printf(sb, "\n ");
9606 sbuf_printf(sb, "%s ", buf);
9607 line_size += len + 1;
9610 sbuf_printf(sb, "\n");
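/*
 * Each field_desc entry in the tables below is { name, start bit, width }.
 * field_desc_show() extracts the field as (v >> start) & ((1ULL << width) - 1)
 * and wraps the output at roughly 80 columns.  For example,
 * { "RcfOpCodeOut", 60, 4 } prints bits 63:60 of the 64-bit TP LA word.
 */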
9613 static const struct field_desc tp_la0[] = {
9614 { "RcfOpCodeOut", 60, 4 },
9616 { "WcfState", 52, 4 },
9617 { "RcfOpcSrcOut", 50, 2 },
9618 { "CRxError", 49, 1 },
9619 { "ERxError", 48, 1 },
9620 { "SanityFailed", 47, 1 },
9621 { "SpuriousMsg", 46, 1 },
9622 { "FlushInputMsg", 45, 1 },
9623 { "FlushInputCpl", 44, 1 },
9624 { "RssUpBit", 43, 1 },
9625 { "RssFilterHit", 42, 1 },
9627 { "InitTcb", 31, 1 },
9628 { "LineNumber", 24, 7 },
9630 { "EdataOut", 22, 1 },
9632 { "CdataOut", 20, 1 },
9633 { "EreadPdu", 19, 1 },
9634 { "CreadPdu", 18, 1 },
9635 { "TunnelPkt", 17, 1 },
9636 { "RcfPeerFin", 16, 1 },
9637 { "RcfReasonOut", 12, 4 },
9638 { "TxCchannel", 10, 2 },
9639 { "RcfTxChannel", 8, 2 },
9640 { "RxEchannel", 6, 2 },
9641 { "RcfRxChannel", 5, 1 },
9642 { "RcfDataOutSrdy", 4, 1 },
9644 { "RxOoDvld", 2, 1 },
9645 { "RxCongestion", 1, 1 },
9646 { "TxCongestion", 0, 1 },
9650 static const struct field_desc tp_la1[] = {
9651 { "CplCmdIn", 56, 8 },
9652 { "CplCmdOut", 48, 8 },
9653 { "ESynOut", 47, 1 },
9654 { "EAckOut", 46, 1 },
9655 { "EFinOut", 45, 1 },
9656 { "ERstOut", 44, 1 },
9661 { "DataIn", 39, 1 },
9662 { "DataInVld", 38, 1 },
9664 { "RxBufEmpty", 36, 1 },
9666 { "RxFbCongestion", 34, 1 },
9667 { "TxFbCongestion", 33, 1 },
9668 { "TxPktSumSrdy", 32, 1 },
9669 { "RcfUlpType", 28, 4 },
9671 { "Ebypass", 26, 1 },
9673 { "Static0", 24, 1 },
9675 { "Cbypass", 22, 1 },
9677 { "CPktOut", 20, 1 },
9678 { "RxPagePoolFull", 18, 2 },
9679 { "RxLpbkPkt", 17, 1 },
9680 { "TxLpbkPkt", 16, 1 },
9681 { "RxVfValid", 15, 1 },
9682 { "SynLearned", 14, 1 },
9683 { "SetDelEntry", 13, 1 },
9684 { "SetInvEntry", 12, 1 },
9685 { "CpcmdDvld", 11, 1 },
9686 { "CpcmdSave", 10, 1 },
9687 { "RxPstructsFull", 8, 2 },
9688 { "EpcmdDvld", 7, 1 },
9689 { "EpcmdFlush", 6, 1 },
9690 { "EpcmdTrimPrefix", 5, 1 },
9691 { "EpcmdTrimPostfix", 4, 1 },
9692 { "ERssIp4Pkt", 3, 1 },
9693 { "ERssIp6Pkt", 2, 1 },
9694 { "ERssTcpUdpPkt", 1, 1 },
9695 { "ERssFceFipPkt", 0, 1 },
9699 static const struct field_desc tp_la2[] = {
9700 { "CplCmdIn", 56, 8 },
9701 { "MpsVfVld", 55, 1 },
9708 { "DataIn", 39, 1 },
9709 { "DataInVld", 38, 1 },
9711 { "RxBufEmpty", 36, 1 },
9713 { "RxFbCongestion", 34, 1 },
9714 { "TxFbCongestion", 33, 1 },
9715 { "TxPktSumSrdy", 32, 1 },
9716 { "RcfUlpType", 28, 4 },
9718 { "Ebypass", 26, 1 },
9720 { "Static0", 24, 1 },
9722 { "Cbypass", 22, 1 },
9724 { "CPktOut", 20, 1 },
9725 { "RxPagePoolFull", 18, 2 },
9726 { "RxLpbkPkt", 17, 1 },
9727 { "TxLpbkPkt", 16, 1 },
9728 { "RxVfValid", 15, 1 },
9729 { "SynLearned", 14, 1 },
9730 { "SetDelEntry", 13, 1 },
9731 { "SetInvEntry", 12, 1 },
9732 { "CpcmdDvld", 11, 1 },
9733 { "CpcmdSave", 10, 1 },
9734 { "RxPstructsFull", 8, 2 },
9735 { "EpcmdDvld", 7, 1 },
9736 { "EpcmdFlush", 6, 1 },
9737 { "EpcmdTrimPrefix", 5, 1 },
9738 { "EpcmdTrimPostfix", 4, 1 },
9739 { "ERssIp4Pkt", 3, 1 },
9740 { "ERssIp6Pkt", 2, 1 },
9741 { "ERssTcpUdpPkt", 1, 1 },
9742 { "ERssFceFipPkt", 0, 1 },
9747 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
9750 field_desc_show(sb, *p, tp_la0);
9754 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
9758 sbuf_printf(sb, "\n");
9759 field_desc_show(sb, p[0], tp_la0);
9760 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
9761 field_desc_show(sb, p[1], tp_la0);
9765 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
9769 sbuf_printf(sb, "\n");
9770 field_desc_show(sb, p[0], tp_la0);
9771 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
9772 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
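/*
 * Which formatter runs depends on the LA mode (G_DBGLAMODE) read in
 * sysctl_tp_la() below: entries are decoded one at a time with tp_la_show()
 * or in pairs with tp_la_show2()/tp_la_show3(), and tp_la_show3() picks the
 * tp_la1 or tp_la2 field table based on bit 17 of the first word.
 */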
9776 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
9778 struct adapter *sc = arg1;
9783 void (*show_func)(struct sbuf *, uint64_t *, int);
9785 rc = sysctl_wire_old_buffer(req, 0);
9789 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9793 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
9795 t4_tp_read_la(sc, buf, NULL);
9798 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
9801 show_func = tp_la_show2;
9805 show_func = tp_la_show3;
9809 show_func = tp_la_show;
9812 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
9813 (*show_func)(sb, p, i);
9815 rc = sbuf_finish(sb);
9822 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
9824 struct adapter *sc = arg1;
9827 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
9829 rc = sysctl_wire_old_buffer(req, 0);
9833 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
9837 t4_get_chan_txrate(sc, nrate, orate);
9839 if (sc->chip_params->nchan > 2) {
9840 sbuf_printf(sb, " channel 0 channel 1"
9841 " channel 2 channel 3\n");
9842 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
9843 nrate[0], nrate[1], nrate[2], nrate[3]);
9844 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
9845 orate[0], orate[1], orate[2], orate[3]);
9847 sbuf_printf(sb, " channel 0 channel 1\n");
9848 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n",
9849 nrate[0], nrate[1]);
9850 sbuf_printf(sb, "Offload B/s: %10ju %10ju",
9851 orate[0], orate[1]);
9854 rc = sbuf_finish(sb);
9861 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
9863 struct adapter *sc = arg1;
9868 rc = sysctl_wire_old_buffer(req, 0);
9872 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9876 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
9879 t4_ulprx_read_la(sc, buf);
9882 sbuf_printf(sb, " Pcmd Type Message"
9884 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
9885 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
9886 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
9889 rc = sbuf_finish(sb);
9896 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
9898 struct adapter *sc = arg1;
9902 MPASS(chip_id(sc) >= CHELSIO_T5);
9904 rc = sysctl_wire_old_buffer(req, 0);
9908 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9912 v = t4_read_reg(sc, A_SGE_STAT_CFG);
9913 if (G_STATSOURCE_T5(v) == 7) {
9916 mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
9918 sbuf_printf(sb, "total %d, incomplete %d",
9919 t4_read_reg(sc, A_SGE_STAT_TOTAL),
9920 t4_read_reg(sc, A_SGE_STAT_MATCH));
9921 } else if (mode == 1) {
9922 sbuf_printf(sb, "total %d, data overflow %d",
9923 t4_read_reg(sc, A_SGE_STAT_TOTAL),
9924 t4_read_reg(sc, A_SGE_STAT_MATCH));
9926 sbuf_printf(sb, "unknown mode %d", mode);
9929 rc = sbuf_finish(sb);
9936 sysctl_cpus(SYSCTL_HANDLER_ARGS)
9938 struct adapter *sc = arg1;
9939 enum cpu_sets op = arg2;
9944 MPASS(op == LOCAL_CPUS || op == INTR_CPUS);
9947 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset);
9951 rc = sysctl_wire_old_buffer(req, 0);
9955 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
9960 sbuf_printf(sb, "%d ", i);
9961 rc = sbuf_finish(sb);
9969 sysctl_tls(SYSCTL_HANDLER_ARGS)
9971 struct adapter *sc = arg1;
9976 rc = sysctl_handle_int(oidp, &v, 0, req);
9977 if (rc != 0 || req->newptr == NULL)
9980 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
9983 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls");
9987 for_each_port(sc, i) {
9988 for_each_vi(sc->port[i], j, vi) {
9989 if (vi->flags & VI_INIT_DONE)
9990 t4_update_fl_bufsize(vi->ifp);
9993 end_synchronized_op(sc, 0);
10000 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS)
10002 struct adapter *sc = arg1;
10003 int *old_ports, *new_ports;
10004 int i, new_count, rc;
10006 if (req->newptr == NULL && req->oldptr == NULL)
10007 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) *
10008 sizeof(sc->tt.tls_rx_ports[0])));
10010 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx");
10014 if (sc->tt.num_tls_rx_ports == 0) {
10016 rc = SYSCTL_OUT(req, &i, sizeof(i));
10018 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports,
10019 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0]));
10020 if (rc == 0 && req->newptr != NULL) {
10021 new_count = req->newlen / sizeof(new_ports[0]);
10022 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE,
10024 rc = SYSCTL_IN(req, new_ports, new_count *
10025 sizeof(new_ports[0]));
10029 /* Allow setting to a single '-1' to clear the list. */
10030 if (new_count == 1 && new_ports[0] == -1) {
10032 old_ports = sc->tt.tls_rx_ports;
10033 sc->tt.tls_rx_ports = NULL;
10034 sc->tt.num_tls_rx_ports = 0;
10035 ADAPTER_UNLOCK(sc);
10036 free(old_ports, M_CXGBE);
10038 for (i = 0; i < new_count; i++) {
10039 if (new_ports[i] < 1 ||
10040 new_ports[i] > IPPORT_MAX) {
10047 old_ports = sc->tt.tls_rx_ports;
10048 sc->tt.tls_rx_ports = new_ports;
10049 sc->tt.num_tls_rx_ports = new_count;
10050 ADAPTER_UNLOCK(sc);
10051 free(old_ports, M_CXGBE);
10055 free(new_ports, M_CXGBE);
10057 end_synchronized_op(sc, 0);
10062 sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS)
10064 struct adapter *sc = arg1;
10067 v = sc->tt.tls_rx_timeout;
10068 rc = sysctl_handle_int(oidp, &v, 0, req);
10069 if (rc != 0 || req->newptr == NULL)
10075 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS))
10078 sc->tt.tls_rx_timeout = v;
10085 unit_conv(char *buf, size_t len, u_int val, u_int factor)
10087 u_int rem = val % factor;
10090 snprintf(buf, len, "%u", val / factor);
10092 while (rem % 10 == 0)
10094 snprintf(buf, len, "%u.%u", val / factor, rem);
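/*
 * unit_conv() formats val/factor with trailing zeroes stripped from the
 * fractional part.  Illustrative results, assuming the elided loop body is
 * rem /= 10:
 *
 *	unit_conv(buf, sizeof(buf), 4000, 1000) -> "4"
 *	unit_conv(buf, sizeof(buf), 2500, 1000) -> "2.5"
 */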
10099 sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
10101 struct adapter *sc = arg1;
10104 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
10106 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
10110 re = G_TIMERRESOLUTION(res);
10113 /* TCP timestamp tick */
10114 re = G_TIMESTAMPRESOLUTION(res);
10118 re = G_DELAYEDACKRESOLUTION(res);
10124 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
10126 return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
10130 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
10132 struct adapter *sc = arg1;
10133 u_int res, dack_re, v;
10134 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
10136 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
10137 dack_re = G_DELAYEDACKRESOLUTION(res);
10138 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
10140 return (sysctl_handle_int(oidp, &v, 0, req));
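/*
 * The value reported by sysctl_tp_dack_timer() is the delayed-ACK timeout in
 * microseconds: cclk_ps is the core-clock period in picoseconds, shifting by
 * the DACK resolution gives picoseconds per DACK tick, dividing by 1000000
 * converts that to microseconds per tick, and A_TP_DACK_TIMER holds the count
 * of ticks.
 */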
10144 sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
10146 struct adapter *sc = arg1;
10149 u_long tp_tick_us, v;
10150 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
10152 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
10153 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
10154 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
10155 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
10157 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
10158 tp_tick_us = (cclk_ps << tre) / 1000000;
10160 if (reg == A_TP_INIT_SRTT)
10161 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
10163 v = tp_tick_us * t4_read_reg(sc, reg);
10165 return (sysctl_handle_long(oidp, &v, 0, req));
10169 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is
10170 * passed to this function.
10173 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS)
10175 struct adapter *sc = arg1;
10179 MPASS(idx >= 0 && idx <= 24);
10181 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf;
10183 return (sysctl_handle_int(oidp, &v, 0, req));
10187 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS)
10189 struct adapter *sc = arg1;
10193 MPASS(idx >= 0 && idx < 16);
10195 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3);
10196 shift = (idx & 3) << 3;
10197 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0;
10199 return (sysctl_handle_int(oidp, &v, 0, req));
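/*
 * sysctl_tp_backoff(): the 16 backoff indices live one per byte lane in four
 * consecutive 32-bit registers.  For example, idx 5 reads the register at
 * A_TP_TCP_BACKOFF_REG0 + 4 and extracts the field at bits 15:8, masked with
 * M_TIMERBACKOFFINDEX0.
 */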
10203 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS)
10205 struct vi_info *vi = arg1;
10206 struct adapter *sc = vi->adapter;
10208 struct sge_ofld_rxq *ofld_rxq;
10211 idx = vi->ofld_tmr_idx;
10213 rc = sysctl_handle_int(oidp, &idx, 0, req);
10214 if (rc != 0 || req->newptr == NULL)
10217 if (idx < 0 || idx >= SGE_NTIMERS)
10220 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
10225 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1);
10226 for_each_ofld_rxq(vi, i, ofld_rxq) {
10227 #ifdef atomic_store_rel_8
10228 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
10230 ofld_rxq->iq.intr_params = v;
10233 vi->ofld_tmr_idx = idx;
10235 end_synchronized_op(sc, LOCK_HELD);
10240 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
10242 struct vi_info *vi = arg1;
10243 struct adapter *sc = vi->adapter;
10246 idx = vi->ofld_pktc_idx;
10248 rc = sysctl_handle_int(oidp, &idx, 0, req);
10249 if (rc != 0 || req->newptr == NULL)
10252 if (idx < -1 || idx >= SGE_NCOUNTERS)
10255 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
10260 if (vi->flags & VI_INIT_DONE)
10261 rc = EBUSY; /* cannot be changed once the queues are created */
10263 vi->ofld_pktc_idx = idx;
10265 end_synchronized_op(sc, LOCK_HELD);
10271 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
10275 if (cntxt->cid > M_CTXTQID)
10278 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
10279 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
10282 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
10286 if (sc->flags & FW_OK) {
10287 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
10294 * Read via firmware failed or wasn't even attempted. Read directly via the backdoor.
10297 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
10299 end_synchronized_op(sc, 0);
10304 load_fw(struct adapter *sc, struct t4_data *fw)
10309 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
10314 * The firmware, with the sole exception of the memory parity error
10315 * handler, runs from memory and not flash. It is almost always safe to
10316 * install a new firmware on a running system. Just set bit 1 in
10317 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first.
10319 if (sc->flags & FULL_INIT_DONE &&
10320 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) {
10325 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
10327 rc = copyin(fw->data, fw_data, fw->len);
10329 rc = -t4_load_fw(sc, fw_data, fw->len);
10331 free(fw_data, M_CXGBE);
10333 end_synchronized_op(sc, 0);
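/*
 * Illustrative use of the override described above, assuming
 * DF_LOAD_FW_ANYTIME is bit 1 of the debug flags:
 *
 *	# sysctl dev.t4nex.0.dflags=2
 *
 * after which CHELSIO_T4_LOAD_FW is allowed even on a fully initialized
 * adapter.
 */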
10338 load_cfg(struct adapter *sc, struct t4_data *cfg)
10341 uint8_t *cfg_data = NULL;
10343 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
10347 if (cfg->len == 0) {
10349 rc = -t4_load_cfg(sc, NULL, 0);
10353 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
10355 rc = copyin(cfg->data, cfg_data, cfg->len);
10357 rc = -t4_load_cfg(sc, cfg_data, cfg->len);
10359 free(cfg_data, M_CXGBE);
10361 end_synchronized_op(sc, 0);
10366 load_boot(struct adapter *sc, struct t4_bootrom *br)
10369 uint8_t *br_data = NULL;
10372 if (br->len > 1024 * 1024)
10375 if (br->pf_offset == 0) {
10377 if (br->pfidx_addr > 7)
10379 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr,
10380 A_PCIE_PF_EXPROM_OFST)));
10381 } else if (br->pf_offset == 1) {
10383 offset = G_OFFSET(br->pfidx_addr);
10388 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr");
10392 if (br->len == 0) {
10394 rc = -t4_load_boot(sc, NULL, offset, 0);
10398 br_data = malloc(br->len, M_CXGBE, M_WAITOK);
10400 rc = copyin(br->data, br_data, br->len);
10402 rc = -t4_load_boot(sc, br_data, offset, br->len);
10404 free(br_data, M_CXGBE);
10406 end_synchronized_op(sc, 0);
10411 load_bootcfg(struct adapter *sc, struct t4_data *bc)
10414 uint8_t *bc_data = NULL;
10416 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
10420 if (bc->len == 0) {
10422 rc = -t4_load_bootcfg(sc, NULL, 0);
10426 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK);
10428 rc = copyin(bc->data, bc_data, bc->len);
10430 rc = -t4_load_bootcfg(sc, bc_data, bc->len);
10432 free(bc_data, M_CXGBE);
10434 end_synchronized_op(sc, 0);
10439 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump)
10442 struct cudbg_init *cudbg;
10443 void *handle, *buf;
10445 /* buf is large, don't block if no memory is available */
10446 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO);
10450 handle = cudbg_alloc_handle();
10451 if (handle == NULL) {
10456 cudbg = cudbg_get_init(handle);
10458 cudbg->print = (cudbg_print_cb)printf;
10461 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n",
10462 __func__, dump->wr_flash, dump->len, dump->data);
10465 if (dump->wr_flash)
10466 cudbg->use_flash = 1;
10467 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap));
10468 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap));
10470 rc = cudbg_collect(handle, buf, &dump->len);
10474 rc = copyout(buf, dump->data, dump->len);
10476 cudbg_free_handle(handle);
10477 free(buf, M_CXGBE);
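/*
 * cudbg_dump() above collects the debug components selected in dump->bitmap
 * into a kernel buffer with cudbg_collect(), optionally also writing them to
 * flash (dump->wr_flash), and then copies the result out to userspace.
 */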
10482 free_offload_policy(struct t4_offload_policy *op)
10484 struct offload_rule *r;
10491 for (i = 0; i < op->nrules; i++, r++) {
10492 free(r->bpf_prog.bf_insns, M_CXGBE);
10494 free(op->rule, M_CXGBE);
10499 set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop)
10502 struct t4_offload_policy *op, *old;
10503 struct bpf_program *bf;
10504 const struct offload_settings *s;
10505 struct offload_rule *r;
10508 if (!is_offload(sc))
10511 if (uop->nrules == 0) {
10512 /* Delete installed policies. */
10515 } else if (uop->nrules > 256) { /* arbitrary */
10519 /* Copy userspace offload policy to kernel */
10520 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK);
10521 op->nrules = uop->nrules;
10522 len = op->nrules * sizeof(struct offload_rule);
10523 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
10524 rc = copyin(uop->rule, op->rule, len);
10526 free(op->rule, M_CXGBE);
10532 for (i = 0; i < op->nrules; i++, r++) {
10534 /* Validate open_type */
10535 if (r->open_type != OPEN_TYPE_LISTEN &&
10536 r->open_type != OPEN_TYPE_ACTIVE &&
10537 r->open_type != OPEN_TYPE_PASSIVE &&
10538 r->open_type != OPEN_TYPE_DONTCARE) {
10541 * Rules 0 to i have malloc'd filters that need to be
10542 * freed. Rules i+1 to nrules have userspace pointers
10543 * and should be left alone.
10546 free_offload_policy(op);
10550 /* Validate settings */
10552 if ((s->offload != 0 && s->offload != 1) ||
10553 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED ||
10554 s->sched_class < -1 ||
10555 s->sched_class >= sc->chip_params->nsched_cls) {
10561 u = bf->bf_insns; /* userspace ptr */
10562 bf->bf_insns = NULL;
10563 if (bf->bf_len == 0) {
10564 /* legal, matches everything */
10567 len = bf->bf_len * sizeof(*bf->bf_insns);
10568 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK);
10569 rc = copyin(u, bf->bf_insns, len);
10573 if (!bpf_validate(bf->bf_insns, bf->bf_len)) {
10579 rw_wlock(&sc->policy_lock);
10582 rw_wunlock(&sc->policy_lock);
10583 free_offload_policy(old);
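/*
 * set_offload_policy() copies the user's rules into the kernel, validates
 * each rule's open_type and settings, copies in and bpf_validate()s each
 * rule's BPF program, and only then swaps the new policy in under
 * sc->policy_lock before freeing the old one.
 */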
10588 #define MAX_READ_BUF_SIZE (128 * 1024)
10590 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
10592 uint32_t addr, remaining, n;
10597 rc = validate_mem_range(sc, mr->addr, mr->len);
10601 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
10603 remaining = mr->len;
10604 dst = (void *)mr->data;
10606 while (remaining) {
10607 n = min(remaining, MAX_READ_BUF_SIZE);
10608 read_via_memwin(sc, 2, addr, buf, n);
10610 rc = copyout(buf, dst, n);
10619 free(buf, M_CXGBE);
10622 #undef MAX_READ_BUF_SIZE
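/*
 * read_card_mem() streams adapter memory to userspace through memory window 2
 * in chunks of at most MAX_READ_BUF_SIZE bytes, validating the requested
 * range first and copying each chunk out before reading the next one.
 */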
10625 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
10629 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
10632 if (i2cd->len > sizeof(i2cd->data))
10635 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
10638 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
10639 i2cd->offset, i2cd->len, &i2cd->data[0]);
10640 end_synchronized_op(sc, 0);
10646 clear_stats(struct adapter *sc, u_int port_id)
10648 int i, v, chan_map;
10649 struct port_info *pi;
10650 struct vi_info *vi;
10651 struct sge_rxq *rxq;
10652 struct sge_txq *txq;
10653 struct sge_wrq *wrq;
10655 struct sge_ofld_rxq *ofld_rxq;
10658 if (port_id >= sc->params.nports)
10660 pi = sc->port[port_id];
10665 t4_clr_port_stats(sc, pi->tx_chan);
10667 if (pi->fcs_reg != -1)
10668 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
10670 pi->stats.rx_fcs_err = 0;
10672 pi->tx_parse_error = 0;
10673 pi->tnl_cong_drops = 0;
10674 mtx_lock(&sc->reg_lock);
10675 for_each_vi(pi, v, vi) {
10676 if (vi->flags & VI_INIT_DONE)
10677 t4_clr_vi_stats(sc, vi->vin);
10679 chan_map = pi->rx_e_chan_map;
10682 i = ffs(chan_map) - 1;
10683 t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
10684 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
10685 chan_map &= ~(1 << i);
10687 mtx_unlock(&sc->reg_lock);
10690 * Since this command accepts a port, clear stats for
10691 * all VIs on this port.
10693 for_each_vi(pi, v, vi) {
10694 if (vi->flags & VI_INIT_DONE) {
10696 for_each_rxq(vi, i, rxq) {
10697 #if defined(INET) || defined(INET6)
10698 rxq->lro.lro_queued = 0;
10699 rxq->lro.lro_flushed = 0;
10702 rxq->vlan_extraction = 0;
10703 rxq->vxlan_rxcsum = 0;
10705 rxq->fl.cl_allocated = 0;
10706 rxq->fl.cl_recycled = 0;
10707 rxq->fl.cl_fast_recycled = 0;
10710 for_each_txq(vi, i, txq) {
10713 txq->vlan_insertion = 0;
10716 txq->txpkt_wrs = 0;
10717 txq->txpkts0_wrs = 0;
10718 txq->txpkts1_wrs = 0;
10719 txq->txpkts0_pkts = 0;
10720 txq->txpkts1_pkts = 0;
10722 txq->vxlan_tso_wrs = 0;
10723 txq->vxlan_txcsum = 0;
10724 txq->kern_tls_records = 0;
10725 txq->kern_tls_short = 0;
10726 txq->kern_tls_partial = 0;
10727 txq->kern_tls_full = 0;
10728 txq->kern_tls_octets = 0;
10729 txq->kern_tls_waste = 0;
10730 txq->kern_tls_options = 0;
10731 txq->kern_tls_header = 0;
10732 txq->kern_tls_fin = 0;
10733 txq->kern_tls_fin_short = 0;
10734 txq->kern_tls_cbc = 0;
10735 txq->kern_tls_gcm = 0;
10736 mp_ring_reset_stats(txq->r);
10739 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
10740 for_each_ofld_txq(vi, i, wrq) {
10741 wrq->tx_wrs_direct = 0;
10742 wrq->tx_wrs_copied = 0;
10746 for_each_ofld_rxq(vi, i, ofld_rxq) {
10747 ofld_rxq->fl.cl_allocated = 0;
10748 ofld_rxq->fl.cl_recycled = 0;
10749 ofld_rxq->fl.cl_fast_recycled = 0;
10753 if (IS_MAIN_VI(vi)) {
10754 wrq = &sc->sge.ctrlq[pi->port_id];
10755 wrq->tx_wrs_direct = 0;
10756 wrq->tx_wrs_copied = 0;
10765 t4_os_find_pci_capability(struct adapter *sc, int cap)
10769 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
10773 t4_os_pci_save_state(struct adapter *sc)
10776 struct pci_devinfo *dinfo;
10779 dinfo = device_get_ivars(dev);
10781 pci_cfg_save(dev, dinfo, 0);
10786 t4_os_pci_restore_state(struct adapter *sc)
10789 struct pci_devinfo *dinfo;
10792 dinfo = device_get_ivars(dev);
10794 pci_cfg_restore(dev, dinfo);
10799 t4_os_portmod_changed(struct port_info *pi)
10801 struct adapter *sc = pi->adapter;
10802 struct vi_info *vi;
10804 static const char *mod_str[] = {
10805 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
10808 KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
10809 ("%s: port_type %u", __func__, pi->port_type));
10812 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
10814 build_medialist(pi);
10815 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) {
10816 fixup_link_config(pi);
10817 apply_link_config(pi);
10820 end_synchronized_op(sc, LOCK_HELD);
10824 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
10825 if_printf(ifp, "transceiver unplugged.\n");
10826 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
10827 if_printf(ifp, "unknown transceiver inserted.\n");
10828 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
10829 if_printf(ifp, "unsupported transceiver inserted.\n");
10830 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
10831 if_printf(ifp, "%dGbps %s transceiver inserted.\n",
10832 port_top_speed(pi), mod_str[pi->mod_type]);
10834 if_printf(ifp, "transceiver (type %d) inserted.\n",
10840 t4_os_link_changed(struct port_info *pi)
10842 struct vi_info *vi;
10844 struct link_config *lc = &pi->link_cfg;
10845 struct adapter *sc = pi->adapter;
10848 PORT_LOCK_ASSERT_OWNED(pi);
10852 if (lc->speed > 25000 ||
10853 (lc->speed == 25000 && lc->fec == FEC_RS)) {
10854 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
10855 A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
10857 pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
10858 A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
10860 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
10861 pi->stats.rx_fcs_err = 0;
10866 MPASS(pi->fcs_reg != -1);
10867 MPASS(pi->fcs_base == 0);
10870 for_each_vi(pi, v, vi) {
10876 ifp->if_baudrate = IF_Mbps(lc->speed);
10877 if_link_state_change(ifp, LINK_STATE_UP);
10879 if_link_state_change(ifp, LINK_STATE_DOWN);
10885 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
10887 struct adapter *sc;
10889 sx_slock(&t4_list_lock);
10890 SLIST_FOREACH(sc, &t4_list, link) {
10892 * func should not make any assumptions about what state sc is
10893 * in - the only guarantee is that sc->sc_lock is a valid lock.
10897 sx_sunlock(&t4_list_lock);
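/*
 * Example: the VXLAN event handlers near the end of this file call
 * t4_iterate(t4_vxlan_start, &v) and t4_iterate(t4_vxlan_stop, &v) to apply a
 * UDP port update to every adapter on the list.
 */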
10901 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
10905 struct adapter *sc = dev->si_drv1;
10907 rc = priv_check(td, PRIV_DRIVER);
10912 case CHELSIO_T4_GETREG: {
10913 struct t4_reg *edata = (struct t4_reg *)data;
10915 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
10918 if (edata->size == 4)
10919 edata->val = t4_read_reg(sc, edata->addr);
10920 else if (edata->size == 8)
10921 edata->val = t4_read_reg64(sc, edata->addr);
10927 case CHELSIO_T4_SETREG: {
10928 struct t4_reg *edata = (struct t4_reg *)data;
10930 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
10933 if (edata->size == 4) {
10934 if (edata->val & 0xffffffff00000000)
10936 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
10937 } else if (edata->size == 8)
10938 t4_write_reg64(sc, edata->addr, edata->val);
10943 case CHELSIO_T4_REGDUMP: {
10944 struct t4_regdump *regs = (struct t4_regdump *)data;
10945 int reglen = t4_get_regs_len(sc);
10948 if (regs->len < reglen) {
10949 regs->len = reglen; /* hint to the caller */
10953 regs->len = reglen;
10954 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
10955 get_regs(sc, regs, buf);
10956 rc = copyout(buf, regs->data, reglen);
10957 free(buf, M_CXGBE);
10960 case CHELSIO_T4_GET_FILTER_MODE:
10961 rc = get_filter_mode(sc, (uint32_t *)data);
10963 case CHELSIO_T4_SET_FILTER_MODE:
10964 rc = set_filter_mode(sc, *(uint32_t *)data);
10966 case CHELSIO_T4_GET_FILTER:
10967 rc = get_filter(sc, (struct t4_filter *)data);
10969 case CHELSIO_T4_SET_FILTER:
10970 rc = set_filter(sc, (struct t4_filter *)data);
10972 case CHELSIO_T4_DEL_FILTER:
10973 rc = del_filter(sc, (struct t4_filter *)data);
10975 case CHELSIO_T4_GET_SGE_CONTEXT:
10976 rc = get_sge_context(sc, (struct t4_sge_context *)data);
10978 case CHELSIO_T4_LOAD_FW:
10979 rc = load_fw(sc, (struct t4_data *)data);
10981 case CHELSIO_T4_GET_MEM:
10982 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
10984 case CHELSIO_T4_GET_I2C:
10985 rc = read_i2c(sc, (struct t4_i2c_data *)data);
10987 case CHELSIO_T4_CLEAR_STATS:
10988 rc = clear_stats(sc, *(uint32_t *)data);
10990 case CHELSIO_T4_SCHED_CLASS:
10991 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
10993 case CHELSIO_T4_SCHED_QUEUE:
10994 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
10996 case CHELSIO_T4_GET_TRACER:
10997 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
10999 case CHELSIO_T4_SET_TRACER:
11000 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
11002 case CHELSIO_T4_LOAD_CFG:
11003 rc = load_cfg(sc, (struct t4_data *)data);
11005 case CHELSIO_T4_LOAD_BOOT:
11006 rc = load_boot(sc, (struct t4_bootrom *)data);
11008 case CHELSIO_T4_LOAD_BOOTCFG:
11009 rc = load_bootcfg(sc, (struct t4_data *)data);
11011 case CHELSIO_T4_CUDBG_DUMP:
11012 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data);
11014 case CHELSIO_T4_SET_OFLD_POLICY:
11015 rc = set_offload_policy(sc, (struct t4_offload_policy *)data);
11026 toe_capability(struct vi_info *vi, int enable)
11029 struct port_info *pi = vi->pi;
11030 struct adapter *sc = pi->adapter;
11032 ASSERT_SYNCHRONIZED_OP(sc);
11034 if (!is_offload(sc))
11038 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
11039 /* TOE is already enabled. */
11044 * We need the port's queues around so that we're able to send
11045 * and receive CPLs to/from the TOE even if the ifnet for this
11046 * port has never been UP'd administratively.
11048 if (!(vi->flags & VI_INIT_DONE)) {
11049 rc = vi_full_init(vi);
11053 if (!(pi->vi[0].flags & VI_INIT_DONE)) {
11054 rc = vi_full_init(&pi->vi[0]);
11059 if (isset(&sc->offload_map, pi->port_id)) {
11060 /* TOE is enabled on another VI of this port. */
11065 if (!uld_active(sc, ULD_TOM)) {
11066 rc = t4_activate_uld(sc, ULD_TOM);
11067 if (rc == EAGAIN) {
11069 "You must kldload t4_tom.ko before trying "
11070 "to enable TOE on a cxgbe interface.\n");
11074 KASSERT(sc->tom_softc != NULL,
11075 ("%s: TOM activated but softc NULL", __func__));
11076 KASSERT(uld_active(sc, ULD_TOM),
11077 ("%s: TOM activated but flag not set", __func__));
11080 /* Activate iWARP and iSCSI too, if the modules are loaded. */
11081 if (!uld_active(sc, ULD_IWARP))
11082 (void) t4_activate_uld(sc, ULD_IWARP);
11083 if (!uld_active(sc, ULD_ISCSI))
11084 (void) t4_activate_uld(sc, ULD_ISCSI);
11087 setbit(&sc->offload_map, pi->port_id);
11091 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
11094 KASSERT(uld_active(sc, ULD_TOM),
11095 ("%s: TOM never initialized?", __func__));
11096 clrbit(&sc->offload_map, pi->port_id);
11103 * Add an upper layer driver to the global list.
11106 t4_register_uld(struct uld_info *ui)
11109 struct uld_info *u;
11111 sx_xlock(&t4_uld_list_lock);
11112 SLIST_FOREACH(u, &t4_uld_list, link) {
11113 if (u->uld_id == ui->uld_id) {
11119 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
11122 sx_xunlock(&t4_uld_list_lock);
11127 t4_unregister_uld(struct uld_info *ui)
11130 struct uld_info *u;
11132 sx_xlock(&t4_uld_list_lock);
11134 SLIST_FOREACH(u, &t4_uld_list, link) {
11136 if (ui->refcount > 0) {
11141 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
11147 sx_xunlock(&t4_uld_list_lock);
11152 t4_activate_uld(struct adapter *sc, int id)
11155 struct uld_info *ui;
11157 ASSERT_SYNCHRONIZED_OP(sc);
11159 if (id < 0 || id > ULD_MAX)
11161 rc = EAGAIN; /* kldload the module with this ULD and try again. */
11163 sx_slock(&t4_uld_list_lock);
11165 SLIST_FOREACH(ui, &t4_uld_list, link) {
11166 if (ui->uld_id == id) {
11167 if (!(sc->flags & FULL_INIT_DONE)) {
11168 rc = adapter_full_init(sc);
11173 rc = ui->activate(sc);
11175 setbit(&sc->active_ulds, id);
11182 sx_sunlock(&t4_uld_list_lock);
11188 t4_deactivate_uld(struct adapter *sc, int id)
11191 struct uld_info *ui;
11193 ASSERT_SYNCHRONIZED_OP(sc);
11195 if (id < 0 || id > ULD_MAX)
11199 sx_slock(&t4_uld_list_lock);
11201 SLIST_FOREACH(ui, &t4_uld_list, link) {
11202 if (ui->uld_id == id) {
11203 rc = ui->deactivate(sc);
11205 clrbit(&sc->active_ulds, id);
11212 sx_sunlock(&t4_uld_list_lock);
11218 t4_async_event(void *arg, int n)
11220 struct uld_info *ui;
11221 struct adapter *sc = (struct adapter *)arg;
11223 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
11225 sx_slock(&t4_uld_list_lock);
11226 SLIST_FOREACH(ui, &t4_uld_list, link) {
11227 if (ui->uld_id == ULD_IWARP) {
11228 ui->async_event(sc);
11232 sx_sunlock(&t4_uld_list_lock);
11233 end_synchronized_op(sc, 0);
11237 uld_active(struct adapter *sc, int uld_id)
11240 MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
11242 return (isset(&sc->active_ulds, uld_id));
11247 * t = ptr to tunable.
11248 * nc = number of CPUs.
11249 * c = compiled-in default for that tunable.
11252 calculate_nqueues(int *t, int nc, const int c)
11258 nq = *t < 0 ? -*t : c;
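/*
 * calculate_nqueues(): a negative tunable requests that many queues
 * (presumably capped by the number of CPUs) and zero asks for the compiled-in
 * default c; a value the user set to something positive is used as-is.
 */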
11263 * Come up with reasonable defaults for some of the tunables, provided they're
11264 * not set by the user (in which case we'll use the values as is).
11267 tweak_tunables(void)
11269 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
11273 t4_ntxq = rss_getnumbuckets();
11275 calculate_nqueues(&t4_ntxq, nc, NTXQ);
11279 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
11283 t4_nrxq = rss_getnumbuckets();
11285 calculate_nqueues(&t4_nrxq, nc, NRXQ);
11289 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
11291 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
11292 calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
11293 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
11296 calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
11297 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
11300 #if defined(TCP_OFFLOAD) || defined(KERN_TLS)
11301 if (t4_toecaps_allowed == -1)
11302 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
11304 if (t4_toecaps_allowed == -1)
11305 t4_toecaps_allowed = 0;
11309 if (t4_rdmacaps_allowed == -1) {
11310 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
11311 FW_CAPS_CONFIG_RDMA_RDMAC;
11314 if (t4_iscsicaps_allowed == -1) {
11315 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
11316 FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
11317 FW_CAPS_CONFIG_ISCSI_T10DIF;
11320 if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
11321 t4_tmr_idx_ofld = TMR_IDX_OFLD;
11323 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
11324 t4_pktc_idx_ofld = PKTC_IDX_OFLD;
11326 if (t4_toe_tls_rx_timeout < 0)
11327 t4_toe_tls_rx_timeout = 0;
11329 if (t4_rdmacaps_allowed == -1)
11330 t4_rdmacaps_allowed = 0;
11332 if (t4_iscsicaps_allowed == -1)
11333 t4_iscsicaps_allowed = 0;
11337 calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ);
11338 calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ);
11339 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
11340 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
11343 if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
11344 t4_tmr_idx = TMR_IDX;
11346 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
11347 t4_pktc_idx = PKTC_IDX;
11349 if (t4_qsize_txq < 128)
11350 t4_qsize_txq = 128;
11352 if (t4_qsize_rxq < 128)
11353 t4_qsize_rxq = 128;
11354 while (t4_qsize_rxq & 7)
11357 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
11360 * Number of VIs to create per-port. The first VI is the "main" regular
11361 * VI for the port. The rest are additional virtual interfaces on the
11362 * same physical port. Note that the main VI does not have native
11363 * netmap support but the extra VIs do.
11365 * Limit the number of VIs per port to the number of available
11366 * MAC addresses per port.
11368 if (t4_num_vis < 1)
11370 if (t4_num_vis > nitems(vi_mac_funcs)) {
11371 t4_num_vis = nitems(vi_mac_funcs);
11372 printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
11375 if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
11376 pcie_relaxed_ordering = 1;
11377 #if defined(__i386__) || defined(__amd64__)
11378 if (cpu_vendor_id == CPU_VENDOR_INTEL)
11379 pcie_relaxed_ordering = 0;
11386 t4_dump_tcb(struct adapter *sc, int tid)
11388 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
11390 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
11391 save = t4_read_reg(sc, reg);
11392 base = sc->memwin[2].mw_base;
11394 /* Dump TCB for the tid */
11395 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
11396 tcb_addr += tid * TCB_SIZE;
11400 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
11402 pf = V_PFNUM(sc->pf);
11403 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
11405 t4_write_reg(sc, reg, win_pos | pf);
11406 t4_read_reg(sc, reg);
11408 off = tcb_addr - win_pos;
11409 for (i = 0; i < 4; i++) {
11411 for (j = 0; j < 8; j++, off += 4)
11412 buf[j] = htonl(t4_read_reg(sc, base + off));
11414 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
11415 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
11419 t4_write_reg(sc, reg, save);
11420 t4_read_reg(sc, reg);
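/*
 * t4_dump_tcb() above temporarily repoints PCIe memory window 2 at the TCB of
 * the requested tid (the window start must be 16B or 128B aligned depending
 * on the chip), dumps the 128-byte TCB as four lines of eight words, and then
 * restores the window to its saved position.
 */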
11424 t4_dump_devlog(struct adapter *sc)
11426 struct devlog_params *dparams = &sc->params.devlog;
11427 struct fw_devlog_e e;
11428 int i, first, j, m, nentries, rc;
11429 uint64_t ftstamp = UINT64_MAX;
11431 if (dparams->start == 0) {
11432 db_printf("devlog params not valid\n");
11436 nentries = dparams->size / sizeof(struct fw_devlog_e);
11437 m = fwmtype_to_hwmtype(dparams->memtype);
11439 /* Find the first entry. */
11441 for (i = 0; i < nentries && !db_pager_quit; i++) {
11442 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
11443 sizeof(e), (void *)&e);
11447 if (e.timestamp == 0)
11450 e.timestamp = be64toh(e.timestamp);
11451 if (e.timestamp < ftstamp) {
11452 ftstamp = e.timestamp;
11462 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
11463 sizeof(e), (void *)&e);
11467 if (e.timestamp == 0)
11470 e.timestamp = be64toh(e.timestamp);
11471 e.seqno = be32toh(e.seqno);
11472 for (j = 0; j < 8; j++)
11473 e.params[j] = be32toh(e.params[j]);
11475 db_printf("%10d %15ju %8s %8s ",
11476 e.seqno, e.timestamp,
11477 (e.level < nitems(devlog_level_strings) ?
11478 devlog_level_strings[e.level] : "UNKNOWN"),
11479 (e.facility < nitems(devlog_facility_strings) ?
11480 devlog_facility_strings[e.facility] : "UNKNOWN"));
11481 db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
11482 e.params[3], e.params[4], e.params[5], e.params[6],
11485 if (++i == nentries)
11487 } while (i != first && !db_pager_quit);
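/*
 * t4_dump_devlog() treats the firmware device log as a circular buffer in
 * adapter memory: it first locates the oldest entry by timestamp and then
 * prints every valid entry in order, wrapping around the end of the buffer.
 */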
11490 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
11491 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
11493 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
11500 t = db_read_token();
11502 dev = device_lookup_by_name(db_tok_string);
11507 db_printf("usage: show t4 devlog <nexus>\n");
11512 db_printf("device not found\n");
11516 t4_dump_devlog(device_get_softc(dev));
11519 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
11528 t = db_read_token();
11530 dev = device_lookup_by_name(db_tok_string);
11531 t = db_read_token();
11532 if (t == tNUMBER) {
11533 tid = db_tok_number;
11540 db_printf("usage: show t4 tcb <nexus> <tid>\n");
11545 db_printf("device not found\n");
11549 db_printf("invalid tid\n");
11553 t4_dump_tcb(device_get_softc(dev), tid);
11557 static eventhandler_tag vxlan_start_evtag;
11558 static eventhandler_tag vxlan_stop_evtag;
11560 struct vxlan_evargs {
11566 t4_vxlan_start(struct adapter *sc, void *arg)
11568 struct vxlan_evargs *v = arg;
11569 struct port_info *pi;
11570 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0};
11573 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
11575 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0)
11578 if (sc->vxlan_refcount == 0) {
11579 sc->vxlan_port = v->port;
11580 sc->vxlan_refcount = 1;
11581 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE,
11582 V_VXLAN(v->port) | F_VXLAN_EN);
11583 for_each_port(sc, i) {
11585 if (pi->vxlan_tcam_entry == true)
11587 rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid,
11588 match_all_mac, match_all_mac,
11589 sc->rawf_base + pi->port_id, 1, pi->port_id, true);
11593 "%s: failed to add VXLAN TCAM entry: %d.\n",
11594 device_get_name(pi->vi[0].dev), rc);
11596 MPASS(rc == sc->rawf_base + pi->port_id);
11598 pi->vxlan_tcam_entry = true;
11601 } else if (sc->vxlan_port == v->port) {
11602 sc->vxlan_refcount++;
11604 log(LOG_ERR, "%s: VXLAN already configured on port %d; "
11605 "ignoring attempt to configure it on port %d\n",
11606 device_get_nameunit(sc->dev), sc->vxlan_port, v->port);
11608 end_synchronized_op(sc, 0);
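/*
 * t4_vxlan_start() above ties hardware VXLAN support to a single UDP port:
 * the first start programs A_MPS_RX_VXLAN_TYPE and installs the per-port
 * match-all VXLAN TCAM entry, later starts on the same port only bump
 * sc->vxlan_refcount, and a request for a different port is logged and
 * ignored.
 */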
11612 t4_vxlan_stop(struct adapter *sc, void *arg)
11614 struct vxlan_evargs *v = arg;
11616 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5)
11618 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0)
11622 * VXLANs may have been configured before the driver was loaded so we
11623 * may see more stops than starts. This is not handled cleanly but at
11624 * least we keep the refcount sane.
11626 if (sc->vxlan_port != v->port)
11628 if (sc->vxlan_refcount == 0) {
11630 "%s: VXLAN operation on port %d was stopped earlier; "
11631 "ignoring attempt to stop it again.\n",
11632 device_get_nameunit(sc->dev), sc->vxlan_port);
11633 } else if (--sc->vxlan_refcount == 0) {
11634 t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0);
11637 end_synchronized_op(sc, 0);
11641 t4_vxlan_start_handler(void *arg __unused, struct ifnet *ifp,
11642 sa_family_t family, u_int port)
11644 struct vxlan_evargs v;
11646 MPASS(family == AF_INET || family == AF_INET6);
11650 t4_iterate(t4_vxlan_start, &v);
11654 t4_vxlan_stop_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family,
11657 struct vxlan_evargs v;
11659 MPASS(family == AF_INET || family == AF_INET6);
11663 t4_iterate(t4_vxlan_stop, &v);
11667 static struct sx mlu; /* mod load unload */
11668 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
11671 mod_event(module_t mod, int cmd, void *arg)
11674 static int loaded = 0;
11679 if (loaded++ == 0) {
11681 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
11682 t4_filter_rpl, CPL_COOKIE_FILTER);
11683 t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL,
11684 do_l2t_write_rpl, CPL_COOKIE_FILTER);
11685 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL,
11686 t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER);
11687 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL,
11688 t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER);
11689 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS,
11690 t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER);
11691 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
11692 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
11693 t4_register_cpl_handler(CPL_SMT_WRITE_RPL,
11695 sx_init(&t4_list_lock, "T4/T5 adapters");
11696 SLIST_INIT(&t4_list);
11697 callout_init(&fatal_callout, 1);
11699 sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
11700 SLIST_INIT(&t4_uld_list);
11708 t4_tracer_modload();
11710 vxlan_start_evtag =
11711 EVENTHANDLER_REGISTER(vxlan_start,
11712 t4_vxlan_start_handler, NULL,
11713 EVENTHANDLER_PRI_ANY);
11715 EVENTHANDLER_REGISTER(vxlan_stop,
11716 t4_vxlan_stop_handler, NULL,
11717 EVENTHANDLER_PRI_ANY);
11724 if (--loaded == 0) {
11727 sx_slock(&t4_list_lock);
11728 if (!SLIST_EMPTY(&t4_list)) {
11730 sx_sunlock(&t4_list_lock);
11734 sx_slock(&t4_uld_list_lock);
11735 if (!SLIST_EMPTY(&t4_uld_list)) {
11737 sx_sunlock(&t4_uld_list_lock);
11738 sx_sunlock(&t4_list_lock);
11743 while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
11744 uprintf("%ju clusters with custom free routine "
11745 "still is use.\n", t4_sge_extfree_refs());
11746 pause("t4unload", 2 * hz);
11749 sx_sunlock(&t4_uld_list_lock);
11751 sx_sunlock(&t4_list_lock);
11753 if (t4_sge_extfree_refs() == 0) {
11754 EVENTHANDLER_DEREGISTER(vxlan_start,
11755 vxlan_start_evtag);
11756 EVENTHANDLER_DEREGISTER(vxlan_stop,
11758 t4_tracer_modunload();
11760 t6_ktls_modunload();
11763 t4_clip_modunload();
11766 sx_destroy(&t4_uld_list_lock);
11768 sx_destroy(&t4_list_lock);
11769 t4_sge_modunload();
11773 loaded++; /* undo earlier decrement */
11784 static devclass_t t4_devclass, t5_devclass, t6_devclass;
11785 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
11786 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
11788 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
11789 MODULE_VERSION(t4nex, 1);
11790 MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
11792 MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
11793 #endif /* DEV_NETMAP */
11795 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
11796 MODULE_VERSION(t5nex, 1);
11797 MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
11799 MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
11800 #endif /* DEV_NETMAP */
11802 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
11803 MODULE_VERSION(t6nex, 1);
11804 MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
11806 MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
11807 #endif /* DEV_NETMAP */
11809 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
11810 MODULE_VERSION(cxgbe, 1);
11812 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
11813 MODULE_VERSION(cxl, 1);
11815 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
11816 MODULE_VERSION(cc, 1);
11818 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
11819 MODULE_VERSION(vcxgbe, 1);
11821 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
11822 MODULE_VERSION(vcxl, 1);
11824 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
11825 MODULE_VERSION(vcc, 1);