2 * Copyright (c) 2011 Chelsio Communications, Inc.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 #include "opt_inet6.h"
34 #include <sys/param.h>
37 #include <sys/kernel.h>
39 #include <sys/systm.h>
40 #include <sys/counter.h>
41 #include <sys/module.h>
42 #include <sys/malloc.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
45 #include <sys/pciio.h>
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
48 #include <dev/pci/pci_private.h>
49 #include <sys/firmware.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <net/ethernet.h>
57 #include <net/if_types.h>
58 #include <net/if_dl.h>
59 #include <net/if_vlan_var.h>
61 #include <net/rss_config.h>
63 #if defined(__i386__) || defined(__amd64__)
68 #include "common/common.h"
69 #include "common/t4_msg.h"
70 #include "common/t4_regs.h"
71 #include "common/t4_regs_values.h"
74 #include "t4_mp_ring.h"
76 /* T4 bus driver interface */
77 static int t4_probe(device_t);
78 static int t4_attach(device_t);
79 static int t4_detach(device_t);
80 static device_method_t t4_methods[] = {
81 DEVMETHOD(device_probe, t4_probe),
82 DEVMETHOD(device_attach, t4_attach),
83 DEVMETHOD(device_detach, t4_detach),
87 static driver_t t4_driver = {
90 sizeof(struct adapter)
94 /* T4 port (cxgbe) interface */
95 static int cxgbe_probe(device_t);
96 static int cxgbe_attach(device_t);
97 static int cxgbe_detach(device_t);
98 static device_method_t cxgbe_methods[] = {
99 DEVMETHOD(device_probe, cxgbe_probe),
100 DEVMETHOD(device_attach, cxgbe_attach),
101 DEVMETHOD(device_detach, cxgbe_detach),
104 static driver_t cxgbe_driver = {
107 sizeof(struct port_info)
110 static d_ioctl_t t4_ioctl;
111 static d_open_t t4_open;
112 static d_close_t t4_close;
114 static struct cdevsw t4_cdevsw = {
115 .d_version = D_VERSION,
123 /* T5 bus driver interface */
124 static int t5_probe(device_t);
125 static device_method_t t5_methods[] = {
126 DEVMETHOD(device_probe, t5_probe),
127 DEVMETHOD(device_attach, t4_attach),
128 DEVMETHOD(device_detach, t4_detach),
132 static driver_t t5_driver = {
135 sizeof(struct adapter)
139 /* T5 port (cxl) interface */
140 static driver_t cxl_driver = {
143 sizeof(struct port_info)
146 static struct cdevsw t5_cdevsw = {
147 .d_version = D_VERSION,
155 /* ifnet + media interface */
156 static void cxgbe_init(void *);
157 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
158 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
159 static void cxgbe_qflush(struct ifnet *);
160 static int cxgbe_media_change(struct ifnet *);
161 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
163 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
166 * The correct lock order when acquiring multiple locks is t4_list_lock,
167 * then ADAPTER_LOCK, then t4_uld_list_lock.
169 static struct sx t4_list_lock;
170 SLIST_HEAD(, adapter) t4_list;
172 static struct sx t4_uld_list_lock;
173 SLIST_HEAD(, uld_info) t4_uld_list;
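/*
 * Illustrative sketch (not from the original source): a traversal of all
 * adapters that respects the lock order documented above -- t4_list_lock
 * first, then the adapter lock, with t4_uld_list_lock taken last if needed:
 *
 *	sx_slock(&t4_list_lock);
 *	SLIST_FOREACH(sc, &t4_list, link) {
 *		ADAPTER_LOCK(sc);
 *		...
 *		ADAPTER_UNLOCK(sc);
 *	}
 *	sx_sunlock(&t4_list_lock);
 */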
177 * Tunables. See tweak_tunables() too.
179 * Each tunable is set to a default value here if it's known at compile-time.
180 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
181 * provide a reasonable default when the driver is loaded.
183 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
184 * T5 are under hw.cxl.
188 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
191 static int t4_ntxq10g = -1;
192 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
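/*
 * Example (illustrative): these tunables are fetched with TUNABLE_INT() at
 * module load time, so they are normally set in /boot/loader.conf before the
 * driver loads, e.g.:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *
 * A value of -1 leaves the choice to tweak_tunables().
 */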
195 static int t4_nrxq10g = -1;
196 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
199 static int t4_ntxq1g = -1;
200 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
203 static int t4_nrxq1g = -1;
204 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
206 static int t4_rsrv_noflowq = 0;
207 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
210 #define NOFLDTXQ_10G 8
211 static int t4_nofldtxq10g = -1;
212 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
214 #define NOFLDRXQ_10G 2
215 static int t4_nofldrxq10g = -1;
216 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
218 #define NOFLDTXQ_1G 2
219 static int t4_nofldtxq1g = -1;
220 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
222 #define NOFLDRXQ_1G 1
223 static int t4_nofldrxq1g = -1;
224 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
229 static int t4_nnmtxq10g = -1;
230 TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
233 static int t4_nnmrxq10g = -1;
234 TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
237 static int t4_nnmtxq1g = -1;
238 TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
241 static int t4_nnmrxq1g = -1;
242 TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
246 * Holdoff parameters for 10G and 1G ports.
248 #define TMR_IDX_10G 1
249 static int t4_tmr_idx_10g = TMR_IDX_10G;
250 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
252 #define PKTC_IDX_10G (-1)
253 static int t4_pktc_idx_10g = PKTC_IDX_10G;
254 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
257 static int t4_tmr_idx_1g = TMR_IDX_1G;
258 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
260 #define PKTC_IDX_1G (-1)
261 static int t4_pktc_idx_1g = PKTC_IDX_1G;
262 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
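/*
 * Note (assumption based on how these indices are used elsewhere in the
 * driver): the timer index selects an entry in the SGE holdoff-timer array
 * and the packet-count index selects an interrupt packet-count threshold;
 * a pktc index of -1 disables packet-count based holdoff for the queue.
 */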
265 * Size (# of entries) of each tx and rx queue.
267 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
268 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
270 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
271 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
274 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
276 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
277 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
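/*
 * Example (illustrative): the value is a bitmask, so hw.cxgbe.interrupt_types=4
 * restricts the driver to MSI-X, 2 to MSI, 1 to INTx, and 7 (the default
 * above) lets the driver pick the best of the three that is available.
 */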
280 * Configuration file.
282 #define DEFAULT_CF "default"
283 #define FLASH_CF "flash"
284 #define UWIRE_CF "uwire"
285 #define FPGA_CF "fpga"
286 static char t4_cfg_file[32] = DEFAULT_CF;
287 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
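/*
 * Example (illustrative): hw.cxgbe.config_file="uwire" in loader.conf selects
 * the uwire configuration file bundled with the firmware KLD, while "flash"
 * uses whatever configuration is already stored in the card's flash.
 */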
290 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
291 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
292 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
293 * mark or when signalled to do so, 0 to never emit PAUSE.
295 static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
296 TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
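/*
 * Example (illustrative): 3 (PAUSE_TX | PAUSE_RX, the default) enables both
 * directions, 1 heeds incoming PAUSE frames only, 2 emits PAUSE frames only,
 * and 0 disables flow control entirely.
 */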
299 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
300 * encouraged respectively).
302 static unsigned int t4_fw_install = 1;
303 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
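/*
 * Example (illustrative): 0 prohibits the driver from writing firmware to the
 * card, 1 (the default) installs the KLD firmware only when the firmware on
 * the card is unusable or older, and 2 installs it whenever it differs from
 * the version on the card (see should_install_kld_fw below).
 */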
306 * ASIC features that will be used. Disable the ones you don't want so that the
307 * chip resources aren't wasted on features that will not be used.
309 static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
310 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
312 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
313 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
315 static int t4_toecaps_allowed = -1;
316 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
318 static int t4_rdmacaps_allowed = 0;
319 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
321 static int t4_iscsicaps_allowed = 0;
322 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
324 static int t4_fcoecaps_allowed = 0;
325 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
327 static int t5_write_combine = 0;
328 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
330 struct intrs_and_queues {
331 uint16_t intr_type; /* INTx, MSI, or MSI-X */
332 uint16_t nirq; /* Total # of vectors */
333 uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
334 uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
335 uint16_t ntxq10g; /* # of NIC txq's for each 10G port */
336 uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */
337 uint16_t ntxq1g; /* # of NIC txq's for each 1G port */
338 uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */
339 uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */
341 uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */
342 uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */
343 uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */
344 uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */
347 uint16_t nnmtxq10g; /* # of netmap txq's for each 10G port */
348 uint16_t nnmrxq10g; /* # of netmap rxq's for each 10G port */
349 uint16_t nnmtxq1g; /* # of netmap txq's for each 1G port */
350 uint16_t nnmrxq1g; /* # of netmap rxq's for each 1G port */
354 struct filter_entry {
355 uint32_t valid:1; /* filter allocated and valid */
356 uint32_t locked:1; /* filter is administratively locked */
357 uint32_t pending:1; /* filter action is pending firmware reply */
358 uint32_t smtidx:8; /* Source MAC Table index for smac */
359 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
361 struct t4_filter_specification fs;
364 static int map_bars_0_and_4(struct adapter *);
365 static int map_bar_2(struct adapter *);
366 static void setup_memwin(struct adapter *);
367 static int validate_mem_range(struct adapter *, uint32_t, int);
368 static int fwmtype_to_hwmtype(int);
369 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
371 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
372 static uint32_t position_memwin(struct adapter *, int, uint32_t);
373 static int cfg_itype_and_nqueues(struct adapter *, int, int,
374 struct intrs_and_queues *);
375 static int prep_firmware(struct adapter *);
376 static int partition_resources(struct adapter *, const struct firmware *,
378 static int get_params__pre_init(struct adapter *);
379 static int get_params__post_init(struct adapter *);
380 static int set_params__post_init(struct adapter *);
381 static void t4_set_desc(struct adapter *);
382 static void build_medialist(struct port_info *, struct ifmedia *);
383 static int cxgbe_init_synchronized(struct port_info *);
384 static int cxgbe_uninit_synchronized(struct port_info *);
385 static int setup_intr_handlers(struct adapter *);
386 static void quiesce_txq(struct adapter *, struct sge_txq *);
387 static void quiesce_wrq(struct adapter *, struct sge_wrq *);
388 static void quiesce_iq(struct adapter *, struct sge_iq *);
389 static void quiesce_fl(struct adapter *, struct sge_fl *);
390 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
391 driver_intr_t *, void *, char *);
392 static int t4_free_irq(struct adapter *, struct irq *);
393 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
395 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
396 static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
397 static void cxgbe_tick(void *);
398 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
399 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
401 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
402 static int fw_msg_not_handled(struct adapter *, const __be64 *);
403 static int t4_sysctls(struct adapter *);
404 static int cxgbe_sysctls(struct port_info *);
405 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
406 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
407 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
408 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
409 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
410 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
411 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
412 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
413 static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
414 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
415 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
417 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
418 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
419 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
420 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
421 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
422 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
423 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
424 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
425 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
426 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
427 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
428 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
429 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
430 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
431 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
432 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
433 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
434 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
435 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
436 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
437 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
438 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
439 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
440 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
441 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
443 static uint32_t fconf_to_mode(uint32_t);
444 static uint32_t mode_to_fconf(uint32_t);
445 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
446 static int get_filter_mode(struct adapter *, uint32_t *);
447 static int set_filter_mode(struct adapter *, uint32_t);
448 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
449 static int get_filter(struct adapter *, struct t4_filter *);
450 static int set_filter(struct adapter *, struct t4_filter *);
451 static int del_filter(struct adapter *, struct t4_filter *);
452 static void clear_filter(struct filter_entry *);
453 static int set_filter_wr(struct adapter *, int);
454 static int del_filter_wr(struct adapter *, int);
455 static int get_sge_context(struct adapter *, struct t4_sge_context *);
456 static int load_fw(struct adapter *, struct t4_data *);
457 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
458 static int read_i2c(struct adapter *, struct t4_i2c_data *);
459 static int set_sched_class(struct adapter *, struct t4_sched_params *);
460 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
462 static int toe_capability(struct port_info *, int);
464 static int mod_event(module_t, int, void *);
470 {0xa000, "Chelsio Terminator 4 FPGA"},
471 {0x4400, "Chelsio T440-dbg"},
472 {0x4401, "Chelsio T420-CR"},
473 {0x4402, "Chelsio T422-CR"},
474 {0x4403, "Chelsio T440-CR"},
475 {0x4404, "Chelsio T420-BCH"},
476 {0x4405, "Chelsio T440-BCH"},
477 {0x4406, "Chelsio T440-CH"},
478 {0x4407, "Chelsio T420-SO"},
479 {0x4408, "Chelsio T420-CX"},
480 {0x4409, "Chelsio T420-BT"},
481 {0x440a, "Chelsio T404-BT"},
482 {0x440e, "Chelsio T440-LP-CR"},
484 {0xb000, "Chelsio Terminator 5 FPGA"},
485 {0x5400, "Chelsio T580-dbg"},
486 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
487 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
488 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
489 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
490 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
491 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
492 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
493 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
494 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
495 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
496 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
497 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
498 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */
500 {0x5404, "Chelsio T520-BCH"},
501 {0x5405, "Chelsio T540-BCH"},
502 {0x5406, "Chelsio T540-CH"},
503 {0x5408, "Chelsio T520-CX"},
504 {0x540b, "Chelsio B520-SR"},
505 {0x540c, "Chelsio B504-BT"},
506 {0x540f, "Chelsio Amsterdam"},
507 {0x5413, "Chelsio T580-CHR"},
513 * service_iq() is handed an iq and needs its associated fl. The offset of the
514 * fl from the iq must be exactly the same for both rxq and ofld_rxq.
516 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
517 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
520 /* No easy way to include t4_msg.h before adapter.h so we check this way */
521 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
522 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
524 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
527 t4_probe(device_t dev)
530 uint16_t v = pci_get_vendor(dev);
531 uint16_t d = pci_get_device(dev);
532 uint8_t f = pci_get_function(dev);
534 if (v != PCI_VENDOR_ID_CHELSIO)
537 /* Attach only to PF0 of the FPGA */
538 if (d == 0xa000 && f != 0)
541 for (i = 0; i < nitems(t4_pciids); i++) {
542 if (d == t4_pciids[i].device) {
543 device_set_desc(dev, t4_pciids[i].desc);
544 return (BUS_PROBE_DEFAULT);
552 t5_probe(device_t dev)
555 uint16_t v = pci_get_vendor(dev);
556 uint16_t d = pci_get_device(dev);
557 uint8_t f = pci_get_function(dev);
559 if (v != PCI_VENDOR_ID_CHELSIO)
562 /* Attach only to PF0 of the FPGA */
563 if (d == 0xb000 && f != 0)
566 for (i = 0; i < nitems(t5_pciids); i++) {
567 if (d == t5_pciids[i].device) {
568 device_set_desc(dev, t5_pciids[i].desc);
569 return (BUS_PROBE_DEFAULT);
577 t5_attribute_workaround(device_t dev)
583 * The T5 chips do not properly echo the No Snoop and Relaxed
584 * Ordering attributes when replying to a TLP from a Root
585 * Port. As a workaround, find the parent Root Port and
586 * disable No Snoop and Relaxed Ordering. Note that this
587 * affects all devices under this root port.
589 root_port = pci_find_pcie_root_port(dev);
590 if (root_port == NULL) {
591 device_printf(dev, "Unable to find parent root port\n");
595 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
596 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
597 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
599 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
600 device_get_nameunit(root_port));
604 t4_attach(device_t dev)
607 int rc = 0, i, n10g, n1g, rqidx, tqidx;
608 struct intrs_and_queues iaq;
611 int ofld_rqidx, ofld_tqidx;
614 int nm_rqidx, nm_tqidx;
617 sc = device_get_softc(dev);
619 TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);
621 if ((pci_get_device(dev) & 0xff00) == 0x5400)
622 t5_attribute_workaround(dev);
623 pci_enable_busmaster(dev);
624 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
627 pci_set_max_read_req(dev, 4096);
628 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
629 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
630 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
632 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
636 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
637 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
638 device_get_nameunit(dev));
640 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
641 device_get_nameunit(dev));
642 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
643 sx_xlock(&t4_list_lock);
644 SLIST_INSERT_HEAD(&t4_list, sc, link);
645 sx_xunlock(&t4_list_lock);
647 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
648 TAILQ_INIT(&sc->sfl);
649 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
651 mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);
653 rc = map_bars_0_and_4(sc);
655 goto done; /* error message displayed already */
658 * This is the real PF# to which we're attaching. Works from within PCI
659 * passthrough environments too, where pci_get_function() could return a
660 * different PF# depending on the passthrough configuration. We need to
661 * use the real PF# in all our communication with the firmware.
663 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
666 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
667 sc->an_handler = an_not_handled;
668 for (i = 0; i < nitems(sc->cpl_handler); i++)
669 sc->cpl_handler[i] = cpl_not_handled;
670 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
671 sc->fw_msg_handler[i] = fw_msg_not_handled;
672 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
673 t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
674 t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
675 t4_init_sge_cpl_handlers(sc);
677 /* Prepare the adapter for operation */
678 rc = -t4_prep_adapter(sc);
680 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
685 * Do this really early, with the memory windows set up even before the
686 * character device. The userland tool's register i/o and mem read
687 * will work even in "recovery mode".
690 sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
691 device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
692 device_get_nameunit(dev));
693 if (sc->cdev == NULL)
694 device_printf(dev, "failed to create nexus char device.\n");
696 sc->cdev->si_drv1 = sc;
698 /* Go no further if recovery mode has been requested. */
699 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
700 device_printf(dev, "recovery mode.\n");
704 #if defined(__i386__)
705 if ((cpu_feature & CPUID_CX8) == 0) {
706 device_printf(dev, "64 bit atomics not available.\n");
712 /* Prepare the firmware for operation */
713 rc = prep_firmware(sc);
715 goto done; /* error message displayed already */
717 rc = get_params__post_init(sc);
719 goto done; /* error message displayed already */
721 rc = set_params__post_init(sc);
723 goto done; /* error message displayed already */
727 goto done; /* error message displayed already */
729 rc = t4_create_dma_tag(sc);
731 goto done; /* error message displayed already */
734 * First pass over all the ports - allocate VIs and initialize some
735 * basic parameters like mac address, port type, etc. We also figure
736 * out whether a port is 10G or 1G and use that information when
737 * calculating how many interrupts to attempt to allocate.
740 for_each_port(sc, i) {
741 struct port_info *pi;
743 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
746 /* These must be set before t4_port_init */
750 /* Allocate the vi and initialize parameters like mac addr */
751 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
753 device_printf(dev, "unable to initialize port %d: %d\n",
760 pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
761 pi->link_cfg.requested_fc |= t4_pause_settings;
762 pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
763 pi->link_cfg.fc |= t4_pause_settings;
765 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
767 device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
773 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
774 device_get_nameunit(dev), i);
775 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
776 sc->chan_map[pi->tx_chan] = i;
778 if (is_10G_port(pi) || is_40G_port(pi)) {
780 pi->tmr_idx = t4_tmr_idx_10g;
781 pi->pktc_idx = t4_pktc_idx_10g;
784 pi->tmr_idx = t4_tmr_idx_1g;
785 pi->pktc_idx = t4_pktc_idx_1g;
788 pi->xact_addr_filt = -1;
791 pi->qsize_rxq = t4_qsize_rxq;
792 pi->qsize_txq = t4_qsize_txq;
794 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
795 if (pi->dev == NULL) {
797 "failed to add device for port %d.\n", i);
801 device_set_softc(pi->dev, pi);
805 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
807 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
809 goto done; /* error message displayed already */
811 sc->intr_type = iaq.intr_type;
812 sc->intr_count = iaq.nirq;
815 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
816 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
817 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
818 s->neq += sc->params.nports + 1; /* ctrl queues: 1 per port + 1 mgmt */
819 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
821 if (is_offload(sc)) {
822 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
823 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
824 s->neq += s->nofldtxq + s->nofldrxq;
825 s->niq += s->nofldrxq;
827 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
828 M_CXGBE, M_ZERO | M_WAITOK);
829 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
830 M_CXGBE, M_ZERO | M_WAITOK);
834 s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
835 s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
836 s->neq += s->nnmtxq + s->nnmrxq;
839 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
840 M_CXGBE, M_ZERO | M_WAITOK);
841 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
842 M_CXGBE, M_ZERO | M_WAITOK);
845 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
847 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
849 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
851 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
853 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
856 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
859 t4_init_l2t(sc, M_WAITOK);
862 * Second pass over the ports. This time we know the number of rx and
863 * tx queues that each port should get.
867 ofld_rqidx = ofld_tqidx = 0;
870 nm_rqidx = nm_tqidx = 0;
872 for_each_port(sc, i) {
873 struct port_info *pi = sc->port[i];
878 pi->first_rxq = rqidx;
879 pi->first_txq = tqidx;
880 if (is_10G_port(pi) || is_40G_port(pi)) {
881 pi->flags |= iaq.intr_flags_10g;
882 pi->nrxq = iaq.nrxq10g;
883 pi->ntxq = iaq.ntxq10g;
885 pi->flags |= iaq.intr_flags_1g;
886 pi->nrxq = iaq.nrxq1g;
887 pi->ntxq = iaq.ntxq1g;
891 pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
893 pi->rsrv_noflowq = 0;
898 if (is_offload(sc)) {
899 pi->first_ofld_rxq = ofld_rqidx;
900 pi->first_ofld_txq = ofld_tqidx;
901 if (is_10G_port(pi) || is_40G_port(pi)) {
902 pi->nofldrxq = iaq.nofldrxq10g;
903 pi->nofldtxq = iaq.nofldtxq10g;
905 pi->nofldrxq = iaq.nofldrxq1g;
906 pi->nofldtxq = iaq.nofldtxq1g;
908 ofld_rqidx += pi->nofldrxq;
909 ofld_tqidx += pi->nofldtxq;
913 pi->first_nm_rxq = nm_rqidx;
914 pi->first_nm_txq = nm_tqidx;
915 if (is_10G_port(pi) || is_40G_port(pi)) {
916 pi->nnmrxq = iaq.nnmrxq10g;
917 pi->nnmtxq = iaq.nnmtxq10g;
919 pi->nnmrxq = iaq.nnmrxq1g;
920 pi->nnmtxq = iaq.nnmtxq1g;
922 nm_rqidx += pi->nnmrxq;
923 nm_tqidx += pi->nnmtxq;
927 rc = setup_intr_handlers(sc);
930 "failed to setup interrupt handlers: %d\n", rc);
934 rc = bus_generic_attach(dev);
937 "failed to attach all child ports: %d\n", rc);
942 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
943 sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
944 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
945 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
946 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
951 if (rc != 0 && sc->cdev) {
952 /* cdev was created and so cxgbetool works; recover that way. */
954 "error during attach, adapter is now in recovery mode.\n");
970 t4_detach(device_t dev)
973 struct port_info *pi;
976 sc = device_get_softc(dev);
978 if (sc->flags & FULL_INIT_DONE)
982 destroy_dev(sc->cdev);
986 rc = bus_generic_detach(dev);
989 "failed to detach child devices: %d\n", rc);
993 for (i = 0; i < sc->intr_count; i++)
994 t4_free_irq(sc, &sc->irq[i]);
996 for (i = 0; i < MAX_NPORTS; i++) {
999 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
1001 device_delete_child(dev, pi->dev);
1003 mtx_destroy(&pi->pi_lock);
1008 if (sc->flags & FULL_INIT_DONE)
1009 adapter_full_uninit(sc);
1011 if (sc->flags & FW_OK)
1012 t4_fw_bye(sc, sc->mbox);
1014 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
1015 pci_release_msi(dev);
1018 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
1022 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
1026 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
1030 t4_free_l2t(sc->l2t);
1033 free(sc->sge.ofld_rxq, M_CXGBE);
1034 free(sc->sge.ofld_txq, M_CXGBE);
1037 free(sc->sge.nm_rxq, M_CXGBE);
1038 free(sc->sge.nm_txq, M_CXGBE);
1040 free(sc->irq, M_CXGBE);
1041 free(sc->sge.rxq, M_CXGBE);
1042 free(sc->sge.txq, M_CXGBE);
1043 free(sc->sge.ctrlq, M_CXGBE);
1044 free(sc->sge.iqmap, M_CXGBE);
1045 free(sc->sge.eqmap, M_CXGBE);
1046 free(sc->tids.ftid_tab, M_CXGBE);
1047 t4_destroy_dma_tag(sc);
1048 if (mtx_initialized(&sc->sc_lock)) {
1049 sx_xlock(&t4_list_lock);
1050 SLIST_REMOVE(&t4_list, sc, adapter, link);
1051 sx_xunlock(&t4_list_lock);
1052 mtx_destroy(&sc->sc_lock);
1055 if (mtx_initialized(&sc->tids.ftid_lock))
1056 mtx_destroy(&sc->tids.ftid_lock);
1057 if (mtx_initialized(&sc->sfl_lock))
1058 mtx_destroy(&sc->sfl_lock);
1059 if (mtx_initialized(&sc->ifp_lock))
1060 mtx_destroy(&sc->ifp_lock);
1061 if (mtx_initialized(&sc->regwin_lock))
1062 mtx_destroy(&sc->regwin_lock);
1064 bzero(sc, sizeof(*sc));
1070 cxgbe_probe(device_t dev)
1073 struct port_info *pi = device_get_softc(dev);
1075 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1076 device_set_desc_copy(dev, buf);
1078 return (BUS_PROBE_DEFAULT);
1081 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
1082 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
1083 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
1084 #define T4_CAP_ENABLE (T4_CAP)
1087 cxgbe_attach(device_t dev)
1089 struct port_info *pi = device_get_softc(dev);
1094 /* Allocate an ifnet and set it up */
1095 ifp = if_alloc(IFT_ETHER);
1097 device_printf(dev, "Cannot allocate ifnet\n");
1103 callout_init(&pi->tick, CALLOUT_MPSAFE);
1105 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1106 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1108 ifp->if_init = cxgbe_init;
1109 ifp->if_ioctl = cxgbe_ioctl;
1110 ifp->if_transmit = cxgbe_transmit;
1111 ifp->if_qflush = cxgbe_qflush;
1113 ifp->if_capabilities = T4_CAP;
1115 if (is_offload(pi->adapter))
1116 ifp->if_capabilities |= IFCAP_TOE;
1118 ifp->if_capenable = T4_CAP_ENABLE;
1119 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
1120 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
1122 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1123 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
1124 ifp->if_hw_tsomaxsegsize = 65536;
1126 /* Initialize ifmedia for this port */
1127 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
1128 cxgbe_media_status);
1129 build_medialist(pi, &pi->media);
1131 pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
1132 EVENTHANDLER_PRI_ANY);
1134 ether_ifattach(ifp, pi->hw_addr);
1137 s = malloc(n, M_CXGBE, M_WAITOK);
1138 o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
1141 if (is_offload(pi->adapter)) {
1142 o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
1143 pi->nofldtxq, pi->nofldrxq);
1148 o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
1152 device_printf(dev, "%s\n", s);
1156 /* nm_media handled here to keep implementation private to this file */
1157 ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
1158 cxgbe_media_status);
1159 build_medialist(pi, &pi->nm_media);
1160 create_netmap_ifnet(pi); /* logs errors if something fails */
1168 cxgbe_detach(device_t dev)
1170 struct port_info *pi = device_get_softc(dev);
1171 struct adapter *sc = pi->adapter;
1172 struct ifnet *ifp = pi->ifp;
1174 /* Tell if_ioctl and if_init that the port is going away */
1179 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1182 sc->last_op = "t4detach";
1183 sc->last_op_thr = curthread;
1184 sc->last_op_flags = 0;
1188 if (pi->flags & HAS_TRACEQ) {
1189 sc->traceq = -1; /* cloner should not create ifnet */
1190 t4_tracer_port_detach(sc);
1194 EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1197 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1198 callout_stop(&pi->tick);
1200 callout_drain(&pi->tick);
1202 /* Let detach proceed even if these fail. */
1203 cxgbe_uninit_synchronized(pi);
1204 port_full_uninit(pi);
1206 ifmedia_removeall(&pi->media);
1207 ether_ifdetach(pi->ifp);
1211 /* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
1212 destroy_netmap_ifnet(pi);
1224 cxgbe_init(void *arg)
1226 struct port_info *pi = arg;
1227 struct adapter *sc = pi->adapter;
1229 if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1231 cxgbe_init_synchronized(pi);
1232 end_synchronized_op(sc, 0);
1236 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1238 int rc = 0, mtu, flags, can_sleep;
1239 struct port_info *pi = ifp->if_softc;
1240 struct adapter *sc = pi->adapter;
1241 struct ifreq *ifr = (struct ifreq *)data;
1247 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1250 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1254 if (pi->flags & PORT_INIT_DONE) {
1255 t4_update_fl_bufsize(ifp);
1256 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1257 rc = update_mac_settings(ifp, XGMAC_MTU);
1259 end_synchronized_op(sc, 0);
1265 rc = begin_synchronized_op(sc, pi,
1266 can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1270 if (ifp->if_flags & IFF_UP) {
1271 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1272 flags = pi->if_flags;
1273 if ((ifp->if_flags ^ flags) &
1274 (IFF_PROMISC | IFF_ALLMULTI)) {
1275 if (can_sleep == 1) {
1276 end_synchronized_op(sc, 0);
1280 rc = update_mac_settings(ifp,
1281 XGMAC_PROMISC | XGMAC_ALLMULTI);
1284 if (can_sleep == 0) {
1285 end_synchronized_op(sc, LOCK_HELD);
1289 rc = cxgbe_init_synchronized(pi);
1291 pi->if_flags = ifp->if_flags;
1292 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1293 if (can_sleep == 0) {
1294 end_synchronized_op(sc, LOCK_HELD);
1298 rc = cxgbe_uninit_synchronized(pi);
1300 end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1304 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1305 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1308 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1309 rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1310 end_synchronized_op(sc, LOCK_HELD);
1314 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1318 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1319 if (mask & IFCAP_TXCSUM) {
1320 ifp->if_capenable ^= IFCAP_TXCSUM;
1321 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1323 if (IFCAP_TSO4 & ifp->if_capenable &&
1324 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1325 ifp->if_capenable &= ~IFCAP_TSO4;
1327 "tso4 disabled due to -txcsum.\n");
1330 if (mask & IFCAP_TXCSUM_IPV6) {
1331 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1332 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1334 if (IFCAP_TSO6 & ifp->if_capenable &&
1335 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1336 ifp->if_capenable &= ~IFCAP_TSO6;
1338 "tso6 disabled due to -txcsum6.\n");
1341 if (mask & IFCAP_RXCSUM)
1342 ifp->if_capenable ^= IFCAP_RXCSUM;
1343 if (mask & IFCAP_RXCSUM_IPV6)
1344 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1347 * Note that we leave CSUM_TSO alone (it is always set). The
1348 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1349 * sending a TSO request our way, so it's sufficient to toggle
1352 if (mask & IFCAP_TSO4) {
1353 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1354 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1355 if_printf(ifp, "enable txcsum first.\n");
1359 ifp->if_capenable ^= IFCAP_TSO4;
1361 if (mask & IFCAP_TSO6) {
1362 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1363 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1364 if_printf(ifp, "enable txcsum6 first.\n");
1368 ifp->if_capenable ^= IFCAP_TSO6;
1370 if (mask & IFCAP_LRO) {
1371 #if defined(INET) || defined(INET6)
1373 struct sge_rxq *rxq;
1375 ifp->if_capenable ^= IFCAP_LRO;
1376 for_each_rxq(pi, i, rxq) {
1377 if (ifp->if_capenable & IFCAP_LRO)
1378 rxq->iq.flags |= IQ_LRO_ENABLED;
1380 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1385 if (mask & IFCAP_TOE) {
1386 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1388 rc = toe_capability(pi, enable);
1392 ifp->if_capenable ^= mask;
1395 if (mask & IFCAP_VLAN_HWTAGGING) {
1396 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1397 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1398 rc = update_mac_settings(ifp, XGMAC_VLANEX);
1400 if (mask & IFCAP_VLAN_MTU) {
1401 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1403 /* Need to find out how to disable auto-mtu-inflation */
1405 if (mask & IFCAP_VLAN_HWTSO)
1406 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1407 if (mask & IFCAP_VLAN_HWCSUM)
1408 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1410 #ifdef VLAN_CAPABILITIES
1411 VLAN_CAPABILITIES(ifp);
1414 end_synchronized_op(sc, 0);
1419 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1423 struct ifi2creq i2c;
1425 rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1428 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1432 if (i2c.len > sizeof(i2c.data)) {
1436 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
1439 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
1440 i2c.offset, i2c.len, &i2c.data[0]);
1441 end_synchronized_op(sc, 0);
1443 rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1448 rc = ether_ioctl(ifp, cmd, data);
1455 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1457 struct port_info *pi = ifp->if_softc;
1458 struct adapter *sc = pi->adapter;
1459 struct sge_txq *txq;
1464 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */
1466 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1472 if (__predict_false(rc != 0)) {
1473 MPASS(m == NULL); /* was freed already */
1474 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
1479 txq = &sc->sge.txq[pi->first_txq];
1480 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1481 txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq)) +
1485 rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1486 if (__predict_false(rc != 0))
1493 cxgbe_qflush(struct ifnet *ifp)
1495 struct port_info *pi = ifp->if_softc;
1496 struct sge_txq *txq;
1499 /* queues do not exist if !PORT_INIT_DONE. */
1500 if (pi->flags & PORT_INIT_DONE) {
1501 for_each_txq(pi, i, txq) {
1503 txq->eq.flags &= ~EQ_ENABLED;
1505 while (!mp_ring_is_idle(txq->r)) {
1506 mp_ring_check_drainage(txq->r, 0);
1515 cxgbe_media_change(struct ifnet *ifp)
1517 struct port_info *pi = ifp->if_softc;
1519 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1521 return (EOPNOTSUPP);
1525 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1527 struct port_info *pi = ifp->if_softc;
1528 struct ifmedia *media = NULL;
1529 struct ifmedia_entry *cur;
1530 int speed = pi->link_cfg.speed;
1535 else if (ifp == pi->nm_ifp)
1536 media = &pi->nm_media;
1538 MPASS(media != NULL);
1540 cur = media->ifm_cur;
1542 ifmr->ifm_status = IFM_AVALID;
1543 if (!pi->link_cfg.link_ok)
1546 ifmr->ifm_status |= IFM_ACTIVE;
1548 /* active and current will differ iff current media is autoselect. */
1549 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1552 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1553 if (speed == SPEED_10000)
1554 ifmr->ifm_active |= IFM_10G_T;
1555 else if (speed == SPEED_1000)
1556 ifmr->ifm_active |= IFM_1000_T;
1557 else if (speed == SPEED_100)
1558 ifmr->ifm_active |= IFM_100_TX;
1559 else if (speed == SPEED_10)
1560 ifmr->ifm_active |= IFM_10_T;
1562 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1567 t4_fatal_err(struct adapter *sc)
1569 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1570 t4_intr_disable(sc);
1571 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1572 device_get_nameunit(sc->dev));
1576 map_bars_0_and_4(struct adapter *sc)
1578 sc->regs_rid = PCIR_BAR(0);
1579 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1580 &sc->regs_rid, RF_ACTIVE);
1581 if (sc->regs_res == NULL) {
1582 device_printf(sc->dev, "cannot map registers.\n");
1585 sc->bt = rman_get_bustag(sc->regs_res);
1586 sc->bh = rman_get_bushandle(sc->regs_res);
1587 sc->mmio_len = rman_get_size(sc->regs_res);
1588 setbit(&sc->doorbells, DOORBELL_KDB);
1590 sc->msix_rid = PCIR_BAR(4);
1591 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1592 &sc->msix_rid, RF_ACTIVE);
1593 if (sc->msix_res == NULL) {
1594 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1602 map_bar_2(struct adapter *sc)
1606 * T4: only the iWARP driver uses the userspace doorbells, so there is no
1607 * need to map this BAR if RDMA is disabled.
1609 if (is_t4(sc) && sc->rdmacaps == 0)
1612 sc->udbs_rid = PCIR_BAR(2);
1613 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1614 &sc->udbs_rid, RF_ACTIVE);
1615 if (sc->udbs_res == NULL) {
1616 device_printf(sc->dev, "cannot map doorbell BAR.\n");
1619 sc->udbs_base = rman_get_virtual(sc->udbs_res);
1622 setbit(&sc->doorbells, DOORBELL_UDB);
1623 #if defined(__i386__) || defined(__amd64__)
1624 if (t5_write_combine) {
1628 * Enable write combining on BAR2. This is the
1629 * userspace doorbell BAR and is split into 128B
1630 * (UDBS_SEG_SIZE) doorbell regions, each associated
1631 * with an egress queue. The first 64B has the doorbell
1632 * and the second 64B can be used to submit a tx work
1633 * request with an implicit doorbell.
1636 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1637 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1639 clrbit(&sc->doorbells, DOORBELL_UDB);
1640 setbit(&sc->doorbells, DOORBELL_WCWR);
1641 setbit(&sc->doorbells, DOORBELL_UDBWC);
1643 device_printf(sc->dev,
1644 "couldn't enable write combining: %d\n",
1648 t4_write_reg(sc, A_SGE_STAT_CFG,
1649 V_STATSOURCE_T5(7) | V_STATMODE(0));
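/*
 * Illustrative arithmetic (the 2MB BAR size is an assumption, not taken from
 * this file): with 128B (UDBS_SEG_SIZE) per egress queue, a 2MB BAR2 holds
 * 2MB / 128B = 16384 doorbell segments, each split into a 64B doorbell area
 * and a 64B area for a write-combined work request with an implicit doorbell.
 */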
1657 static const struct memwin t4_memwin[] = {
1658 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1659 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1660 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1663 static const struct memwin t5_memwin[] = {
1664 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1665 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1666 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1670 setup_memwin(struct adapter *sc)
1672 const struct memwin *mw;
1678 * Read low 32b of bar0 indirectly via the hardware backdoor
1679 * mechanism. Works from within PCI passthrough environments
1680 * too, where rman_get_start() can return a different value. We
1681 * need to program the T4 memory window decoders with the actual
1682 * addresses that will be coming across the PCIe link.
1684 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1685 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1688 n = nitems(t4_memwin);
1690 /* T5 uses the relative offset inside the PCIe BAR */
1694 n = nitems(t5_memwin);
1697 for (i = 0; i < n; i++, mw++) {
1699 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1700 (mw->base + bar0) | V_BIR(0) |
1701 V_WINDOW(ilog2(mw->aperture) - 10));
1705 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1709 * Verify that the memory range specified by the addr/len pair is valid and lies
1710 * entirely within a single region (EDCx or MCx).
1713 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1715 uint32_t em, addr_len, maddr, mlen;
1717 /* Memory can only be accessed in naturally aligned 4 byte units */
1718 if (addr & 3 || len & 3 || len == 0)
1721 /* Enabled memories */
1722 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1723 if (em & F_EDRAM0_ENABLE) {
1724 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1725 maddr = G_EDRAM0_BASE(addr_len) << 20;
1726 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1727 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1728 addr + len <= maddr + mlen)
1731 if (em & F_EDRAM1_ENABLE) {
1732 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1733 maddr = G_EDRAM1_BASE(addr_len) << 20;
1734 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1735 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1736 addr + len <= maddr + mlen)
1739 if (em & F_EXT_MEM_ENABLE) {
1740 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1741 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1742 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1743 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1744 addr + len <= maddr + mlen)
1747 if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1748 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1749 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1750 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1751 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1752 addr + len <= maddr + mlen)
1760 fwmtype_to_hwmtype(int mtype)
1764 case FW_MEMTYPE_EDC0:
1766 case FW_MEMTYPE_EDC1:
1768 case FW_MEMTYPE_EXTMEM:
1770 case FW_MEMTYPE_EXTMEM1:
1773 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1778 * Verify that the memory range specified by the memtype/offset/len pair is
1779 * valid and lies entirely within the memtype specified. The global address of
1780 * the start of the range is returned in addr.
1783 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1786 uint32_t em, addr_len, maddr, mlen;
1788 /* Memory can only be accessed in naturally aligned 4 byte units */
1789 if (off & 3 || len & 3 || len == 0)
1792 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1793 switch (fwmtype_to_hwmtype(mtype)) {
1795 if (!(em & F_EDRAM0_ENABLE))
1797 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1798 maddr = G_EDRAM0_BASE(addr_len) << 20;
1799 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1802 if (!(em & F_EDRAM1_ENABLE))
1804 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1805 maddr = G_EDRAM1_BASE(addr_len) << 20;
1806 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1809 if (!(em & F_EXT_MEM_ENABLE))
1811 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1812 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1813 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1816 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1818 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1819 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1820 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1826 if (mlen > 0 && off < mlen && off + len <= mlen) {
1827 *addr = maddr + off; /* global address */
1835 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1837 const struct memwin *mw;
1840 KASSERT(win >= 0 && win < nitems(t4_memwin),
1841 ("%s: incorrect memwin# (%d)", __func__, win));
1842 mw = &t4_memwin[win];
1844 KASSERT(win >= 0 && win < nitems(t5_memwin),
1845 ("%s: incorrect memwin# (%d)", __func__, win));
1846 mw = &t5_memwin[win];
1851 if (aperture != NULL)
1852 *aperture = mw->aperture;
1856 * Positions the memory window such that it can be used to access the specified
1857 * address in the chip's address space. The return value is the offset of addr
1858 * from the start of the window.
1861 position_memwin(struct adapter *sc, int n, uint32_t addr)
1866 KASSERT(n >= 0 && n <= 3,
1867 ("%s: invalid window %d.", __func__, n));
1868 KASSERT((addr & 3) == 0,
1869 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1873 start = addr & ~0xf; /* start must be 16B aligned */
1875 pf = V_PFNUM(sc->pf);
1876 start = addr & ~0x7f; /* start must be 128B aligned */
1878 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1880 t4_write_reg(sc, reg, start | pf);
1881 t4_read_reg(sc, reg);
1883 return (addr - start);
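/*
 * Worked example (illustrative): on a T5, position_memwin(sc, 2, 0x1234567c)
 * programs window 2 at 0x12345600 (128B aligned, OR'ed with the PF number)
 * and returns 0x7c, the offset of the requested address within the window.
 */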
1887 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1888 struct intrs_and_queues *iaq)
1890 int rc, itype, navail, nrxq10g, nrxq1g, n;
1891 int nofldrxq10g = 0, nofldrxq1g = 0;
1892 int nnmrxq10g = 0, nnmrxq1g = 0;
1894 bzero(iaq, sizeof(*iaq));
1896 iaq->ntxq10g = t4_ntxq10g;
1897 iaq->ntxq1g = t4_ntxq1g;
1898 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1899 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1900 iaq->rsrv_noflowq = t4_rsrv_noflowq;
1902 if (is_offload(sc)) {
1903 iaq->nofldtxq10g = t4_nofldtxq10g;
1904 iaq->nofldtxq1g = t4_nofldtxq1g;
1905 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1906 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1910 iaq->nnmtxq10g = t4_nnmtxq10g;
1911 iaq->nnmtxq1g = t4_nnmtxq1g;
1912 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
1913 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
1916 for (itype = INTR_MSIX; itype; itype >>= 1) {
1918 if ((itype & t4_intr_types) == 0)
1919 continue; /* not allowed */
1921 if (itype == INTR_MSIX)
1922 navail = pci_msix_count(sc->dev);
1923 else if (itype == INTR_MSI)
1924 navail = pci_msi_count(sc->dev);
1931 iaq->intr_type = itype;
1932 iaq->intr_flags_10g = 0;
1933 iaq->intr_flags_1g = 0;
1936 * Best option: an interrupt vector for errors, one for the
1937 * firmware event queue, and one for every rxq (NIC, TOE, and
1940 iaq->nirq = T4_EXTRA_INTR;
1941 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
1942 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
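/*
 * Worked example (illustrative): a 2-port 10G adapter with nrxq10g = 8,
 * nofldrxq10g = 2 and no netmap queues asks for
 * T4_EXTRA_INTR + 2 * (8 + 2 + 0) vectors in this best case.
 */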
1943 if (iaq->nirq <= navail &&
1944 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1945 iaq->intr_flags_10g = INTR_ALL;
1946 iaq->intr_flags_1g = INTR_ALL;
1951 * Second best option: a vector for errors, one for the firmware
1952 * event queue, and vectors for either all the NIC rx queues or
1953 * all the TOE rx queues. The queues that don't get vectors
1954 * will forward their interrupts to those that do.
1956 * Note: netmap rx queues cannot be created early and so they
1957 * can't be set up to receive forwarded interrupts for others.
1959 iaq->nirq = T4_EXTRA_INTR;
1960 if (nrxq10g >= nofldrxq10g) {
1961 iaq->intr_flags_10g = INTR_RXQ;
1962 iaq->nirq += n10g * nrxq10g;
1964 iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
1967 iaq->intr_flags_10g = INTR_OFLD_RXQ;
1968 iaq->nirq += n10g * nofldrxq10g;
1970 iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
1973 if (nrxq1g >= nofldrxq1g) {
1974 iaq->intr_flags_1g = INTR_RXQ;
1975 iaq->nirq += n1g * nrxq1g;
1977 iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
1980 iaq->intr_flags_1g = INTR_OFLD_RXQ;
1981 iaq->nirq += n1g * nofldrxq1g;
1983 iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
1986 if (iaq->nirq <= navail &&
1987 (itype != INTR_MSI || powerof2(iaq->nirq)))
1991 * Next best option: an interrupt vector for errors, one for the
1992 * firmware event queue, and at least one per port. At this
1993 * point we know we'll have to downsize nrxq and/or nofldrxq
1994 * and/or nnmrxq to fit what's available to us.
1996 iaq->nirq = T4_EXTRA_INTR;
1997 iaq->nirq += n10g + n1g;
1998 if (iaq->nirq <= navail) {
1999 int leftover = navail - iaq->nirq;
2002 int target = max(nrxq10g, nofldrxq10g);
2004 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2005 INTR_RXQ : INTR_OFLD_RXQ;
2008 while (n < target && leftover >= n10g) {
2013 iaq->nrxq10g = min(n, nrxq10g);
2015 iaq->nofldrxq10g = min(n, nofldrxq10g);
2018 iaq->nnmrxq10g = min(n, nnmrxq10g);
2023 int target = max(nrxq1g, nofldrxq1g);
2025 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2026 INTR_RXQ : INTR_OFLD_RXQ;
2029 while (n < target && leftover >= n1g) {
2034 iaq->nrxq1g = min(n, nrxq1g);
2036 iaq->nofldrxq1g = min(n, nofldrxq1g);
2039 iaq->nnmrxq1g = min(n, nnmrxq1g);
2043 if (itype != INTR_MSI || powerof2(iaq->nirq))
2048 * Least desirable option: one interrupt vector for everything.
2050 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2051 iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2054 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2057 iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
2063 if (itype == INTR_MSIX)
2064 rc = pci_alloc_msix(sc->dev, &navail);
2065 else if (itype == INTR_MSI)
2066 rc = pci_alloc_msi(sc->dev, &navail);
2069 if (navail == iaq->nirq)
2073 * Didn't get the number requested. Use whatever number
2074 * the kernel is willing to allocate (it's in navail).
2076 device_printf(sc->dev, "fewer vectors than requested, "
2077 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2078 itype, iaq->nirq, navail);
2079 pci_release_msi(sc->dev);
2083 device_printf(sc->dev,
2084 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2085 itype, rc, iaq->nirq, navail);
2088 device_printf(sc->dev,
2089 "failed to find a usable interrupt type. "
2090 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2091 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2096 #define FW_VERSION(chip) ( \
2097 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2098 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2099 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2100 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2101 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
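/*
 * Example (illustrative, assuming the usual 8-bit-per-field layout of
 * major << 24 | minor << 16 | micro << 8 | build): FW_VERSION() packs
 * firmware version 1.11.27.0 as 0x010b1b00.
 */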
2107 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
2111 .kld_name = "t4fw_cfg",
2112 .fw_mod_name = "t4fw",
2114 .chip = FW_HDR_CHIP_T4,
2115 .fw_ver = htobe32_const(FW_VERSION(T4)),
2116 .intfver_nic = FW_INTFVER(T4, NIC),
2117 .intfver_vnic = FW_INTFVER(T4, VNIC),
2118 .intfver_ofld = FW_INTFVER(T4, OFLD),
2119 .intfver_ri = FW_INTFVER(T4, RI),
2120 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
2121 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
2122 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
2123 .intfver_fcoe = FW_INTFVER(T4, FCOE),
2127 .kld_name = "t5fw_cfg",
2128 .fw_mod_name = "t5fw",
2130 .chip = FW_HDR_CHIP_T5,
2131 .fw_ver = htobe32_const(FW_VERSION(T5)),
2132 .intfver_nic = FW_INTFVER(T5, NIC),
2133 .intfver_vnic = FW_INTFVER(T5, VNIC),
2134 .intfver_ofld = FW_INTFVER(T5, OFLD),
2135 .intfver_ri = FW_INTFVER(T5, RI),
2136 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
2137 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
2138 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
2139 .intfver_fcoe = FW_INTFVER(T5, FCOE),
2144 static struct fw_info *
2145 find_fw_info(int chip)
2149 for (i = 0; i < nitems(fw_info); i++) {
2150 if (fw_info[i].chip == chip)
2151 return (&fw_info[i]);
2157 * Is the given firmware API compatible with the one the driver was compiled
2161 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2164 /* short circuit if it's the exact same firmware version */
2165 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2169 * XXX: Is this too conservative? Perhaps I should limit this to the
2170 * features that are supported in the driver.
2172 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2173 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2174 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2175 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2183 * The firmware in the KLD is usable, but should it be installed? This routine
2184 * explains itself in detail if it indicates the KLD firmware should be
2188 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2192 if (!card_fw_usable) {
2193 reason = "incompatible or unusable";
2198 reason = "older than the version bundled with this driver";
2202 if (t4_fw_install == 2 && k != c) {
2203 reason = "different than the version bundled with this driver";
2210 if (t4_fw_install == 0) {
2211 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2212 "but the driver is prohibited from installing a different "
2213 "firmware on the card.\n",
2214 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2215 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2220 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2221 "installing firmware %u.%u.%u.%u on card.\n",
2222 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2223 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2224 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2225 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2230 * Establish contact with the firmware and determine if we are the master driver
2231 * or not, and whether we are responsible for chip initialization.
2234 prep_firmware(struct adapter *sc)
2236 const struct firmware *fw = NULL, *default_cfg;
2237 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2238 enum dev_state state;
2239 struct fw_info *fw_info;
2240 struct fw_hdr *card_fw; /* fw on the card */
2241 const struct fw_hdr *kld_fw; /* fw in the KLD */
2242 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
2245 /* Contact firmware. */
2246 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2247 if (rc < 0 || state == DEV_STATE_ERR) {
2249 device_printf(sc->dev,
2250 "failed to connect to the firmware: %d, %d.\n", rc, state);
2255 sc->flags |= MASTER_PF;
2256 else if (state == DEV_STATE_UNINIT) {
2258 * We didn't get to be the master so we definitely won't be
2259 * configuring the chip. It's a bug if someone else hasn't
2260 * configured it already.
2262 device_printf(sc->dev, "couldn't be master(%d), "
2263 "device not already initialized either(%d).\n", rc, state);
2267 /* This is the firmware whose headers the driver was compiled against */
2268 fw_info = find_fw_info(chip_id(sc));
2269 if (fw_info == NULL) {
2270 device_printf(sc->dev,
2271 "unable to look up firmware information for chip %d.\n",
2275 drv_fw = &fw_info->fw_hdr;
2278 * The firmware KLD contains many modules. The KLD name is also the
2279 * name of the module that contains the default config file.
2281 default_cfg = firmware_get(fw_info->kld_name);
2283 /* Read the header of the firmware on the card */
2284 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2285 rc = -t4_read_flash(sc, FLASH_FW_START,
2286 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2288 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2290 device_printf(sc->dev,
2291 "Unable to read card's firmware header: %d\n", rc);
2295 /* This is the firmware in the KLD */
2296 fw = firmware_get(fw_info->fw_mod_name);
2298 kld_fw = (const void *)fw->data;
2299 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2305 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2306 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2308 * Common case: the firmware on the card is an exact match and
2309 * the KLD is an exact match too, or the KLD is
2310 * absent/incompatible. Note that t4_fw_install = 2 is ignored
2311 * here -- use cxgbetool loadfw if you want to reinstall the
2312 * same firmware as the one on the card.
2314 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2315 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2316 be32toh(card_fw->fw_ver))) {
2318 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2320 device_printf(sc->dev,
2321 "failed to install firmware: %d\n", rc);
2325 /* Installed successfully, update the cached header too. */
2326 memcpy(card_fw, kld_fw, sizeof(*card_fw));
2328 need_fw_reset = 0; /* already reset as part of load_fw */
2331 if (!card_fw_usable) {
2334 d = ntohl(drv_fw->fw_ver);
2335 c = ntohl(card_fw->fw_ver);
2336 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2338 device_printf(sc->dev, "Cannot find a usable firmware: "
2339 "fw_install %d, chip state %d, "
2340 "driver compiled with %d.%d.%d.%d, "
2341 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2342 t4_fw_install, state,
2343 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2344 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2345 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2346 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2347 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2348 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2353 /* We're using whatever's on the card and it's known to be good. */
2354 sc->params.fw_vers = ntohl(card_fw->fw_ver);
2355 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2356 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2357 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2358 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2359 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2360 t4_get_tp_version(sc, &sc->params.tp_vers);
2363 if (need_fw_reset &&
2364 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2365 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2366 if (rc != ETIMEDOUT && rc != EIO)
2367 t4_fw_bye(sc, sc->mbox);
2372 rc = get_params__pre_init(sc);
2374 goto done; /* error message displayed already */
2376 /* Partition adapter resources as specified in the config file. */
2377 if (state == DEV_STATE_UNINIT) {
2379 KASSERT(sc->flags & MASTER_PF,
2380 ("%s: trying to change chip settings when not master.",
2383 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2385 goto done; /* error message displayed already */
2387 t4_tweak_chip_settings(sc);
2389 /* Tell the firmware to perform its basic initialization of the chip. */
2390 rc = -t4_fw_initialize(sc, sc->mbox);
2392 device_printf(sc->dev, "fw init failed: %d.\n", rc);
2396 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2401 free(card_fw, M_CXGBE);
2403 firmware_put(fw, FIRMWARE_UNLOAD);
2404 if (default_cfg != NULL)
2405 firmware_put(default_cfg, FIRMWARE_UNLOAD);
2410 #define FW_PARAM_DEV(param) \
2411 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2412 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2413 #define FW_PARAM_PFVF(param) \
2414 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2415 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
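/*
 * Example of how these are used: FW_PARAM_DEV(CF) builds the 32-bit
 * parameter id
 *     V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)
 * which is what gets handed to t4_query_params()/t4_set_params() below.
 */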
2418 * Partition chip resources for use between various PFs, VFs, etc.
2421 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2422 const char *name_prefix)
2424 const struct firmware *cfg = NULL;
2426 struct fw_caps_config_cmd caps;
2427 uint32_t mtype, moff, finicsum, cfcsum;
2430 * Figure out what configuration file to use. Pick the default config
2431 * file for the card if the user hasn't specified one explicitly.
2433 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2434 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2435 /* Card specific overrides go here. */
2436 if (pci_get_device(sc->dev) == 0x440a)
2437 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2439 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2443 * We need to load another module if the profile is anything except
2444 * "default" or "flash".
2446 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2447 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2450 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2451 cfg = firmware_get(s);
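/*
 * For example, on a T5 with a (hypothetical) profile named "myprofile" the
 * snprintf above produces "t5fw_cfg_myprofile" and that module is requested
 * here; the stock "default" and "flash" profiles never reach this point.
 */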
2453 if (default_cfg != NULL) {
2454 device_printf(sc->dev,
2455 "unable to load module \"%s\" for "
2456 "configuration profile \"%s\", will use "
2457 "the default config file instead.\n",
2459 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2462 device_printf(sc->dev,
2463 "unable to load module \"%s\" for "
2464 "configuration profile \"%s\", will use "
2465 "the config file on the card's flash "
2466 "instead.\n", s, sc->cfg_file);
2467 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2473 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2474 default_cfg == NULL) {
2475 device_printf(sc->dev,
2476 "default config file not available, will use the config "
2477 "file on the card's flash instead.\n");
2478 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2481 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2483 const uint32_t *cfdata;
2484 uint32_t param, val, addr, off, mw_base, mw_aperture;
2486 KASSERT(cfg != NULL || default_cfg != NULL,
2487 ("%s: no config to upload", __func__));
2490 * Ask the firmware where it wants us to upload the config file.
2492 param = FW_PARAM_DEV(CF);
2493 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2495 /* No support for config file? Shouldn't happen. */
2496 device_printf(sc->dev,
2497 "failed to query config file location: %d.\n", rc);
2500 mtype = G_FW_PARAMS_PARAM_Y(val);
2501 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
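/*
 * mtype selects the memory type (flash or one of the chip memories) and the
 * firmware reports the offset in 64KB units -- hence the << 16 above to turn
 * it into a byte offset.  The unit is inferred from the shift rather than
 * spelled out here.
 */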
2504 * XXX: sheer laziness. We deliberately added 4 bytes of
2505 * useless stuffing/comments at the end of the config file so
2506 * it's ok to simply throw away the last remaining bytes when
2507 * the config file is not an exact multiple of 4. This also
2508 * helps with the validate_mt_off_len check.
2511 cflen = cfg->datasize & ~3;
2514 cflen = default_cfg->datasize & ~3;
2515 cfdata = default_cfg->data;
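/*
 * Example of the truncation described above: a 1027-byte config file is
 * treated as 1024 bytes here; the dropped bytes are the stuffing mentioned
 * in the XXX comment.
 */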
2518 if (cflen > FLASH_CFG_MAX_SIZE) {
2519 device_printf(sc->dev,
2520 "config file too long (%d, max allowed is %d). "
2521 "Will try to use the config on the card, if any.\n",
2522 cflen, FLASH_CFG_MAX_SIZE);
2523 goto use_config_on_flash;
2526 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2528 device_printf(sc->dev,
2529 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
2530 "Will try to use the config on the card, if any.\n",
2531 __func__, mtype, moff, cflen, rc);
2532 goto use_config_on_flash;
2535 memwin_info(sc, 2, &mw_base, &mw_aperture);
2537 off = position_memwin(sc, 2, addr);
2538 n = min(cflen, mw_aperture - off);
2539 for (i = 0; i < n; i += 4)
2540 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2545 use_config_on_flash:
2546 mtype = FW_MEMTYPE_FLASH;
2547 moff = t4_flash_cfg_addr(sc);
2550 bzero(&caps, sizeof(caps));
2551 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2552 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2553 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2554 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2555 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2556 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2558 device_printf(sc->dev,
2559 "failed to pre-process config file: %d "
2560 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2564 finicsum = be32toh(caps.finicsum);
2565 cfcsum = be32toh(caps.cfcsum);
2566 if (finicsum != cfcsum) {
2567 device_printf(sc->dev,
2568 "WARNING: config file checksum mismatch: %08x %08x\n",
2571 sc->cfcsum = cfcsum;
2573 #define LIMIT_CAPS(x) do { \
2574 caps.x &= htobe16(t4_##x##_allowed); \
2578 * Let the firmware know what features will (not) be used so it can tune
2579 * things accordingly.
2581 LIMIT_CAPS(linkcaps);
2582 LIMIT_CAPS(niccaps);
2583 LIMIT_CAPS(toecaps);
2584 LIMIT_CAPS(rdmacaps);
2585 LIMIT_CAPS(iscsicaps);
2586 LIMIT_CAPS(fcoecaps);
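/*
 * Each LIMIT_CAPS(x) above masks the firmware-advertised capability word
 * with the corresponding t4_<x>_allowed tunable, e.g.
 *     caps.toecaps &= htobe16(t4_toecaps_allowed);
 * so individual capabilities can be switched off administratively before
 * the caps are written back below.
 */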
2589 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2590 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2591 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2592 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2594 device_printf(sc->dev,
2595 "failed to process config file: %d.\n", rc);
2599 firmware_put(cfg, FIRMWARE_UNLOAD);
2604 * Retrieve parameters that are needed (or nice to have) very early.
2607 get_params__pre_init(struct adapter *sc)
2610 uint32_t param[2], val[2];
2611 struct fw_devlog_cmd cmd;
2612 struct devlog_params *dlog = &sc->params.devlog;
2614 param[0] = FW_PARAM_DEV(PORTVEC);
2615 param[1] = FW_PARAM_DEV(CCLK);
2616 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2618 device_printf(sc->dev,
2619 "failed to query parameters (pre_init): %d.\n", rc);
2623 sc->params.portvec = val[0];
2624 sc->params.nports = bitcount32(val[0]);
2625 sc->params.vpd.cclk = val[1];
2627 /* Read device log parameters. */
2628 bzero(&cmd, sizeof(cmd));
2629 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2630 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2631 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2632 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2634 device_printf(sc->dev,
2635 "failed to get devlog parameters: %d.\n", rc);
2636 bzero(dlog, sizeof (*dlog));
2637 rc = 0; /* devlog isn't critical for device operation */
2639 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2640 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2641 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2642 dlog->size = be32toh(cmd.memsize_devlog);
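/*
 * The devlog start address is reported in 16-byte units, hence the << 4
 * above to convert it to a byte offset within memory of type dlog->memtype
 * (the unit is inferred from the shift, not documented here).
 */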
2649 * Retrieve various parameters that are of interest to the driver. The device
2650 * has been initialized by the firmware at this point.
2653 get_params__post_init(struct adapter *sc)
2656 uint32_t param[7], val[7];
2657 struct fw_caps_config_cmd caps;
2659 param[0] = FW_PARAM_PFVF(IQFLINT_START);
2660 param[1] = FW_PARAM_PFVF(EQ_START);
2661 param[2] = FW_PARAM_PFVF(FILTER_START);
2662 param[3] = FW_PARAM_PFVF(FILTER_END);
2663 param[4] = FW_PARAM_PFVF(L2T_START);
2664 param[5] = FW_PARAM_PFVF(L2T_END);
2665 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2667 device_printf(sc->dev,
2668 "failed to query parameters (post_init): %d.\n", rc);
2672 sc->sge.iq_start = val[0];
2673 sc->sge.eq_start = val[1];
2674 sc->tids.ftid_base = val[2];
2675 sc->tids.nftids = val[3] - val[2] + 1;
2676 sc->params.ftid_min = val[2];
2677 sc->params.ftid_max = val[3];
2678 sc->vres.l2t.start = val[4];
2679 sc->vres.l2t.size = val[5] - val[4] + 1;
2680 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2681 ("%s: L2 table size (%u) larger than expected (%u)",
2682 __func__, sc->vres.l2t.size, L2T_SIZE));
2684 /* get capabilities */
2685 bzero(&caps, sizeof(caps));
2686 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2687 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2688 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2689 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2691 device_printf(sc->dev,
2692 "failed to get card capabilities: %d.\n", rc);
2696 #define READ_CAPS(x) do { \
2697 sc->x = htobe16(caps.x); \
2699 READ_CAPS(linkcaps);
2702 READ_CAPS(rdmacaps);
2703 READ_CAPS(iscsicaps);
2704 READ_CAPS(fcoecaps);
2706 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2707 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2708 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2709 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2710 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2712 device_printf(sc->dev,
2713 "failed to query NIC parameters: %d.\n", rc);
2716 sc->tids.etid_base = val[0];
2717 sc->params.etid_min = val[0];
2718 sc->tids.netids = val[1] - val[0] + 1;
2719 sc->params.netids = sc->tids.netids;
2720 sc->params.eo_wr_cred = val[2];
2721 sc->params.ethoffload = 1;
2725 /* query offload-related parameters */
2726 param[0] = FW_PARAM_DEV(NTID);
2727 param[1] = FW_PARAM_PFVF(SERVER_START);
2728 param[2] = FW_PARAM_PFVF(SERVER_END);
2729 param[3] = FW_PARAM_PFVF(TDDP_START);
2730 param[4] = FW_PARAM_PFVF(TDDP_END);
2731 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2732 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2734 device_printf(sc->dev,
2735 "failed to query TOE parameters: %d.\n", rc);
2738 sc->tids.ntids = val[0];
2739 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2740 sc->tids.stid_base = val[1];
2741 sc->tids.nstids = val[2] - val[1] + 1;
2742 sc->vres.ddp.start = val[3];
2743 sc->vres.ddp.size = val[4] - val[3] + 1;
2744 sc->params.ofldq_wr_cred = val[5];
2745 sc->params.offload = 1;
2748 param[0] = FW_PARAM_PFVF(STAG_START);
2749 param[1] = FW_PARAM_PFVF(STAG_END);
2750 param[2] = FW_PARAM_PFVF(RQ_START);
2751 param[3] = FW_PARAM_PFVF(RQ_END);
2752 param[4] = FW_PARAM_PFVF(PBL_START);
2753 param[5] = FW_PARAM_PFVF(PBL_END);
2754 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2756 device_printf(sc->dev,
2757 "failed to query RDMA parameters(1): %d.\n", rc);
2760 sc->vres.stag.start = val[0];
2761 sc->vres.stag.size = val[1] - val[0] + 1;
2762 sc->vres.rq.start = val[2];
2763 sc->vres.rq.size = val[3] - val[2] + 1;
2764 sc->vres.pbl.start = val[4];
2765 sc->vres.pbl.size = val[5] - val[4] + 1;
2767 param[0] = FW_PARAM_PFVF(SQRQ_START);
2768 param[1] = FW_PARAM_PFVF(SQRQ_END);
2769 param[2] = FW_PARAM_PFVF(CQ_START);
2770 param[3] = FW_PARAM_PFVF(CQ_END);
2771 param[4] = FW_PARAM_PFVF(OCQ_START);
2772 param[5] = FW_PARAM_PFVF(OCQ_END);
2773 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2775 device_printf(sc->dev,
2776 "failed to query RDMA parameters(2): %d.\n", rc);
2779 sc->vres.qp.start = val[0];
2780 sc->vres.qp.size = val[1] - val[0] + 1;
2781 sc->vres.cq.start = val[2];
2782 sc->vres.cq.size = val[3] - val[2] + 1;
2783 sc->vres.ocq.start = val[4];
2784 sc->vres.ocq.size = val[5] - val[4] + 1;
2786 if (sc->iscsicaps) {
2787 param[0] = FW_PARAM_PFVF(ISCSI_START);
2788 param[1] = FW_PARAM_PFVF(ISCSI_END);
2789 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2791 device_printf(sc->dev,
2792 "failed to query iSCSI parameters: %d.\n", rc);
2795 sc->vres.iscsi.start = val[0];
2796 sc->vres.iscsi.size = val[1] - val[0] + 1;
2800 * We've got the params we wanted to query via the firmware. Now grab
2801 * some others directly from the chip.
2803 rc = t4_read_chip_settings(sc);
2809 set_params__post_init(struct adapter *sc)
2811 uint32_t param, val;
2813 /* ask for encapsulated CPLs */
2814 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2816 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2821 #undef FW_PARAM_PFVF
2825 t4_set_desc(struct adapter *sc)
2828 struct adapter_params *p = &sc->params;
2830 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2831 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2832 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2834 device_set_desc_copy(sc->dev, buf);
2838 build_medialist(struct port_info *pi, struct ifmedia *media)
2844 ifmedia_removeall(media);
2846 m = IFM_ETHER | IFM_FDX;
2848 switch(pi->port_type) {
2849 case FW_PORT_TYPE_BT_XFI:
2850 case FW_PORT_TYPE_BT_XAUI:
2851 ifmedia_add(media, m | IFM_10G_T, 0, NULL);
2854 case FW_PORT_TYPE_BT_SGMII:
2855 ifmedia_add(media, m | IFM_1000_T, 0, NULL);
2856 ifmedia_add(media, m | IFM_100_TX, 0, NULL);
2857 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
2858 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2861 case FW_PORT_TYPE_CX4:
2862 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
2863 ifmedia_set(media, m | IFM_10G_CX4);
2866 case FW_PORT_TYPE_QSFP_10G:
2867 case FW_PORT_TYPE_SFP:
2868 case FW_PORT_TYPE_FIBER_XFI:
2869 case FW_PORT_TYPE_FIBER_XAUI:
2870 switch (pi->mod_type) {
2872 case FW_PORT_MOD_TYPE_LR:
2873 ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
2874 ifmedia_set(media, m | IFM_10G_LR);
2877 case FW_PORT_MOD_TYPE_SR:
2878 ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
2879 ifmedia_set(media, m | IFM_10G_SR);
2882 case FW_PORT_MOD_TYPE_LRM:
2883 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
2884 ifmedia_set(media, m | IFM_10G_LRM);
2887 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2888 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2889 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
2890 ifmedia_set(media, m | IFM_10G_TWINAX);
2893 case FW_PORT_MOD_TYPE_NONE:
2895 ifmedia_add(media, m | IFM_NONE, 0, NULL);
2896 ifmedia_set(media, m | IFM_NONE);
2899 case FW_PORT_MOD_TYPE_NA:
2900 case FW_PORT_MOD_TYPE_ER:
2902 device_printf(pi->dev,
2903 "unknown port_type (%d), mod_type (%d)\n",
2904 pi->port_type, pi->mod_type);
2905 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
2906 ifmedia_set(media, m | IFM_UNKNOWN);
2911 case FW_PORT_TYPE_QSFP:
2912 switch (pi->mod_type) {
2914 case FW_PORT_MOD_TYPE_LR:
2915 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
2916 ifmedia_set(media, m | IFM_40G_LR4);
2919 case FW_PORT_MOD_TYPE_SR:
2920 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
2921 ifmedia_set(media, m | IFM_40G_SR4);
2924 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2925 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2926 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
2927 ifmedia_set(media, m | IFM_40G_CR4);
2930 case FW_PORT_MOD_TYPE_NONE:
2932 ifmedia_add(media, m | IFM_NONE, 0, NULL);
2933 ifmedia_set(media, m | IFM_NONE);
2937 device_printf(pi->dev,
2938 "unknown port_type (%d), mod_type (%d)\n",
2939 pi->port_type, pi->mod_type);
2940 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
2941 ifmedia_set(media, m | IFM_UNKNOWN);
2947 device_printf(pi->dev,
2948 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2950 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
2951 ifmedia_set(media, m | IFM_UNKNOWN);
2958 #define FW_MAC_EXACT_CHUNK 7
2961 * Program the port's XGMAC based on parameters in ifnet. The caller also
2962 * indicates which parameters should be programmed (the rest are left alone).
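 *
 * Illustrative call (not a caller in this file): after IFF_PROMISC is
 * toggled, an ioctl handler could do
 *     rc = update_mac_settings(ifp, XGMAC_PROMISC);
 * and the MTU, VLAN extraction, and MAC filters would be left untouched.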
2965 update_mac_settings(struct ifnet *ifp, int flags)
2968 struct port_info *pi = ifp->if_softc;
2969 struct adapter *sc = pi->adapter;
2970 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2971 uint16_t viid = 0xffff;
2972 int16_t *xact_addr_filt = NULL;
2974 ASSERT_SYNCHRONIZED_OP(sc);
2975 KASSERT(flags, ("%s: not told what to update.", __func__));
2977 if (ifp == pi->ifp) {
2979 xact_addr_filt = &pi->xact_addr_filt;
2982 else if (ifp == pi->nm_ifp) {
2984 xact_addr_filt = &pi->nm_xact_addr_filt;
2987 if (flags & XGMAC_MTU)
2990 if (flags & XGMAC_PROMISC)
2991 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2993 if (flags & XGMAC_ALLMULTI)
2994 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2996 if (flags & XGMAC_VLANEX)
2997 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2999 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3000 rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
3003 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3009 if (flags & XGMAC_UCADDR) {
3010 uint8_t ucaddr[ETHER_ADDR_LEN];
3012 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3013 rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
3017 if_printf(ifp, "change_mac failed: %d\n", rc);
3020 *xact_addr_filt = rc;
3025 if (flags & XGMAC_MCADDRS) {
3026 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3029 struct ifmultiaddr *ifma;
3032 if_maddr_rlock(ifp);
3033 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3034 if (ifma->ifma_addr->sa_family != AF_LINK)
3037 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3038 MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3041 if (i == FW_MAC_EXACT_CHUNK) {
3042 rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
3043 i, mcaddr, NULL, &hash, 0);
3046 for (j = 0; j < i; j++) {
3048 "failed to add mc address"
3050 "%02x:%02x:%02x rc=%d\n",
3051 mcaddr[j][0], mcaddr[j][1],
3052 mcaddr[j][2], mcaddr[j][3],
3053 mcaddr[j][4], mcaddr[j][5],
3063 rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
3064 mcaddr, NULL, &hash, 0);
3067 for (j = 0; j < i; j++) {
3069 "failed to add mc address"
3071 "%02x:%02x:%02x rc=%d\n",
3072 mcaddr[j][0], mcaddr[j][1],
3073 mcaddr[j][2], mcaddr[j][3],
3074 mcaddr[j][4], mcaddr[j][5],
3081 rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
3083 if_printf(ifp, "failed to set mc address hash: %d", rc);
3085 if_maddr_runlock(ifp);
3092 * {begin|end}_synchronized_op must be called from the same thread.
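 *
 * Typical caller pattern (sketch only; "t4xyz" is a made-up wmesg):
 *     rc = begin_synchronized_op(sc, pi, SLEEP_OK, "t4xyz");
 *     if (rc)
 *         return (rc);
 *     ... operate on the adapter/port ...
 *     end_synchronized_op(sc, 0);
 * HOLD_LOCK in begin and LOCK_HELD in end are used when the caller wants to
 * keep the adapter lock across the two calls.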
3095 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
3101 /* the caller thinks it's ok to sleep, but is it really? */
3102 if (flags & SLEEP_OK)
3103 pause("t4slptst", 1);
3114 if (pi && IS_DOOMED(pi)) {
3124 if (!(flags & SLEEP_OK)) {
3129 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3135 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3138 sc->last_op = wmesg;
3139 sc->last_op_thr = curthread;
3140 sc->last_op_flags = flags;
3144 if (!(flags & HOLD_LOCK) || rc)
3151 * {begin|end}_synchronized_op must be called from the same thread.
3154 end_synchronized_op(struct adapter *sc, int flags)
3157 if (flags & LOCK_HELD)
3158 ADAPTER_LOCK_ASSERT_OWNED(sc);
3162 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3169 cxgbe_init_synchronized(struct port_info *pi)
3171 struct adapter *sc = pi->adapter;
3172 struct ifnet *ifp = pi->ifp;
3174 struct sge_txq *txq;
3176 ASSERT_SYNCHRONIZED_OP(sc);
3178 if (isset(&sc->open_device_map, pi->port_id)) {
3179 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
3180 ("mismatch between open_device_map and if_drv_flags"));
3181 return (0); /* already running */
3184 if (!(sc->flags & FULL_INIT_DONE) &&
3185 ((rc = adapter_full_init(sc)) != 0))
3186 return (rc); /* error message displayed already */
3188 if (!(pi->flags & PORT_INIT_DONE) &&
3189 ((rc = port_full_init(pi)) != 0))
3190 return (rc); /* error message displayed already */
3192 rc = update_mac_settings(ifp, XGMAC_ALL);
3194 goto done; /* error message displayed already */
3196 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
3198 if_printf(ifp, "enable_vi failed: %d\n", rc);
3203 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized
3207 for_each_txq(pi, i, txq) {
3209 txq->eq.flags |= EQ_ENABLED;
3214 * The first iq of the first port to come up is used for tracing.
3216 if (sc->traceq < 0) {
3217 sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
3218 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
3219 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
3220 V_QUEUENUMBER(sc->traceq));
3221 pi->flags |= HAS_TRACEQ;
3225 setbit(&sc->open_device_map, pi->port_id);
3227 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3230 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
3233 cxgbe_uninit_synchronized(pi);
3242 cxgbe_uninit_synchronized(struct port_info *pi)
3244 struct adapter *sc = pi->adapter;
3245 struct ifnet *ifp = pi->ifp;
3247 struct sge_txq *txq;
3249 ASSERT_SYNCHRONIZED_OP(sc);
3251 if (!(pi->flags & PORT_INIT_DONE)) {
3252 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
3253 ("uninited port is running"));
3258 * Disable the VI so that all its data in either direction is discarded
3259 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
3260 * tick) intact as the TP can deliver negative advice or data that it's
3261 * holding in its RAM (for an offloaded connection) even after the VI is disabled.
3264 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
3266 if_printf(ifp, "disable_vi failed: %d\n", rc);
3270 for_each_txq(pi, i, txq) {
3272 txq->eq.flags &= ~EQ_ENABLED;
3276 clrbit(&sc->open_device_map, pi->port_id);
3278 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3281 pi->link_cfg.link_ok = 0;
3282 pi->link_cfg.speed = 0;
3284 t4_os_link_changed(sc, pi->port_id, 0, -1);
3290 * It is ok for this function to fail midway and return right away. t4_detach
3291 * will walk the entire sc->irq list and clean up whatever is valid.
3294 setup_intr_handlers(struct adapter *sc)
3299 struct port_info *pi;
3300 struct sge_rxq *rxq;
3302 struct sge_ofld_rxq *ofld_rxq;
3305 struct sge_nm_rxq *nm_rxq;
3312 rid = sc->intr_type == INTR_INTX ? 0 : 1;
3313 if (sc->intr_count == 1)
3314 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
3316 /* Multiple interrupts. */
3317 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3318 ("%s: too few intr.", __func__));
3320 /* The first one is always error intr */
3321 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3327 /* The second one is always the firmware event queue */
3328 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
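/*
 * With multiple vectors the resulting layout is: vector 0 for adapter-wide
 * errors, vector 1 for the firmware event queue, then one vector per rx queue
 * (NIC, offload, or netmap) across the ports, described as "p.q", "p,q" and
 * "p-q" respectively by the snprintf()s below.
 */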
3334 for_each_port(sc, p) {
3337 if (pi->flags & INTR_RXQ) {
3338 for_each_rxq(pi, q, rxq) {
3339 snprintf(s, sizeof(s), "%d.%d", p, q);
3340 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3349 if (pi->flags & INTR_OFLD_RXQ) {
3350 for_each_ofld_rxq(pi, q, ofld_rxq) {
3351 snprintf(s, sizeof(s), "%d,%d", p, q);
3352 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3362 if (pi->flags & INTR_NM_RXQ) {
3363 for_each_nm_rxq(pi, q, nm_rxq) {
3364 snprintf(s, sizeof(s), "%d-%d", p, q);
3365 rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr,
3375 MPASS(irq == &sc->irq[sc->intr_count]);
3381 adapter_full_init(struct adapter *sc)
3385 ASSERT_SYNCHRONIZED_OP(sc);
3386 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3387 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3388 ("%s: FULL_INIT_DONE already", __func__));
3391 * queues that belong to the adapter (not any particular port).
3393 rc = t4_setup_adapter_queues(sc);
3397 for (i = 0; i < nitems(sc->tq); i++) {
3398 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3399 taskqueue_thread_enqueue, &sc->tq[i]);
3400 if (sc->tq[i] == NULL) {
3401 device_printf(sc->dev,
3402 "failed to allocate task queue %d\n", i);
3406 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3407 device_get_nameunit(sc->dev), i);
3411 sc->flags |= FULL_INIT_DONE;
3414 adapter_full_uninit(sc);
3420 adapter_full_uninit(struct adapter *sc)
3424 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3426 t4_teardown_adapter_queues(sc);
3428 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3429 taskqueue_free(sc->tq[i]);
3433 sc->flags &= ~FULL_INIT_DONE;
3439 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
3440 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
3441 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
3442 RSS_HASHTYPE_RSS_UDP_IPV6)
3444 /* Translates kernel hash types to hardware. */
3446 hashconfig_to_hashen(int hashconfig)
3450 if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
3451 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
3452 if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
3453 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
3454 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
3455 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3456 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3458 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
3459 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
3460 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3462 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
3463 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
3464 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
3465 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
3470 /* Translates hardware hash types to kernel. */
3472 hashen_to_hashconfig(int hashen)
3476 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
3478 * If UDP hashing was enabled it must have been enabled for
3479 * either IPv4 or IPv6 (inclusive or). Enabling UDP without
3480 * enabling any 4-tuple hash is a nonsensical configuration.
3482 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3483 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
3485 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3486 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
3487 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3488 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
3490 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3491 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
3492 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3493 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
3494 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3495 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
3496 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3497 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
3499 return (hashconfig);
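/*
 * The two translations above are not inverses.  For example,
 * hashconfig_to_hashen(RSS_HASHTYPE_RSS_UDP_IPV4) enables UDPEN and
 * IP4FOURTUPEN, and mapping that back with hashen_to_hashconfig() yields
 * RSS_HASHTYPE_RSS_UDP_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4 -- the TCP/IPv4 bit
 * is exactly the kind of "extra" hash that port_full_init() warns about.
 */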
3504 port_full_init(struct port_info *pi)
3506 struct adapter *sc = pi->adapter;
3507 struct ifnet *ifp = pi->ifp;
3509 struct sge_rxq *rxq;
3510 int rc, i, j, hashen;
3512 int nbuckets = rss_getnumbuckets();
3513 int hashconfig = rss_gethashconfig();
3515 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3516 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3519 ASSERT_SYNCHRONIZED_OP(sc);
3520 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3521 ("%s: PORT_INIT_DONE already", __func__));
3523 sysctl_ctx_init(&pi->ctx);
3524 pi->flags |= PORT_SYSCTL_CTX;
3527 * Allocate tx/rx/fl queues for this port.
3529 rc = t4_setup_port_queues(pi);
3531 goto done; /* error message displayed already */
3534 * Setup RSS for this port. Save a copy of the RSS table for later use.
3536 if (pi->nrxq > pi->rss_size) {
3537 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
3538 "some queues will never receive traffic.\n", pi->nrxq,
3540 } else if (pi->rss_size % pi->nrxq) {
3541 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
3542 "expect uneven traffic distribution.\n", pi->nrxq,
3546 MPASS(RSS_KEYSIZE == 40);
3547 if (pi->nrxq != nbuckets) {
3548 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
3549 "performance will be impacted.\n", pi->nrxq, nbuckets);
3552 rss_getkey((void *)&raw_rss_key[0]);
3553 for (i = 0; i < nitems(rss_key); i++) {
3554 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
3556 t4_write_rss_key(sc, (void *)&rss_key[0], -1);
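/*
 * Note on the loop above: the kernel's RSS key is byte-swapped and written in
 * reverse 32-bit word order before being handed to t4_write_rss_key(); this
 * appears to be the layout the hardware expects for the 40-byte secret key
 * (inferred from the index arithmetic, not documented here).
 */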
3558 rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3559 for (i = 0; i < pi->rss_size;) {
3561 j = rss_get_indirection_to_bucket(i);
3563 rxq = &sc->sge.rxq[pi->first_rxq + j];
3564 rss[i++] = rxq->iq.abs_id;
3566 for_each_rxq(pi, j, rxq) {
3567 rss[i++] = rxq->iq.abs_id;
3568 if (i == pi->rss_size)
3574 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3577 if_printf(ifp, "rss_config failed: %d\n", rc);
3582 hashen = hashconfig_to_hashen(hashconfig);
3585 * We may have had to enable some hashes even though the global config
3586 * wants them disabled. This is a potential problem that must be
3587 * reported to the user.
3589 extra = hashen_to_hashconfig(hashen) ^ hashconfig;
3592 * If we consider only the supported hash types, then the enabled hashes
3593 * are a superset of the requested hashes. In other words, there cannot
3594 * be any supported hash that was requested but not enabled, but there
3595 * can be hashes that were not requested but had to be enabled.
3597 extra &= SUPPORTED_RSS_HASHTYPES;
3598 MPASS((extra & hashconfig) == 0);
3602 "global RSS config (0x%x) cannot be accomodated.\n",
3605 if (extra & RSS_HASHTYPE_RSS_IPV4)
3606 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
3607 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
3608 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
3609 if (extra & RSS_HASHTYPE_RSS_IPV6)
3610 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
3611 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
3612 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
3613 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
3614 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
3615 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
3616 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
3618 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
3619 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
3620 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
3621 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
3623 rc = -t4_config_vi_rss(sc, sc->mbox, pi->viid, hashen, rss[0]);
3625 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
3630 pi->flags |= PORT_INIT_DONE;
3633 port_full_uninit(pi);
3642 port_full_uninit(struct port_info *pi)
3644 struct adapter *sc = pi->adapter;
3646 struct sge_rxq *rxq;
3647 struct sge_txq *txq;
3649 struct sge_ofld_rxq *ofld_rxq;
3650 struct sge_wrq *ofld_txq;
3653 if (pi->flags & PORT_INIT_DONE) {
3655 /* Need to quiesce queues. */
3657 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
3659 for_each_txq(pi, i, txq) {
3660 quiesce_txq(sc, txq);
3664 for_each_ofld_txq(pi, i, ofld_txq) {
3665 quiesce_wrq(sc, ofld_txq);
3669 for_each_rxq(pi, i, rxq) {
3670 quiesce_iq(sc, &rxq->iq);
3671 quiesce_fl(sc, &rxq->fl);
3675 for_each_ofld_rxq(pi, i, ofld_rxq) {
3676 quiesce_iq(sc, &ofld_rxq->iq);
3677 quiesce_fl(sc, &ofld_rxq->fl);
3680 free(pi->rss, M_CXGBE);
3683 t4_teardown_port_queues(pi);
3684 pi->flags &= ~PORT_INIT_DONE;
3690 quiesce_txq(struct adapter *sc, struct sge_txq *txq)
3692 struct sge_eq *eq = &txq->eq;
3693 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
3695 (void) sc; /* unused */
3699 MPASS((eq->flags & EQ_ENABLED) == 0);
3703 /* Wait for the mp_ring to empty. */
3704 while (!mp_ring_is_idle(txq->r)) {
3705 mp_ring_check_drainage(txq->r, 0);
3706 pause("rquiesce", 1);
3709 /* Then wait for the hardware to finish. */
3710 while (spg->cidx != htobe16(eq->pidx))
3711 pause("equiesce", 1);
3713 /* Finally, wait for the driver to reclaim all descriptors. */
3714 while (eq->cidx != eq->pidx)
3715 pause("dquiesce", 1);
3719 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
3726 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3728 (void) sc; /* unused */
3730 /* Synchronize with the interrupt handler */
3731 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3736 quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3738 mtx_lock(&sc->sfl_lock);
3740 fl->flags |= FL_DOOMED;
3742 mtx_unlock(&sc->sfl_lock);
3744 callout_drain(&sc->sfl_callout);
3745 KASSERT((fl->flags & FL_STARVING) == 0,
3746 ("%s: still starving", __func__));
3750 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3751 driver_intr_t *handler, void *arg, char *name)
3756 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3757 RF_SHAREABLE | RF_ACTIVE);
3758 if (irq->res == NULL) {
3759 device_printf(sc->dev,
3760 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3764 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3765 NULL, handler, arg, &irq->tag);
3767 device_printf(sc->dev,
3768 "failed to setup interrupt for rid %d, name %s: %d\n",
3771 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3777 t4_free_irq(struct adapter *sc, struct irq *irq)
3780 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3782 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3784 bzero(irq, sizeof(*irq));
3790 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3793 uint32_t *p = (uint32_t *)(buf + start);
3795 for ( ; start <= end; start += sizeof(uint32_t))
3796 *p++ = t4_read_reg(sc, start);
3800 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3803 const unsigned int *reg_ranges;
3804 static const unsigned int t4_reg_ranges[] = {
4024 static const unsigned int t5_reg_ranges[] = {
4465 reg_ranges = &t4_reg_ranges[0];
4466 n = nitems(t4_reg_ranges);
4468 reg_ranges = &t5_reg_ranges[0];
4469 n = nitems(t5_reg_ranges);
4472 regs->version = chip_id(sc) | chip_rev(sc) << 10;
4473 for (i = 0; i < n; i += 2)
4474 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4478 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
4480 struct ifnet *ifp = pi->ifp;
4481 struct sge_txq *txq;
4483 struct port_stats *s = &pi->stats;
4485 const struct timeval interval = {0, 250000}; /* 250ms */
4488 timevalsub(&tv, &interval);
4489 if (timevalcmp(&tv, &pi->last_refreshed, <))
4492 t4_get_port_stats(sc, pi->tx_chan, s);
4494 ifp->if_opackets = s->tx_frames - s->tx_pause;
4495 ifp->if_ipackets = s->rx_frames - s->rx_pause;
4496 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4497 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4498 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4499 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4500 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4501 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4503 for (i = 0; i < NCHAN; i++) {
4504 if (pi->rx_chan_map & (1 << i)) {
4507 mtx_lock(&sc->regwin_lock);
4508 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4509 1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4510 mtx_unlock(&sc->regwin_lock);
4511 ifp->if_iqdrops += v;
4516 for_each_txq(pi, i, txq)
4517 drops += counter_u64_fetch(txq->r->drops);
4518 ifp->if_snd.ifq_drops = drops;
4520 ifp->if_oerrors = s->tx_error_frames;
4521 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4522 s->rx_fcs_err + s->rx_len_err;
4524 getmicrotime(&pi->last_refreshed);
4528 cxgbe_tick(void *arg)
4530 struct port_info *pi = arg;
4531 struct adapter *sc = pi->adapter;
4532 struct ifnet *ifp = pi->ifp;
4535 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4537 return; /* without scheduling another callout */
4540 cxgbe_refresh_stats(sc, pi);
4542 callout_schedule(&pi->tick, hz);
4547 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4551 if (arg != ifp || ifp->if_type != IFT_ETHER)
4554 vlan = VLAN_DEVAT(ifp, vid);
4555 VLAN_SETCOOKIE(vlan, ifp);
4559 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4563 panic("%s: opcode 0x%02x on iq %p with payload %p",
4564 __func__, rss->opcode, iq, m);
4566 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4567 __func__, rss->opcode, iq, m);
4574 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4576 uintptr_t *loc, new;
4578 if (opcode >= nitems(sc->cpl_handler))
4581 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4582 loc = (uintptr_t *) &sc->cpl_handler[opcode];
4583 atomic_store_rel_ptr(loc, new);
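/*
 * The table slot is updated with a single atomic release store, so a
 * concurrent dispatcher sees either the old handler or the new one, never a
 * torn pointer; passing h == NULL restores the default cpl_not_handled.
 * Illustrative registration (hypothetical opcode and handler names):
 *     t4_register_cpl_handler(sc, CPL_SOME_OPCODE, my_cpl_handler);
 */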
4589 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4593 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4595 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4596 __func__, iq, ctrl);
4602 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4604 uintptr_t *loc, new;
4606 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4607 loc = (uintptr_t *) &sc->an_handler;
4608 atomic_store_rel_ptr(loc, new);
4614 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4616 const struct cpl_fw6_msg *cpl =
4617 __containerof(rpl, struct cpl_fw6_msg, data[0]);
4620 panic("%s: fw_msg type %d", __func__, cpl->type);
4622 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4628 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4630 uintptr_t *loc, new;
4632 if (type >= nitems(sc->fw_msg_handler))
4636 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4637 * handler dispatch table. Reject any attempt to install a handler for these types.
4640 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4643 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4644 loc = (uintptr_t *) &sc->fw_msg_handler[type];
4645 atomic_store_rel_ptr(loc, new);
4651 t4_sysctls(struct adapter *sc)
4653 struct sysctl_ctx_list *ctx;
4654 struct sysctl_oid *oid;
4655 struct sysctl_oid_list *children, *c0;
4656 static char *caps[] = {
4657 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
4658 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL" /* caps[1] niccaps */
4659 "\6HASHFILTER\7ETHOFLD",
4660 "\20\1TOE", /* caps[2] toecaps */
4661 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
4662 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
4663 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4664 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4665 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
4666 "\4PO_INITIAOR\5PO_TARGET"
4668 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
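/*
 * The strings above follow the kernel's "%b" bit-name notation: the leading
 * \20 selects hexadecimal output and each subsequent \<n> is a 1-based bit
 * number followed by that bit's name.  For example, a doorbells value of 0x5
 * would be described via sysctl_bitfield as bits UDB and UDBWC being set.
 */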
4670 ctx = device_get_sysctl_ctx(sc->dev);
4675 oid = device_get_sysctl_tree(sc->dev);
4676 c0 = children = SYSCTL_CHILDREN(oid);
4678 sc->sc_do_rxcopy = 1;
4679 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4680 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4682 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4683 sc->params.nports, "# of ports");
4685 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4686 NULL, chip_rev(sc), "chip hardware revision");
4688 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4689 CTLFLAG_RD, sc->fw_version, 0, "firmware version");
4691 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4692 CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
4694 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4695 sc->cfcsum, "config file checksum");
4697 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4698 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4699 sysctl_bitfield, "A", "available doorbells");
4701 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4702 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4703 sysctl_bitfield, "A", "available link capabilities");
4705 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4706 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4707 sysctl_bitfield, "A", "available NIC capabilities");
4709 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4710 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4711 sysctl_bitfield, "A", "available TCP offload capabilities");
4713 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4714 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4715 sysctl_bitfield, "A", "available RDMA capabilities");
4717 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4718 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4719 sysctl_bitfield, "A", "available iSCSI capabilities");
4721 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4722 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4723 sysctl_bitfield, "A", "available FCoE capabilities");
4725 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4726 sc->params.vpd.cclk, "core clock frequency (in kHz)");
4728 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4729 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4730 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4731 "interrupt holdoff timer values (us)");
4733 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4734 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4735 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4736 "interrupt holdoff packet counter values");
4738 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4739 NULL, sc->tids.nftids, "number of filters");
4741 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4742 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4743 "chip temperature (in Celsius)");
4745 t4_sge_sysctls(sc, ctx, children);
4747 sc->lro_timeout = 100;
4748 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4749 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4751 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW,
4752 &sc->debug_flags, 0, "flags to enable runtime debugging");
4756 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
4758 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4759 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4760 "logs and miscellaneous information");
4761 children = SYSCTL_CHILDREN(oid);
4763 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4764 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4765 sysctl_cctrl, "A", "congestion control");
4767 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4768 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4769 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4771 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4772 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4773 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4775 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4776 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4777 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4779 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4780 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4781 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4783 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4784 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4785 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4787 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4788 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4789 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4791 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4792 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4793 sysctl_cim_la, "A", "CIM logic analyzer");
4795 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4796 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4797 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4799 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4800 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4801 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4803 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4804 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4805 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4807 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4808 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4809 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4811 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4812 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4813 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4816 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4817 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4820 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4821 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4824 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4825 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4826 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4828 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4829 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4830 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4833 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4834 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4835 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4837 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4838 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4839 sysctl_cim_qcfg, "A", "CIM queue configuration");
4841 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4842 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4843 sysctl_cpl_stats, "A", "CPL statistics");
4845 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4846 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4847 sysctl_ddp_stats, "A", "non-TCP DDP statistics");
4849 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4850 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4851 sysctl_devlog, "A", "firmware's device log");
4853 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4854 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4855 sysctl_fcoe_stats, "A", "FCoE statistics");
4857 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4858 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4859 sysctl_hw_sched, "A", "hardware scheduler");
4861 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4862 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4863 sysctl_l2t, "A", "hardware L2 table");
4865 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4866 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4867 sysctl_lb_stats, "A", "loopback statistics");
4869 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4870 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4871 sysctl_meminfo, "A", "memory regions");
4873 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4874 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4875 sysctl_mps_tcam, "A", "MPS TCAM entries");
4877 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4878 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4879 sysctl_path_mtus, "A", "path MTUs");
4881 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4882 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4883 sysctl_pm_stats, "A", "PM statistics");
4885 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4886 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4887 sysctl_rdma_stats, "A", "RDMA statistics");
4889 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4890 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4891 sysctl_tcp_stats, "A", "TCP statistics");
4893 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4894 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4895 sysctl_tids, "A", "TID information");
4897 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4898 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4899 sysctl_tp_err_stats, "A", "TP error statistics");
4901 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4902 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4903 sysctl_tp_la, "A", "TP logic analyzer");
4905 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4906 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4907 sysctl_tx_rate, "A", "Tx rate");
4909 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4910 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4911 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4914 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4915 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4916 sysctl_wcwr_stats, "A", "write combined work requests");
4921 if (is_offload(sc)) {
4925 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4926 NULL, "TOE parameters");
4927 children = SYSCTL_CHILDREN(oid);
4929 sc->tt.sndbuf = 256 * 1024;
4930 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4931 &sc->tt.sndbuf, 0, "max hardware send buffer size");
4934 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4935 &sc->tt.ddp, 0, "DDP allowed");
4937 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4938 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4939 &sc->tt.indsz, 0, "DDP max indicate size allowed");
4942 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4943 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4944 &sc->tt.ddp_thres, 0, "DDP threshold");
4946 sc->tt.rx_coalesce = 1;
4947 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4948 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4950 sc->tt.tx_align = 1;
4951 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
4952 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
4961 cxgbe_sysctls(struct port_info *pi)
4963 struct sysctl_ctx_list *ctx;
4964 struct sysctl_oid *oid;
4965 struct sysctl_oid_list *children;
4966 struct adapter *sc = pi->adapter;
4968 ctx = device_get_sysctl_ctx(pi->dev);
4973 oid = device_get_sysctl_tree(pi->dev);
4974 children = SYSCTL_CHILDREN(oid);
4976 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4977 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4978 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4979 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4980 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4981 "PHY temperature (in Celsius)");
4982 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4983 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4984 "PHY firmware version");
4986 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4987 &pi->nrxq, 0, "# of rx queues");
4988 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4989 &pi->ntxq, 0, "# of tx queues");
4990 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4991 &pi->first_rxq, 0, "index of first rx queue");
4992 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4993 &pi->first_txq, 0, "index of first tx queue");
4994 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
4995 CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
4996 "Reserve queue 0 for non-flowid packets");
4999 if (is_offload(sc)) {
5000 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
5002 "# of rx queues for offloaded TCP connections");
5003 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
5005 "# of tx queues for offloaded TCP connections");
5006 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
5007 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
5008 "index of first TOE rx queue");
5009 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
5010 CTLFLAG_RD, &pi->first_ofld_txq, 0,
5011 "index of first TOE tx queue");
5015 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
5016 &pi->nnmrxq, 0, "# of rx queues for netmap");
5017 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
5018 &pi->nnmtxq, 0, "# of tx queues for netmap");
5019 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
5020 CTLFLAG_RD, &pi->first_nm_rxq, 0,
5021 "index of first netmap rx queue");
5022 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
5023 CTLFLAG_RD, &pi->first_nm_txq, 0,
5024 "index of first netmap tx queue");
5027 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
5028 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
5029 "holdoff timer index");
5030 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
5031 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
5032 "holdoff packet counter index");
5034 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
5035 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
5037 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
5038 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
5041 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5042 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
5043 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5046 * dev.cxgbe.X.stats.
5048 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5049 NULL, "port statistics");
5050 children = SYSCTL_CHILDREN(oid);
5051 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5052 &pi->tx_parse_error, 0,
5053 "# of tx packets with invalid length or # of segments");
5055 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5056 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5057 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5058 sysctl_handle_t4_reg64, "QU", desc)
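/*
 * The macro above registers a read-only 64-bit ("QU") sysctl whose arg2 is a
 * MAC statistics register address; sysctl_handle_t4_reg64 is expected to read
 * that register when the node is queried.  The first use below, for instance,
 * ties "tx_octets" to A_MPS_PORT_STAT_TX_PORT_BYTES_L for this port's tx
 * channel.
 */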
5060 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5061 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5062 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5063 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5064 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5065 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5066 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5067 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5068 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5069 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5070 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5071 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5072 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5073 "# of tx frames in this range",
5074 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5075 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5076 "# of tx frames in this range",
5077 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5078 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5079 "# of tx frames in this range",
5080 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5081 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5082 "# of tx frames in this range",
5083 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5084 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5085 "# of tx frames in this range",
5086 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5087 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5088 "# of tx frames in this range",
5089 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5090 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5091 "# of tx frames in this range",
5092 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5093 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5094 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5095 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5096 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5097 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5098 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5099 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5100 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5101 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5102 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5103 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5104 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5105 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5106 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5107 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5108 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5109 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5110 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5111 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5112 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5114 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5115 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5116 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5117 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5118 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5119 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5120 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5121 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5122 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5123 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5124 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5125 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5126 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5127 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5128 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5129 "# of frames received with bad FCS",
5130 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5131 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5132 "# of frames received with length error",
5133 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5134 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5135 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5136 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5137 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5138 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5139 "# of rx frames in this range",
5140 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5141 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5142 "# of rx frames in this range",
5143 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5144 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5145 "# of rx frames in this range",
5146 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5147 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5148 "# of rx frames in this range",
5149 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5150 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5151 "# of rx frames in this range",
5152 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5153 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5154 "# of rx frames in this range",
5155 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5156 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5157 "# of rx frames in this range",
5158 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5159 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5160 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5161 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5162 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5163 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5164 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5165 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5166 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5167 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5168 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5169 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5170 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5171 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5172 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5173 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5174 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5175 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5176 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5178 #undef SYSCTL_ADD_T4_REG64
5180 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5181 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5182 &pi->stats.name, desc)
5184 /* We get these from port_stats and they may be stale by up to 1s */
5185 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5186 "# drops due to buffer-group 0 overflows");
5187 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5188 "# drops due to buffer-group 1 overflows");
5189 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5190 "# drops due to buffer-group 2 overflows");
5191 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5192 "# drops due to buffer-group 3 overflows");
5193 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5194 "# of buffer-group 0 truncated packets");
5195 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5196 "# of buffer-group 1 truncated packets");
5197 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5198 "# of buffer-group 2 truncated packets");
5199 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5200 "# of buffer-group 3 truncated packets");
5202 #undef SYSCTL_ADD_T4_PORTSTAT
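/*
 * sysctl handlers.  sysctl_int_array formats the array of ints at arg1
 * (arg2 bytes long) as a single space-separated string.
 */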
5208 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5210 int rc, *i, space = 0;
5213 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
5214 for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5216 sbuf_printf(&sb, " ");
5217 sbuf_printf(&sb, "%d", *i);
5221 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
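/* Display the bitfield in arg2 using the "%b" format string passed in arg1. */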
5227 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5232 rc = sysctl_wire_old_buffer(req, 0);
5236 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5240 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5241 rc = sbuf_finish(sb);
5248 sysctl_btphy(SYSCTL_HANDLER_ARGS)
5250 struct port_info *pi = arg1;
5252 struct adapter *sc = pi->adapter;
5256 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
5259 /* XXX: magic numbers */
5260 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
5262 end_synchronized_op(sc, 0);
5268 rc = sysctl_handle_int(oidp, &v, 0, req);
5273 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5275 struct port_info *pi = arg1;
5278 val = pi->rsrv_noflowq;
5279 rc = sysctl_handle_int(oidp, &val, 0, req);
5280 if (rc != 0 || req->newptr == NULL)
5283 if ((val >= 1) && (pi->ntxq > 1))
5284 pi->rsrv_noflowq = 1;
5286 pi->rsrv_noflowq = 0;
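/*
 * Writes to this node update the interrupt holdoff timer index of every rx
 * queue (and offload rx queue) of the port; the packet counter enable bit is
 * set according to pi->pktc_idx.
 */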
5292 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
5294 struct port_info *pi = arg1;
5295 struct adapter *sc = pi->adapter;
5297 struct sge_rxq *rxq;
5299 struct sge_ofld_rxq *ofld_rxq;
5305 rc = sysctl_handle_int(oidp, &idx, 0, req);
5306 if (rc != 0 || req->newptr == NULL)
5309 if (idx < 0 || idx >= SGE_NTIMERS)
5312 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5317 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
5318 for_each_rxq(pi, i, rxq) {
5319 #ifdef atomic_store_rel_8
5320 atomic_store_rel_8(&rxq->iq.intr_params, v);
5322 rxq->iq.intr_params = v;
5326 for_each_ofld_rxq(pi, i, ofld_rxq) {
5327 #ifdef atomic_store_rel_8
5328 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
5330 ofld_rxq->iq.intr_params = v;
5336 end_synchronized_op(sc, LOCK_HELD);
5341 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5343 struct port_info *pi = arg1;
5344 struct adapter *sc = pi->adapter;
5349 rc = sysctl_handle_int(oidp, &idx, 0, req);
5350 if (rc != 0 || req->newptr == NULL)
5353 if (idx < -1 || idx >= SGE_NCOUNTERS)
5356 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5361 if (pi->flags & PORT_INIT_DONE)
5362 rc = EBUSY; /* cannot be changed once the queues are created */
5366 end_synchronized_op(sc, LOCK_HELD);
5371 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5373 struct port_info *pi = arg1;
5374 struct adapter *sc = pi->adapter;
5377 qsize = pi->qsize_rxq;
5379 rc = sysctl_handle_int(oidp, &qsize, 0, req);
5380 if (rc != 0 || req->newptr == NULL)
5383 if (qsize < 128 || (qsize & 7))
5386 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5391 if (pi->flags & PORT_INIT_DONE)
5392 rc = EBUSY; /* cannot be changed once the queues are created */
5394 pi->qsize_rxq = qsize;
5396 end_synchronized_op(sc, LOCK_HELD);
5401 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5403 struct port_info *pi = arg1;
5404 struct adapter *sc = pi->adapter;
5407 qsize = pi->qsize_txq;
5409 rc = sysctl_handle_int(oidp, &qsize, 0, req);
5410 if (rc != 0 || req->newptr == NULL)
5413 if (qsize < 128 || qsize > 65536)
5416 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5421 if (pi->flags & PORT_INIT_DONE)
5422 rc = EBUSY; /* cannot be changed once the queues are created */
5424 pi->qsize_txq = qsize;
5426 end_synchronized_op(sc, LOCK_HELD);
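/*
 * PAUSE settings.  Reads report the current link flow control as a "%b"
 * string; writes take a single digit 0-3 (bit 0 = rx_pause, bit 1 = tx_pause)
 * and restart the link with the new requested_fc.
 */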
5431 sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
5433 struct port_info *pi = arg1;
5434 struct adapter *sc = pi->adapter;
5435 struct link_config *lc = &pi->link_cfg;
5438 if (req->newptr == NULL) {
5440 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
5442 rc = sysctl_wire_old_buffer(req, 0);
5446 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5450 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
5451 rc = sbuf_finish(sb);
5457 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
5460 rc = sysctl_handle_string(oidp, s, sizeof(s), req);
5466 if (s[0] < '0' || s[0] > '9')
5467 return (EINVAL); /* not a number */
5469 if (n & ~(PAUSE_TX | PAUSE_RX))
5470 return (EINVAL); /* some other bit is set too */
5472 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE");
5475 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
5476 int link_ok = lc->link_ok;
5478 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
5479 lc->requested_fc |= n;
5480 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
5481 lc->link_ok = link_ok; /* restore */
5483 end_synchronized_op(sc, 0);
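/* Reads the 64-bit register whose offset is in arg2 (arg1 is the adapter). */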
5490 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5492 struct adapter *sc = arg1;
5496 val = t4_read_reg64(sc, reg);
5498 return (sysctl_handle_64(oidp, &val, 0, req));
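/*
 * Chip temperature, obtained from the firmware DIAG_TMP parameter.  The
 * firmware reports 0 for unknown; that is displayed as -1 here.
 */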
5502 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5504 struct adapter *sc = arg1;
5506 uint32_t param, val;
5508 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5511 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5512 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5513 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5514 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5515 end_synchronized_op(sc, 0);
5519 /* unknown is returned as 0 but we display -1 in that case */
5520 t = val == 0 ? -1 : val;
5522 rc = sysctl_handle_int(oidp, &t, 0, req);
5528 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
5530 struct adapter *sc = arg1;
5533 uint16_t incr[NMTUS][NCCTRL_WIN];
5534 static const char *dec_fac[] = {
5535 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
5539 rc = sysctl_wire_old_buffer(req, 0);
5543 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5547 t4_read_cong_tbl(sc, incr);
5549 for (i = 0; i < NCCTRL_WIN; ++i) {
5550 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
5551 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
5552 incr[5][i], incr[6][i], incr[7][i]);
5553 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
5554 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
5555 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
5556 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
5559 rc = sbuf_finish(sb);
5565 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
5566 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
5567 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
5568 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
5572 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5574 struct adapter *sc = arg1;
5576 int rc, i, n, qid = arg2;
5579 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5581 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5582 ("%s: bad qid %d\n", __func__, qid));
5584 if (qid < CIM_NUM_IBQ) {
5587 n = 4 * CIM_IBQ_SIZE;
5588 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5589 rc = t4_read_cim_ibq(sc, qid, buf, n);
5591 /* outbound queue */
5594 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5595 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5596 rc = t4_read_cim_obq(sc, qid, buf, n);
5603 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
5605 rc = sysctl_wire_old_buffer(req, 0);
5609 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5615 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5616 for (i = 0, p = buf; i < n; i += 16, p += 4)
5617 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5620 rc = sbuf_finish(sb);
5628 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5630 struct adapter *sc = arg1;
5636 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5640 rc = sysctl_wire_old_buffer(req, 0);
5644 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5648 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5651 rc = -t4_cim_read_la(sc, buf, NULL);
5655 sbuf_printf(sb, "Status Data PC%s",
5656 cfg & F_UPDBGLACAPTPCONLY ? "" :
5657 " LS0Stat LS0Addr LS0Data");
5659 KASSERT((sc->params.cim_la_size & 7) == 0,
5660 ("%s: p will walk off the end of buf", __func__));
5662 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5663 if (cfg & F_UPDBGLACAPTPCONLY) {
5664 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
5666 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
5667 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5668 p[4] & 0xff, p[5] >> 8);
5669 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
5670 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5671 p[1] & 0xf, p[2] >> 4);
5674 "\n %02x %x%07x %x%07x %08x %08x "
5676 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5677 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5682 rc = sbuf_finish(sb);
5690 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5692 struct adapter *sc = arg1;
5698 rc = sysctl_wire_old_buffer(req, 0);
5702 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5706 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5709 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5712 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5713 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5717 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
5718 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5719 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
5720 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5721 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5722 (p[1] >> 2) | ((p[2] & 3) << 30),
5723 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5727 rc = sbuf_finish(sb);
5734 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5736 struct adapter *sc = arg1;
5742 rc = sysctl_wire_old_buffer(req, 0);
5746 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5750 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5753 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5756 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
5757 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5758 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
5759 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5760 p[4], p[3], p[2], p[1], p[0]);
5763 sbuf_printf(sb, "\n\nCntl ID Data");
5764 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5765 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
5766 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5769 rc = sbuf_finish(sb);
5776 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5778 struct adapter *sc = arg1;
5781 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5782 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5783 uint16_t thres[CIM_NUM_IBQ];
5784 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5785 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5786 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5789 cim_num_obq = CIM_NUM_OBQ;
5790 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5791 obq_rdaddr = A_UP_OBQ_0_REALADDR;
5793 cim_num_obq = CIM_NUM_OBQ_T5;
5794 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5795 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5797 nq = CIM_NUM_IBQ + cim_num_obq;
5799 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5801 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5805 t4_read_cimq_cfg(sc, base, size, thres);
5807 rc = sysctl_wire_old_buffer(req, 0);
5811 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5815 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
5817 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5818 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
5819 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5820 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5821 G_QUEREMFLITS(p[2]) * 16);
5822 for ( ; i < nq; i++, p += 4, wr += 2)
5823 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
5824 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5825 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5826 G_QUEREMFLITS(p[2]) * 16);
5828 rc = sbuf_finish(sb);
5835 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5837 struct adapter *sc = arg1;
5840 struct tp_cpl_stats stats;
5842 rc = sysctl_wire_old_buffer(req, 0);
5846 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5850 t4_tp_get_cpl_stats(sc, &stats);
5852 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
5854 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
5855 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5856 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
5857 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5859 rc = sbuf_finish(sb);
5866 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5868 struct adapter *sc = arg1;
5871 struct tp_usm_stats stats;
5873 rc = sysctl_wire_old_buffer(req, 0);
5877 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5881 t4_get_usm_stats(sc, &stats);
5883 sbuf_printf(sb, "Frames: %u\n", stats.frames);
5884 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5885 sbuf_printf(sb, "Drops: %u", stats.drops);
5887 rc = sbuf_finish(sb);
5893 const char *devlog_level_strings[] = {
5894 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
5895 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
5896 [FW_DEVLOG_LEVEL_ERR] = "ERR",
5897 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
5898 [FW_DEVLOG_LEVEL_INFO] = "INFO",
5899 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
5902 const char *devlog_facility_strings[] = {
5903 [FW_DEVLOG_FACILITY_CORE] = "CORE",
5904 [FW_DEVLOG_FACILITY_CF] = "CF",
5905 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
5906 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
5907 [FW_DEVLOG_FACILITY_RES] = "RES",
5908 [FW_DEVLOG_FACILITY_HW] = "HW",
5909 [FW_DEVLOG_FACILITY_FLR] = "FLR",
5910 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
5911 [FW_DEVLOG_FACILITY_PHY] = "PHY",
5912 [FW_DEVLOG_FACILITY_MAC] = "MAC",
5913 [FW_DEVLOG_FACILITY_PORT] = "PORT",
5914 [FW_DEVLOG_FACILITY_VI] = "VI",
5915 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
5916 [FW_DEVLOG_FACILITY_ACL] = "ACL",
5917 [FW_DEVLOG_FACILITY_TM] = "TM",
5918 [FW_DEVLOG_FACILITY_QFC] = "QFC",
5919 [FW_DEVLOG_FACILITY_DCB] = "DCB",
5920 [FW_DEVLOG_FACILITY_ETH] = "ETH",
5921 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
5922 [FW_DEVLOG_FACILITY_RI] = "RI",
5923 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
5924 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
5925 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
5926 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
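/*
 * Dump the firmware's device log from card memory.  Entries are byte-swapped
 * in place and printed starting from the oldest (smallest timestamp).
 */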
5930 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5932 struct adapter *sc = arg1;
5933 struct devlog_params *dparams = &sc->params.devlog;
5934 struct fw_devlog_e *buf, *e;
5935 int i, j, rc, nentries, first = 0, m;
5937 uint64_t ftstamp = UINT64_MAX;
5939 if (dparams->start == 0) {
5940 dparams->memtype = FW_MEMTYPE_EDC0;
5941 dparams->start = 0x84000;
5942 dparams->size = 32768;
5945 nentries = dparams->size / sizeof(struct fw_devlog_e);
5947 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5951 m = fwmtype_to_hwmtype(dparams->memtype);
5952 rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5956 for (i = 0; i < nentries; i++) {
5959 if (e->timestamp == 0)
5962 e->timestamp = be64toh(e->timestamp);
5963 e->seqno = be32toh(e->seqno);
5964 for (j = 0; j < 8; j++)
5965 e->params[j] = be32toh(e->params[j]);
5967 if (e->timestamp < ftstamp) {
5968 ftstamp = e->timestamp;
5973 if (buf[first].timestamp == 0)
5974 goto done; /* nothing in the log */
5976 rc = sysctl_wire_old_buffer(req, 0);
5980 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5985 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
5986 "Seq#", "Tstamp", "Level", "Facility", "Message");
5991 if (e->timestamp == 0)
5994 sbuf_printf(sb, "%10d %15ju %8s %8s ",
5995 e->seqno, e->timestamp,
5996 (e->level < nitems(devlog_level_strings) ?
5997 devlog_level_strings[e->level] : "UNKNOWN"),
5998 (e->facility < nitems(devlog_facility_strings) ?
5999 devlog_facility_strings[e->facility] : "UNKNOWN"));
6000 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
6001 e->params[2], e->params[3], e->params[4],
6002 e->params[5], e->params[6], e->params[7]);
6004 if (++i == nentries)
6006 } while (i != first);
6008 rc = sbuf_finish(sb);
6016 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6018 struct adapter *sc = arg1;
6021 struct tp_fcoe_stats stats[4];
6023 rc = sysctl_wire_old_buffer(req, 0);
6027 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6031 t4_get_fcoe_stats(sc, 0, &stats[0]);
6032 t4_get_fcoe_stats(sc, 1, &stats[1]);
6033 t4_get_fcoe_stats(sc, 2, &stats[2]);
6034 t4_get_fcoe_stats(sc, 3, &stats[3]);
6036 sbuf_printf(sb, " channel 0 channel 1 "
6037 "channel 2 channel 3\n");
6038 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
6039 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
6040 stats[3].octetsDDP);
6041 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
6042 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
6043 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
6044 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
6045 stats[3].framesDrop);
6047 rc = sbuf_finish(sb);
6054 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
6056 struct adapter *sc = arg1;
6059 unsigned int map, kbps, ipg, mode;
6060 unsigned int pace_tab[NTX_SCHED];
6062 rc = sysctl_wire_old_buffer(req, 0);
6066 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6070 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
6071 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
6072 t4_read_pace_tbl(sc, pace_tab);
6074 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
6075 "Class IPG (0.1 ns) Flow IPG (us)");
6077 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
6078 t4_get_tx_sched(sc, i, &kbps, &ipg);
6079 sbuf_printf(sb, "\n %u %-5s %u ", i,
6080 (mode & (1 << i)) ? "flow" : "class", map & 3);
6082 sbuf_printf(sb, "%9u ", kbps);
6084 sbuf_printf(sb, " disabled ");
6087 sbuf_printf(sb, "%13u ", ipg);
6089 sbuf_printf(sb, " disabled ");
6092 sbuf_printf(sb, "%10u", pace_tab[i]);
6094 sbuf_printf(sb, " disabled");
6097 rc = sbuf_finish(sb);
6104 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
6106 struct adapter *sc = arg1;
6110 struct lb_port_stats s[2];
6111 static const char *stat_name[] = {
6112 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
6113 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
6114 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
6115 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
6116 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
6117 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
6118 "BG2FramesTrunc:", "BG3FramesTrunc:"
6121 rc = sysctl_wire_old_buffer(req, 0);
6125 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6129 memset(s, 0, sizeof(s));
6131 for (i = 0; i < 4; i += 2) {
6132 t4_get_lb_stats(sc, i, &s[0]);
6133 t4_get_lb_stats(sc, i + 1, &s[1]);
6137 sbuf_printf(sb, "%s Loopback %u"
6138 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
6140 for (j = 0; j < nitems(stat_name); j++)
6141 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
6145 rc = sbuf_finish(sb);
6152 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6155 struct port_info *pi = arg1;
6157 static const char *linkdnreasons[] = {
6158 "non-specific", "remote fault", "autoneg failed", "reserved3",
6159 "PHY overheated", "unknown", "rx los", "reserved7"
6162 rc = sysctl_wire_old_buffer(req, 0);
6165 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6169 if (pi->linkdnrc < 0)
6170 sbuf_printf(sb, "n/a");
6171 else if (pi->linkdnrc < nitems(linkdnreasons))
6172 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
6174 sbuf_printf(sb, "%d", pi->linkdnrc);
6176 rc = sbuf_finish(sb);
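/*
 * Helpers for sysctl_meminfo.  Each mem_desc describes a region of card
 * memory: [base, limit] plus an index into the region name table.
 */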
6189 mem_desc_cmp(const void *a, const void *b)
6191 return ((const struct mem_desc *)a)->base -
6192 ((const struct mem_desc *)b)->base;
6196 mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
6201 size = to - from + 1;
6205 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
6206 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
6210 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6212 struct adapter *sc = arg1;
6215 uint32_t lo, hi, used, alloc;
6216 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6217 static const char *region[] = {
6218 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6219 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6220 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6221 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6222 "RQUDP region:", "PBL region:", "TXPBL region:",
6223 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6226 struct mem_desc avail[4];
6227 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
6228 struct mem_desc *md = mem;
6230 rc = sysctl_wire_old_buffer(req, 0);
6234 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6238 for (i = 0; i < nitems(mem); i++) {
6243 /* Find and sort the populated memory ranges */
6245 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6246 if (lo & F_EDRAM0_ENABLE) {
6247 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6248 avail[i].base = G_EDRAM0_BASE(hi) << 20;
6249 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6253 if (lo & F_EDRAM1_ENABLE) {
6254 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6255 avail[i].base = G_EDRAM1_BASE(hi) << 20;
6256 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6260 if (lo & F_EXT_MEM_ENABLE) {
6261 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6262 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6263 avail[i].limit = avail[i].base +
6264 (G_EXT_MEM_SIZE(hi) << 20);
6265 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */
6268 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
6269 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6270 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6271 avail[i].limit = avail[i].base +
6272 (G_EXT_MEM1_SIZE(hi) << 20);
6276 if (!i) /* no memory available */
6278 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6280 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6281 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6282 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6283 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6284 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6285 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6286 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6287 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6288 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6290 /* the next few have explicit upper bounds */
6291 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6292 md->limit = md->base - 1 +
6293 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6294 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6297 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6298 md->limit = md->base - 1 +
6299 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6300 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6303 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6304 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
6305 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6306 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
6309 md->idx = nitems(region); /* hide it */
6313 #define ulp_region(reg) \
6314 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6315 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6317 ulp_region(RX_ISCSI);
6318 ulp_region(RX_TDDP);
6320 ulp_region(RX_STAG);
6322 ulp_region(RX_RQUDP);
6328 md->idx = nitems(region);
6329 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
6330 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
6331 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
6332 A_SGE_DBVFIFO_SIZE))) << 2) - 1;
6336 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6337 md->limit = md->base + sc->tids.ntids - 1;
6339 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6340 md->limit = md->base + sc->tids.ntids - 1;
6343 md->base = sc->vres.ocq.start;
6344 if (sc->vres.ocq.size)
6345 md->limit = md->base + sc->vres.ocq.size - 1;
6347 md->idx = nitems(region); /* hide it */
6350 /* add any address-space holes; there can be up to 3 */
6351 for (n = 0; n < i - 1; n++)
6352 if (avail[n].limit < avail[n + 1].base)
6353 (md++)->base = avail[n].limit;
6355 (md++)->base = avail[n].limit;
6358 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6360 for (lo = 0; lo < i; lo++)
6361 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6362 avail[lo].limit - 1);
6364 sbuf_printf(sb, "\n");
6365 for (i = 0; i < n; i++) {
6366 if (mem[i].idx >= nitems(region))
6367 continue; /* skip holes */
6369 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6370 mem_region_show(sb, region[mem[i].idx], mem[i].base,
6374 sbuf_printf(sb, "\n");
6375 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6376 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6377 mem_region_show(sb, "uP RAM:", lo, hi);
6379 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6380 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6381 mem_region_show(sb, "uP Extmem2:", lo, hi);
6383 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6384 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6386 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6387 (lo & F_PMRXNUMCHN) ? 2 : 1);
6389 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6390 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6391 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6393 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6394 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6395 sbuf_printf(sb, "%u p-structs\n",
6396 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6398 for (i = 0; i < 4; i++) {
6399 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6402 alloc = G_ALLOC(lo);
6404 used = G_T5_USED(lo);
6405 alloc = G_T5_ALLOC(lo);
6407 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6410 for (i = 0; i < 4; i++) {
6411 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6414 alloc = G_ALLOC(lo);
6416 used = G_T5_USED(lo);
6417 alloc = G_T5_ALLOC(lo);
6420 "\nLoopback %d using %u pages out of %u allocated",
6424 rc = sbuf_finish(sb);
6431 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
6435 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
6439 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
6441 struct adapter *sc = arg1;
6445 rc = sysctl_wire_old_buffer(req, 0);
6449 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6454 "Idx Ethernet address Mask Vld Ports PF"
6455 " VF Replication P0 P1 P2 P3 ML");
6456 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
6457 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6458 for (i = 0; i < n; i++) {
6459 uint64_t tcamx, tcamy, mask;
6460 uint32_t cls_lo, cls_hi;
6461 uint8_t addr[ETHER_ADDR_LEN];
6463 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
6464 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
6465 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
6466 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
6471 tcamxy2valmask(tcamx, tcamy, addr, &mask);
6472 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
6473 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
6474 addr[3], addr[4], addr[5], (uintmax_t)mask,
6475 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
6476 G_PORTMAP(cls_hi), G_PF(cls_lo),
6477 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
6479 if (cls_lo & F_REPLICATE) {
6480 struct fw_ldst_cmd ldst_cmd;
6482 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
6483 ldst_cmd.op_to_addrspace =
6484 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
6485 F_FW_CMD_REQUEST | F_FW_CMD_READ |
6486 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
6487 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
6488 ldst_cmd.u.mps.rplc.fid_idx =
6489 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
6490 V_FW_LDST_CMD_IDX(i));
6492 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
6496 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
6497 sizeof(ldst_cmd), &ldst_cmd);
6498 end_synchronized_op(sc, 0);
6502 " ------------ error %3u ------------", rc);
6505 sbuf_printf(sb, " %08x %08x %08x %08x",
6506 be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
6507 be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
6508 be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
6509 be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
6512 sbuf_printf(sb, "%36s", "");
6514 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
6515 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
6516 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
6520 (void) sbuf_finish(sb);
6522 rc = sbuf_finish(sb);
6529 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6531 struct adapter *sc = arg1;
6534 uint16_t mtus[NMTUS];
6536 rc = sysctl_wire_old_buffer(req, 0);
6540 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6544 t4_read_mtu_tbl(sc, mtus, NULL);
6546 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6547 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6548 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6549 mtus[14], mtus[15]);
6551 rc = sbuf_finish(sb);
6558 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6560 struct adapter *sc = arg1;
6563 uint32_t cnt[PM_NSTATS];
6564 uint64_t cyc[PM_NSTATS];
6565 static const char *rx_stats[] = {
6566 "Read:", "Write bypass:", "Write mem:", "Flush:"
6568 static const char *tx_stats[] = {
6569 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6572 rc = sysctl_wire_old_buffer(req, 0);
6576 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6580 t4_pmtx_get_stats(sc, cnt, cyc);
6581 sbuf_printf(sb, " Tx pcmds Tx bytes");
6582 for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6583 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6586 t4_pmrx_get_stats(sc, cnt, cyc);
6587 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
6588 for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6589 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6592 rc = sbuf_finish(sb);
6599 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6601 struct adapter *sc = arg1;
6604 struct tp_rdma_stats stats;
6606 rc = sysctl_wire_old_buffer(req, 0);
6610 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6614 t4_tp_get_rdma_stats(sc, &stats);
6615 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6616 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6618 rc = sbuf_finish(sb);
6625 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6627 struct adapter *sc = arg1;
6630 struct tp_tcp_stats v4, v6;
6632 rc = sysctl_wire_old_buffer(req, 0);
6636 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6640 t4_tp_get_tcp_stats(sc, &v4, &v6);
6643 sbuf_printf(sb, "OutRsts: %20u %20u\n",
6644 v4.tcpOutRsts, v6.tcpOutRsts);
6645 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
6646 v4.tcpInSegs, v6.tcpInSegs);
6647 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
6648 v4.tcpOutSegs, v6.tcpOutSegs);
6649 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
6650 v4.tcpRetransSegs, v6.tcpRetransSegs);
6652 rc = sbuf_finish(sb);
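/* Summary of the adapter's TID ranges (ATID, TID, STID, FTID, ETID) and usage. */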
6659 sysctl_tids(SYSCTL_HANDLER_ARGS)
6661 struct adapter *sc = arg1;
6664 struct tid_info *t = &sc->tids;
6666 rc = sysctl_wire_old_buffer(req, 0);
6670 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6675 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6680 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6681 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6684 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6685 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6688 sbuf_printf(sb, "TID range: %u-%u",
6689 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6693 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6694 sbuf_printf(sb, ", in use: %u\n",
6695 atomic_load_acq_int(&t->tids_in_use));
6699 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6700 t->stid_base + t->nstids - 1, t->stids_in_use);
6704 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6705 t->ftid_base + t->nftids - 1);
6709 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
6710 t->etid_base + t->netids - 1);
6713 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6714 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6715 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6717 rc = sbuf_finish(sb);
6724 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6726 struct adapter *sc = arg1;
6729 struct tp_err_stats stats;
6731 rc = sysctl_wire_old_buffer(req, 0);
6735 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6739 t4_tp_get_err_stats(sc, &stats);
6741 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6743 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
6744 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6745 stats.macInErrs[3]);
6746 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
6747 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6748 stats.hdrInErrs[3]);
6749 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
6750 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6751 stats.tcpInErrs[3]);
6752 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
6753 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6754 stats.tcp6InErrs[3]);
6755 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
6756 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6757 stats.tnlCongDrops[3]);
6758 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
6759 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6760 stats.tnlTxDrops[3]);
6761 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
6762 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6763 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6764 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
6765 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6766 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6767 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
6768 stats.ofldNoNeigh, stats.ofldCongDefer);
6770 rc = sbuf_finish(sb);
6783 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6789 uint64_t mask = (1ULL << f->width) - 1;
6790 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6791 ((uintmax_t)v >> f->start) & mask);
6793 if (line_size + len >= 79) {
6795 sbuf_printf(sb, "\n ");
6797 sbuf_printf(sb, "%s ", buf);
6798 line_size += len + 1;
6801 sbuf_printf(sb, "\n");
6804 static struct field_desc tp_la0[] = {
6805 { "RcfOpCodeOut", 60, 4 },
6807 { "WcfState", 52, 4 },
6808 { "RcfOpcSrcOut", 50, 2 },
6809 { "CRxError", 49, 1 },
6810 { "ERxError", 48, 1 },
6811 { "SanityFailed", 47, 1 },
6812 { "SpuriousMsg", 46, 1 },
6813 { "FlushInputMsg", 45, 1 },
6814 { "FlushInputCpl", 44, 1 },
6815 { "RssUpBit", 43, 1 },
6816 { "RssFilterHit", 42, 1 },
6818 { "InitTcb", 31, 1 },
6819 { "LineNumber", 24, 7 },
6821 { "EdataOut", 22, 1 },
6823 { "CdataOut", 20, 1 },
6824 { "EreadPdu", 19, 1 },
6825 { "CreadPdu", 18, 1 },
6826 { "TunnelPkt", 17, 1 },
6827 { "RcfPeerFin", 16, 1 },
6828 { "RcfReasonOut", 12, 4 },
6829 { "TxCchannel", 10, 2 },
6830 { "RcfTxChannel", 8, 2 },
6831 { "RxEchannel", 6, 2 },
6832 { "RcfRxChannel", 5, 1 },
6833 { "RcfDataOutSrdy", 4, 1 },
6835 { "RxOoDvld", 2, 1 },
6836 { "RxCongestion", 1, 1 },
6837 { "TxCongestion", 0, 1 },
6841 static struct field_desc tp_la1[] = {
6842 { "CplCmdIn", 56, 8 },
6843 { "CplCmdOut", 48, 8 },
6844 { "ESynOut", 47, 1 },
6845 { "EAckOut", 46, 1 },
6846 { "EFinOut", 45, 1 },
6847 { "ERstOut", 44, 1 },
6852 { "DataIn", 39, 1 },
6853 { "DataInVld", 38, 1 },
6855 { "RxBufEmpty", 36, 1 },
6857 { "RxFbCongestion", 34, 1 },
6858 { "TxFbCongestion", 33, 1 },
6859 { "TxPktSumSrdy", 32, 1 },
6860 { "RcfUlpType", 28, 4 },
6862 { "Ebypass", 26, 1 },
6864 { "Static0", 24, 1 },
6866 { "Cbypass", 22, 1 },
6868 { "CPktOut", 20, 1 },
6869 { "RxPagePoolFull", 18, 2 },
6870 { "RxLpbkPkt", 17, 1 },
6871 { "TxLpbkPkt", 16, 1 },
6872 { "RxVfValid", 15, 1 },
6873 { "SynLearned", 14, 1 },
6874 { "SetDelEntry", 13, 1 },
6875 { "SetInvEntry", 12, 1 },
6876 { "CpcmdDvld", 11, 1 },
6877 { "CpcmdSave", 10, 1 },
6878 { "RxPstructsFull", 8, 2 },
6879 { "EpcmdDvld", 7, 1 },
6880 { "EpcmdFlush", 6, 1 },
6881 { "EpcmdTrimPrefix", 5, 1 },
6882 { "EpcmdTrimPostfix", 4, 1 },
6883 { "ERssIp4Pkt", 3, 1 },
6884 { "ERssIp6Pkt", 2, 1 },
6885 { "ERssTcpUdpPkt", 1, 1 },
6886 { "ERssFceFipPkt", 0, 1 },
6890 static struct field_desc tp_la2[] = {
6891 { "CplCmdIn", 56, 8 },
6892 { "MpsVfVld", 55, 1 },
6899 { "DataIn", 39, 1 },
6900 { "DataInVld", 38, 1 },
6902 { "RxBufEmpty", 36, 1 },
6904 { "RxFbCongestion", 34, 1 },
6905 { "TxFbCongestion", 33, 1 },
6906 { "TxPktSumSrdy", 32, 1 },
6907 { "RcfUlpType", 28, 4 },
6909 { "Ebypass", 26, 1 },
6911 { "Static0", 24, 1 },
6913 { "Cbypass", 22, 1 },
6915 { "CPktOut", 20, 1 },
6916 { "RxPagePoolFull", 18, 2 },
6917 { "RxLpbkPkt", 17, 1 },
6918 { "TxLpbkPkt", 16, 1 },
6919 { "RxVfValid", 15, 1 },
6920 { "SynLearned", 14, 1 },
6921 { "SetDelEntry", 13, 1 },
6922 { "SetInvEntry", 12, 1 },
6923 { "CpcmdDvld", 11, 1 },
6924 { "CpcmdSave", 10, 1 },
6925 { "RxPstructsFull", 8, 2 },
6926 { "EpcmdDvld", 7, 1 },
6927 { "EpcmdFlush", 6, 1 },
6928 { "EpcmdTrimPrefix", 5, 1 },
6929 { "EpcmdTrimPostfix", 4, 1 },
6930 { "ERssIp4Pkt", 3, 1 },
6931 { "ERssIp6Pkt", 2, 1 },
6932 { "ERssTcpUdpPkt", 1, 1 },
6933 { "ERssFceFipPkt", 0, 1 },
6938 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6941 field_desc_show(sb, *p, tp_la0);
6945 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6949 sbuf_printf(sb, "\n");
6950 field_desc_show(sb, p[0], tp_la0);
6951 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6952 field_desc_show(sb, p[1], tp_la0);
6956 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6960 sbuf_printf(sb, "\n");
6961 field_desc_show(sb, p[0], tp_la0);
6962 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6963 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6967 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6969 struct adapter *sc = arg1;
6974 void (*show_func)(struct sbuf *, uint64_t *, int);
6976 rc = sysctl_wire_old_buffer(req, 0);
6980 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6984 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6986 t4_tp_read_la(sc, buf, NULL);
6989 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6992 show_func = tp_la_show2;
6996 show_func = tp_la_show3;
7000 show_func = tp_la_show;
7003 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
7004 (*show_func)(sb, p, i);
7006 rc = sbuf_finish(sb);
7013 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
7015 struct adapter *sc = arg1;
7018 u64 nrate[NCHAN], orate[NCHAN];
7020 rc = sysctl_wire_old_buffer(req, 0);
7024 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7028 t4_get_chan_txrate(sc, nrate, orate);
7029 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
7031 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
7032 nrate[0], nrate[1], nrate[2], nrate[3]);
7033 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
7034 orate[0], orate[1], orate[2], orate[3]);
7036 rc = sbuf_finish(sb);
7043 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7045 struct adapter *sc = arg1;
7050 rc = sysctl_wire_old_buffer(req, 0);
7054 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7058 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7061 t4_ulprx_read_la(sc, buf);
7064 sbuf_printf(sb, " Pcmd Type Message"
7066 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7067 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
7068 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7071 rc = sbuf_finish(sb);
7078 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
7080 struct adapter *sc = arg1;
7084 rc = sysctl_wire_old_buffer(req, 0);
7088 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7092 v = t4_read_reg(sc, A_SGE_STAT_CFG);
7093 if (G_STATSOURCE_T5(v) == 7) {
7094 if (G_STATMODE(v) == 0) {
7095 sbuf_printf(sb, "total %d, incomplete %d",
7096 t4_read_reg(sc, A_SGE_STAT_TOTAL),
7097 t4_read_reg(sc, A_SGE_STAT_MATCH));
7098 } else if (G_STATMODE(v) == 1) {
7099 sbuf_printf(sb, "total %d, data overflow %d",
7100 t4_read_reg(sc, A_SGE_STAT_TOTAL),
7101 t4_read_reg(sc, A_SGE_STAT_MATCH));
7104 rc = sbuf_finish(sb);
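/*
 * Translation between the hardware filter tuple configuration (fconf bits
 * such as F_FRAGMENTATION, F_MPSHITTYPE, ...) and the T4_FILTER_* mode flags.
 */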
7112 fconf_to_mode(uint32_t fconf)
7116 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7117 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7119 if (fconf & F_FRAGMENTATION)
7120 mode |= T4_FILTER_IP_FRAGMENT;
7122 if (fconf & F_MPSHITTYPE)
7123 mode |= T4_FILTER_MPS_HIT_TYPE;
7125 if (fconf & F_MACMATCH)
7126 mode |= T4_FILTER_MAC_IDX;
7128 if (fconf & F_ETHERTYPE)
7129 mode |= T4_FILTER_ETH_TYPE;
7131 if (fconf & F_PROTOCOL)
7132 mode |= T4_FILTER_IP_PROTO;
7135 mode |= T4_FILTER_IP_TOS;
7138 mode |= T4_FILTER_VLAN;
7140 if (fconf & F_VNIC_ID)
7141 mode |= T4_FILTER_VNIC;
7144 mode |= T4_FILTER_PORT;
7147 mode |= T4_FILTER_FCoE;
7153 mode_to_fconf(uint32_t mode)
7157 if (mode & T4_FILTER_IP_FRAGMENT)
7158 fconf |= F_FRAGMENTATION;
7160 if (mode & T4_FILTER_MPS_HIT_TYPE)
7161 fconf |= F_MPSHITTYPE;
7163 if (mode & T4_FILTER_MAC_IDX)
7164 fconf |= F_MACMATCH;
7166 if (mode & T4_FILTER_ETH_TYPE)
7167 fconf |= F_ETHERTYPE;
7169 if (mode & T4_FILTER_IP_PROTO)
7170 fconf |= F_PROTOCOL;
7172 if (mode & T4_FILTER_IP_TOS)
7175 if (mode & T4_FILTER_VLAN)
7178 if (mode & T4_FILTER_VNIC)
7181 if (mode & T4_FILTER_PORT)
7184 if (mode & T4_FILTER_FCoE)
7191 fspec_to_fconf(struct t4_filter_specification *fs)
7195 if (fs->val.frag || fs->mask.frag)
7196 fconf |= F_FRAGMENTATION;
7198 if (fs->val.matchtype || fs->mask.matchtype)
7199 fconf |= F_MPSHITTYPE;
7201 if (fs->val.macidx || fs->mask.macidx)
7202 fconf |= F_MACMATCH;
7204 if (fs->val.ethtype || fs->mask.ethtype)
7205 fconf |= F_ETHERTYPE;
7207 if (fs->val.proto || fs->mask.proto)
7208 fconf |= F_PROTOCOL;
7210 if (fs->val.tos || fs->mask.tos)
7213 if (fs->val.vlan_vld || fs->mask.vlan_vld)
7216 if (fs->val.vnic_vld || fs->mask.vnic_vld)
7219 if (fs->val.iport || fs->mask.iport)
7222 if (fs->val.fcoe || fs->mask.fcoe)
7229 get_filter_mode(struct adapter *sc, uint32_t *mode)
7234 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7239 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
7242 if (sc->params.tp.vlan_pri_map != fconf) {
7243 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
7244 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
7248 *mode = fconf_to_mode(fconf);
7250 end_synchronized_op(sc, LOCK_HELD);
7255 set_filter_mode(struct adapter *sc, uint32_t mode)
7260 fconf = mode_to_fconf(mode);
7262 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7267 if (sc->tids.ftids_in_use > 0) {
7273 if (uld_active(sc, ULD_TOM)) {
7279 rc = -t4_set_filter_mode(sc, fconf);
7281 end_synchronized_op(sc, LOCK_HELD);
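/* Read a filter's hit count out of its TCB, accessed through memory window 0. */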
7285 static inline uint64_t
7286 get_filter_hits(struct adapter *sc, uint32_t fid)
7288 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
7291 memwin_info(sc, 0, &mw_base, NULL);
7292 off = position_memwin(sc, 0,
7293 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
7295 hits = t4_read_reg64(sc, mw_base + off + 16);
7296 hits = be64toh(hits);
7298 hits = t4_read_reg(sc, mw_base + off + 24);
7299 hits = be32toh(hits);
7306 get_filter(struct adapter *sc, struct t4_filter *t)
7308 int i, rc, nfilters = sc->tids.nftids;
7309 struct filter_entry *f;
7311 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
7316 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
7317 t->idx >= nfilters) {
7318 t->idx = 0xffffffff;
7322 f = &sc->tids.ftid_tab[t->idx];
7323 for (i = t->idx; i < nfilters; i++, f++) {
7326 t->l2tidx = f->l2t ? f->l2t->idx : 0;
7327 t->smtidx = f->smtidx;
7329 t->hits = get_filter_hits(sc, t->idx);
7331 t->hits = UINT64_MAX;
7338 t->idx = 0xffffffff;
7340 end_synchronized_op(sc, LOCK_HELD);
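/*
 * Validate the filter specification against the configured filter mode and
 * hardware limits, write the filter work request, and sleep until the
 * firmware reply (handled by t4_filter_rpl) marks the filter valid.
 */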
7345 set_filter(struct adapter *sc, struct t4_filter *t)
7347 unsigned int nfilters, nports;
7348 struct filter_entry *f;
7351 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
7355 nfilters = sc->tids.nftids;
7356 nports = sc->params.nports;
7358 if (nfilters == 0) {
7363 if (!(sc->flags & FULL_INIT_DONE)) {
7368 if (t->idx >= nfilters) {
7373 /* Validate against the global filter mode */
7374 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
7375 sc->params.tp.vlan_pri_map) {
7380 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
7385 if (t->fs.val.iport >= nports) {
7390 /* Can't specify an iq if not steering to it */
7391 if (!t->fs.dirsteer && t->fs.iq) {
7396 /* IPv6 filter idx must be 4-aligned */
7397 if (t->fs.type == 1 &&
7398 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
7403 if (sc->tids.ftid_tab == NULL) {
7404 KASSERT(sc->tids.ftids_in_use == 0,
7405 ("%s: no memory allocated but filters_in_use > 0",
7408 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
7409 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
7410 if (sc->tids.ftid_tab == NULL) {
7414 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
7417 for (i = 0; i < 4; i++) {
7418 f = &sc->tids.ftid_tab[t->idx + i];
7420 if (f->pending || f->valid) {
7429 if (t->fs.type == 0)
7433 f = &sc->tids.ftid_tab[t->idx];
7436 rc = set_filter_wr(sc, t->idx);
7438 end_synchronized_op(sc, 0);
7441 mtx_lock(&sc->tids.ftid_lock);
7443 if (f->pending == 0) {
7444 rc = f->valid ? 0 : EIO;
7448 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7449 PCATCH, "t4setfw", 0)) {
7454 mtx_unlock(&sc->tids.ftid_lock);
7460 del_filter(struct adapter *sc, struct t4_filter *t)
7462 unsigned int nfilters;
7463 struct filter_entry *f;
7466 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
7470 nfilters = sc->tids.nftids;
7472 if (nfilters == 0) {
7477 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
7478 t->idx >= nfilters) {
7483 if (!(sc->flags & FULL_INIT_DONE)) {
7488 f = &sc->tids.ftid_tab[t->idx];
7500 t->fs = f->fs; /* extra info for the caller */
7501 rc = del_filter_wr(sc, t->idx);
7505 end_synchronized_op(sc, 0);
7508 mtx_lock(&sc->tids.ftid_lock);
7510 if (f->pending == 0) {
7511 rc = f->valid ? EIO : 0;
7515 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7516 PCATCH, "t4delfw", 0)) {
7521 mtx_unlock(&sc->tids.ftid_lock);
7528 clear_filter(struct filter_entry *f)
7531 t4_l2t_release(f->l2t);
7533 bzero(f, sizeof (*f));
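/*
 * Build and commit a FW_FILTER_WR on the management queue.  Filters that
 * rewrite the destination MAC or VLAN need an L2T switching entry, which is
 * allocated here.
 */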
7537 set_filter_wr(struct adapter *sc, int fidx)
7539 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7540 struct fw_filter_wr *fwr;
7542 struct wrq_cookie cookie;
7544 ASSERT_SYNCHRONIZED_OP(sc);
7546 if (f->fs.newdmac || f->fs.newvlan) {
7547 /* This filter needs an L2T entry; allocate one. */
7548 f->l2t = t4_l2t_alloc_switching(sc->l2t);
7551 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7553 t4_l2t_release(f->l2t);
7559 ftid = sc->tids.ftid_base + fidx;
7561 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
7564 bzero(fwr, sizeof(*fwr));
7566 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7567 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7569 htobe32(V_FW_FILTER_WR_TID(ftid) |
7570 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7571 V_FW_FILTER_WR_NOREPLY(0) |
7572 V_FW_FILTER_WR_IQ(f->fs.iq));
7573 fwr->del_filter_to_l2tix =
7574 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7575 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7576 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7577 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7578 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7579 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7580 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7581 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7582 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7583 f->fs.newvlan == VLAN_REWRITE) |
7584 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7585 f->fs.newvlan == VLAN_REWRITE) |
7586 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7587 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7588 V_FW_FILTER_WR_PRIO(f->fs.prio) |
7589 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7590 fwr->ethtype = htobe16(f->fs.val.ethtype);
7591 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7592 fwr->frag_to_ovlan_vldm =
7593 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7594 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7595 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7596 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7597 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7598 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7600 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7601 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7602 fwr->maci_to_matchtypem =
7603 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7604 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7605 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7606 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7607 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7608 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7609 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7610 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7611 fwr->ptcl = f->fs.val.proto;
7612 fwr->ptclm = f->fs.mask.proto;
7613 fwr->ttyp = f->fs.val.tos;
7614 fwr->ttypm = f->fs.mask.tos;
7615 fwr->ivlan = htobe16(f->fs.val.vlan);
7616 fwr->ivlanm = htobe16(f->fs.mask.vlan);
7617 fwr->ovlan = htobe16(f->fs.val.vnic);
7618 fwr->ovlanm = htobe16(f->fs.mask.vnic);
7619 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7620 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7621 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7622 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7623 fwr->lp = htobe16(f->fs.val.dport);
7624 fwr->lpm = htobe16(f->fs.mask.dport);
7625 fwr->fp = htobe16(f->fs.val.sport);
7626 fwr->fpm = htobe16(f->fs.mask.sport);
7628 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7631 sc->tids.ftids_in_use++;
7633 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
7638 del_filter_wr(struct adapter *sc, int fidx)
7640 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7641 struct fw_filter_wr *fwr;
7643 struct wrq_cookie cookie;
7645 ftid = sc->tids.ftid_base + fidx;
7647 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
7650 bzero(fwr, sizeof (*fwr));
7652 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7655 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
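/*
 * Reply handler for filter work requests: updates the entry's pending/valid
 * state based on the firmware's cookie and wakes up any thread sleeping in
 * set_filter/del_filter.
 */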
7660 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7662 struct adapter *sc = iq->adapter;
7663 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7664 unsigned int idx = GET_TID(rpl);
7666 struct filter_entry *f;
7668 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7671 if (is_ftid(sc, idx)) {
7673 idx -= sc->tids.ftid_base;
7674 f = &sc->tids.ftid_tab[idx];
7675 rc = G_COOKIE(rpl->cookie);
7677 mtx_lock(&sc->tids.ftid_lock);
7678 if (rc == FW_FILTER_WR_FLT_ADDED) {
7679 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7681 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7682 f->pending = 0; /* asynchronous setup completed */
7685 if (rc != FW_FILTER_WR_FLT_DELETED) {
7686 /* Add or delete failed; display an error */
7688 "filter %u setup failed with error %u\n",
7693 sc->tids.ftids_in_use--;
7695 wakeup(&sc->tids.ftid_tab);
7696 mtx_unlock(&sc->tids.ftid_lock);
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
	if (cntxt->cid > M_CTXTQID)

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly
	 * via the backdoor mechanism.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);

	end_synchronized_op(sc, 0);
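
/*
 * load_fw(): copy a firmware image in from userland and hand it to
 * t4_load_fw(), which writes it to the card's flash.  The request is not
 * honored once the adapter is fully initialized (FULL_INIT_DONE); this ioctl
 * is normally exercised through a userland tool such as cxgbetool.
 */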
load_fw(struct adapter *sc, struct t4_data *fw)
	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");

	if (sc->flags & FULL_INIT_DONE) {

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);

	end_synchronized_op(sc, 0);
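
/*
 * read_card_mem(): copy a range of adapter memory out to userland.  The range
 * is validated first, then read 32 bits at a time through a PCIe memory
 * window (positioned with position_memwin()) and copied out one
 * aperture-sized chunk at a time.
 */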
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
	uint32_t addr, off, remaining, i, n;
	uint32_t mw_base, mw_aperture;

	rc = validate_mem_range(sc, mr->addr, mr->len);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);

	remaining = mr->len;
	dst = (void *)mr->data;

	off = position_memwin(sc, win, addr);

	/* number of bytes that we'll copy in the inner loop */
	n = min(remaining, mw_aperture - off);
	for (i = 0; i < n; i += 4)
		*b++ = t4_read_reg(sc, mw_base + off + i);

	rc = copyout(buf, dst, n);
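
/*
 * read_i2c(): read bytes from a device on a port's I2C bus (e.g. a
 * transceiver module's EEPROM) by asking the firmware via t4_i2c_rd().  The
 * requested length must fit in the caller-supplied data buffer.
 */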
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)

	if (i2cd->len > sizeof(i2cd->data))

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");

	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);
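
/*
 * in_range(): range check used by the scheduler ioctls below.  A negative
 * value is deliberately accepted because it means "parameter not specified";
 * the callers deal with unset parameters separately.
 */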
in_range(int val, int lo, int hi)
	return (val < 0 || (val <= hi && val >= lo));
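
/*
 * set_sched_class(): handler for the CHELSIO_T4_SCHED_CLASS ioctl.  It maps
 * the cxgbetool-style scheduling-class request onto the firmware's FW_SCHED
 * command: either a global config request (t4_sched_config()) or a per-class
 * parameter update (t4_sched_params()).
 */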
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");

	if (!(sc->flags & FULL_INIT_DONE)) {

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
	 * sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;

	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ... */
		if (p->u.config.minmax < 0) {

		/* And pass the request to the firmware ... */
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {

		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;

		/* Vet our parameters ... */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * that must be supplied were left unset.
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {

		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {

			p->u.params.maxrate = 0;

		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {

			p->u.params.weight = 0;

		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {

			p->u.params.pktsize = 0;

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);

	end_synchronized_op(sc, 0);
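
/*
 * set_sched_queue(): handler for the CHELSIO_T4_SCHED_QUEUE ioctl.  Binds one
 * TX queue (or, with a negative queue number, every TX queue of the port) to
 * a scheduling class by setting the DMAQ_EQ_SCHEDCLASS_ETH firmware parameter
 * on the queue's egress context.
 */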
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");

	if (!(sc->flags & FULL_INIT_DONE)) {

	if (p->port >= sc->params.nports) {

	pi = sc->port[p->port];
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If op.queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,

	/*
	 * Change the scheduling on all the TX queues for the port.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,

	end_synchronized_op(sc, 0);
t4_os_find_pci_capability(struct adapter *sc, int cap)
	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);

t4_os_pci_save_state(struct adapter *sc)
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	pci_cfg_save(dev, dinfo, 0);

t4_os_pci_restore_state(struct adapter *sc)
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	pci_cfg_restore(dev, dinfo);
t4_os_portmod_changed(const struct adapter *sc, int idx)
	struct port_info *pi = sc->port[idx];
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"

	build_medialist(pi, &pi->media);
	build_medialist(pi, &pi->nm_media);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(pi->ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(pi->ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(pi->ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
	struct port_info *pi = sc->port[idx];
	struct ifnet *ifp = pi->ifp;

	if (link_stat) {
		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		pi->linkdnrc = reason;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
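
/*
 * Example (hypothetical caller, for illustration only): counting the attached
 * adapters with a callback that matches the signature above.
 *
 *	static void
 *	count_adapter(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapter, &n);
 */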
t4_open(struct cdev *dev, int flags, int type, struct thread *td)

t4_close(struct cdev *dev, int flags, int type, struct thread *td)
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);

	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);

	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				rc = EINVAL;
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);

	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */

		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);

	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
	case CHELSIO_T4_CLEAR_STATS: {
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)

		pi = sc->port[port_id];

		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->vlan_extraction = 0;

			for_each_txq(pi, i, txq) {
				txq->vlan_insertion = 0;
				txq->txpkts0_wrs = 0;
				txq->txpkts1_wrs = 0;
				txq->txpkts0_pkts = 0;
				txq->txpkts1_pkts = 0;
				mp_ring_reset_stats(txq->r);

			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs_direct = 0;
				wrq->tx_wrs_copied = 0;

			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs_direct = 0;
			wrq->tx_wrs_copied = 0;

	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
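
/*
 * Example (hypothetical userland usage, not part of the driver): the ioctls
 * above are issued against the adapter's control node, e.g. /dev/t4nex0,
 * typically by a tool such as cxgbetool.  Reading a 4-byte register could
 * look like:
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 *
 * The node name and the tool are assumptions here; the ioctl command and the
 * struct t4_reg fields are the ones handled above.
 */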
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
    const unsigned int *pgsz_order)
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
toe_capability(struct port_info *pi, int enable)
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))

	if (enable) {
		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(pi->flags & PORT_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);

		if (isset(&sc->offload_map, pi->port_id))

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
				"You must kldload t4_tom.ko before trying "
				"to enable TOE on a cxgbe interface.\n");

			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}
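
/*
 * toe_capability() is invoked when the interface's TOE capability is toggled
 * (e.g. via ifconfig); enabling it requires t4_tom.ko, as the message above
 * says, and the iWARP/iSCSI ULDs are activated opportunistically if their
 * modules are loaded.  The exact ifconfig spelling of the capability is not
 * shown in this file and is an assumption.
 */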
/*
 * Add an upper layer driver to the global list.
 */
t4_register_uld(struct uld_info *ui)
	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);

	sx_xunlock(&t4_uld_list_lock);

t4_unregister_uld(struct uld_info *ui)
	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (ui->refcount > 0) {

	SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);

	sx_xunlock(&t4_uld_list_lock);
t4_activate_uld(struct adapter *sc, int id)
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)

	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);

			rc = ui->activate(sc);
			if (rc == 0)
				setbit(&sc->active_ulds, id);

	sx_sunlock(&t4_uld_list_lock);

t4_deactivate_uld(struct adapter *sc, int id)
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				clrbit(&sc->active_ulds, id);

	sx_sunlock(&t4_uld_list_lock);

uld_active(struct adapter *sc, int uld_id)
	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
tweak_tunables(void)
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1) {
#ifdef RSS
		t4_ntxq10g = rss_getnumbuckets();
#else
		t4_ntxq10g = min(nc, NTXQ_10G);
#endif
	}

	if (t4_ntxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_ntxq1g = rss_getnumbuckets();
#else
		t4_ntxq1g = min(nc, NTXQ_1G);
#endif
	}

	if (t4_nrxq10g < 1) {
#ifdef RSS
		t4_nrxq10g = rss_getnumbuckets();
#else
		t4_nrxq10g = min(nc, NRXQ_10G);
#endif
	}

	if (t4_nrxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_nrxq1g = rss_getnumbuckets();
#else
		t4_nrxq1g = min(nc, NRXQ_1G);
#endif
	}

	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)

	if (t4_qsize_rxq < 128)

	while (t4_qsize_rxq & 7)

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
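
/*
 * Example (assumed tunable names, shown for illustration only): the defaults
 * above apply only when the corresponding loader tunables are left unset.  An
 * administrator would typically override them in /boot/loader.conf, e.g.:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 *
 * The hw.cxgbe.* names correspond to the t4_* variables tweaked here but are
 * declared elsewhere in the driver, so treat them as assumptions.
 */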
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

mod_event(module_t mod, int cmd, void *arg)
	static int loaded = 0;

		if (loaded++ == 0) {
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
			t4_tracer_modload();

		if (--loaded == 0) {
			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				sx_sunlock(&t4_list_lock);

			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);

			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);

			sx_sunlock(&t4_uld_list_lock);
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
				sx_destroy(&t4_uld_list_lock);
				sx_destroy(&t4_list_lock);

			loaded++;	/* undo earlier decrement */
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
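
/*
 * The t4nex/t5nex bus drivers attach to the PCI device and in turn create the
 * cxgbe/cxl port devices declared above.  In practice the whole driver is
 * usually loaded as a single kernel module (e.g. if_cxgbe.ko via
 * if_cxgbe_load="YES" in loader.conf); the module file name comes from the
 * build glue rather than this file, so treat it as an assumption.
 */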