]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/cxgbe/t4_main.c
MFC r319872, r321063, r321582, r322034, r322425, r322962, r322985,
[FreeBSD/stable/10.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_ddb.h"
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34
35 #include <sys/param.h>
36 #include <sys/conf.h>
37 #include <sys/priv.h>
38 #include <sys/kernel.h>
39 #include <sys/bus.h>
40 #include <sys/systm.h>
41 #include <sys/counter.h>
42 #include <sys/module.h>
43 #include <sys/malloc.h>
44 #include <sys/queue.h>
45 #include <sys/taskqueue.h>
46 #include <sys/pciio.h>
47 #include <dev/pci/pcireg.h>
48 #include <dev/pci/pcivar.h>
49 #include <dev/pci/pci_private.h>
50 #include <sys/firmware.h>
51 #include <sys/sbuf.h>
52 #include <sys/smp.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/sysctl.h>
56 #include <net/ethernet.h>
57 #include <net/if.h>
58 #include <net/if_types.h>
59 #include <net/if_dl.h>
60 #include <net/if_vlan_var.h>
61 #ifdef RSS
62 #include <net/rss_config.h>
63 #endif
64 #if defined(__i386__) || defined(__amd64__)
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67 #endif
68 #ifdef DDB
69 #include <ddb/ddb.h>
70 #include <ddb/db_lex.h>
71 #endif
72
73 #include "common/common.h"
74 #include "common/t4_msg.h"
75 #include "common/t4_regs.h"
76 #include "common/t4_regs_values.h"
77 #include "t4_ioctl.h"
78 #include "t4_l2t.h"
79 #include "t4_mp_ring.h"
80
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus methods for the t4nex nexus device (one instance per T4 adapter). */
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)          /* softc is the whole adapter */
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
/* Not static: also used by the T5 (cxl) and T6 (cc) port drivers below. */
device_method_t cxgbe_methods[] = {
        DEVMETHOD(device_probe,         cxgbe_probe),
        DEVMETHOD(device_attach,        cxgbe_attach),
        DEVMETHOD(device_detach,        cxgbe_detach),
        { 0, 0 }
};
static driver_t cxgbe_driver = {
        "cxgbe",
        cxgbe_methods,
        sizeof(struct port_info)        /* one softc per physical port */
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
/* Shared with the T5 (vcxl) and T6 (vcc) VI drivers below. */
static device_method_t vcxgbe_methods[] = {
        DEVMETHOD(device_probe,         vcxgbe_probe),
        DEVMETHOD(device_attach,        vcxgbe_attach),
        DEVMETHOD(device_detach,        vcxgbe_detach),
        { 0, 0 }
};
static driver_t vcxgbe_driver = {
        "vcxgbe",
        vcxgbe_methods,
        sizeof(struct vi_info)          /* one softc per virtual interface */
};

static d_ioctl_t t4_ioctl;

/* Character device used by userland tools; only ioctl is implemented. */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
138
/* T5 bus driver interface */
static int t5_probe(device_t);
/* T5 has its own probe but reuses the T4 attach/detach paths. */
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};


/* T5 port (cxl) interface; reuses the cxgbe device methods. */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T5 VI (vcxl) interface; reuses the vcxgbe device methods. */
static driver_t vcxl_driver = {
        "vcxl",
        vcxgbe_methods,
        sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
/* T6, like T5, differs from T4 only in its probe routine. */
static device_method_t t6_methods[] = {
        DEVMETHOD(device_probe,         t6_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t6_driver = {
        "t6nex",
        t6_methods,
        sizeof(struct adapter)
};


/* T6 port (cc) interface; reuses the cxgbe device methods. */
static driver_t cc_driver = {
        "cc",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T6 VI (vcc) interface; reuses the vcxgbe device methods. */
static driver_t vcc_driver = {
        "vcc",
        vcxgbe_methods,
        sizeof(struct vi_info)
};
198
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;          /* all adapters known to this driver */
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;     /* registered ULDs (upper layer drivers) */
#endif
219
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (upto n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -NTXQ_10G;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -NRXQ_10G;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -NTXQ_1G;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -NRXQ_1G;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

/* Queue counts for the extra virtual interfaces (vcxgbe/vcxl/vcc). */
#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

/* Whether to reserve txq 0 (see rsrv_noflowq in struct intrs_and_queues). */
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
/* TOE (TCP offload engine) queue counts, per port and per extra VI. */
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -NOFLDTXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -NOFLDRXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -NOFLDTXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -NOFLDRXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif

#ifdef DEV_NETMAP
/* netmap queue counts for the extra VIs. */
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif
296
/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
/* All three types allowed by default; the driver uses the best available. */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
330
/*
 * Configuration file.
 */
#define DEFAULT_CF      "default"
#define FLASH_CF        "flash"
#define UWIRE_CF        "uwire"
#define FPGA_CF         "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);

/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 *  0 to disable FEC.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 * -1 means "let the driver/firmware decide"; 0 disables the capability.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-specific (hence hw.cxl): use write-combining for doorbells if nonzero. */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

/* Number of virtual interfaces per port; extras attach as vcxgbe/vcxl/vcc. */
static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
        FW_VI_FUNC_OFLD,
        FW_VI_FUNC_IWARP,
        FW_VI_FUNC_OPENISCSI,
        FW_VI_FUNC_OPENFCOE,
        FW_VI_FUNC_FOISCSI,
        FW_VI_FUNC_FOFCOE,
};
422
/*
 * Interrupt and queue configuration for the whole adapter, computed by
 * cfg_itype_and_nqueues() from the tunables above and available resources.
 */
struct intrs_and_queues {
        uint16_t intr_type;     /* INTx, MSI, or MSI-X */
        uint16_t nirq;          /* Total # of vectors */
        uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
        uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
        uint16_t ntxq10g;       /* # of NIC txq's for each 10G port */
        uint16_t nrxq10g;       /* # of NIC rxq's for each 10G port */
        uint16_t ntxq1g;        /* # of NIC txq's for each 1G port */
        uint16_t nrxq1g;        /* # of NIC rxq's for each 1G port */
        uint16_t rsrv_noflowq;  /* Flag whether to reserve queue 0 */
        uint16_t nofldtxq10g;   /* # of TOE txq's for each 10G port */
        uint16_t nofldrxq10g;   /* # of TOE rxq's for each 10G port */
        uint16_t nofldtxq1g;    /* # of TOE txq's for each 1G port */
        uint16_t nofldrxq1g;    /* # of TOE rxq's for each 1G port */

        /* The vcxgbe/vcxl interfaces use these and not the ones above. */
        uint16_t ntxq_vi;       /* # of NIC txq's */
        uint16_t nrxq_vi;       /* # of NIC rxq's */
        uint16_t nofldtxq_vi;   /* # of TOE txq's */
        uint16_t nofldrxq_vi;   /* # of TOE rxq's */
        uint16_t nnmtxq_vi;     /* # of netmap txq's */
        uint16_t nnmrxq_vi;     /* # of netmap rxq's */
};
446
/* Software state tracked for each hardware filter (see get/set/del_filter). */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;      /* the filter itself */
};
456
/* Forward declarations of this file's internal helpers. */
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static void init_l1cfg(struct port_info *);
static int apply_l1cfg(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
/* sysctl handlers */
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
#endif
/* Hardware filter management (backs the t4_ioctl filter operations). */
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
/*
 * PCI device IDs recognized by t4_probe/t5_probe/t6_probe, one table per
 * chip generation.  The desc string becomes the device description.
 */
struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401,  "Chelsio T520-CR"},           /* 2 x 10G */
        {0x5402,  "Chelsio T522-CR"},           /* 2 x 10G, 2 X 1G */
        {0x5403,  "Chelsio T540-CR"},           /* 4 x 10G */
        {0x5407,  "Chelsio T520-SO"},           /* 2 x 10G, nomem */
        {0x5409,  "Chelsio T520-BT"},           /* 2 x 10GBaseT */
        {0x540a,  "Chelsio T504-BT"},           /* 4 x 1G */
        {0x540d,  "Chelsio T580-CR"},           /* 2 x 40G */
        {0x540e,  "Chelsio T540-LP-CR"},        /* 4 x 10G */
        {0x5410,  "Chelsio T580-LP-CR"},        /* 2 x 40G */
        {0x5411,  "Chelsio T520-LL-CR"},        /* 2 x 10G */
        {0x5412,  "Chelsio T560-CR"},           /* 1 x 40G, 2 x 10G */
        {0x5414,  "Chelsio T580-LP-SO-CR"},     /* 2 x 40G, nomem */
        {0x5415,  "Chelsio T502-BT"},           /* 2 x 1G */
        {0x5418,  "Chelsio T540-BT"},           /* 4 x 10GBaseT */
        {0x5419,  "Chelsio T540-LP-BT"},        /* 4 x 10GBaseT */
        {0x541a,  "Chelsio T540-SO-BT"},        /* 4 x 10GBaseT, nomem */
        {0x541b,  "Chelsio T540-SO-CR"},        /* 4 x 10G, nomem */

        /* Custom */
        {0x5483, "Custom T540-CR"},
        {0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
        {0xc006, "Chelsio Terminator 6 FPGA"},  /* T6 PE10K6 FPGA (PF0) */
        {0x6400, "Chelsio T6-DBG-25"},          /* 2 x 10/25G, debug */
        {0x6401, "Chelsio T6225-CR"},           /* 2 x 10/25G */
        {0x6402, "Chelsio T6225-SO-CR"},        /* 2 x 10/25G, nomem */
        {0x6403, "Chelsio T6425-CR"},           /* 4 x 10/25G */
        {0x6404, "Chelsio T6425-SO-CR"},        /* 4 x 10/25G, nomem */
        {0x6405, "Chelsio T6225-OCP-SO"},       /* 2 x 10/25G, nomem */
        {0x6406, "Chelsio T62100-OCP-SO"},      /* 2 x 40/50/100G, nomem */
        {0x6407, "Chelsio T62100-LP-CR"},       /* 2 x 40/50/100G */
        {0x6408, "Chelsio T62100-SO-CR"},       /* 2 x 40/50/100G, nomem */
        {0x6409, "Chelsio T6210-BT"},           /* 2 x 10GBASE-T */
        {0x640d, "Chelsio T62100-CR"},          /* 2 x 40/50/100G */
        {0x6410, "Chelsio T6-DBG-100"},         /* 2 x 40/50/100G, debug */
        {0x6411, "Chelsio T6225-LL-CR"},        /* 2 x 10/25G */
        {0x6414, "Chelsio T61100-OCP-SO"},      /* 1 x 40/50/100G, nomem */
        {0x6415, "Chelsio T6201-BT"},           /* 2 x 1000BASE-T */

        /* Custom */
        {0x6480, "Custom T6225-CR"},
        {0x6481, "Custom T62100-CR"},
        {0x6482, "Custom T6225-CR"},
        {0x6483, "Custom T62100-CR"},
        {0x6484, "Custom T64100-CR"},
        {0x6485, "Custom T6240-SO"},
        {0x6486, "Custom T6225-SO-CR"},
        {0x6487, "Custom T6225-CR"},
};
640
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
/* The per-cluster metadata must fit in the CL_METADATA_SIZE reserved for it. */
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
650
651 static int
652 t4_probe(device_t dev)
653 {
654         int i;
655         uint16_t v = pci_get_vendor(dev);
656         uint16_t d = pci_get_device(dev);
657         uint8_t f = pci_get_function(dev);
658
659         if (v != PCI_VENDOR_ID_CHELSIO)
660                 return (ENXIO);
661
662         /* Attach only to PF0 of the FPGA */
663         if (d == 0xa000 && f != 0)
664                 return (ENXIO);
665
666         for (i = 0; i < nitems(t4_pciids); i++) {
667                 if (d == t4_pciids[i].device) {
668                         device_set_desc(dev, t4_pciids[i].desc);
669                         return (BUS_PROBE_DEFAULT);
670                 }
671         }
672
673         return (ENXIO);
674 }
675
676 static int
677 t5_probe(device_t dev)
678 {
679         int i;
680         uint16_t v = pci_get_vendor(dev);
681         uint16_t d = pci_get_device(dev);
682         uint8_t f = pci_get_function(dev);
683
684         if (v != PCI_VENDOR_ID_CHELSIO)
685                 return (ENXIO);
686
687         /* Attach only to PF0 of the FPGA */
688         if (d == 0xb000 && f != 0)
689                 return (ENXIO);
690
691         for (i = 0; i < nitems(t5_pciids); i++) {
692                 if (d == t5_pciids[i].device) {
693                         device_set_desc(dev, t5_pciids[i].desc);
694                         return (BUS_PROBE_DEFAULT);
695                 }
696         }
697
698         return (ENXIO);
699 }
700
701 static int
702 t6_probe(device_t dev)
703 {
704         int i;
705         uint16_t v = pci_get_vendor(dev);
706         uint16_t d = pci_get_device(dev);
707
708         if (v != PCI_VENDOR_ID_CHELSIO)
709                 return (ENXIO);
710
711         for (i = 0; i < nitems(t6_pciids); i++) {
712                 if (d == t6_pciids[i].device) {
713                         device_set_desc(dev, t6_pciids[i].desc);
714                         return (BUS_PROBE_DEFAULT);
715                 }
716         }
717
718         return (ENXIO);
719 }
720
721 static void
722 t5_attribute_workaround(device_t dev)
723 {
724         device_t root_port;
725         uint32_t v;
726
727         /*
728          * The T5 chips do not properly echo the No Snoop and Relaxed
729          * Ordering attributes when replying to a TLP from a Root
730          * Port.  As a workaround, find the parent Root Port and
731          * disable No Snoop and Relaxed Ordering.  Note that this
732          * affects all devices under this root port.
733          */
734         root_port = pci_find_pcie_root_port(dev);
735         if (root_port == NULL) {
736                 device_printf(dev, "Unable to find parent root port\n");
737                 return;
738         }
739
740         v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
741             PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
742         if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
743             0)
744                 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
745                     device_get_nameunit(root_port));
746 }
747
/*
 * Device name sets for each chip generation, indexed by
 * chip_id() - CHELSIO_T4 (see t4_init_devnames()).
 */
static const struct devnames devnames[] = {
        {
                .nexus_name = "t4nex",
                .ifnet_name = "cxgbe",
                .vi_ifnet_name = "vcxgbe",
                .pf03_drv_name = "t4iov",
                .vf_nexus_name = "t4vf",
                .vf_ifnet_name = "cxgbev"
        }, {
                .nexus_name = "t5nex",
                .ifnet_name = "cxl",
                .vi_ifnet_name = "vcxl",
                .pf03_drv_name = "t5iov",
                .vf_nexus_name = "t5vf",
                .vf_ifnet_name = "cxlv"
        }, {
                .nexus_name = "t6nex",
                .ifnet_name = "cc",
                .vi_ifnet_name = "vcc",
                .pf03_drv_name = "t6iov",
                .vf_nexus_name = "t6vf",
                .vf_ifnet_name = "ccv"
        }
};
772
773 void
774 t4_init_devnames(struct adapter *sc)
775 {
776         int id;
777
778         id = chip_id(sc);
779         if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
780                 sc->names = &devnames[id - CHELSIO_T4];
781         else {
782                 device_printf(sc->dev, "chip id %d is not supported.\n", id);
783                 sc->names = NULL;
784         }
785 }
786
787 static int
788 t4_attach(device_t dev)
789 {
790         struct adapter *sc;
791         int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
792         struct make_dev_args mda;
793         struct intrs_and_queues iaq;
794         struct sge *s;
795         uint8_t *buf;
796 #ifdef TCP_OFFLOAD
797         int ofld_rqidx, ofld_tqidx;
798 #endif
799 #ifdef DEV_NETMAP
800         int nm_rqidx, nm_tqidx;
801 #endif
802         int num_vis;
803
804         sc = device_get_softc(dev);
805         sc->dev = dev;
806         TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
807
808         if ((pci_get_device(dev) & 0xff00) == 0x5400)
809                 t5_attribute_workaround(dev);
810         pci_enable_busmaster(dev);
811         if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
812                 uint32_t v;
813
814                 pci_set_max_read_req(dev, 4096);
815                 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
816                 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
817                 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
818
819                 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
820         }
821
822         sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
823         sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
824         sc->traceq = -1;
825         mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
826         snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
827             device_get_nameunit(dev));
828
829         snprintf(sc->lockname, sizeof(sc->lockname), "%s",
830             device_get_nameunit(dev));
831         mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
832         t4_add_adapter(sc);
833
834         mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
835         TAILQ_INIT(&sc->sfl);
836         callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
837
838         mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
839
840         rc = t4_map_bars_0_and_4(sc);
841         if (rc != 0)
842                 goto done; /* error message displayed already */
843
844         memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
845
846         /* Prepare the adapter for operation. */
847         buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
848         rc = -t4_prep_adapter(sc, buf);
849         free(buf, M_CXGBE);
850         if (rc != 0) {
851                 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
852                 goto done;
853         }
854
855         /*
856          * This is the real PF# to which we're attaching.  Works from within PCI
857          * passthrough environments too, where pci_get_function() could return a
858          * different PF# depending on the passthrough configuration.  We need to
859          * use the real PF# in all our communication with the firmware.
860          */
861         j = t4_read_reg(sc, A_PL_WHOAMI);
862         sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
863         sc->mbox = sc->pf;
864
865         t4_init_devnames(sc);
866         if (sc->names == NULL) {
867                 rc = ENOTSUP;
868                 goto done; /* error message displayed already */
869         }
870
871         /*
872          * Do this really early, with the memory windows set up even before the
873          * character device.  The userland tool's register i/o and mem read
874          * will work even in "recovery mode".
875          */
876         setup_memwin(sc);
877         if (t4_init_devlog_params(sc, 0) == 0)
878                 fixup_devlog_params(sc);
879         make_dev_args_init(&mda);
880         mda.mda_devsw = &t4_cdevsw;
881         mda.mda_uid = UID_ROOT;
882         mda.mda_gid = GID_WHEEL;
883         mda.mda_mode = 0600;
884         mda.mda_si_drv1 = sc;
885         rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
886         if (rc != 0)
887                 device_printf(dev, "failed to create nexus char device: %d.\n",
888                     rc);
889
890         /* Go no further if recovery mode has been requested. */
891         if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
892                 device_printf(dev, "recovery mode.\n");
893                 goto done;
894         }
895
896 #if defined(__i386__)
897         if ((cpu_feature & CPUID_CX8) == 0) {
898                 device_printf(dev, "64 bit atomics not available.\n");
899                 rc = ENOTSUP;
900                 goto done;
901         }
902 #endif
903
904         /* Prepare the firmware for operation */
905         rc = prep_firmware(sc);
906         if (rc != 0)
907                 goto done; /* error message displayed already */
908
909         rc = get_params__post_init(sc);
910         if (rc != 0)
911                 goto done; /* error message displayed already */
912
913         rc = set_params__post_init(sc);
914         if (rc != 0)
915                 goto done; /* error message displayed already */
916
917         rc = t4_map_bar_2(sc);
918         if (rc != 0)
919                 goto done; /* error message displayed already */
920
921         rc = t4_create_dma_tag(sc);
922         if (rc != 0)
923                 goto done; /* error message displayed already */
924
925         /*
926          * Number of VIs to create per-port.  The first VI is the "main" regular
927          * VI for the port.  The rest are additional virtual interfaces on the
928          * same physical port.  Note that the main VI does not have native
929          * netmap support but the extra VIs do.
930          *
931          * Limit the number of VIs per port to the number of available
932          * MAC addresses per port.
933          */
934         if (t4_num_vis >= 1)
935                 num_vis = t4_num_vis;
936         else
937                 num_vis = 1;
938         if (num_vis > nitems(vi_mac_funcs)) {
939                 num_vis = nitems(vi_mac_funcs);
940                 device_printf(dev, "Number of VIs limited to %d\n", num_vis);
941         }
942
943         /*
944          * First pass over all the ports - allocate VIs and initialize some
945          * basic parameters like mac address, port type, etc.  We also figure
946          * out whether a port is 10G or 1G and use that information when
947          * calculating how many interrupts to attempt to allocate.
948          */
949         n10g = n1g = 0;
950         for_each_port(sc, i) {
951                 struct port_info *pi;
952
953                 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
954                 sc->port[i] = pi;
955
956                 /* These must be set before t4_port_init */
957                 pi->adapter = sc;
958                 pi->port_id = i;
959                 /*
960                  * XXX: vi[0] is special so we can't delay this allocation until
961                  * pi->nvi's final value is known.
962                  */
963                 pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
964                     M_ZERO | M_WAITOK);
965
966                 /*
967                  * Allocate the "main" VI and initialize parameters
968                  * like mac addr.
969                  */
970                 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
971                 if (rc != 0) {
972                         device_printf(dev, "unable to initialize port %d: %d\n",
973                             i, rc);
974                         free(pi->vi, M_CXGBE);
975                         free(pi, M_CXGBE);
976                         sc->port[i] = NULL;
977                         goto done;
978                 }
979
980                 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
981                     device_get_nameunit(dev), i);
982                 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
983                 sc->chan_map[pi->tx_chan] = i;
984
985                 if (port_top_speed(pi) >= 10) {
986                         n10g++;
987                 } else {
988                         n1g++;
989                 }
990
991                 /* All VIs on this port share this media. */
992                 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
993                     cxgbe_media_status);
994
995                 pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
996                 if (pi->dev == NULL) {
997                         device_printf(dev,
998                             "failed to add device for port %d.\n", i);
999                         rc = ENXIO;
1000                         goto done;
1001                 }
1002                 pi->vi[0].dev = pi->dev;
1003                 device_set_softc(pi->dev, pi);
1004         }
1005
1006         /*
1007          * Interrupt type, # of interrupts, # of rx/tx queues, etc.
1008          */
1009         rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
1010         if (rc != 0)
1011                 goto done; /* error message displayed already */
1012         if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
1013                 num_vis = 1;
1014
1015         sc->intr_type = iaq.intr_type;
1016         sc->intr_count = iaq.nirq;
1017
1018         s = &sc->sge;
1019         s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
1020         s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
1021         if (num_vis > 1) {
1022                 s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
1023                 s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
1024         }
1025         s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
1026         s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
1027         s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
1028 #ifdef TCP_OFFLOAD
1029         if (is_offload(sc)) {
1030                 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
1031                 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
1032                 if (num_vis > 1) {
1033                         s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
1034                             iaq.nofldrxq_vi;
1035                         s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
1036                             iaq.nofldtxq_vi;
1037                 }
1038                 s->neq += s->nofldtxq + s->nofldrxq;
1039                 s->niq += s->nofldrxq;
1040
1041                 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
1042                     M_CXGBE, M_ZERO | M_WAITOK);
1043                 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
1044                     M_CXGBE, M_ZERO | M_WAITOK);
1045         }
1046 #endif
1047 #ifdef DEV_NETMAP
1048         if (num_vis > 1) {
1049                 s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
1050                 s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
1051         }
1052         s->neq += s->nnmtxq + s->nnmrxq;
1053         s->niq += s->nnmrxq;
1054
1055         s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
1056             M_CXGBE, M_ZERO | M_WAITOK);
1057         s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
1058             M_CXGBE, M_ZERO | M_WAITOK);
1059 #endif
1060
1061         s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
1062             M_ZERO | M_WAITOK);
1063         s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
1064             M_ZERO | M_WAITOK);
1065         s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
1066             M_ZERO | M_WAITOK);
1067         s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
1068             M_ZERO | M_WAITOK);
1069         s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
1070             M_ZERO | M_WAITOK);
1071
1072         sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
1073             M_ZERO | M_WAITOK);
1074
1075         t4_init_l2t(sc, M_WAITOK);
1076         t4_init_tx_sched(sc);
1077
1078         /*
1079          * Second pass over the ports.  This time we know the number of rx and
1080          * tx queues that each port should get.
1081          */
1082         rqidx = tqidx = 0;
1083 #ifdef TCP_OFFLOAD
1084         ofld_rqidx = ofld_tqidx = 0;
1085 #endif
1086 #ifdef DEV_NETMAP
1087         nm_rqidx = nm_tqidx = 0;
1088 #endif
1089         for_each_port(sc, i) {
1090                 struct port_info *pi = sc->port[i];
1091                 struct vi_info *vi;
1092
1093                 if (pi == NULL)
1094                         continue;
1095
1096                 pi->nvi = num_vis;
1097                 for_each_vi(pi, j, vi) {
1098                         vi->pi = pi;
1099                         vi->qsize_rxq = t4_qsize_rxq;
1100                         vi->qsize_txq = t4_qsize_txq;
1101
1102                         vi->first_rxq = rqidx;
1103                         vi->first_txq = tqidx;
1104                         if (port_top_speed(pi) >= 10) {
1105                                 vi->tmr_idx = t4_tmr_idx_10g;
1106                                 vi->pktc_idx = t4_pktc_idx_10g;
1107                                 vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
1108                                 vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
1109                                 vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
1110                         } else {
1111                                 vi->tmr_idx = t4_tmr_idx_1g;
1112                                 vi->pktc_idx = t4_pktc_idx_1g;
1113                                 vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
1114                                 vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
1115                                 vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
1116                         }
1117                         rqidx += vi->nrxq;
1118                         tqidx += vi->ntxq;
1119
1120                         if (j == 0 && vi->ntxq > 1)
1121                                 vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
1122                         else
1123                                 vi->rsrv_noflowq = 0;
1124
1125 #ifdef TCP_OFFLOAD
1126                         vi->first_ofld_rxq = ofld_rqidx;
1127                         vi->first_ofld_txq = ofld_tqidx;
1128                         if (port_top_speed(pi) >= 10) {
1129                                 vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
1130                                 vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
1131                                     iaq.nofldrxq_vi;
1132                                 vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
1133                                     iaq.nofldtxq_vi;
1134                         } else {
1135                                 vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
1136                                 vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
1137                                     iaq.nofldrxq_vi;
1138                                 vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
1139                                     iaq.nofldtxq_vi;
1140                         }
1141                         ofld_rqidx += vi->nofldrxq;
1142                         ofld_tqidx += vi->nofldtxq;
1143 #endif
1144 #ifdef DEV_NETMAP
1145                         if (j > 0) {
1146                                 vi->first_nm_rxq = nm_rqidx;
1147                                 vi->first_nm_txq = nm_tqidx;
1148                                 vi->nnmrxq = iaq.nnmrxq_vi;
1149                                 vi->nnmtxq = iaq.nnmtxq_vi;
1150                                 nm_rqidx += vi->nnmrxq;
1151                                 nm_tqidx += vi->nnmtxq;
1152                         }
1153 #endif
1154                 }
1155         }
1156
1157         rc = t4_setup_intr_handlers(sc);
1158         if (rc != 0) {
1159                 device_printf(dev,
1160                     "failed to setup interrupt handlers: %d\n", rc);
1161                 goto done;
1162         }
1163
1164         rc = bus_generic_probe(dev);
1165         if (rc != 0) {
1166                 device_printf(dev, "failed to probe child drivers: %d\n", rc);
1167                 goto done;
1168         }
1169
1170         rc = bus_generic_attach(dev);
1171         if (rc != 0) {
1172                 device_printf(dev,
1173                     "failed to attach all child ports: %d\n", rc);
1174                 goto done;
1175         }
1176
1177         device_printf(dev,
1178             "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
1179             sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
1180             sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1181             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
1182             sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
1183
1184         t4_set_desc(sc);
1185
1186 done:
1187         if (rc != 0 && sc->cdev) {
1188                 /* cdev was created and so cxgbetool works; recover that way. */
1189                 device_printf(dev,
1190                     "error during attach, adapter is now in recovery mode.\n");
1191                 rc = 0;
1192         }
1193
1194         if (rc != 0)
1195                 t4_detach_common(dev);
1196         else
1197                 t4_sysctls(sc);
1198
1199         return (rc);
1200 }
1201
1202 /*
1203  * Idempotent
1204  */
1205 static int
1206 t4_detach(device_t dev)
1207 {
1208         struct adapter *sc;
1209
1210         sc = device_get_softc(dev);
1211
1212         return (t4_detach_common(dev));
1213 }
1214
/*
 * Common teardown for the PF and VF drivers.  Releases resources in the
 * reverse order of attach: interrupts, the control character device, child
 * (port) devices and their per-port state, queue/map memory, bus resources,
 * locks, and finally zeroes the softc.  Every resource is checked before it
 * is released, so this is safe to call on a partially attached adapter,
 * and zeroing the softc at the end makes a repeat call a no-op
 * (idempotent).
 */
int
t4_detach_common(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Quiesce the hardware first; VFs cannot touch these registers. */
        if (sc->flags & FULL_INIT_DONE) {
                if (!(sc->flags & IS_VF))
                        t4_intr_disable(sc);
        }

        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /* Detach the port children before freeing anything they use. */
        if (device_is_attached(dev)) {
                rc = bus_generic_detach(dev);
                if (rc) {
                        device_printf(dev,
                            "failed to detach child devices: %d\n", rc);
                        return (rc);
                }
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        /* Tx scheduler state exists only when the PF talked to firmware. */
        if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
                t4_free_tx_sched(sc);

        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                }
        }

        device_delete_children(dev);

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Tell the firmware we're going away (PF with a working FW only). */
        if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

        /* free(NULL) is a no-op, so unallocated queue arrays are fine. */
#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
        free(sc->sge.nm_rxq, M_CXGBE);
        free(sc->sge.nm_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                /* sc_lock init and t4_add_adapter happen together in attach. */
                sx_xlock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                sx_xunlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        /* Drain the starving-freelist callout before destroying its mutex. */
        callout_drain(&sc->sfl_callout);
        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);
        if (mtx_initialized(&sc->ifp_lock))
                mtx_destroy(&sc->ifp_lock);
        if (mtx_initialized(&sc->reg_lock))
                mtx_destroy(&sc->reg_lock);

        for (i = 0; i < NUM_MEMWIN; i++) {
                struct memwin *mw = &sc->memwin[i];

                if (rw_initialized(&mw->mw_lock))
                        rw_destroy(&mw->mw_lock);
        }

        /* Wipe the softc so a subsequent call sees no stale state. */
        bzero(sc, sizeof(*sc));

        return (0);
}
1332
1333 static int
1334 cxgbe_probe(device_t dev)
1335 {
1336         char buf[128];
1337         struct port_info *pi = device_get_softc(dev);
1338
1339         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1340         device_set_desc_copy(dev, buf);
1341
1342         return (BUS_PROBE_DEFAULT);
1343 }
1344
/*
 * ifnet capabilities supported by every cxgbe/cxl/cc interface, and the
 * subset enabled by default (currently all of T4_CAP).
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
1349
/*
 * Set up the ifnet for one virtual interface: allocate it, install the
 * driver entry points, advertise capabilities (TOE/netmap only when the VI
 * actually has the corresponding queues), attach it to the network stack
 * (and to netmap where applicable), log the queue configuration, and hook
 * up the VI's sysctl nodes.  Returns 0 on success or ENOMEM if the ifnet
 * cannot be allocated.
 */
static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
        struct ifnet *ifp;
        struct sbuf *sb;

        vi->xact_addr_filt = -1;
        callout_init(&vi->tick, 1);

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        vi->ifp = ifp;
        ifp->if_softc = vi;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        if (vi->nofldrxq != 0)
                ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
        if (vi->nnmrxq != 0)
                ifp->if_capabilities |= IFCAP_NETMAP;
#endif
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* TSO limits: payload capped at 64KB minus the Ethernet+VLAN header. */
        ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
        ifp->if_hw_tsomaxsegsize = 65536;

        /* Get notified of VLANs configured on top of this ifnet. */
        vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
        if (ifp->if_capabilities & IFCAP_NETMAP)
                cxgbe_nm_attach(vi);
#endif
        /* Announce the queue configuration for this VI. */
        sb = sbuf_new_auto();
        sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
        if (ifp->if_capabilities & IFCAP_TOE)
                sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
                    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
        if (ifp->if_capabilities & IFCAP_NETMAP)
                sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
                    vi->nnmtxq, vi->nnmrxq);
#endif
        sbuf_finish(sb);
        device_printf(dev, "%s\n", sbuf_data(sb));
        sbuf_delete(sb);

        vi_sysctls(vi);

        return (0);
}
1421
/*
 * Attach routine for a port device.  Attaches the main VI (vi[0]) directly
 * via cxgbe_vi_attach() and adds a child device for every extra VI, then
 * attaches those children with bus_generic_attach().  Failure to add an
 * extra VI is logged and skipped rather than treated as fatal; only a
 * failure on the main VI aborts the attach.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct vi_info *vi;
        int i, rc;

        callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

        rc = cxgbe_vi_attach(dev, &pi->vi[0]);
        if (rc)
                return (rc);

        for_each_vi(pi, i, vi) {
                /* vi[0] was attached above; only the extras get children. */
                if (i == 0)
                        continue;
                vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
                if (vi->dev == NULL) {
                        device_printf(dev, "failed to add VI %d\n", i);
                        continue;
                }
                device_set_softc(vi->dev, vi);
        }

        cxgbe_sysctls(pi);

        bus_generic_attach(dev);

        return (0);
}
1453
/*
 * Undo cxgbe_vi_attach() for one VI: detach the ifnet from the network
 * stack (and netmap), deregister the VLAN event handler, bring the VI
 * down, drain its tick callout, release its queue state, and free the
 * ifnet.
 */
static void
cxgbe_vi_detach(struct vi_info *vi)
{
        struct ifnet *ifp = vi->ifp;

        ether_ifdetach(ifp);

        if (vi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

        /* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
        if (ifp->if_capabilities & IFCAP_NETMAP)
                cxgbe_nm_detach(vi);
#endif
        cxgbe_uninit_synchronized(vi);
        callout_drain(&vi->tick);
        vi_full_uninit(vi);

        if_free(vi->ifp);
        vi->ifp = NULL;
}
1476
/*
 * Detach routine for a port device.  The extra-VI children are detached
 * and deleted first, then the main VI is doomed and torn down.
 *
 * NOTE(review): doom_vi() appears to begin the synchronized op that the
 * end_synchronized_op() at the bottom releases — confirm against
 * doom_vi()'s definition before rearranging anything here.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        int rc;

        /* Detach the extra VIs first. */
        rc = bus_generic_detach(dev);
        if (rc)
                return (rc);
        device_delete_children(dev);

        doom_vi(sc, &pi->vi[0]);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        cxgbe_vi_detach(&pi->vi[0]);
        callout_drain(&pi->tick);
        ifmedia_removeall(&pi->media);

        end_synchronized_op(sc, 0);

        return (0);
}
1505
1506 static void
1507 cxgbe_init(void *arg)
1508 {
1509         struct vi_info *vi = arg;
1510         struct adapter *sc = vi->pi->adapter;
1511
1512         if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
1513                 return;
1514         cxgbe_init_synchronized(vi);
1515         end_synchronized_op(sc, 0);
1516 }
1517
1518 static int
1519 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1520 {
1521         int rc = 0, mtu, flags, can_sleep;
1522         struct vi_info *vi = ifp->if_softc;
1523         struct port_info *pi = vi->pi;
1524         struct adapter *sc = pi->adapter;
1525         struct ifreq *ifr = (struct ifreq *)data;
1526         uint32_t mask;
1527
1528         switch (cmd) {
1529         case SIOCSIFMTU:
1530                 mtu = ifr->ifr_mtu;
1531                 if (mtu < ETHERMIN || mtu > MAX_MTU)
1532                         return (EINVAL);
1533
1534                 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
1535                 if (rc)
1536                         return (rc);
1537                 ifp->if_mtu = mtu;
1538                 if (vi->flags & VI_INIT_DONE) {
1539                         t4_update_fl_bufsize(ifp);
1540                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1541                                 rc = update_mac_settings(ifp, XGMAC_MTU);
1542                 }
1543                 end_synchronized_op(sc, 0);
1544                 break;
1545
1546         case SIOCSIFFLAGS:
1547                 can_sleep = 0;
1548 redo_sifflags:
1549                 rc = begin_synchronized_op(sc, vi,
1550                     can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
1551                 if (rc)
1552                         return (rc);
1553
1554                 if (ifp->if_flags & IFF_UP) {
1555                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1556                                 flags = vi->if_flags;
1557                                 if ((ifp->if_flags ^ flags) &
1558                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1559                                         if (can_sleep == 1) {
1560                                                 end_synchronized_op(sc, 0);
1561                                                 can_sleep = 0;
1562                                                 goto redo_sifflags;
1563                                         }
1564                                         rc = update_mac_settings(ifp,
1565                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1566                                 }
1567                         } else {
1568                                 if (can_sleep == 0) {
1569                                         end_synchronized_op(sc, LOCK_HELD);
1570                                         can_sleep = 1;
1571                                         goto redo_sifflags;
1572                                 }
1573                                 rc = cxgbe_init_synchronized(vi);
1574                         }
1575                         vi->if_flags = ifp->if_flags;
1576                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1577                         if (can_sleep == 0) {
1578                                 end_synchronized_op(sc, LOCK_HELD);
1579                                 can_sleep = 1;
1580                                 goto redo_sifflags;
1581                         }
1582                         rc = cxgbe_uninit_synchronized(vi);
1583                 }
1584                 end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
1585                 break;
1586
1587         case SIOCADDMULTI:
1588         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1589                 rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
1590                 if (rc)
1591                         return (rc);
1592                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1593                         rc = update_mac_settings(ifp, XGMAC_MCADDRS);
1594                 end_synchronized_op(sc, LOCK_HELD);
1595                 break;
1596
1597         case SIOCSIFCAP:
1598                 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
1599                 if (rc)
1600                         return (rc);
1601
1602                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1603                 if (mask & IFCAP_TXCSUM) {
1604                         ifp->if_capenable ^= IFCAP_TXCSUM;
1605                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1606
1607                         if (IFCAP_TSO4 & ifp->if_capenable &&
1608                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1609                                 ifp->if_capenable &= ~IFCAP_TSO4;
1610                                 if_printf(ifp,
1611                                     "tso4 disabled due to -txcsum.\n");
1612                         }
1613                 }
1614                 if (mask & IFCAP_TXCSUM_IPV6) {
1615                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1616                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1617
1618                         if (IFCAP_TSO6 & ifp->if_capenable &&
1619                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1620                                 ifp->if_capenable &= ~IFCAP_TSO6;
1621                                 if_printf(ifp,
1622                                     "tso6 disabled due to -txcsum6.\n");
1623                         }
1624                 }
1625                 if (mask & IFCAP_RXCSUM)
1626                         ifp->if_capenable ^= IFCAP_RXCSUM;
1627                 if (mask & IFCAP_RXCSUM_IPV6)
1628                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1629
1630                 /*
1631                  * Note that we leave CSUM_TSO alone (it is always set).  The
1632                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1633                  * sending a TSO request our way, so it's sufficient to toggle
1634                  * IFCAP_TSOx only.
1635                  */
1636                 if (mask & IFCAP_TSO4) {
1637                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1638                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1639                                 if_printf(ifp, "enable txcsum first.\n");
1640                                 rc = EAGAIN;
1641                                 goto fail;
1642                         }
1643                         ifp->if_capenable ^= IFCAP_TSO4;
1644                 }
1645                 if (mask & IFCAP_TSO6) {
1646                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1647                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1648                                 if_printf(ifp, "enable txcsum6 first.\n");
1649                                 rc = EAGAIN;
1650                                 goto fail;
1651                         }
1652                         ifp->if_capenable ^= IFCAP_TSO6;
1653                 }
1654                 if (mask & IFCAP_LRO) {
1655 #if defined(INET) || defined(INET6)
1656                         int i;
1657                         struct sge_rxq *rxq;
1658
1659                         ifp->if_capenable ^= IFCAP_LRO;
1660                         for_each_rxq(vi, i, rxq) {
1661                                 if (ifp->if_capenable & IFCAP_LRO)
1662                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1663                                 else
1664                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1665                         }
1666 #endif
1667                 }
1668 #ifdef TCP_OFFLOAD
1669                 if (mask & IFCAP_TOE) {
1670                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1671
1672                         rc = toe_capability(vi, enable);
1673                         if (rc != 0)
1674                                 goto fail;
1675
1676                         ifp->if_capenable ^= mask;
1677                 }
1678 #endif
1679                 if (mask & IFCAP_VLAN_HWTAGGING) {
1680                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1681                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1682                                 rc = update_mac_settings(ifp, XGMAC_VLANEX);
1683                 }
1684                 if (mask & IFCAP_VLAN_MTU) {
1685                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1686
1687                         /* Need to find out how to disable auto-mtu-inflation */
1688                 }
1689                 if (mask & IFCAP_VLAN_HWTSO)
1690                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1691                 if (mask & IFCAP_VLAN_HWCSUM)
1692                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1693
1694 #ifdef VLAN_CAPABILITIES
1695                 VLAN_CAPABILITIES(ifp);
1696 #endif
1697 fail:
1698                 end_synchronized_op(sc, 0);
1699                 break;
1700
1701         case SIOCSIFMEDIA:
1702         case SIOCGIFMEDIA:
1703         case SIOCGIFXMEDIA:
1704                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1705                 break;
1706
1707         case SIOCGI2C: {
1708                 struct ifi2creq i2c;
1709
1710                 rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
1711                 if (rc != 0)
1712                         break;
1713                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
1714                         rc = EPERM;
1715                         break;
1716                 }
1717                 if (i2c.len > sizeof(i2c.data)) {
1718                         rc = EINVAL;
1719                         break;
1720                 }
1721                 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
1722                 if (rc)
1723                         return (rc);
1724                 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
1725                     i2c.offset, i2c.len, &i2c.data[0]);
1726                 end_synchronized_op(sc, 0);
1727                 if (rc == 0)
1728                         rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
1729                 break;
1730         }
1731
1732         default:
1733                 rc = ether_ioctl(ifp, cmd, data);
1734         }
1735
1736         return (rc);
1737 }
1738
1739 static int
1740 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1741 {
1742         struct vi_info *vi = ifp->if_softc;
1743         struct port_info *pi = vi->pi;
1744         struct adapter *sc = pi->adapter;
1745         struct sge_txq *txq;
1746         void *items[1];
1747         int rc;
1748
1749         M_ASSERTPKTHDR(m);
1750         MPASS(m->m_nextpkt == NULL);    /* not quite ready for this yet */
1751
1752         if (__predict_false(pi->link_cfg.link_ok == 0)) {
1753                 m_freem(m);
1754                 return (ENETDOWN);
1755         }
1756
1757         rc = parse_pkt(sc, &m);
1758         if (__predict_false(rc != 0)) {
1759                 MPASS(m == NULL);                       /* was freed already */
1760                 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
1761                 return (rc);
1762         }
1763
1764         /* Select a txq. */
1765         txq = &sc->sge.txq[vi->first_txq];
1766         if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1767                 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
1768                     vi->rsrv_noflowq);
1769
1770         items[0] = m;
1771         rc = mp_ring_enqueue(txq->r, items, 1, 4096);
1772         if (__predict_false(rc != 0))
1773                 m_freem(m);
1774
1775         return (rc);
1776 }
1777
1778 static void
1779 cxgbe_qflush(struct ifnet *ifp)
1780 {
1781         struct vi_info *vi = ifp->if_softc;
1782         struct sge_txq *txq;
1783         int i;
1784
1785         /* queues do not exist if !VI_INIT_DONE. */
1786         if (vi->flags & VI_INIT_DONE) {
1787                 for_each_txq(vi, i, txq) {
1788                         TXQ_LOCK(txq);
1789                         txq->eq.flags |= EQ_QFLUSH;
1790                         TXQ_UNLOCK(txq);
1791                         while (!mp_ring_is_idle(txq->r)) {
1792                                 mp_ring_check_drainage(txq->r, 0);
1793                                 pause("qflush", 1);
1794                         }
1795                         TXQ_LOCK(txq);
1796                         txq->eq.flags &= ~EQ_QFLUSH;
1797                         TXQ_UNLOCK(txq);
1798                 }
1799         }
1800         if_qflush(ifp);
1801 }
1802
1803 /*
1804  * The kernel picks a media from the list we had provided so we do not have to
1805  * validate the request.
1806  */
1807 static int
1808 cxgbe_media_change(struct ifnet *ifp)
1809 {
1810         struct vi_info *vi = ifp->if_softc;
1811         struct port_info *pi = vi->pi;
1812         struct ifmedia *ifm = &pi->media;
1813         struct link_config *lc = &pi->link_cfg;
1814         struct adapter *sc = pi->adapter;
1815         int rc;
1816
1817         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec");
1818         if (rc != 0)
1819                 return (rc);
1820         PORT_LOCK(pi);
1821         if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1822                 MPASS(lc->supported & FW_PORT_CAP_ANEG);
1823                 lc->requested_aneg = AUTONEG_ENABLE;
1824         } else {
1825                 lc->requested_aneg = AUTONEG_DISABLE;
1826                 lc->requested_speed =
1827                     ifmedia_baudrate(ifm->ifm_media) / 1000000;
1828                 lc->requested_fc = 0;
1829                 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
1830                         lc->requested_fc |= PAUSE_RX;
1831                 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
1832                         lc->requested_fc |= PAUSE_TX;
1833         }
1834         if (pi->up_vis > 0)
1835                 rc = apply_l1cfg(pi);
1836         PORT_UNLOCK(pi);
1837         end_synchronized_op(sc, 0);
1838         return (rc);
1839 }
1840
1841 /*
1842  * Mbps to FW_PORT_CAP_SPEED_* bit.
1843  */
1844 static uint16_t
1845 speed_to_fwspeed(int speed)
1846 {
1847
1848         switch (speed) {
1849         case 100000:
1850                 return (FW_PORT_CAP_SPEED_100G);
1851         case 40000:
1852                 return (FW_PORT_CAP_SPEED_40G);
1853         case 25000:
1854                 return (FW_PORT_CAP_SPEED_25G);
1855         case 10000:
1856                 return (FW_PORT_CAP_SPEED_10G);
1857         case 1000:
1858                 return (FW_PORT_CAP_SPEED_1G);
1859         case 100:
1860                 return (FW_PORT_CAP_SPEED_100M);
1861         }
1862
1863         return (0);
1864 }
1865
/*
 * Base media word (without ETHER, pause, link active, etc.) for the port at the
 * given speed.  Returns IFM_NONE for a port/module with no media and
 * IFM_UNKNOWN for any port-type/module/speed combination not handled below.
 */
static int
port_mword(struct port_info *pi, uint16_t speed)
{

	/* Caller must pass exactly one FW_PORT_CAP_SPEED_* bit. */
	MPASS(speed & M_FW_PORT_CAP_SPEED);
	MPASS(powerof2(speed));

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		/* BaseT */
		switch (speed) {
		case FW_PORT_CAP_SPEED_100M:
			return (IFM_100_T);
		case FW_PORT_CAP_SPEED_1G:
			return (IFM_1000_T);
		case FW_PORT_CAP_SPEED_10G:
			return (IFM_10G_T);
		}
		break;
	case FW_PORT_TYPE_KX4:
		if (speed == FW_PORT_CAP_SPEED_10G)
			return (IFM_10G_KX4);
		break;
	case FW_PORT_TYPE_CX4:
		if (speed == FW_PORT_CAP_SPEED_10G)
			return (IFM_10G_CX4);
		break;
	case FW_PORT_TYPE_KX:
		if (speed == FW_PORT_CAP_SPEED_1G)
			return (IFM_1000_KX);
		break;
	case FW_PORT_TYPE_KR:
	case FW_PORT_TYPE_BP_AP:
	case FW_PORT_TYPE_BP4_AP:
	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_KR_SFP28:
	case FW_PORT_TYPE_KR_XLAUI:
		/* Backplane (KR-style) port types. */
		switch (speed) {
		case FW_PORT_CAP_SPEED_1G:
			return (IFM_1000_KX);
		case FW_PORT_CAP_SPEED_10G:
			return (IFM_10G_KR);
		case FW_PORT_CAP_SPEED_25G:
			return (IFM_25G_KR);
		case FW_PORT_CAP_SPEED_40G:
			return (IFM_40G_KR4);
		case FW_PORT_CAP_SPEED_100G:
			return (IFM_100G_KR4);
		}
		break;
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
	case FW_PORT_TYPE_QSFP:
	case FW_PORT_TYPE_CR4_QSFP:
	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_CR2_QSFP:
	case FW_PORT_TYPE_SFP28:
		/* Pluggable transceiver: media depends on the inserted module. */
		switch (pi->mod_type) {
		case FW_PORT_MOD_TYPE_LR:
			switch (speed) {
			case FW_PORT_CAP_SPEED_1G:
				return (IFM_1000_LX);
			case FW_PORT_CAP_SPEED_10G:
				return (IFM_10G_LR);
			case FW_PORT_CAP_SPEED_25G:
				return (IFM_25G_LR);
			case FW_PORT_CAP_SPEED_40G:
				return (IFM_40G_LR4);
			case FW_PORT_CAP_SPEED_100G:
				return (IFM_100G_LR4);
			}
			break;
		case FW_PORT_MOD_TYPE_SR:
			switch (speed) {
			case FW_PORT_CAP_SPEED_1G:
				return (IFM_1000_SX);
			case FW_PORT_CAP_SPEED_10G:
				return (IFM_10G_SR);
			case FW_PORT_CAP_SPEED_25G:
				return (IFM_25G_SR);
			case FW_PORT_CAP_SPEED_40G:
				return (IFM_40G_SR4);
			case FW_PORT_CAP_SPEED_100G:
				return (IFM_100G_SR4);
			}
			break;
		case FW_PORT_MOD_TYPE_ER:
			if (speed == FW_PORT_CAP_SPEED_10G)
				return (IFM_10G_ER);
			break;
		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			switch (speed) {
			case FW_PORT_CAP_SPEED_1G:
				return (IFM_1000_CX);
			case FW_PORT_CAP_SPEED_10G:
				return (IFM_10G_TWINAX);
			case FW_PORT_CAP_SPEED_25G:
				return (IFM_25G_CR);
			case FW_PORT_CAP_SPEED_40G:
				return (IFM_40G_CR4);
			case FW_PORT_CAP_SPEED_100G:
				return (IFM_100G_CR4);
			}
			break;
		case FW_PORT_MOD_TYPE_LRM:
			if (speed == FW_PORT_CAP_SPEED_10G)
				return (IFM_10G_LRM);
			break;
		case FW_PORT_MOD_TYPE_NA:
			MPASS(0);	/* Not pluggable? */
			/* fall through */
		case FW_PORT_MOD_TYPE_ERROR:
		case FW_PORT_MOD_TYPE_UNKNOWN:
		case FW_PORT_MOD_TYPE_NOTSUPPORTED:
			break;
		case FW_PORT_MOD_TYPE_NONE:
			return (IFM_NONE);
		}
		break;
	case FW_PORT_TYPE_NONE:
		return (IFM_NONE);
	}

	return (IFM_UNKNOWN);
}
2003
2004 static void
2005 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2006 {
2007         struct vi_info *vi = ifp->if_softc;
2008         struct port_info *pi = vi->pi;
2009         struct adapter *sc = pi->adapter;
2010         struct link_config *lc = &pi->link_cfg;
2011
2012         if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0)
2013                 return;
2014         PORT_LOCK(pi);
2015
2016         if (pi->up_vis == 0) {
2017                 /*
2018                  * If all the interfaces are administratively down the firmware
2019                  * does not report transceiver changes.  Refresh port info here
2020                  * so that ifconfig displays accurate ifmedia at all times.
2021                  * This is the only reason we have a synchronized op in this
2022                  * function.  Just PORT_LOCK would have been enough otherwise.
2023                  */
2024                 t4_update_port_info(pi);
2025                 build_medialist(pi, &pi->media);
2026         }
2027
2028         /* ifm_status */
2029         ifmr->ifm_status = IFM_AVALID;
2030         if (lc->link_ok == 0)
2031                 goto done;
2032         ifmr->ifm_status |= IFM_ACTIVE;
2033
2034         /* ifm_active */
2035         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
2036         ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE);
2037         if (lc->fc & PAUSE_RX)
2038                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2039         if (lc->fc & PAUSE_TX)
2040                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2041         ifmr->ifm_active |= port_mword(pi, speed_to_fwspeed(lc->speed));
2042 done:
2043         PORT_UNLOCK(pi);
2044         end_synchronized_op(sc, 0);
2045 }
2046
2047 static int
2048 vcxgbe_probe(device_t dev)
2049 {
2050         char buf[128];
2051         struct vi_info *vi = device_get_softc(dev);
2052
2053         snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
2054             vi - vi->pi->vi);
2055         device_set_desc_copy(dev, buf);
2056
2057         return (BUS_PROBE_DEFAULT);
2058 }
2059
/*
 * Attach method for an extra virtual interface (VI) on a port.  Allocates a
 * VI from the firmware, records its id/SMT index/RSS base, and then runs the
 * common VI attach.  Returns 0 or a positive errno.
 */
static int
vcxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	int func, index, rc;
	u32 param, val;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	/* The VI's position within the port's array selects its MAC func. */
	index = vi - pi->vi;
	KASSERT(index < nitems(vi_mac_funcs),
	    ("%s: VI %s doesn't have a MAC func", __func__,
	    device_get_nameunit(dev)));
	func = vi_mac_funcs[index];
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, func, 0);
	if (rc < 0) {
		device_printf(dev, "Failed to allocate virtual interface "
		    "for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;	/* a non-negative return value is the new VI id */
	/* SMT index encoding differs between T4/T5 and later chips. */
	if (chip_id(sc) <= CHELSIO_T5)
		vi->smt_idx = (rc & 0x7f) << 1;
	else
		vi->smt_idx = (rc & 0x7f);

	/* Ask the firmware for this VI's RSS information. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc)
		vi->rss_base = 0xffff;	/* query failed; mark base invalid */
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	/* Undo the firmware VI allocation if the common attach fails. */
	rc = cxgbe_vi_attach(dev, vi);
	if (rc) {
		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
		return (rc);
	}
	return (0);
}
2109
/*
 * Detach method for an extra virtual interface: quiesce the VI, run the
 * common VI detach, and return the VI to the firmware.
 */
static int
vcxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->pi->adapter;

	/*
	 * NOTE(review): doom_vi() appears to begin the synchronized op that
	 * is released by end_synchronized_op() below — confirm against its
	 * definition before restructuring this sequence.
	 */
	doom_vi(sc, vi);

	cxgbe_vi_detach(vi);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);	/* release fw VI */

	end_synchronized_op(sc, 0);

	return (0);
}
2128
2129 void
2130 t4_fatal_err(struct adapter *sc)
2131 {
2132         t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2133         t4_intr_disable(sc);
2134         log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
2135             device_get_nameunit(sc->dev));
2136 }
2137
/*
 * Add the adapter to the global list of adapters (t4_list), under the
 * list's exclusive lock.
 */
void
t4_add_adapter(struct adapter *sc)
{
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);
}
2145
/*
 * Map BAR0 (adapter registers) and BAR4 (MSI-X BAR).  On success the
 * register bus tag/handle and mmio length are cached in the softc and the
 * kernel doorbell (register-based) is marked available.  Returns 0 or ENXIO.
 */
int
t4_map_bars_0_and_4(struct adapter *sc)
{
	/* BAR0: device registers. */
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* Register doorbells are usable as soon as BAR0 is mapped. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	/* BAR4: MSI-X. */
	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
2171
/*
 * Map BAR2 (userspace doorbells) and, on T5+ (i386/amd64 only), optionally
 * enable write combining on it.  Returns 0 or ENXIO.  Skipped entirely on
 * T4 when RDMA is disabled since nothing else uses this BAR there.
 */
int
t4_map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (chip_id(sc) >= CHELSIO_T5) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc, mode;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC doorbells supersede the plain UDB kind. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			/* SGE stat config: the field encoding differs on T6. */
			mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | mode);
		}
#endif
	}

	return (0);
}
2228
/*
 * Initial configuration for one of the adapter's PCIe memory windows:
 * the window's base address and its size (aperture) in bytes.
 */
struct memwin_init {
	uint32_t base;
	uint32_t aperture;
};
2233
/* Memory window layout used on T4 cards. */
static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};
2239
/* Memory window layout used on T5 and later cards (window 2 differs). */
static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
2245
/*
 * Program the adapter's memory window decoders and initialize the
 * corresponding softc state (lock, base, aperture, current position).
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin_init *mw_init;
	struct memwin *mw;
	int i;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw_init = &t4_memwin[0];
	} else {
		/* T5+ use the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw_init = &t5_memwin[0];
	}

	/* Program each window's base/size decoder and park it at offset 0. */
	for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
		rw_init(&mw->mw_lock, "memory window access");
		mw->mw_base = mw_init->base;
		mw->mw_aperture = mw_init->aperture;
		mw->mw_curpos = 0;
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->mw_base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->mw_aperture) - 10));
		rw_wlock(&mw->mw_lock);
		position_memwin(sc, i, 0);
		rw_wunlock(&mw->mw_lock);
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
2290
/*
 * Positions the memory window at the given address in the card's address space.
 * There are some alignment requirements and the actual position may be at an
 * address prior to the requested address.  mw->mw_curpos always has the actual
 * position of the window.  Caller must hold the window's lock exclusively.
 */
static void
position_memwin(struct adapter *sc, int idx, uint32_t addr)
{
	struct memwin *mw;
	uint32_t pf;
	uint32_t reg;

	MPASS(idx >= 0 && idx < NUM_MEMWIN);
	mw = &sc->memwin[idx];
	rw_assert(&mw->mw_lock, RA_WLOCKED);

	if (is_t4(sc)) {
		/* T4: no PF number in the offset register. */
		pf = 0;
		mw->mw_curpos = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		/* T5+: the PF number is part of the offset register. */
		pf = V_PFNUM(sc->pf);
		mw->mw_curpos = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
	t4_write_reg(sc, reg, mw->mw_curpos | pf);
	t4_read_reg(sc, reg);	/* flush */
}
2319
2320 static int
2321 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
2322     int len, int rw)
2323 {
2324         struct memwin *mw;
2325         uint32_t mw_end, v;
2326
2327         MPASS(idx >= 0 && idx < NUM_MEMWIN);
2328
2329         /* Memory can only be accessed in naturally aligned 4 byte units */
2330         if (addr & 3 || len & 3 || len <= 0)
2331                 return (EINVAL);
2332
2333         mw = &sc->memwin[idx];
2334         while (len > 0) {
2335                 rw_rlock(&mw->mw_lock);
2336                 mw_end = mw->mw_curpos + mw->mw_aperture;
2337                 if (addr >= mw_end || addr < mw->mw_curpos) {
2338                         /* Will need to reposition the window */
2339                         if (!rw_try_upgrade(&mw->mw_lock)) {
2340                                 rw_runlock(&mw->mw_lock);
2341                                 rw_wlock(&mw->mw_lock);
2342                         }
2343                         rw_assert(&mw->mw_lock, RA_WLOCKED);
2344                         position_memwin(sc, idx, addr);
2345                         rw_downgrade(&mw->mw_lock);
2346                         mw_end = mw->mw_curpos + mw->mw_aperture;
2347                 }
2348                 rw_assert(&mw->mw_lock, RA_RLOCKED);
2349                 while (addr < mw_end && len > 0) {
2350                         if (rw == 0) {
2351                                 v = t4_read_reg(sc, mw->mw_base + addr -
2352                                     mw->mw_curpos);
2353                                 *val++ = le32toh(v);
2354                         } else {
2355                                 v = *val++;
2356                                 t4_write_reg(sc, mw->mw_base + addr -
2357                                     mw->mw_curpos, htole32(v));;
2358                         }
2359                         addr += 4;
2360                         len -= 4;
2361                 }
2362                 rw_runlock(&mw->mw_lock);
2363         }
2364
2365         return (0);
2366 }
2367
/* Read 'len' bytes at card address 'addr' into 'val' via memory window 'idx'. */
static inline int
read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
    int len)
{

	return (rw_via_memwin(sc, idx, addr, val, len, 0));
}
2375
/*
 * Write 'len' bytes from 'val' to card address 'addr' via memory window 'idx'.
 * The const is cast away only because rw_via_memwin() takes a non-const
 * pointer for both directions; the buffer is never modified on the write
 * path (rw == 1 only reads from *val).
 */
static inline int
write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
    const uint32_t *val, int len)
{

	return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
}
2383
2384 static int
2385 t4_range_cmp(const void *a, const void *b)
2386 {
2387         return ((const struct t4_range *)a)->start -
2388                ((const struct t4_range *)b)->start;
2389 }
2390
2391 /*
2392  * Verify that the memory range specified by the addr/len pair is valid within
2393  * the card's address space.
2394  */
2395 static int
2396 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
2397 {
2398         struct t4_range mem_ranges[4], *r, *next;
2399         uint32_t em, addr_len;
2400         int i, n, remaining;
2401
2402         /* Memory can only be accessed in naturally aligned 4 byte units */
2403         if (addr & 3 || len & 3 || len <= 0)
2404                 return (EINVAL);
2405
2406         /* Enabled memories */
2407         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2408
2409         r = &mem_ranges[0];
2410         n = 0;
2411         bzero(r, sizeof(mem_ranges));
2412         if (em & F_EDRAM0_ENABLE) {
2413                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2414                 r->size = G_EDRAM0_SIZE(addr_len) << 20;
2415                 if (r->size > 0) {
2416                         r->start = G_EDRAM0_BASE(addr_len) << 20;
2417                         if (addr >= r->start &&
2418                             addr + len <= r->start + r->size)
2419                                 return (0);
2420                         r++;
2421                         n++;
2422                 }
2423         }
2424         if (em & F_EDRAM1_ENABLE) {
2425                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2426                 r->size = G_EDRAM1_SIZE(addr_len) << 20;
2427                 if (r->size > 0) {
2428                         r->start = G_EDRAM1_BASE(addr_len) << 20;
2429                         if (addr >= r->start &&
2430                             addr + len <= r->start + r->size)
2431                                 return (0);
2432                         r++;
2433                         n++;
2434                 }
2435         }
2436         if (em & F_EXT_MEM_ENABLE) {
2437                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2438                 r->size = G_EXT_MEM_SIZE(addr_len) << 20;
2439                 if (r->size > 0) {
2440                         r->start = G_EXT_MEM_BASE(addr_len) << 20;
2441                         if (addr >= r->start &&
2442                             addr + len <= r->start + r->size)
2443                                 return (0);
2444                         r++;
2445                         n++;
2446                 }
2447         }
2448         if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
2449                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2450                 r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
2451                 if (r->size > 0) {
2452                         r->start = G_EXT_MEM1_BASE(addr_len) << 20;
2453                         if (addr >= r->start &&
2454                             addr + len <= r->start + r->size)
2455                                 return (0);
2456                         r++;
2457                         n++;
2458                 }
2459         }
2460         MPASS(n <= nitems(mem_ranges));
2461
2462         if (n > 1) {
2463                 /* Sort and merge the ranges. */
2464                 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
2465
2466                 /* Start from index 0 and examine the next n - 1 entries. */
2467                 r = &mem_ranges[0];
2468                 for (remaining = n - 1; remaining > 0; remaining--, r++) {
2469
2470                         MPASS(r->size > 0);     /* r is a valid entry. */
2471                         next = r + 1;
2472                         MPASS(next->size > 0);  /* and so is the next one. */
2473
2474                         while (r->start + r->size >= next->start) {
2475                                 /* Merge the next one into the current entry. */
2476                                 r->size = max(r->start + r->size,
2477                                     next->start + next->size) - r->start;
2478                                 n--;    /* One fewer entry in total. */
2479                                 if (--remaining == 0)
2480                                         goto done;      /* short circuit */
2481                                 next++;
2482                         }
2483                         if (next != r + 1) {
2484                                 /*
2485                                  * Some entries were merged into r and next
2486                                  * points to the first valid entry that couldn't
2487                                  * be merged.
2488                                  */
2489                                 MPASS(next->size > 0);  /* must be valid */
2490                                 memcpy(r + 1, next, remaining * sizeof(*r));
2491 #ifdef INVARIANTS
2492                                 /*
2493                                  * This so that the foo->size assertion in the
2494                                  * next iteration of the loop do the right
2495                                  * thing for entries that were pulled up and are
2496                                  * no longer valid.
2497                                  */
2498                                 MPASS(n < nitems(mem_ranges));
2499                                 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
2500                                     sizeof(struct t4_range));
2501 #endif
2502                         }
2503                 }
2504 done:
2505                 /* Done merging the ranges. */
2506                 MPASS(n > 0);
2507                 r = &mem_ranges[0];
2508                 for (i = 0; i < n; i++, r++) {
2509                         if (addr >= r->start &&
2510                             addr + len <= r->start + r->size)
2511                                 return (0);
2512                 }
2513         }
2514
2515         return (EFAULT);
2516 }
2517
2518 static int
2519 fwmtype_to_hwmtype(int mtype)
2520 {
2521
2522         switch (mtype) {
2523         case FW_MEMTYPE_EDC0:
2524                 return (MEM_EDC0);
2525         case FW_MEMTYPE_EDC1:
2526                 return (MEM_EDC1);
2527         case FW_MEMTYPE_EXTMEM:
2528                 return (MEM_MC0);
2529         case FW_MEMTYPE_EXTMEM1:
2530                 return (MEM_MC1);
2531         default:
2532                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
2533         }
2534 }
2535
2536 /*
2537  * Verify that the memory range specified by the memtype/offset/len pair is
2538  * valid and lies entirely within the memtype specified.  The global address of
2539  * the start of the range is returned in addr.
2540  */
2541 static int
2542 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
2543     uint32_t *addr)
2544 {
2545         uint32_t em, addr_len, maddr;
2546
2547         /* Memory can only be accessed in naturally aligned 4 byte units */
2548         if (off & 3 || len & 3 || len == 0)
2549                 return (EINVAL);
2550
2551         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
2552         switch (fwmtype_to_hwmtype(mtype)) {
2553         case MEM_EDC0:
2554                 if (!(em & F_EDRAM0_ENABLE))
2555                         return (EINVAL);
2556                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
2557                 maddr = G_EDRAM0_BASE(addr_len) << 20;
2558                 break;
2559         case MEM_EDC1:
2560                 if (!(em & F_EDRAM1_ENABLE))
2561                         return (EINVAL);
2562                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
2563                 maddr = G_EDRAM1_BASE(addr_len) << 20;
2564                 break;
2565         case MEM_MC:
2566                 if (!(em & F_EXT_MEM_ENABLE))
2567                         return (EINVAL);
2568                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
2569                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
2570                 break;
2571         case MEM_MC1:
2572                 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
2573                         return (EINVAL);
2574                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
2575                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
2576                 break;
2577         default:
2578                 return (EINVAL);
2579         }
2580
2581         *addr = maddr + off;    /* global address */
2582         return (validate_mem_range(sc, *addr, len));
2583 }
2584
2585 static int
2586 fixup_devlog_params(struct adapter *sc)
2587 {
2588         struct devlog_params *dparams = &sc->params.devlog;
2589         int rc;
2590
2591         rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
2592             dparams->size, &dparams->addr);
2593
2594         return (rc);
2595 }
2596
2597 static int
2598 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
2599     struct intrs_and_queues *iaq)
2600 {
2601         int rc, itype, navail, nrxq10g, nrxq1g, n;
2602         int nofldrxq10g = 0, nofldrxq1g = 0;
2603
2604         bzero(iaq, sizeof(*iaq));
2605
2606         iaq->ntxq10g = t4_ntxq10g;
2607         iaq->ntxq1g = t4_ntxq1g;
2608         iaq->ntxq_vi = t4_ntxq_vi;
2609         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
2610         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
2611         iaq->nrxq_vi = t4_nrxq_vi;
2612         iaq->rsrv_noflowq = t4_rsrv_noflowq;
2613 #ifdef TCP_OFFLOAD
2614         if (is_offload(sc)) {
2615                 iaq->nofldtxq10g = t4_nofldtxq10g;
2616                 iaq->nofldtxq1g = t4_nofldtxq1g;
2617                 iaq->nofldtxq_vi = t4_nofldtxq_vi;
2618                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
2619                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
2620                 iaq->nofldrxq_vi = t4_nofldrxq_vi;
2621         }
2622 #endif
2623 #ifdef DEV_NETMAP
2624         iaq->nnmtxq_vi = t4_nnmtxq_vi;
2625         iaq->nnmrxq_vi = t4_nnmrxq_vi;
2626 #endif
2627
2628         for (itype = INTR_MSIX; itype; itype >>= 1) {
2629
2630                 if ((itype & t4_intr_types) == 0)
2631                         continue;       /* not allowed */
2632
2633                 if (itype == INTR_MSIX)
2634                         navail = pci_msix_count(sc->dev);
2635                 else if (itype == INTR_MSI)
2636                         navail = pci_msi_count(sc->dev);
2637                 else
2638                         navail = 1;
2639 restart:
2640                 if (navail == 0)
2641                         continue;
2642
2643                 iaq->intr_type = itype;
2644                 iaq->intr_flags_10g = 0;
2645                 iaq->intr_flags_1g = 0;
2646
2647                 /*
2648                  * Best option: an interrupt vector for errors, one for the
2649                  * firmware event queue, and one for every rxq (NIC and TOE) of
2650                  * every VI.  The VIs that support netmap use the same
2651                  * interrupts for the NIC rx queues and the netmap rx queues
2652                  * because only one set of queues is active at a time.
2653                  */
2654                 iaq->nirq = T4_EXTRA_INTR;
2655                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2656                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2657                 iaq->nirq += (n10g + n1g) * (num_vis - 1) *
2658                     max(iaq->nrxq_vi, iaq->nnmrxq_vi);  /* See comment above. */
2659                 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
2660                 if (iaq->nirq <= navail &&
2661                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
2662                         iaq->intr_flags_10g = INTR_ALL;
2663                         iaq->intr_flags_1g = INTR_ALL;
2664                         goto allocate;
2665                 }
2666
2667                 /* Disable the VIs (and netmap) if there aren't enough intrs */
2668                 if (num_vis > 1) {
2669                         device_printf(sc->dev, "virtual interfaces disabled "
2670                             "because num_vis=%u with current settings "
2671                             "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
2672                             "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
2673                             "nnmrxq_vi=%u) would need %u interrupts but "
2674                             "only %u are available.\n", num_vis, nrxq10g,
2675                             nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
2676                             iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
2677                             navail);
2678                         num_vis = 1;
2679                         iaq->ntxq_vi = iaq->nrxq_vi = 0;
2680                         iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
2681                         iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
2682                         goto restart;
2683                 }
2684
2685                 /*
2686                  * Second best option: a vector for errors, one for the firmware
2687                  * event queue, and vectors for either all the NIC rx queues or
2688                  * all the TOE rx queues.  The queues that don't get vectors
2689                  * will forward their interrupts to those that do.
2690                  */
2691                 iaq->nirq = T4_EXTRA_INTR;
2692                 if (nrxq10g >= nofldrxq10g) {
2693                         iaq->intr_flags_10g = INTR_RXQ;
2694                         iaq->nirq += n10g * nrxq10g;
2695                 } else {
2696                         iaq->intr_flags_10g = INTR_OFLD_RXQ;
2697                         iaq->nirq += n10g * nofldrxq10g;
2698                 }
2699                 if (nrxq1g >= nofldrxq1g) {
2700                         iaq->intr_flags_1g = INTR_RXQ;
2701                         iaq->nirq += n1g * nrxq1g;
2702                 } else {
2703                         iaq->intr_flags_1g = INTR_OFLD_RXQ;
2704                         iaq->nirq += n1g * nofldrxq1g;
2705                 }
2706                 if (iaq->nirq <= navail &&
2707                     (itype != INTR_MSI || powerof2(iaq->nirq)))
2708                         goto allocate;
2709
2710                 /*
2711                  * Next best option: an interrupt vector for errors, one for the
2712                  * firmware event queue, and at least one per main-VI.  At this
2713                  * point we know we'll have to downsize nrxq and/or nofldrxq to
2714                  * fit what's available to us.
2715                  */
2716                 iaq->nirq = T4_EXTRA_INTR;
2717                 iaq->nirq += n10g + n1g;
2718                 if (iaq->nirq <= navail) {
2719                         int leftover = navail - iaq->nirq;
2720
2721                         if (n10g > 0) {
2722                                 int target = max(nrxq10g, nofldrxq10g);
2723
2724                                 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
2725                                     INTR_RXQ : INTR_OFLD_RXQ;
2726
2727                                 n = 1;
2728                                 while (n < target && leftover >= n10g) {
2729                                         leftover -= n10g;
2730                                         iaq->nirq += n10g;
2731                                         n++;
2732                                 }
2733                                 iaq->nrxq10g = min(n, nrxq10g);
2734 #ifdef TCP_OFFLOAD
2735                                 iaq->nofldrxq10g = min(n, nofldrxq10g);
2736 #endif
2737                         }
2738
2739                         if (n1g > 0) {
2740                                 int target = max(nrxq1g, nofldrxq1g);
2741
2742                                 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
2743                                     INTR_RXQ : INTR_OFLD_RXQ;
2744
2745                                 n = 1;
2746                                 while (n < target && leftover >= n1g) {
2747                                         leftover -= n1g;
2748                                         iaq->nirq += n1g;
2749                                         n++;
2750                                 }
2751                                 iaq->nrxq1g = min(n, nrxq1g);
2752 #ifdef TCP_OFFLOAD
2753                                 iaq->nofldrxq1g = min(n, nofldrxq1g);
2754 #endif
2755                         }
2756
2757                         if (itype != INTR_MSI || powerof2(iaq->nirq))
2758                                 goto allocate;
2759                 }
2760
2761                 /*
2762                  * Least desirable option: one interrupt vector for everything.
2763                  */
2764                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2765                 iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
2766 #ifdef TCP_OFFLOAD
2767                 if (is_offload(sc))
2768                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2769 #endif
2770 allocate:
2771                 navail = iaq->nirq;
2772                 rc = 0;
2773                 if (itype == INTR_MSIX)
2774                         rc = pci_alloc_msix(sc->dev, &navail);
2775                 else if (itype == INTR_MSI)
2776                         rc = pci_alloc_msi(sc->dev, &navail);
2777
2778                 if (rc == 0) {
2779                         if (navail == iaq->nirq)
2780                                 return (0);
2781
2782                         /*
2783                          * Didn't get the number requested.  Use whatever number
2784                          * the kernel is willing to allocate (it's in navail).
2785                          */
2786                         device_printf(sc->dev, "fewer vectors than requested, "
2787                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2788                             itype, iaq->nirq, navail);
2789                         pci_release_msi(sc->dev);
2790                         goto restart;
2791                 }
2792
2793                 device_printf(sc->dev,
2794                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2795                     itype, rc, iaq->nirq, navail);
2796         }
2797
2798         device_printf(sc->dev,
2799             "failed to find a usable interrupt type.  "
2800             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2801             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2802
2803         return (ENXIO);
2804 }
2805
2806 #define FW_VERSION(chip) ( \
2807     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
2808     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
2809     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
2810     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
2811 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
2812
/*
 * Per-chip firmware information: the name of the KLD carrying the firmware
 * and config modules, and the firmware header (version + interface versions)
 * that this driver was compiled against for that chip.
 */
struct fw_info {
	uint8_t chip;		/* CHELSIO_T4/T5/T6 */
	char *kld_name;		/* KLD that also holds the default config */
	char *fw_mod_name;	/* module within the KLD with the fw image */
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.kld_name = "t6fw_cfg",
		.fw_mod_name = "t6fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = htobe32_const(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};
2869
2870 static struct fw_info *
2871 find_fw_info(int chip)
2872 {
2873         int i;
2874
2875         for (i = 0; i < nitems(fw_info); i++) {
2876                 if (fw_info[i].chip == chip)
2877                         return (&fw_info[i]);
2878         }
2879         return (NULL);
2880 }
2881
2882 /*
2883  * Is the given firmware API compatible with the one the driver was compiled
2884  * with?
2885  */
2886 static int
2887 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2888 {
2889
2890         /* short circuit if it's the exact same firmware version */
2891         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2892                 return (1);
2893
2894         /*
2895          * XXX: Is this too conservative?  Perhaps I should limit this to the
2896          * features that are supported in the driver.
2897          */
2898 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2899         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2900             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2901             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2902                 return (1);
2903 #undef SAME_INTF
2904
2905         return (0);
2906 }
2907
2908 /*
2909  * The firmware in the KLD is usable, but should it be installed?  This routine
2910  * explains itself in detail if it indicates the KLD firmware should be
2911  * installed.
2912  */
2913 static int
2914 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2915 {
2916         const char *reason;
2917
2918         if (!card_fw_usable) {
2919                 reason = "incompatible or unusable";
2920                 goto install;
2921         }
2922
2923         if (k > c) {
2924                 reason = "older than the version bundled with this driver";
2925                 goto install;
2926         }
2927
2928         if (t4_fw_install == 2 && k != c) {
2929                 reason = "different than the version bundled with this driver";
2930                 goto install;
2931         }
2932
2933         return (0);
2934
2935 install:
2936         if (t4_fw_install == 0) {
2937                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2938                     "but the driver is prohibited from installing a different "
2939                     "firmware on the card.\n",
2940                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2941                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2942
2943                 return (0);
2944         }
2945
2946         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2947             "installing firmware %u.%u.%u.%u on card.\n",
2948             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2949             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2950             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2951             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2952
2953         return (1);
2954 }
2955 /*
2956  * Establish contact with the firmware and determine if we are the master driver
2957  * or not, and whether we are responsible for chip initialization.
2958  */
2959 static int
2960 prep_firmware(struct adapter *sc)
2961 {
2962         const struct firmware *fw = NULL, *default_cfg;
2963         int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2964         enum dev_state state;
2965         struct fw_info *fw_info;
2966         struct fw_hdr *card_fw;         /* fw on the card */
2967         const struct fw_hdr *kld_fw;    /* fw in the KLD */
2968         const struct fw_hdr *drv_fw;    /* fw header the driver was compiled
2969                                            against */
2970
2971         /* Contact firmware. */
2972         rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2973         if (rc < 0 || state == DEV_STATE_ERR) {
2974                 rc = -rc;
2975                 device_printf(sc->dev,
2976                     "failed to connect to the firmware: %d, %d.\n", rc, state);
2977                 return (rc);
2978         }
2979         pf = rc;
2980         if (pf == sc->mbox)
2981                 sc->flags |= MASTER_PF;
2982         else if (state == DEV_STATE_UNINIT) {
2983                 /*
2984                  * We didn't get to be the master so we definitely won't be
2985                  * configuring the chip.  It's a bug if someone else hasn't
2986                  * configured it already.
2987                  */
2988                 device_printf(sc->dev, "couldn't be master(%d), "
2989                     "device not already initialized either(%d).\n", rc, state);
2990                 return (EDOOFUS);
2991         }
2992
2993         /* This is the firmware whose headers the driver was compiled against */
2994         fw_info = find_fw_info(chip_id(sc));
2995         if (fw_info == NULL) {
2996                 device_printf(sc->dev,
2997                     "unable to look up firmware information for chip %d.\n",
2998                     chip_id(sc));
2999                 return (EINVAL);
3000         }
3001         drv_fw = &fw_info->fw_hdr;
3002
3003         /*
3004          * The firmware KLD contains many modules.  The KLD name is also the
3005          * name of the module that contains the default config file.
3006          */
3007         default_cfg = firmware_get(fw_info->kld_name);
3008
3009         /* Read the header of the firmware on the card */
3010         card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
3011         rc = -t4_read_flash(sc, FLASH_FW_START,
3012             sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
3013         if (rc == 0)
3014                 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
3015         else {
3016                 device_printf(sc->dev,
3017                     "Unable to read card's firmware header: %d\n", rc);
3018                 card_fw_usable = 0;
3019         }
3020
3021         /* This is the firmware in the KLD */
3022         fw = firmware_get(fw_info->fw_mod_name);
3023         if (fw != NULL) {
3024                 kld_fw = (const void *)fw->data;
3025                 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
3026         } else {
3027                 kld_fw = NULL;
3028                 kld_fw_usable = 0;
3029         }
3030
3031         if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3032             (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
3033                 /*
3034                  * Common case: the firmware on the card is an exact match and
3035                  * the KLD is an exact match too, or the KLD is
3036                  * absent/incompatible.  Note that t4_fw_install = 2 is ignored
3037                  * here -- use cxgbetool loadfw if you want to reinstall the
3038                  * same firmware as the one on the card.
3039                  */
3040         } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
3041             should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
3042             be32toh(card_fw->fw_ver))) {
3043
3044                 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
3045                 if (rc != 0) {
3046                         device_printf(sc->dev,
3047                             "failed to install firmware: %d\n", rc);
3048                         goto done;
3049                 }
3050
3051                 /* Installed successfully, update the cached header too. */
3052                 memcpy(card_fw, kld_fw, sizeof(*card_fw));
3053                 card_fw_usable = 1;
3054                 need_fw_reset = 0;      /* already reset as part of load_fw */
3055         }
3056
3057         if (!card_fw_usable) {
3058                 uint32_t d, c, k;
3059
3060                 d = ntohl(drv_fw->fw_ver);
3061                 c = ntohl(card_fw->fw_ver);
3062                 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
3063
3064                 device_printf(sc->dev, "Cannot find a usable firmware: "
3065                     "fw_install %d, chip state %d, "
3066                     "driver compiled with %d.%d.%d.%d, "
3067                     "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
3068                     t4_fw_install, state,
3069                     G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
3070                     G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
3071                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3072                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3073                     G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3074                     G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3075                 rc = EINVAL;
3076                 goto done;
3077         }
3078
3079         /* Reset device */
3080         if (need_fw_reset &&
3081             (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
3082                 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
3083                 if (rc != ETIMEDOUT && rc != EIO)
3084                         t4_fw_bye(sc, sc->mbox);
3085                 goto done;
3086         }
3087         sc->flags |= FW_OK;
3088
3089         rc = get_params__pre_init(sc);
3090         if (rc != 0)
3091                 goto done; /* error message displayed already */
3092
3093         /* Partition adapter resources as specified in the config file. */
3094         if (state == DEV_STATE_UNINIT) {
3095
3096                 KASSERT(sc->flags & MASTER_PF,
3097                     ("%s: trying to change chip settings when not master.",
3098                     __func__));
3099
3100                 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
3101                 if (rc != 0)
3102                         goto done;      /* error message displayed already */
3103
3104                 t4_tweak_chip_settings(sc);
3105
3106                 /* get basic stuff going */
3107                 rc = -t4_fw_initialize(sc, sc->mbox);
3108                 if (rc != 0) {
3109                         device_printf(sc->dev, "fw init failed: %d.\n", rc);
3110                         goto done;
3111                 }
3112         } else {
3113                 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
3114                 sc->cfcsum = 0;
3115         }
3116
3117 done:
3118         free(card_fw, M_CXGBE);
3119         if (fw != NULL)
3120                 firmware_put(fw, FIRMWARE_UNLOAD);
3121         if (default_cfg != NULL)
3122                 firmware_put(default_cfg, FIRMWARE_UNLOAD);
3123
3124         return (rc);
3125 }
3126
3127 #define FW_PARAM_DEV(param) \
3128         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3129          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3130 #define FW_PARAM_PFVF(param) \
3131         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3132          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
3133
/*
 * Partition chip resources for use between various PFs, VFs, etc.
 *
 * Picks a configuration file (a loadable firmware(9) module, the built-in
 * default image, or the copy on the card's flash), uploads it to card
 * memory when needed, and asks the firmware to pre-process and then apply
 * it.  Returns 0 on success or a positive errno.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
        const struct firmware *cfg = NULL;      /* module-supplied config, if any */
        int rc = 0;
        struct fw_caps_config_cmd caps;
        uint32_t mtype, moff, finicsum, cfcsum;

        /*
         * Figure out what configuration file to use.  Pick the default config
         * file for the card if the user hasn't specified one explicitly.
         */
        snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
        if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
                /* Card specific overrides go here. */
                if (pci_get_device(sc->dev) == 0x440a)
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
                if (is_fpga(sc))
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
        }

        /*
         * We need to load another module if the profile is anything except
         * "default" or "flash".
         */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
            strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                char s[32];

                /* Module name is "<prefix>_<profile>". */
                snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
                cfg = firmware_get(s);
                if (cfg == NULL) {
                        /* Fall back to the default config, or failing that,
                         * to the config stored on the card's flash. */
                        if (default_cfg != NULL) {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the default config file instead.\n",
                                    s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", DEFAULT_CF);
                        } else {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the config file on the card's flash "
                                    "instead.\n", s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", FLASH_CF);
                        }
                }
        }

        /* "default" was requested but no default config is available. */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
            default_cfg == NULL) {
                device_printf(sc->dev,
                    "default config file not available, will use the config "
                    "file on the card's flash instead.\n");
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
        }

        if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                u_int cflen;
                const uint32_t *cfdata;
                uint32_t param, val, addr;

                KASSERT(cfg != NULL || default_cfg != NULL,
                    ("%s: no config to upload", __func__));

                /*
                 * Ask the firmware where it wants us to upload the config file.
                 */
                param = FW_PARAM_DEV(CF);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* No support for config file?  Shouldn't happen. */
                        device_printf(sc->dev,
                            "failed to query config file location: %d.\n", rc);
                        goto done;
                }
                /* Firmware returns memory type and a 64KB-unit offset. */
                mtype = G_FW_PARAMS_PARAM_Y(val);
                moff = G_FW_PARAMS_PARAM_Z(val) << 16;

                /*
                 * XXX: sheer laziness.  We deliberately added 4 bytes of
                 * useless stuffing/comments at the end of the config file so
                 * it's ok to simply throw away the last remaining bytes when
                 * the config file is not an exact multiple of 4.  This also
                 * helps with the validate_mt_off_len check.
                 */
                if (cfg != NULL) {
                        cflen = cfg->datasize & ~3;
                        cfdata = cfg->data;
                } else {
                        cflen = default_cfg->datasize & ~3;
                        cfdata = default_cfg->data;
                }

                if (cflen > FLASH_CFG_MAX_SIZE) {
                        device_printf(sc->dev,
                            "config file too long (%d, max allowed is %d).  "
                            "Will try to use the config on the card, if any.\n",
                            cflen, FLASH_CFG_MAX_SIZE);
                        goto use_config_on_flash;
                }

                rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                            "Will try to use the config on the card, if any.\n",
                            __func__, mtype, moff, cflen, rc);
                        goto use_config_on_flash;
                }
                write_via_memwin(sc, 2, addr, cfdata, cflen);
        } else {
                /*
                 * NB: this label lives inside the else block; the gotos above
                 * jump here from the if branch when the upload can't be done.
                 */
use_config_on_flash:
                mtype = FW_MEMTYPE_FLASH;
                moff = t4_flash_cfg_addr(sc);
        }

        /* Ask the firmware to pre-process the config at (mtype, moff). */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to pre-process config file: %d "
                    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
                goto done;
        }

        /* A checksum mismatch is reported but is not treated as fatal. */
        finicsum = be32toh(caps.finicsum);
        cfcsum = be32toh(caps.cfcsum);
        if (finicsum != cfcsum) {
                device_printf(sc->dev,
                    "WARNING: config file checksum mismatch: %08x %08x\n",
                    finicsum, cfcsum);
        }
        sc->cfcsum = cfcsum;

/*
 * Mask a 16-bit big-endian caps field with the host-order t4_<x>_allowed
 * tunable (htobe16 converts the tunable to the field's byte order first).
 */
#define LIMIT_CAPS(x) do { \
        caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

        /*
         * Let the firmware know what features will (not) be used so it can tune
         * things accordingly.
         */
        LIMIT_CAPS(nbmcaps);
        LIMIT_CAPS(linkcaps);
        LIMIT_CAPS(switchcaps);
        LIMIT_CAPS(niccaps);
        LIMIT_CAPS(toecaps);
        LIMIT_CAPS(rdmacaps);
        LIMIT_CAPS(cryptocaps);
        LIMIT_CAPS(iscsicaps);
        LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

        /* Write the (possibly reduced) capabilities back to the firmware. */
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to process config file: %d.\n", rc);
        }
done:
        if (cfg != NULL)
                firmware_put(cfg, FIRMWARE_UNLOAD);
        return (rc);
}
3313
3314 /*
3315  * Retrieve parameters that are needed (or nice to have) very early.
3316  */
3317 static int
3318 get_params__pre_init(struct adapter *sc)
3319 {
3320         int rc;
3321         uint32_t param[2], val[2];
3322
3323         t4_get_version_info(sc);
3324
3325         snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
3326             G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3327             G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3328             G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3329             G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
3330
3331         snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
3332             G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
3333             G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
3334             G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
3335             G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
3336
3337         snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
3338             G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
3339             G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
3340             G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
3341             G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
3342
3343         snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
3344             G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
3345             G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
3346             G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
3347             G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
3348
3349         param[0] = FW_PARAM_DEV(PORTVEC);
3350         param[1] = FW_PARAM_DEV(CCLK);
3351         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3352         if (rc != 0) {
3353                 device_printf(sc->dev,
3354                     "failed to query parameters (pre_init): %d.\n", rc);
3355                 return (rc);
3356         }
3357
3358         sc->params.portvec = val[0];
3359         sc->params.nports = bitcount32(val[0]);
3360         sc->params.vpd.cclk = val[1];
3361
3362         /* Read device log parameters. */
3363         rc = -t4_init_devlog_params(sc, 1);
3364         if (rc == 0)
3365                 fixup_devlog_params(sc);
3366         else {
3367                 device_printf(sc->dev,
3368                     "failed to get devlog parameters: %d.\n", rc);
3369                 rc = 0; /* devlog isn't critical for device operation */
3370         }
3371
3372         return (rc);
3373 }
3374
/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
        int rc;
        uint32_t param[7], val[7];
        struct fw_caps_config_cmd caps;

        /* Queue/filter/L2T ranges assigned to this PF by the firmware. */
        param[0] = FW_PARAM_PFVF(IQFLINT_START);
        param[1] = FW_PARAM_PFVF(EQ_START);
        param[2] = FW_PARAM_PFVF(FILTER_START);
        param[3] = FW_PARAM_PFVF(FILTER_END);
        param[4] = FW_PARAM_PFVF(L2T_START);
        param[5] = FW_PARAM_PFVF(L2T_END);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (post_init): %d.\n", rc);
                return (rc);
        }

        /* The *_END values are inclusive, hence the +1 for the sizes. */
        sc->sge.iq_start = val[0];
        sc->sge.eq_start = val[1];
        sc->tids.ftid_base = val[2];
        sc->tids.nftids = val[3] - val[2] + 1;
        sc->params.ftid_min = val[2];
        sc->params.ftid_max = val[3];
        sc->vres.l2t.start = val[4];
        sc->vres.l2t.size = val[5] - val[4] + 1;
        KASSERT(sc->vres.l2t.size <= L2T_SIZE,
            ("%s: L2 table size (%u) larger than expected (%u)",
            __func__, sc->vres.l2t.size, L2T_SIZE));

        /* get capabilities */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get card capabilities: %d.\n", rc);
                return (rc);
        }

/*
 * Convert a 16-bit big-endian caps field to host order and cache it in the
 * softc.  NOTE(review): htobe16 here reads as if it should be be16toh; the
 * two are the same operation (a 16-bit byte swap on LE hosts, identity on
 * BE), so behavior is correct either way — confirm intent before changing.
 */
#define READ_CAPS(x) do { \
        sc->x = htobe16(caps.x); \
} while (0)
        READ_CAPS(nbmcaps);
        READ_CAPS(linkcaps);
        READ_CAPS(switchcaps);
        READ_CAPS(niccaps);
        READ_CAPS(toecaps);
        READ_CAPS(rdmacaps);
        READ_CAPS(cryptocaps);
        READ_CAPS(iscsicaps);
        READ_CAPS(fcoecaps);

        /* Ethernet offload (TLS/rate-limit style tids) resources. */
        if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
                param[0] = FW_PARAM_PFVF(ETHOFLD_START);
                param[1] = FW_PARAM_PFVF(ETHOFLD_END);
                param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query NIC parameters: %d.\n", rc);
                        return (rc);
                }
                sc->tids.etid_base = val[0];
                sc->params.etid_min = val[0];
                sc->tids.netids = val[1] - val[0] + 1;
                sc->params.netids = sc->tids.netids;
                sc->params.eo_wr_cred = val[2];
                sc->params.ethoffload = 1;
        }

        if (sc->toecaps) {
                /* query offload-related parameters */
                param[0] = FW_PARAM_DEV(NTID);
                param[1] = FW_PARAM_PFVF(SERVER_START);
                param[2] = FW_PARAM_PFVF(SERVER_END);
                param[3] = FW_PARAM_PFVF(TDDP_START);
                param[4] = FW_PARAM_PFVF(TDDP_END);
                param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query TOE parameters: %d.\n", rc);
                        return (rc);
                }
                sc->tids.ntids = val[0];
                /* Active tids are capped at half the tid space. */
                sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
                sc->tids.stid_base = val[1];
                sc->tids.nstids = val[2] - val[1] + 1;
                sc->vres.ddp.start = val[3];
                sc->vres.ddp.size = val[4] - val[3] + 1;
                sc->params.ofldq_wr_cred = val[5];
                sc->params.offload = 1;
        }
        if (sc->rdmacaps) {
                /* RDMA resources come in three batches of queries. */
                param[0] = FW_PARAM_PFVF(STAG_START);
                param[1] = FW_PARAM_PFVF(STAG_END);
                param[2] = FW_PARAM_PFVF(RQ_START);
                param[3] = FW_PARAM_PFVF(RQ_END);
                param[4] = FW_PARAM_PFVF(PBL_START);
                param[5] = FW_PARAM_PFVF(PBL_END);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query RDMA parameters(1): %d.\n", rc);
                        return (rc);
                }
                sc->vres.stag.start = val[0];
                sc->vres.stag.size = val[1] - val[0] + 1;
                sc->vres.rq.start = val[2];
                sc->vres.rq.size = val[3] - val[2] + 1;
                sc->vres.pbl.start = val[4];
                sc->vres.pbl.size = val[5] - val[4] + 1;

                param[0] = FW_PARAM_PFVF(SQRQ_START);
                param[1] = FW_PARAM_PFVF(SQRQ_END);
                param[2] = FW_PARAM_PFVF(CQ_START);
                param[3] = FW_PARAM_PFVF(CQ_END);
                param[4] = FW_PARAM_PFVF(OCQ_START);
                param[5] = FW_PARAM_PFVF(OCQ_END);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query RDMA parameters(2): %d.\n", rc);
                        return (rc);
                }
                sc->vres.qp.start = val[0];
                sc->vres.qp.size = val[1] - val[0] + 1;
                sc->vres.cq.start = val[2];
                sc->vres.cq.size = val[3] - val[2] + 1;
                sc->vres.ocq.start = val[4];
                sc->vres.ocq.size = val[5] - val[4] + 1;

                param[0] = FW_PARAM_PFVF(SRQ_START);
                param[1] = FW_PARAM_PFVF(SRQ_END);
                param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
                param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query RDMA parameters(3): %d.\n", rc);
                        return (rc);
                }
                sc->vres.srq.start = val[0];
                sc->vres.srq.size = val[1] - val[0] + 1;
                sc->params.max_ordird_qp = val[2];
                sc->params.max_ird_adapter = val[3];
        }
        if (sc->iscsicaps) {
                param[0] = FW_PARAM_PFVF(ISCSI_START);
                param[1] = FW_PARAM_PFVF(ISCSI_END);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query iSCSI parameters: %d.\n", rc);
                        return (rc);
                }
                sc->vres.iscsi.start = val[0];
                sc->vres.iscsi.size = val[1] - val[0] + 1;
        }

        t4_init_sge_params(sc);

        /*
         * We've got the params we wanted to query via the firmware.  Now grab
         * some others directly from the chip.
         */
        rc = t4_read_chip_settings(sc);

        return (rc);
}
3554
3555 static int
3556 set_params__post_init(struct adapter *sc)
3557 {
3558         uint32_t param, val;
3559
3560         /* ask for encapsulated CPLs */
3561         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3562         val = 1;
3563         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3564
3565         return (0);
3566 }
3567
3568 #undef FW_PARAM_PFVF
3569 #undef FW_PARAM_DEV
3570
3571 static void
3572 t4_set_desc(struct adapter *sc)
3573 {
3574         char buf[128];
3575         struct adapter_params *p = &sc->params;
3576
3577         snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
3578
3579         device_set_desc_copy(sc->dev, buf);
3580 }
3581
3582 static inline void
3583 ifmedia_add4(struct ifmedia *ifm, int m)
3584 {
3585
3586         ifmedia_add(ifm, m, 0, NULL);
3587         ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL);
3588         ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL);
3589         ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL);
3590 }
3591
3592 static void
3593 set_current_media(struct port_info *pi, struct ifmedia *ifm)
3594 {
3595         struct link_config *lc;
3596         int mword;
3597
3598         PORT_LOCK_ASSERT_OWNED(pi);
3599
3600         /* Leave current media alone if it's already set to IFM_NONE. */
3601         if (ifm->ifm_cur != NULL &&
3602             IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE)
3603                 return;
3604
3605         lc = &pi->link_cfg;
3606         if (lc->requested_aneg == AUTONEG_ENABLE &&
3607             lc->supported & FW_PORT_CAP_ANEG) {
3608                 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
3609                 return;
3610         }
3611         mword = IFM_ETHER | IFM_FDX;
3612         if (lc->requested_fc & PAUSE_TX)
3613                 mword |= IFM_ETH_TXPAUSE;
3614         if (lc->requested_fc & PAUSE_RX)
3615                 mword |= IFM_ETH_RXPAUSE;
3616         mword |= port_mword(pi, speed_to_fwspeed(lc->requested_speed));
3617         ifmedia_set(ifm, mword);
3618 }
3619
/*
 * Rebuild the port's ifmedia list from the speeds the port/transceiver
 * supports, then select the current media.  No-op for ports whose media
 * list is fixed (FIXED_IFMEDIA).
 */
static void
build_medialist(struct port_info *pi, struct ifmedia *ifm)
{
        uint16_t ss, speed;
        int unknown, mword, bit;
        struct link_config *lc;

        PORT_LOCK_ASSERT_OWNED(pi);

        if (pi->flags & FIXED_IFMEDIA)
                return;

        /*
         * First setup all the requested_ fields so that they comply with what's
         * supported by the port + transceiver.  Note that this clobbers any
         * user preferences set via sysctl_pause_settings or sysctl_autoneg.
         */
        init_l1cfg(pi);

        /*
         * Now (re)build the ifmedia list.
         */
        ifmedia_removeall(ifm);
        lc = &pi->link_cfg;
        ss = G_FW_PORT_CAP_SPEED(lc->supported); /* Supported Speeds */
        if (__predict_false(ss == 0)) { /* not supposed to happen. */
                MPASS(ss != 0);
                /*
                 * NB: also reached via goto from the speed loop below when a
                 * speed maps to IFM_NONE; the list is still empty either way
                 * because nothing is added before the goto.
                 */
no_media:
                MPASS(LIST_EMPTY(&ifm->ifm_list));
                ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
                ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
                return;
        }

        /* Add an entry (x4 pause variants) for each supported speed bit. */
        unknown = 0;
        for (bit = 0; bit < fls(ss); bit++) {
                speed = 1 << bit;
                MPASS(speed & M_FW_PORT_CAP_SPEED);
                if (ss & speed) {
                        mword = port_mword(pi, speed);
                        if (mword == IFM_NONE) {
                                goto no_media;
                        } else if (mword == IFM_UNKNOWN)
                                unknown++;
                        else
                                ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword);
                }
        }
        if (unknown > 0) /* Add one unknown for all unknown media types. */
                ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN);
        if (lc->supported & FW_PORT_CAP_ANEG)
                ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);

        set_current_media(pi, ifm);
}
3675
3676 /*
3677  * Update all the requested_* fields in the link config to something valid (and
3678  * reasonable).
3679  */
3680 static void
3681 init_l1cfg(struct port_info *pi)
3682 {
3683         struct link_config *lc = &pi->link_cfg;
3684
3685         PORT_LOCK_ASSERT_OWNED(pi);
3686
3687         /* Gbps -> Mbps */
3688         lc->requested_speed = port_top_speed(pi) * 1000;
3689
3690         if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) {
3691                 lc->requested_aneg = AUTONEG_ENABLE;
3692         } else {
3693                 lc->requested_aneg = AUTONEG_DISABLE;
3694         }
3695
3696         lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX);
3697
3698         if (t4_fec != -1) {
3699                 if (t4_fec & FEC_RS && lc->supported & FW_PORT_CAP_FEC_RS) {
3700                         lc->requested_fec = FEC_RS;
3701                 } else if (t4_fec & FEC_BASER_RS &&
3702                     lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
3703                         lc->requested_fec = FEC_BASER_RS;
3704                 } else {
3705                         lc->requested_fec = 0;
3706                 }
3707         } else {
3708                 /* Use the suggested value provided by the firmware in acaps */
3709                 if (lc->advertising & FW_PORT_CAP_FEC_RS &&
3710                     lc->supported & FW_PORT_CAP_FEC_RS) {
3711                         lc->requested_fec = FEC_RS;
3712                 } else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS &&
3713                     lc->supported & FW_PORT_CAP_FEC_BASER_RS) {
3714                         lc->requested_fec = FEC_BASER_RS;
3715                 } else {
3716                         lc->requested_fec = 0;
3717                 }
3718         }
3719 }
3720
3721 /*
3722  * Apply the settings in requested_* to the hardware.  The parameters are
3723  * expected to be sane.
3724  */
3725 static int
3726 apply_l1cfg(struct port_info *pi)
3727 {
3728         struct adapter *sc = pi->adapter;
3729         struct link_config *lc = &pi->link_cfg;
3730         int rc;
3731 #ifdef INVARIANTS
3732         uint16_t fwspeed;
3733
3734         ASSERT_SYNCHRONIZED_OP(sc);
3735         PORT_LOCK_ASSERT_OWNED(pi);
3736
3737         if (lc->requested_aneg == AUTONEG_ENABLE)
3738                 MPASS(lc->supported & FW_PORT_CAP_ANEG);
3739         if (lc->requested_fc & PAUSE_TX)
3740                 MPASS(lc->supported & FW_PORT_CAP_FC_TX);
3741         if (lc->requested_fc & PAUSE_RX)
3742                 MPASS(lc->supported & FW_PORT_CAP_FC_RX);
3743         if (lc->requested_fec == FEC_RS)
3744                 MPASS(lc->supported & FW_PORT_CAP_FEC_RS);
3745         if (lc->requested_fec == FEC_BASER_RS)
3746                 MPASS(lc->supported & FW_PORT_CAP_FEC_BASER_RS);
3747         fwspeed = speed_to_fwspeed(lc->requested_speed);
3748         MPASS(fwspeed != 0);
3749         MPASS(lc->supported & fwspeed);
3750 #endif
3751         rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
3752         if (rc != 0) {
3753                 device_printf(pi->dev, "l1cfg failed: %d\n", rc);
3754         } else {
3755                 lc->fc = lc->requested_fc;
3756                 lc->fec = lc->requested_fec;
3757         }
3758         return (rc);
3759 }
3760
3761 #define FW_MAC_EXACT_CHUNK      7
3762
3763 /*
3764  * Program the port's XGMAC based on parameters in ifnet.  The caller also
3765  * indicates which parameters should be programmed (the rest are left alone).
3766  */
3767 int
3768 update_mac_settings(struct ifnet *ifp, int flags)
3769 {
3770         int rc = 0;
3771         struct vi_info *vi = ifp->if_softc;
3772         struct port_info *pi = vi->pi;
3773         struct adapter *sc = pi->adapter;
3774         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
3775
3776         ASSERT_SYNCHRONIZED_OP(sc);
3777         KASSERT(flags, ("%s: not told what to update.", __func__));
3778
3779         if (flags & XGMAC_MTU)
3780                 mtu = ifp->if_mtu;
3781
3782         if (flags & XGMAC_PROMISC)
3783                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
3784
3785         if (flags & XGMAC_ALLMULTI)
3786                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
3787
3788         if (flags & XGMAC_VLANEX)
3789                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
3790
3791         if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
3792                 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
3793                     allmulti, 1, vlanex, false);
3794                 if (rc) {
3795                         if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
3796                             rc);
3797                         return (rc);
3798                 }
3799         }
3800
3801         if (flags & XGMAC_UCADDR) {
3802                 uint8_t ucaddr[ETHER_ADDR_LEN];
3803
3804                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
3805                 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
3806                     ucaddr, true, true);
3807                 if (rc < 0) {
3808                         rc = -rc;
3809                         if_printf(ifp, "change_mac failed: %d\n", rc);
3810                         return (rc);
3811                 } else {
3812                         vi->xact_addr_filt = rc;
3813                         rc = 0;
3814                 }
3815         }
3816
3817         if (flags & XGMAC_MCADDRS) {
3818                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
3819                 int del = 1;
3820                 uint64_t hash = 0;
3821                 struct ifmultiaddr *ifma;
3822                 int i = 0, j;
3823
3824                 if_maddr_rlock(ifp);
3825                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3826                         if (ifma->ifma_addr->sa_family != AF_LINK)
3827                                 continue;
3828                         mcaddr[i] =
3829                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
3830                         MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
3831                         i++;
3832
3833                         if (i == FW_MAC_EXACT_CHUNK) {
3834                                 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
3835                                     del, i, mcaddr, NULL, &hash, 0);
3836                                 if (rc < 0) {
3837                                         rc = -rc;
3838                                         for (j = 0; j < i; j++) {
3839                                                 if_printf(ifp,
3840                                                     "failed to add mc address"
3841                                                     " %02x:%02x:%02x:"
3842                                                     "%02x:%02x:%02x rc=%d\n",
3843                                                     mcaddr[j][0], mcaddr[j][1],
3844                                                     mcaddr[j][2], mcaddr[j][3],
3845                                                     mcaddr[j][4], mcaddr[j][5],
3846                                                     rc);
3847                                         }
3848                                         goto mcfail;
3849                                 }
3850                                 del = 0;
3851                                 i = 0;
3852                         }
3853                 }
3854                 if (i > 0) {
3855                         rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
3856                             mcaddr, NULL, &hash, 0);
3857                         if (rc < 0) {
3858                                 rc = -rc;
3859                                 for (j = 0; j < i; j++) {
3860                                         if_printf(ifp,
3861                                             "failed to add mc address"
3862                                             " %02x:%02x:%02x:"
3863                                             "%02x:%02x:%02x rc=%d\n",
3864                                             mcaddr[j][0], mcaddr[j][1],
3865                                             mcaddr[j][2], mcaddr[j][3],
3866                                             mcaddr[j][4], mcaddr[j][5],
3867                                             rc);
3868                                 }
3869                                 goto mcfail;
3870                         }
3871                 }
3872
3873                 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
3874                 if (rc != 0)
3875                         if_printf(ifp, "failed to set mc address hash: %d", rc);
3876 mcfail:
3877                 if_maddr_runlock(ifp);
3878         }
3879
3880         return (rc);
3881 }
3882
3883 /*
3884  * {begin|end}_synchronized_op must be called from the same thread.
3885  */
3886 int
3887 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
3888     char *wmesg)
3889 {
3890         int rc, pri;
3891
3892 #ifdef WITNESS
3893         /* the caller thinks it's ok to sleep, but is it really? */
3894         if (flags & SLEEP_OK)
3895                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3896                     "begin_synchronized_op");
3897 #endif
3898
3899         if (INTR_OK)
3900                 pri = PCATCH;
3901         else
3902                 pri = 0;
3903
3904         ADAPTER_LOCK(sc);
3905         for (;;) {
3906
3907                 if (vi && IS_DOOMED(vi)) {
3908                         rc = ENXIO;
3909                         goto done;
3910                 }
3911
3912                 if (!IS_BUSY(sc)) {
3913                         rc = 0;
3914                         break;
3915                 }
3916
3917                 if (!(flags & SLEEP_OK)) {
3918                         rc = EBUSY;
3919                         goto done;
3920                 }
3921
3922                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3923                         rc = EINTR;
3924                         goto done;
3925                 }
3926         }
3927
3928         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3929         SET_BUSY(sc);
3930 #ifdef INVARIANTS
3931         sc->last_op = wmesg;
3932         sc->last_op_thr = curthread;
3933         sc->last_op_flags = flags;
3934 #endif
3935
3936 done:
3937         if (!(flags & HOLD_LOCK) || rc)
3938                 ADAPTER_UNLOCK(sc);
3939
3940         return (rc);
3941 }
3942
/*
 * Tell if_ioctl and if_init that the VI is going away.  This is
 * special variant of begin_synchronized_op and must be paired with a
 * call to end_synchronized_op.
 */
void
doom_vi(struct adapter *sc, struct vi_info *vi)
{

        ADAPTER_LOCK(sc);
        /* New begin_synchronized_op calls on this VI now fail with ENXIO. */
        SET_DOOMED(vi);
        /* Kick any sleepers in begin_synchronized_op so they re-check. */
        wakeup(&sc->flags);
        /* Wait for the in-flight operation (if any) to finish, then claim. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        /* Record the owner of the op for debugging. */
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
        sc->last_op_flags = 0;
#endif
        ADAPTER_UNLOCK(sc);
}
3965
/*
 * {begin|end}_synchronized_op must be called from the same thread.
 */
void
end_synchronized_op(struct adapter *sc, int flags)
{

        /* LOCK_HELD: caller kept the adapter lock from begin_ (HOLD_LOCK). */
        if (flags & LOCK_HELD)
                ADAPTER_LOCK_ASSERT_OWNED(sc);
        else
                ADAPTER_LOCK(sc);

        KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
        CLR_BUSY(sc);
        /* Wake waiters in begin_synchronized_op/doom_vi. */
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);
}
3983
/*
 * Bring a VI and its ifnet to the running state: perform one-time adapter
 * and per-VI initialization if needed, program MAC settings, enable the VI
 * in the hardware, enable its tx queues, claim the trace queue for the
 * first main VI up, and start the stats tick.  Returns 0 or an errno; on
 * failure the VI is torn back down via cxgbe_uninit_synchronized.  Must be
 * called inside a synchronized op.
 */
static int
cxgbe_init_synchronized(struct vi_info *vi)
{
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = vi->ifp;
        int rc = 0, i;
        struct sge_txq *txq;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return (0);     /* already running */

        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        if (!(vi->flags & VI_INIT_DONE) &&
            ((rc = vi_full_init(vi)) != 0))
                return (rc); /* error message displayed already */

        rc = update_mac_settings(ifp, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        /* Enable the VI in both directions. */
        rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /*
         * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
         * if this changes.
         */

        for_each_txq(vi, i, txq) {
                TXQ_LOCK(txq);
                txq->eq.flags |= EQ_ENABLED;
                TXQ_UNLOCK(txq);
        }

        /*
         * The first iq of the first port to come up is used for tracing.
         */
        if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
                sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
                t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
                    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
                    V_QUEUENUMBER(sc->traceq));
                pi->flags |= HAS_TRACEQ;
        }

        /* all ok */
        PORT_LOCK(pi);
        if (pi->up_vis++ == 0) {
                /* First VI up on this port: refresh port info and media. */
                t4_update_port_info(pi);
                build_medialist(pi, &pi->media);
                apply_l1cfg(pi);
        }
        ifp->if_drv_flags |= IFF_DRV_RUNNING;

        /* Per-VI tick for multi-VI ports and VFs, else one per-port tick. */
        if (pi->nvi > 1 || sc->flags & IS_VF)
                callout_reset(&vi->tick, hz, vi_tick, vi);
        else
                callout_reset(&pi->tick, hz, cxgbe_tick, pi);
        PORT_UNLOCK(pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(vi);

        return (rc);
}
4058
/*
 * Idempotent.
 *
 * Take a VI out of the running state: disable it in the MPS, disable its
 * tx queues, stop the stats tick, and — when the last VI on the port goes
 * down — mark the link down.  Returns 0 or an errno from t4_enable_vi.
 * Must be called inside a synchronized op.
 */
static int
cxgbe_uninit_synchronized(struct vi_info *vi)
{
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = vi->ifp;
        int rc, i;
        struct sge_txq *txq;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (!(vi->flags & VI_INIT_DONE)) {
                KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
                    ("uninited VI is running"));
                return (0);
        }

        /*
         * Disable the VI so that all its data in either direction is discarded
         * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
         * tick) intact as the TP can deliver negative advice or data that it's
         * holding in its RAM (for an offloaded connection) even after the VI is
         * disabled.
         */
        rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
        if (rc) {
                if_printf(ifp, "disable_vi failed: %d\n", rc);
                return (rc);
        }

        for_each_txq(vi, i, txq) {
                TXQ_LOCK(txq);
                txq->eq.flags &= ~EQ_ENABLED;
                TXQ_UNLOCK(txq);
        }

        PORT_LOCK(pi);
        /* Stop whichever tick cxgbe_init_synchronized started. */
        if (pi->nvi > 1 || sc->flags & IS_VF)
                callout_stop(&vi->tick);
        else
                callout_stop(&pi->tick);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return (0);
        }
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        pi->up_vis--;
        if (pi->up_vis > 0) {
                /* Other VIs on this port are still up; leave the link alone. */
                PORT_UNLOCK(pi);
                return (0);
        }

        /* Last VI down: report the link as down. */
        pi->link_cfg.link_ok = 0;
        pi->link_cfg.speed = 0;
        pi->link_cfg.link_down_rc = 255;
        t4_os_link_changed(pi);
        pi->old_link_cfg = pi->link_cfg;
        PORT_UNLOCK(pi);

        return (0);
}
4123
/*
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 *
 * Allocates and wires up all interrupt handlers for the adapter: a single
 * "all" handler for INTx/1-vector setups, otherwise an error interrupt
 * (PF only), the firmware event queue interrupt, and one interrupt per
 * rx queue (and/or netmap rx queue, and offload rx queue) per VI.
 */
int
t4_setup_intr_handlers(struct adapter *sc)
{
        int rc, rid, p, q, v;
        char s[8];
        struct irq *irq;
        struct port_info *pi;
        struct vi_info *vi;
        struct sge *sge = &sc->sge;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
        struct sge_nm_rxq *nm_rxq;
#endif

        /*
         * Setup interrupts.
         */
        irq = &sc->irq[0];
        /* INTx uses rid 0; MSI/MSI-X resources start at rid 1. */
        rid = sc->intr_type == INTR_INTX ? 0 : 1;
        if (sc->intr_count == 1)
                return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));

        /* Multiple interrupts. */
        if (sc->flags & IS_VF)
                KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));
        else
                KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));

        /* The first one is always error intr on PFs */
        if (!(sc->flags & IS_VF)) {
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;
        }

        /* The second one is always the firmware event queue (first on VFs) */
        rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
        if (rc != 0)
                return (rc);
        irq++;
        rid++;

        for_each_port(sc, p) {
                pi = sc->port[p];
                for_each_vi(pi, v, vi) {
                        vi->first_intr = rid - 1;

                        if (vi->nnmrxq > 0) {
                                /*
                                 * Netmap and NIC rx queues share vectors:
                                 * walk max(nrxq, nnmrxq) slots, attaching a
                                 * NIC rxq and/or a netmap rxq to each irq.
                                 */
                                int n = max(vi->nrxq, vi->nnmrxq);

                                MPASS(vi->flags & INTR_RXQ);

                                rxq = &sge->rxq[vi->first_rxq];
#ifdef DEV_NETMAP
                                nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
#endif
                                for (q = 0; q < n; q++) {
                                        /* Name: <port><vi letter><queue>. */
                                        snprintf(s, sizeof(s), "%x%c%x", p,
                                            'a' + v, q);
                                        if (q < vi->nrxq)
                                                irq->rxq = rxq++;
#ifdef DEV_NETMAP
                                        if (q < vi->nnmrxq)
                                                irq->nm_rxq = nm_rxq++;
#endif
                                        rc = t4_alloc_irq(sc, irq, rid,
                                            t4_vi_intr, irq, s);
                                        if (rc != 0)
                                                return (rc);
                                        irq++;
                                        rid++;
                                        vi->nintr++;
                                }
                        } else if (vi->flags & INTR_RXQ) {
                                /* One vector per NIC rx queue. */
                                for_each_rxq(vi, q, rxq) {
                                        snprintf(s, sizeof(s), "%x%c%x", p,
                                            'a' + v, q);
                                        rc = t4_alloc_irq(sc, irq, rid,
                                            t4_intr, rxq, s);
                                        if (rc != 0)
                                                return (rc);
                                        irq++;
                                        rid++;
                                        vi->nintr++;
                                }
                        }
#ifdef TCP_OFFLOAD
                        if (vi->flags & INTR_OFLD_RXQ) {
                                /* Offload queues use an uppercase VI letter. */
                                for_each_ofld_rxq(vi, q, ofld_rxq) {
                                        snprintf(s, sizeof(s), "%x%c%x", p,
                                            'A' + v, q);
                                        rc = t4_alloc_irq(sc, irq, rid,
                                            t4_intr, ofld_rxq, s);
                                        if (rc != 0)
                                                return (rc);
                                        irq++;
                                        rid++;
                                        vi->nintr++;
                                }
                        }
#endif
                }
        }
        MPASS(irq == &sc->irq[sc->intr_count]);

        return (0);
}
4242
/*
 * One-time adapter-wide initialization: set up the adapter queues, create
 * the driver taskqueues, program the RSS key (with RSS), and enable
 * interrupts (PF only).  Sets FULL_INIT_DONE on success; on failure all
 * partial state is undone via adapter_full_uninit.  Must be called inside
 * a synchronized op, without the adapter lock.
 */
int
adapter_full_init(struct adapter *sc)
{
        int rc, i;
#ifdef RSS
        uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
        uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
#endif

        ASSERT_SYNCHRONIZED_OP(sc);
        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
        KASSERT((sc->flags & FULL_INIT_DONE) == 0,
            ("%s: FULL_INIT_DONE already", __func__));

        /*
         * queues that belong to the adapter (not any particular port).
         */
        rc = t4_setup_adapter_queues(sc);
        if (rc != 0)
                goto done;

        for (i = 0; i < nitems(sc->tq); i++) {
                sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
                    taskqueue_thread_enqueue, &sc->tq[i]);
                if (sc->tq[i] == NULL) {
                        device_printf(sc->dev,
                            "failed to allocate task queue %d\n", i);
                        rc = ENOMEM;
                        goto done;
                }
                taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
                    device_get_nameunit(sc->dev), i);
        }
#ifdef RSS
        /* The hardware wants the key as big-endian words in reverse order. */
        MPASS(RSS_KEYSIZE == 40);
        rss_getkey((void *)&raw_rss_key[0]);
        for (i = 0; i < nitems(rss_key); i++) {
                rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
        }
        t4_write_rss_key(sc, &rss_key[0], -1, 1);
#endif

        if (!(sc->flags & IS_VF))
                t4_intr_enable(sc);
        sc->flags |= FULL_INIT_DONE;
done:
        if (rc != 0)
                adapter_full_uninit(sc);

        return (rc);
}
4294
4295 int
4296 adapter_full_uninit(struct adapter *sc)
4297 {
4298         int i;
4299
4300         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
4301
4302         t4_teardown_adapter_queues(sc);
4303
4304         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
4305                 taskqueue_free(sc->tq[i]);
4306                 sc->tq[i] = NULL;
4307         }
4308
4309         sc->flags &= ~FULL_INIT_DONE;
4310
4311         return (0);
4312 }
4313
4314 #ifdef RSS
4315 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
4316     RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
4317     RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
4318     RSS_HASHTYPE_RSS_UDP_IPV6)
4319
4320 /* Translates kernel hash types to hardware. */
4321 static int
4322 hashconfig_to_hashen(int hashconfig)
4323 {
4324         int hashen = 0;
4325
4326         if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
4327                 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
4328         if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
4329                 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
4330         if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
4331                 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4332                     F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4333         }
4334         if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
4335                 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
4336                     F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4337         }
4338         if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
4339                 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
4340         if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
4341                 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
4342
4343         return (hashen);
4344 }
4345
4346 /* Translates hardware hash types to kernel. */
4347 static int
4348 hashen_to_hashconfig(int hashen)
4349 {
4350         int hashconfig = 0;
4351
4352         if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
4353                 /*
4354                  * If UDP hashing was enabled it must have been enabled for
4355                  * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
4356                  * enabling any 4-tuple hash is nonsense configuration.
4357                  */
4358                 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4359                     F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
4360
4361                 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4362                         hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
4363                 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4364                         hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
4365         }
4366         if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
4367                 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
4368         if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
4369                 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
4370         if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
4371                 hashconfig |= RSS_HASHTYPE_RSS_IPV4;
4372         if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
4373                 hashconfig |= RSS_HASHTYPE_RSS_IPV6;
4374
4375         return (hashconfig);
4376 }
4377 #endif
4378
4379 int
4380 vi_full_init(struct vi_info *vi)
4381 {
4382         struct adapter *sc = vi->pi->adapter;
4383         struct ifnet *ifp = vi->ifp;
4384         uint16_t *rss;
4385         struct sge_rxq *rxq;
4386         int rc, i, j, hashen;
4387 #ifdef RSS
4388         int nbuckets = rss_getnumbuckets();
4389         int hashconfig = rss_gethashconfig();
4390         int extra;
4391 #endif
4392
4393         ASSERT_SYNCHRONIZED_OP(sc);
4394         KASSERT((vi->flags & VI_INIT_DONE) == 0,
4395             ("%s: VI_INIT_DONE already", __func__));
4396
4397         sysctl_ctx_init(&vi->ctx);
4398         vi->flags |= VI_SYSCTL_CTX;
4399
4400         /*
4401          * Allocate tx/rx/fl queues for this VI.
4402          */
4403         rc = t4_setup_vi_queues(vi);
4404         if (rc != 0)
4405                 goto done;      /* error message displayed already */
4406
4407         /*
4408          * Setup RSS for this VI.  Save a copy of the RSS table for later use.
4409          */
4410         if (vi->nrxq > vi->rss_size) {
4411                 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
4412                     "some queues will never receive traffic.\n", vi->nrxq,
4413                     vi->rss_size);
4414         } else if (vi->rss_size % vi->nrxq) {
4415                 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
4416                     "expect uneven traffic distribution.\n", vi->nrxq,
4417                     vi->rss_size);
4418         }
4419 #ifdef RSS
4420         if (vi->nrxq != nbuckets) {
4421                 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);"
4422                     "performance will be impacted.\n", vi->nrxq, nbuckets);
4423         }
4424 #endif
4425         rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
4426         for (i = 0; i < vi->rss_size;) {
4427 #ifdef RSS
4428                 j = rss_get_indirection_to_bucket(i);
4429                 j %= vi->nrxq;
4430                 rxq = &sc->sge.rxq[vi->first_rxq + j];
4431                 rss[i++] = rxq->iq.abs_id;
4432 #else
4433                 for_each_rxq(vi, j, rxq) {
4434                         rss[i++] = rxq->iq.abs_id;
4435                         if (i == vi->rss_size)
4436                                 break;
4437                 }
4438 #endif
4439         }
4440
4441         rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
4442             vi->rss_size);
4443         if (rc != 0) {
4444                 if_printf(ifp, "rss_config failed: %d\n", rc);
4445                 goto done;
4446         }
4447
4448 #ifdef RSS
4449         hashen = hashconfig_to_hashen(hashconfig);
4450
4451         /*
4452          * We may have had to enable some hashes even though the global config
4453          * wants them disabled.  This is a potential problem that must be
4454          * reported to the user.
4455          */
4456         extra = hashen_to_hashconfig(hashen) ^ hashconfig;
4457
4458         /*
4459          * If we consider only the supported hash types, then the enabled hashes
4460          * are a superset of the requested hashes.  In other words, there cannot
4461          * be any supported hash that was requested but not enabled, but there
4462          * can be hashes that were not requested but had to be enabled.
4463          */
4464         extra &= SUPPORTED_RSS_HASHTYPES;
4465         MPASS((extra & hashconfig) == 0);
4466
4467         if (extra) {
4468                 if_printf(ifp,
4469                     "global RSS config (0x%x) cannot be accomodated.\n",
4470                     hashconfig);
4471         }
4472         if (extra & RSS_HASHTYPE_RSS_IPV4)
4473                 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
4474         if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
4475                 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
4476         if (extra & RSS_HASHTYPE_RSS_IPV6)
4477                 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
4478         if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
4479                 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
4480         if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
4481                 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
4482         if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
4483                 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
4484 #else
4485         hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
4486             F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
4487             F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
4488             F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
4489 #endif
4490         rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
4491         if (rc != 0) {
4492                 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
4493                 goto done;
4494         }
4495
4496         vi->rss = rss;
4497         vi->flags |= VI_INIT_DONE;
4498 done:
4499         if (rc != 0)
4500                 vi_full_uninit(vi);
4501
4502         return (rc);
4503 }
4504
/*
 * Idempotent.
 *
 * Undo vi_full_init: quiesce every queue that belongs to the VI, free its
 * saved RSS tables, tear down its queues, and clear VI_INIT_DONE.
 */
int
vi_full_uninit(struct vi_info *vi)
{
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        int i;
        struct sge_rxq *rxq;
        struct sge_txq *txq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
        struct sge_wrq *ofld_txq;
#endif

        if (vi->flags & VI_INIT_DONE) {

                /* Need to quiesce queues.  */

                /* XXX: Only for the first VI? */
                if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
                        quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);

                for_each_txq(vi, i, txq) {
                        quiesce_txq(sc, txq);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_txq(vi, i, ofld_txq) {
                        quiesce_wrq(sc, ofld_txq);
                }
#endif

                for_each_rxq(vi, i, rxq) {
                        quiesce_iq(sc, &rxq->iq);
                        quiesce_fl(sc, &rxq->fl);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_rxq(vi, i, ofld_rxq) {
                        quiesce_iq(sc, &ofld_rxq->iq);
                        quiesce_fl(sc, &ofld_rxq->fl);
                }
#endif
                /* RSS tables saved by vi_full_init / netmap code. */
                free(vi->rss, M_CXGBE);
                free(vi->nm_rss, M_CXGBE);
        }

        t4_teardown_vi_queues(vi);
        vi->flags &= ~VI_INIT_DONE;

        return (0);
}
4559
/*
 * Wait (busy-poll with pause(9)) until a tx queue is completely drained:
 * software ring empty, hardware caught up, and all descriptors reclaimed.
 * The queue must already be disabled (EQ_ENABLED clear) so no new work
 * can arrive.
 */
static void
quiesce_txq(struct adapter *sc, struct sge_txq *txq)
{
        struct sge_eq *eq = &txq->eq;
        /* Status page lives just past the last hardware descriptor. */
        struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];

        (void) sc;      /* unused */

#ifdef INVARIANTS
        TXQ_LOCK(txq);
        MPASS((eq->flags & EQ_ENABLED) == 0);
        TXQ_UNLOCK(txq);
#endif

        /* Wait for the mp_ring to empty. */
        while (!mp_ring_is_idle(txq->r)) {
                mp_ring_check_drainage(txq->r, 0);
                pause("rquiesce", 1);
        }

        /* Then wait for the hardware to finish. */
        while (spg->cidx != htobe16(eq->pidx))
                pause("equiesce", 1);

        /* Finally, wait for the driver to reclaim all descriptors. */
        while (eq->cidx != eq->pidx)
                pause("dquiesce", 1);
}
4588
/*
 * Quiesce a work-request queue.  Not implemented yet (XXXTX): callers
 * currently proceed without waiting for the wrq to drain.
 */
static void
quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
{

        /* XXXTX */
}
4595
/*
 * Mark an ingress queue disabled, spinning until the interrupt handler
 * (which owns the iq while its state is not IQS_IDLE) has let go.
 */
static void
quiesce_iq(struct adapter *sc, struct sge_iq *iq)
{
        (void) sc;      /* unused */

        /* Synchronize with the interrupt handler */
        while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
                pause("iqfree", 1);
}
4605
/*
 * Doom a freelist so the starvation handler leaves it alone, and stop the
 * starved-freelist callout.  Asserts the fl is no longer on the starving
 * list afterwards.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        /* sfl_lock protects the starving-freelist list and its callout. */
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        callout_stop(&sc->sfl_callout);
        mtx_unlock(&sc->sfl_lock);

        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
4619
/*
 * Allocate a bus IRQ resource for the given rid and install `handler`
 * with `arg` on it.  On success irq->res/irq->tag are valid and the
 * interrupt is described with `name` (if non-NULL) for vmstat -i.
 * Returns 0 or an errno; partially-set-up irqs are cleaned up later by
 * t4_free_irq via t4_detach.
 */
static int
t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
    driver_intr_t *handler, void *arg, char *name)
{
        int rc;

        irq->rid = rid;
        irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (irq->res == NULL) {
                device_printf(sc->dev,
                    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
                return (ENOMEM);
        }

        rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
            NULL, handler, arg, &irq->tag);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to setup interrupt for rid %d, name %s: %d\n",
                    rid, name, rc);
        } else if (name)
                bus_describe_intr(sc->dev, irq->res, irq->tag, name);

        return (rc);
}
4646
4647 static int
4648 t4_free_irq(struct adapter *sc, struct irq *irq)
4649 {
4650         if (irq->tag)
4651                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
4652         if (irq->res)
4653                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
4654
4655         bzero(irq, sizeof(*irq));
4656
4657         return (0);
4658 }
4659
/*
 * Fill in a register dump: version word encodes the chip id and revision,
 * and the register contents (regs->len bytes) are read into buf.
 */
static void
get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{

        regs->version = chip_id(sc) | chip_rev(sc) << 10;
        t4_get_regs(sc, buf, regs->len);
}
4667
/*
 * PL indirect register window: A_PL_INDIR_CMD selects a VF register
 * (auto-increment flag, VF id, register address) and A_PL_INDIR_DATA
 * reads/writes through the window.  Used by read_vf_stat() and
 * t4_clr_vi_stats() on the PF to access per-VF MPS statistics.
 */
#define A_PL_INDIR_CMD  0x1f8

#define S_PL_AUTOINC    31
#define M_PL_AUTOINC    0x1U
#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC)
#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)

#define S_PL_VFID       20
#define M_PL_VFID       0xffU
#define V_PL_VFID(x)    ((x) << S_PL_VFID)
#define G_PL_VFID(x)    (((x) >> S_PL_VFID) & M_PL_VFID)

#define S_PL_ADDR       0
#define M_PL_ADDR       0xfffffU
#define V_PL_ADDR(x)    ((x) << S_PL_ADDR)
#define G_PL_ADDR(x)    (((x) >> S_PL_ADDR) & M_PL_ADDR)

#define A_PL_INDIR_DATA 0x1fc
4686
/*
 * Read a 64-bit VF MPS statistic (stored as a low/high 32-bit register
 * pair at `reg`).  A VF reads its own registers directly; the PF goes
 * through the PL indirect window.  Caller must hold sc->reg_lock.
 */
static uint64_t
read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
{
        u32 stats[2];

        mtx_assert(&sc->reg_lock, MA_OWNED);
        if (sc->flags & IS_VF) {
                stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
                stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
        } else {
                /* Auto-increment: the second DATA read returns reg + 4. */
                t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
                    V_PL_VFID(G_FW_VIID_VIN(viid)) |
                    V_PL_ADDR(VF_MPS_REG(reg)));
                stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
                stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
        }
        return (((uint64_t)stats[1]) << 32 | stats[0]);
}
4705
/*
 * Read all tx/rx MPS statistics for a VI into `stats`.  Caller must hold
 * sc->reg_lock (asserted in read_vf_stat).
 */
static void
t4_get_vi_stats(struct adapter *sc, unsigned int viid,
    struct fw_vi_stats_vf *stats)
{

/* Read the 64-bit counter whose low word is at A_MPS_VF_STAT_<name>_L. */
#define GET_STAT(name) \
        read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)

        stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
        stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
        stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
        stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
        stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
        stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
        stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
        stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
        stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
        stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
        stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
        stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
        stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
        stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
        stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
        stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);

#undef GET_STAT
}
4733
/*
 * Zero all of a VI's MPS statistics registers through the PL indirect
 * window (auto-increment walks the whole stat block).  PF use only — a VF
 * cannot reach A_PL_INDIR_CMD.
 */
static void
t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
{
        int reg;

        t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
            V_PL_VFID(G_FW_VIID_VIN(viid)) |
            V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
        for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
             reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
                t4_write_reg(sc, A_PL_INDIR_DATA, 0);
}
4746
/*
 * Refresh a VI's ifnet counters from its hardware MPS statistics.
 * Rate-limited to once per 250ms; a no-op for uninitialized VIs.
 */
static void
vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
{
        struct ifnet *ifp = vi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct fw_vi_stats_vf *s = &vi->stats;
        struct timeval tv;
        const struct timeval interval = {0, 250000};    /* 250ms */

        if (!(vi->flags & VI_INIT_DONE))
                return;

        /* Skip if we refreshed less than `interval` ago. */
        getmicrotime(&tv);
        timevalsub(&tv, &interval);
        if (timevalcmp(&tv, &vi->last_refreshed, <))
                return;

        mtx_lock(&sc->reg_lock);
        t4_get_vi_stats(sc, vi->viid, &vi->stats);

        ifp->if_ipackets = s->rx_bcast_frames + s->rx_mcast_frames +
            s->rx_ucast_frames;
        ifp->if_ierrors = s->rx_err_frames;
        ifp->if_opackets = s->tx_bcast_frames + s->tx_mcast_frames +
            s->tx_ucast_frames + s->tx_offload_frames;
        ifp->if_oerrors = s->tx_drop_frames;
        ifp->if_ibytes = s->rx_bcast_bytes + s->rx_mcast_bytes +
            s->rx_ucast_bytes;
        ifp->if_obytes = s->tx_bcast_bytes + s->tx_mcast_bytes +
            s->tx_ucast_bytes + s->tx_offload_bytes;
        ifp->if_imcasts = s->rx_mcast_frames;
        ifp->if_omcasts = s->tx_mcast_frames;

        /* Software drops are tracked per tx queue ring. */
        drops = 0;
        for_each_txq(vi, i, txq)
                drops += counter_u64_fetch(txq->r->drops);
        ifp->if_snd.ifq_drops = drops;

        getmicrotime(&vi->last_refreshed);
        mtx_unlock(&sc->reg_lock);
}
4789
/*
 * Refresh the main VI's ifnet counters from the port's MAC statistics
 * plus per-channel TP congestion-drop counters.  Rate-limited to once
 * per 250ms.
 */
static void
cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
{
        struct vi_info *vi = &pi->vi[0];
        struct ifnet *ifp = vi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;
        struct timeval tv;
        const struct timeval interval = {0, 250000};    /* 250ms */

        /* Skip if we refreshed less than `interval` ago. */
        getmicrotime(&tv);
        timevalsub(&tv, &interval);
        if (timevalcmp(&tv, &pi->last_refreshed, <))
                return;

        t4_get_port_stats(sc, pi->tx_chan, s);

        ifp->if_opackets = s->tx_frames;
        ifp->if_ipackets = s->rx_frames;
        ifp->if_obytes = s->tx_octets;
        ifp->if_ibytes = s->rx_octets;
        ifp->if_omcasts = s->tx_mcast_frames;
        ifp->if_imcasts = s->rx_mcast_frames;
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
            s->rx_trunc3;
        /* Add TP tunnel congestion drops for each rx channel of this port. */
        for (i = 0; i < sc->chip_params->nchan; i++) {
                if (pi->rx_chan_map & (1 << i)) {
                        uint32_t v;

                        mtx_lock(&sc->reg_lock);
                        t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
                            1, A_TP_MIB_TNL_CNG_DROP_0 + i);
                        mtx_unlock(&sc->reg_lock);
                        ifp->if_iqdrops += v;
                }
        }

        /* Hardware tx drops plus software ring drops. */
        drops = s->tx_drop;
        for_each_txq(vi, i, txq)
                drops += counter_u64_fetch(txq->r->drops);
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        getmicrotime(&pi->last_refreshed);
}
4840
4841 static void
4842 cxgbe_tick(void *arg)
4843 {
4844         struct port_info *pi = arg;
4845         struct adapter *sc = pi->adapter;
4846
4847         PORT_LOCK_ASSERT_OWNED(pi);
4848         cxgbe_refresh_stats(sc, pi);
4849
4850         callout_schedule(&pi->tick, hz);
4851 }
4852
4853 void
4854 vi_tick(void *arg)
4855 {
4856         struct vi_info *vi = arg;
4857         struct adapter *sc = vi->pi->adapter;
4858
4859         vi_refresh_stats(sc, vi);
4860
4861         callout_schedule(&vi->tick, hz);
4862 }
4863
4864 static void
4865 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4866 {
4867         struct ifnet *vlan;
4868
4869         if (arg != ifp || ifp->if_type != IFT_ETHER)
4870                 return;
4871
4872         vlan = VLAN_DEVAT(ifp, vid);
4873         VLAN_SETCOOKIE(vlan, ifp);
4874 }
4875
4876 /*
4877  * Should match fw_caps_config_<foo> enums in t4fw_interface.h
4878  */
4879 static char *caps_decoder[] = {
4880         "\20\001IPMI\002NCSI",                          /* 0: NBM */
4881         "\20\001PPP\002QFC\003DCBX",                    /* 1: link */
4882         "\20\001INGRESS\002EGRESS",                     /* 2: switch */
4883         "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"      /* 3: NIC */
4884             "\006HASHFILTER\007ETHOFLD",
4885         "\20\001TOE",                                   /* 4: TOE */
4886         "\20\001RDDP\002RDMAC",                         /* 5: RDMA */
4887         "\20\001INITIATOR_PDU\002TARGET_PDU"            /* 6: iSCSI */
4888             "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
4889             "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
4890             "\007T10DIF"
4891             "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
4892         "\20\001LOOKASIDE\002TLSKEYS",                  /* 7: Crypto */
4893         "\20\001INITIATOR\002TARGET\003CTRL_OFLD"       /* 8: FCoE */
4894                     "\004PO_INITIATOR\005PO_TARGET",
4895 };
4896
/*
 * Register all adapter-wide sysctl nodes under the device's tree
 * (dev.t4nex.X and friends).  The small set at the top is common to PF
 * and VF; everything after the IS_VF check is PF-only.  Optional
 * sections are compiled in under SBUF_DRAIN (diagnostic "misc" nodes)
 * and TCP_OFFLOAD (TOE tunables).
 */
void
t4_sysctls(struct adapter *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children, *c0;
        /* "%b"-style bit-number string for decoding sc->doorbells. */
        static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};

        ctx = device_get_sysctl_ctx(sc->dev);

        /*
         * dev.t4nex.X.
         */
        oid = device_get_sysctl_tree(sc->dev);
        c0 = children = SYSCTL_CHILDREN(oid);

        sc->sc_do_rxcopy = 1;
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
            &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
            sc->params.nports, "# of ports");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
            CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
            sysctl_bitfield, "A", "available doorbells");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
            sc->params.vpd.cclk, "core clock frequency (in KHz)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
            CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
            sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
            "interrupt holdoff timer values (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
            CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
            sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
            "interrupt holdoff packet counter values");

        t4_sge_sysctls(sc, ctx, children);

        sc->lro_timeout = 100;
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
            &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
            &sc->debug_flags, 0, "flags to enable runtime debugging");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
            CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
            CTLFLAG_RD, sc->fw_version, 0, "firmware version");

        /* A VF exposes only the common nodes registered above. */
        if (sc->flags & IS_VF)
                return;

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
            NULL, chip_rev(sc), "chip hardware revision");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
            CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
            CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
            CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
            CTLFLAG_RD, sc->params.vpd.na, 0, "network address");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
            sc->er_version, 0, "expansion ROM version");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
            sc->bs_version, 0, "bootstrap firmware version");

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
            NULL, sc->params.scfg_vers, "serial config version");

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
            NULL, sc->params.vpd_vers, "VPD version");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
            CTLFLAG_RD, sc->cfg_file, 0, "configuration file");

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
            sc->cfcsum, "config file checksum");

/*
 * One read-only node per firmware capability word, decoded with the
 * matching caps_decoder[] bit string.
 */
#define SYSCTL_CAP(name, n, text) \
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
            CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
            sysctl_bitfield, "A", "available " text " capabilities")

        SYSCTL_CAP(nbmcaps, 0, "NBM");
        SYSCTL_CAP(linkcaps, 1, "link");
        SYSCTL_CAP(switchcaps, 2, "switch");
        SYSCTL_CAP(niccaps, 3, "NIC");
        SYSCTL_CAP(toecaps, 4, "TCP offload");
        SYSCTL_CAP(rdmacaps, 5, "RDMA");
        SYSCTL_CAP(iscsicaps, 6, "iSCSI");
        SYSCTL_CAP(cryptocaps, 7, "crypto");
        SYSCTL_CAP(fcoecaps, 8, "FCoE");
#undef SYSCTL_CAP

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
            NULL, sc->tids.nftids, "number of filters");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
            CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
            "chip temperature (in Celsius)");

#ifdef SBUF_DRAIN
        /*
         * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
         */
        oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
            CTLFLAG_RD | CTLFLAG_SKIP, NULL,
            "logs and miscellaneous information");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cctrl, "A", "congestion control");

        /*
         * CIM IBQ/OBQ nodes share one handler; arg2 selects the queue
         * (OBQs are offset by CIM_NUM_IBQ).
         */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
            "A", "CIM logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_ma_la, "A", "CIM MA logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");

        /* T5+ chips have two extra OBQs for SGE rx. */
        if (chip_id(sc) > CHELSIO_T4) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
                    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
                    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
        }

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_qcfg, "A", "CIM queue configuration");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cpl_stats, "A", "CPL statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_ddp_stats, "A", "non-TCP DDP statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_devlog, "A", "firmware's device log");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_fcoe_stats, "A", "FCoE statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_hw_sched, "A", "hardware scheduler ");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_l2t, "A", "hardware L2 table");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_lb_stats, "A", "loopback statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_meminfo, "A", "memory regions");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
            "A", "MPS TCAM entries");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_path_mtus, "A", "path MTUs");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_pm_stats, "A", "PM statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_rdma_stats, "A", "RDMA statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tcp_stats, "A", "TCP statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tids, "A", "TID information");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tp_err_stats, "A", "TP error statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
            CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
            "TP logic analyzer event capture mask");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tp_la, "A", "TP logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tx_rate, "A", "Tx rate");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_ulprx_la, "A", "ULPRX logic analyzer");

        if (chip_id(sc) >= CHELSIO_T5) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
                    sysctl_wcwr_stats, "A", "write combined work requests");
        }
#endif

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                /*
                 * dev.t4nex.X.toe.
                 */
                oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
                    NULL, "TOE parameters");
                children = SYSCTL_CHILDREN(oid);

                sc->tt.sndbuf = 256 * 1024;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
                    &sc->tt.sndbuf, 0, "max hardware send buffer size");

                sc->tt.ddp = 0;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
                    &sc->tt.ddp, 0, "DDP allowed");

                /* Defaults for indsz/ddp_thres come from current hw state. */
                sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
                    &sc->tt.indsz, 0, "DDP max indicate size allowed");

                sc->tt.ddp_thres =
                    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
                    &sc->tt.ddp_thres, 0, "DDP threshold");

                sc->tt.rx_coalesce = 1;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
                    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");

                sc->tt.tx_align = 1;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
                    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
                    "TP timer tick (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
                    "TCP timestamp tick (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
                    "DACK tick (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
                    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
                    "IU", "DACK timer (us)");

                /*
                 * TP timer nodes share sysctl_tp_timer; arg2 is the
                 * register address to read.
                 */
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
                    sysctl_tp_timer, "LU", "Retransmit min (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
                    sysctl_tp_timer, "LU", "Retransmit max (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
                    sysctl_tp_timer, "LU", "Persist timer min (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
                    sysctl_tp_timer, "LU", "Persist timer max (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
                    sysctl_tp_timer, "LU", "Keepidle idle timer (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
                    sysctl_tp_timer, "LU", "Keepidle interval (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
                    sysctl_tp_timer, "LU", "Initial SRTT (us)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
                    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
                    sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
        }
#endif
}
5267
/*
 * Register per-VI sysctl nodes under the VI device's tree
 * (dev.v?(cxgbe|cxl).X).  Queue-count/index nodes are always present;
 * offload and netmap queue nodes appear only when the VI actually has
 * such queues, and rsrv_noflowq only on the main VI of a port.
 */
void
vi_sysctls(struct vi_info *vi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(vi->dev);

        /*
         * dev.v?(cxgbe|cxl).X.
         */
        oid = device_get_sysctl_tree(vi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
            vi->viid, "VI identifer");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &vi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &vi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &vi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &vi->first_txq, 0, "index of first tx queue");
        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
            vi->rss_size, "size of RSS indirection table");

        if (IS_MAIN_VI(vi)) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
                    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
                    "Reserve queue 0 for non-flowid packets");
        }

#ifdef TCP_OFFLOAD
        if (vi->nofldrxq != 0) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &vi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &vi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &vi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif
#ifdef DEV_NETMAP
        if (vi->nnmrxq != 0) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
                    &vi->nnmrxq, 0, "# of netmap rx queues");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
                    &vi->nnmtxq, 0, "# of netmap tx queues");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
                    CTLFLAG_RD, &vi->first_nm_rxq, 0,
                    "index of first netmap rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
                    CTLFLAG_RD, &vi->first_nm_txq, 0,
                    "index of first netmap tx queue");
        }
#endif

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
            "tx queue size");
}
5347
5348 static void
5349 cxgbe_sysctls(struct port_info *pi)
5350 {
5351         struct sysctl_ctx_list *ctx;
5352         struct sysctl_oid *oid;
5353         struct sysctl_oid_list *children, *children2;
5354         struct adapter *sc = pi->adapter;
5355         int i;
5356         char name[16];
5357
5358         ctx = device_get_sysctl_ctx(pi->dev);
5359
5360         /*
5361          * dev.cxgbe.X.
5362          */
5363         oid = device_get_sysctl_tree(pi->dev);
5364         children = SYSCTL_CHILDREN(oid);
5365
5366         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
5367            CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
5368         if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
5369                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
5370                     CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
5371                     "PHY temperature (in Celsius)");
5372                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
5373                     CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
5374                     "PHY firmware version");
5375         }
5376
5377         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
5378             CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
5379             "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
5380         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
5381             CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
5382             "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
5383         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
5384             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
5385             "autonegotiation (-1 = not supported)");
5386
5387         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
5388             port_top_speed(pi), "max speed (in Gbps)");
5389
5390         if (sc->flags & IS_VF)
5391                 return;
5392
5393         /*
5394          * dev.(cxgbe|cxl).X.tc.
5395          */
5396         oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
5397             "Tx scheduler traffic classes (cl_rl)");
5398         for (i = 0; i < sc->chip_params->nsched_cls; i++) {
5399                 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
5400
5401                 snprintf(name, sizeof(name), "%d", i);
5402                 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
5403                     SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
5404                     "traffic class"));
5405                 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
5406                     &tc->flags, 0, "flags");
5407                 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
5408                     CTLFLAG_RD, &tc->refcount, 0, "references to this class");
5409 #ifdef SBUF_DRAIN
5410                 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
5411                     CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
5412                     sysctl_tc_params, "A", "traffic class parameters");
5413 #endif
5414         }
5415
5416         /*
5417          * dev.cxgbe.X.stats.
5418          */
5419         oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
5420             NULL, "port statistics");
5421         children = SYSCTL_CHILDREN(oid);
5422         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
5423             &pi->tx_parse_error, 0,
5424             "# of tx packets with invalid length or # of segments");
5425
5426 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
5427         SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
5428             CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
5429             sysctl_handle_t4_reg64, "QU", desc)
5430
5431         SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
5432             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
5433         SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
5434             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
5435         SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
5436             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
5437         SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
5438             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
5439         SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
5440             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
5441         SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
5442             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
5443         SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
5444             "# of tx frames in this range",
5445             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
5446         SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
5447             "# of tx frames in this range",
5448             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
5449         SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
5450             "# of tx frames in this range",
5451             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
5452         SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
5453             "# of tx frames in this range",
5454             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
5455         SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
5456             "# of tx frames in this range",
5457             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
5458         SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
5459             "# of tx frames in this range",
5460             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
5461         SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
5462             "# of tx frames in this range",
5463             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
5464         SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
5465             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
5466         SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
5467             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
5468         SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
5469             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
5470         SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
5471             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
5472         SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
5473             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
5474         SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
5475             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
5476         SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
5477             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
5478         SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
5479             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
5480         SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
5481             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
5482         SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
5483             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
5484
5485         SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
5486             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
5487         SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
5488             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
5489         SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
5490             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
5491         SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
5492             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
5493         SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
5494             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
5495         SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
5496             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
5497         SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
5498             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
5499         SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
5500             "# of frames received with bad FCS",
5501             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
5502         SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
5503             "# of frames received with length error",
5504             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
5505         SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
5506             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
5507         SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
5508             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
5509         SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
5510             "# of rx frames in this range",
5511             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
5512         SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
5513             "# of rx frames in this range",
5514             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
5515         SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
5516             "# of rx frames in this range",
5517             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
5518         SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
5519             "# of rx frames in this range",
5520             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
5521         SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
5522             "# of rx frames in this range",
5523             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
5524         SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
5525             "# of rx frames in this range",
5526             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
5527         SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
5528             "# of rx frames in this range",
5529             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
5530         SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
5531             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
5532         SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
5533             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
5534         SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
5535             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
5536         SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
5537             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
5538         SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
5539             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
5540         SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
5541             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
5542         SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
5543             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
5544         SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
5545             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
5546         SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
5547             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
5548
5549 #undef SYSCTL_ADD_T4_REG64
5550
5551 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
5552         SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
5553             &pi->stats.name, desc)
5554
5555         /* We get these from port_stats and they may be stale by upto 1s */
5556         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
5557             "# drops due to buffer-group 0 overflows");
5558         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
5559             "# drops due to buffer-group 1 overflows");
5560         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
5561             "# drops due to buffer-group 2 overflows");
5562         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
5563             "# drops due to buffer-group 3 overflows");
5564         SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
5565             "# of buffer-group 0 truncated packets");
5566         SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
5567             "# of buffer-group 1 truncated packets");
5568         SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
5569             "# of buffer-group 2 truncated packets");
5570         SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
5571             "# of buffer-group 3 truncated packets");
5572
5573 #undef SYSCTL_ADD_T4_PORTSTAT
5574 }
5575
5576 static int
5577 sysctl_int_array(SYSCTL_HANDLER_ARGS)
5578 {
5579         int rc, *i, space = 0;
5580         struct sbuf sb;
5581
5582         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
5583         for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
5584                 if (space)
5585                         sbuf_printf(&sb, " ");
5586                 sbuf_printf(&sb, "%d", *i);
5587                 space = 1;
5588         }
5589         sbuf_finish(&sb);
5590         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
5591         sbuf_delete(&sb);
5592         return (rc);
5593 }
5594
5595 static int
5596 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
5597 {
5598         int rc;
5599         struct sbuf *sb;
5600
5601         rc = sysctl_wire_old_buffer(req, 0);
5602         if (rc != 0)
5603                 return(rc);
5604
5605         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5606         if (sb == NULL)
5607                 return (ENOMEM);
5608
5609         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
5610         rc = sbuf_finish(sb);
5611         sbuf_delete(sb);
5612
5613         return (rc);
5614 }
5615
/*
 * Read a register from the port's BT PHY over MDIO and report it.
 * arg2 (op) selects which register is read; NOTE(review): op 0 appears
 * to be a raw reading that is scaled down by 256 before reporting and
 * non-zero op a different register — confirm against the sysctl
 * registration site for the exact meaning of each op.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	/* Serialize against other operations on this adapter. */
	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;	/* drop the low byte of the raw reading */

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
5640
5641 static int
5642 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
5643 {
5644         struct vi_info *vi = arg1;
5645         int rc, val;
5646
5647         val = vi->rsrv_noflowq;
5648         rc = sysctl_handle_int(oidp, &val, 0, req);
5649         if (rc != 0 || req->newptr == NULL)
5650                 return (rc);
5651
5652         if ((val >= 1) && (vi->ntxq > 1))
5653                 vi->rsrv_noflowq = 1;
5654         else
5655                 vi->rsrv_noflowq = 0;
5656
5657         return (rc);
5658 }
5659
/*
 * Get/set the interrupt holdoff timer index for all of the VI's rx
 * queues (including offload rx queues when TCP_OFFLOAD is built in).
 * Valid values are 0 .. SGE_NTIMERS-1.  Unlike the pktc index this can
 * be changed while the queues are live: the new value is pushed into
 * each queue's intr_params on the spot.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = vi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Packet-count holdoff stays enabled iff a pktc index is set. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
	for_each_rxq(vi, i, rxq) {
#ifdef atomic_store_rel_8
		/* Release store so the queue sees a consistent value. */
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	vi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
5708
5709 static int
5710 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5711 {
5712         struct vi_info *vi = arg1;
5713         struct adapter *sc = vi->pi->adapter;
5714         int idx, rc;
5715
5716         idx = vi->pktc_idx;
5717
5718         rc = sysctl_handle_int(oidp, &idx, 0, req);
5719         if (rc != 0 || req->newptr == NULL)
5720                 return (rc);
5721
5722         if (idx < -1 || idx >= SGE_NCOUNTERS)
5723                 return (EINVAL);
5724
5725         rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5726             "t4pktc");
5727         if (rc)
5728                 return (rc);
5729
5730         if (vi->flags & VI_INIT_DONE)
5731                 rc = EBUSY; /* cannot be changed once the queues are created */
5732         else
5733                 vi->pktc_idx = idx;
5734
5735         end_synchronized_op(sc, LOCK_HELD);
5736         return (rc);
5737 }
5738
5739 static int
5740 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5741 {
5742         struct vi_info *vi = arg1;
5743         struct adapter *sc = vi->pi->adapter;
5744         int qsize, rc;
5745
5746         qsize = vi->qsize_rxq;
5747
5748         rc = sysctl_handle_int(oidp, &qsize, 0, req);
5749         if (rc != 0 || req->newptr == NULL)
5750                 return (rc);
5751
5752         if (qsize < 128 || (qsize & 7))
5753                 return (EINVAL);
5754
5755         rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5756             "t4rxqs");
5757         if (rc)
5758                 return (rc);
5759
5760         if (vi->flags & VI_INIT_DONE)
5761                 rc = EBUSY; /* cannot be changed once the queues are created */
5762         else
5763                 vi->qsize_rxq = qsize;
5764
5765         end_synchronized_op(sc, LOCK_HELD);
5766         return (rc);
5767 }
5768
5769 static int
5770 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5771 {
5772         struct vi_info *vi = arg1;
5773         struct adapter *sc = vi->pi->adapter;
5774         int qsize, rc;
5775
5776         qsize = vi->qsize_txq;
5777
5778         rc = sysctl_handle_int(oidp, &qsize, 0, req);
5779         if (rc != 0 || req->newptr == NULL)
5780                 return (rc);
5781
5782         if (qsize < 128 || qsize > 65536)
5783                 return (EINVAL);
5784
5785         rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5786             "t4txqs");
5787         if (rc)
5788                 return (rc);
5789
5790         if (vi->flags & VI_INIT_DONE)
5791                 rc = EBUSY; /* cannot be changed once the queues are created */
5792         else
5793                 vi->qsize_txq = qsize;
5794
5795         end_synchronized_op(sc, LOCK_HELD);
5796         return (rc);
5797 }
5798
/*
 * Get/set the port's pause (flow control) settings.  Reads decode the
 * current lc->fc as a "%b" bit string; writes take a single digit whose
 * bits are PAUSE_RX (1) and PAUSE_TX (2).  A change is pushed to the
 * hardware via t4_link_l1cfg() under the port lock, inside a
 * synchronized op.
 */
static int
sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		/* Read: report the active settings. */
		struct sbuf *sb;
		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return(rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		/* Write: a single digit encoding the requested bits. */
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return(rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);        /* not a number */
		n = s[0] - '0';
		if (n & ~(PAUSE_TX | PAUSE_RX))
			return (EINVAL);        /* some other bit is set too */

		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
		    "t4PAUSE");
		if (rc)
			return (rc);
		PORT_LOCK(pi);
		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
			lc->requested_fc |= n;
			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
			if (rc == 0) {
				/* Applied; mirror into the active config. */
				lc->fc = lc->requested_fc;
				set_current_media(pi, &pi->media);
			}
		}
		PORT_UNLOCK(pi);
		end_synchronized_op(sc, 0);
	}

	return (rc);
}
5861
/*
 * Get/set the port's FEC (forward error correction) mode.  Reads decode
 * lc->fec as a "%b" bit string; writes take a single digit that must be
 * a power of two within M_FW_PORT_CAP_FEC (at most one FEC mode at a
 * time).  Changes are pushed to the hardware via t4_link_l1cfg() under
 * the port lock, inside a synchronized op.
 */
static int
sysctl_fec(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		/* Read: report the active FEC mode. */
		struct sbuf *sb;
		static char *bits = "\20\1RS\2BASER_RS\3RESERVED";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return(rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		/* Write: a single digit encoding the requested mode. */
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return(rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);        /* not a number */
		n = s[0] - '0';
		if (n & ~M_FW_PORT_CAP_FEC)
			return (EINVAL);        /* some other bit is set too */
		if (!powerof2(n))
			return (EINVAL);        /* one bit can be set at most */

		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
		    "t4fec");
		if (rc)
			return (rc);
		PORT_LOCK(pi);
		if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
			/* Mask with what the hardware actually supports. */
			lc->requested_fec = n &
			    G_FW_PORT_CAP_FEC(lc->supported);
			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
			if (rc == 0) {
				lc->fec = lc->requested_fec;
			}
		}
		PORT_UNLOCK(pi);
		end_synchronized_op(sc, 0);
	}

	return (rc);
}
5925
/*
 * Get/set link autonegotiation.  Reads report 1/0 when the port
 * supports autoneg (-1 when it does not).  Writes accept only 0 or 1
 * and push the change to the hardware via t4_link_l1cfg(); on failure
 * the previous requested_aneg value is restored.
 */
static int
sysctl_autoneg(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc, val, old;

	if (lc->supported & FW_PORT_CAP_ANEG)
		val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0;
	else
		val = -1;	/* autoneg not supported on this port */
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);
	if (val == 0)
		val = AUTONEG_DISABLE;
	else if (val == 1)
		val = AUTONEG_ENABLE;
	else
		return (EINVAL);

	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
	    "t4aneg");
	if (rc)
		return (rc);
	PORT_LOCK(pi);
	/* Re-check support under the lock before touching the config. */
	if ((lc->supported & FW_PORT_CAP_ANEG) == 0) {
		rc = ENOTSUP;
		goto done;
	}
	if (lc->requested_aneg == val) {
		rc = 0; /* no change, do nothing. */
		goto done;
	}
	old = lc->requested_aneg;
	lc->requested_aneg = val;
	rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
	if (rc != 0)
		lc->requested_aneg = old;	/* roll back on failure */
	else
		set_current_media(pi, &pi->media);
done:
	PORT_UNLOCK(pi);
	end_synchronized_op(sc, 0);
	return (rc);
}
5973
5974 static int
5975 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5976 {
5977         struct adapter *sc = arg1;
5978         int reg = arg2;
5979         uint64_t val;
5980
5981         val = t4_read_reg64(sc, reg);
5982
5983         return (sysctl_handle_64(oidp, &val, 0, req));
5984 }
5985
5986 static int
5987 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5988 {
5989         struct adapter *sc = arg1;
5990         int rc, t;
5991         uint32_t param, val;
5992
5993         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5994         if (rc)
5995                 return (rc);
5996         param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5997             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5998             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5999         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
6000         end_synchronized_op(sc, 0);
6001         if (rc)
6002                 return (rc);
6003
6004         /* unknown is returned as 0 but we display -1 in that case */
6005         t = val == 0 ? -1 : val;
6006
6007         rc = sysctl_handle_int(oidp, &t, 0, req);
6008         return (rc);
6009 }
6010
6011 #ifdef SBUF_DRAIN
/*
 * Dump the TP congestion control table: one line pair per congestion
 * control window showing the additive increment for each of the 16
 * MTUs, followed by the window's a_wnd parameter and its decrease
 * factor (b_wnd, rendered via the dec_fac lookup table).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* b_wnd values index this table of multiplicative decrease factors. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		/* First 8 MTUs on one line, the rest plus params on the next. */
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6049
/*
 * Names of the CIM queues, indexed the same way as the qid argument to
 * sysctl_cim_ibq_obq(): the 6 IBQs first, then the OBQs.
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
6055
/*
 * Dump the contents of one CIM inbound or outbound queue.  arg2 is a
 * combined qid: values below CIM_NUM_IBQ select an IBQ, the rest select
 * an OBQ (after subtracting CIM_NUM_IBQ).  The queue is read into a
 * temporary buffer and printed 4 words per line.
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = sc->chip_params->cim_num_obq;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	/* A negative return from the read helpers is a negated errno. */
	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* arg2 (not the adjusted qid) indexes the global qname table. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
6111
6112 static int
6113 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
6114 {
6115         struct adapter *sc = arg1;
6116         u_int cfg;
6117         struct sbuf *sb;
6118         uint32_t *buf, *p;
6119         int rc;
6120
6121         MPASS(chip_id(sc) <= CHELSIO_T5);
6122
6123         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6124         if (rc != 0)
6125                 return (rc);
6126
6127         rc = sysctl_wire_old_buffer(req, 0);
6128         if (rc != 0)
6129                 return (rc);
6130
6131         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6132         if (sb == NULL)
6133                 return (ENOMEM);
6134
6135         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6136             M_ZERO | M_WAITOK);
6137
6138         rc = -t4_cim_read_la(sc, buf, NULL);
6139         if (rc != 0)
6140                 goto done;
6141
6142         sbuf_printf(sb, "Status   Data      PC%s",
6143             cfg & F_UPDBGLACAPTPCONLY ? "" :
6144             "     LS0Stat  LS0Addr             LS0Data");
6145
6146         for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
6147                 if (cfg & F_UPDBGLACAPTPCONLY) {
6148                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
6149                             p[6], p[7]);
6150                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
6151                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
6152                             p[4] & 0xff, p[5] >> 8);
6153                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
6154                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6155                             p[1] & 0xf, p[2] >> 4);
6156                 } else {
6157                         sbuf_printf(sb,
6158                             "\n  %02x   %x%07x %x%07x %08x %08x "
6159                             "%08x%08x%08x%08x",
6160                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
6161                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
6162                             p[6], p[7]);
6163                 }
6164         }
6165
6166         rc = sbuf_finish(sb);
6167         sbuf_delete(sb);
6168 done:
6169         free(buf, M_CXGBE);
6170         return (rc);
6171 }
6172
6173 static int
6174 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
6175 {
6176         struct adapter *sc = arg1;
6177         u_int cfg;
6178         struct sbuf *sb;
6179         uint32_t *buf, *p;
6180         int rc;
6181
6182         MPASS(chip_id(sc) > CHELSIO_T5);
6183
6184         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
6185         if (rc != 0)
6186                 return (rc);
6187
6188         rc = sysctl_wire_old_buffer(req, 0);
6189         if (rc != 0)
6190                 return (rc);
6191
6192         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6193         if (sb == NULL)
6194                 return (ENOMEM);
6195
6196         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
6197             M_ZERO | M_WAITOK);
6198
6199         rc = -t4_cim_read_la(sc, buf, NULL);
6200         if (rc != 0)
6201                 goto done;
6202
6203         sbuf_printf(sb, "Status   Inst    Data      PC%s",
6204             cfg & F_UPDBGLACAPTPCONLY ? "" :
6205             "     LS0Stat  LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data");
6206
6207         for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
6208                 if (cfg & F_UPDBGLACAPTPCONLY) {
6209                         sbuf_printf(sb, "\n  %02x   %08x %08x %08x",
6210                             p[3] & 0xff, p[2], p[1], p[0]);
6211                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x %02x%06x",
6212                             (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
6213                             p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
6214                         sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x",
6215                             (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
6216                             p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
6217                             p[6] >> 16);
6218                 } else {
6219                         sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x "
6220                             "%08x %08x %08x %08x %08x %08x",
6221                             (p[9] >> 16) & 0xff,
6222                             p[9] & 0xffff, p[8] >> 16,
6223                             p[8] & 0xffff, p[7] >> 16,
6224                             p[7] & 0xffff, p[6] >> 16,
6225                             p[2], p[1], p[0], p[5], p[4], p[3]);
6226                 }
6227         }
6228
6229         rc = sbuf_finish(sb);
6230         sbuf_delete(sb);
6231 done:
6232         free(buf, M_CXGBE);
6233         return (rc);
6234 }
6235
/*
 * Dump the CIM MA logic analyzer.  t4_cim_read_ma_la() fills two
 * back-to-back arrays of CIM_MALA_SIZE 5-word entries in buf; the
 * first array is dumped raw and the second is decoded field by field.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Room for both captures: 2 arrays x CIM_MALA_SIZE x 5 words. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	/* First capture: raw 5-word entries. */
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Second capture: decoded fields.  p continues past the first. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6279
/*
 * Dump the CIM PIF logic analyzer.  t4_cim_read_pif_la() fills two
 * back-to-back arrays of CIM_PIFLA_SIZE 6-word entries in buf; both
 * captures are decoded field by field under their own headers.
 */
static int
sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Room for both captures: 2 arrays x CIM_PIFLA_SIZE x 6 words. */
	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
	p = buf;

	/* First capture: control/ID/byte-enables/address/data. */
	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
		    p[4], p[3], p[2], p[1], p[0]);
	}

	/* Second capture: control/ID/data.  p continues past the first. */
	sbuf_printf(sb, "\n\nCntl ID               Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6321
/*
 * Dump the configuration and current state of the CIM queues: the inbound
 * queues (IBQs) and outbound queues (OBQs) with their base, size,
 * threshold (IBQs only), hardware read/write pointers and SOP/EOP counts.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];	/* thresholds exist for IBQs only */
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	cim_num_obq = sc->chip_params->cim_num_obq;
	/* T4 exposes the queue pointers at different addresses than T5+. */
	if (is_t4(sc)) {
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* 4 state words per queue, plus 2 write-pointer words per OBQ. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail");

	/* IBQs first: p walks 4 state words per queue. */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	/* Then OBQs: no threshold, write pointer comes from obq_wr (wr). */
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6380
6381 static int
6382 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
6383 {
6384         struct adapter *sc = arg1;
6385         struct sbuf *sb;
6386         int rc;
6387         struct tp_cpl_stats stats;
6388
6389         rc = sysctl_wire_old_buffer(req, 0);
6390         if (rc != 0)
6391                 return (rc);
6392
6393         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6394         if (sb == NULL)
6395                 return (ENOMEM);
6396
6397         mtx_lock(&sc->reg_lock);
6398         t4_tp_get_cpl_stats(sc, &stats, 0);
6399         mtx_unlock(&sc->reg_lock);
6400
6401         if (sc->chip_params->nchan > 2) {
6402                 sbuf_printf(sb, "                 channel 0  channel 1"
6403                     "  channel 2  channel 3");
6404                 sbuf_printf(sb, "\nCPL requests:   %10u %10u %10u %10u",
6405                     stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
6406                 sbuf_printf(sb, "\nCPL responses:   %10u %10u %10u %10u",
6407                     stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
6408         } else {
6409                 sbuf_printf(sb, "                 channel 0  channel 1");
6410                 sbuf_printf(sb, "\nCPL requests:   %10u %10u",
6411                     stats.req[0], stats.req[1]);
6412                 sbuf_printf(sb, "\nCPL responses:   %10u %10u",
6413                     stats.rsp[0], stats.rsp[1]);
6414         }
6415
6416         rc = sbuf_finish(sb);
6417         sbuf_delete(sb);
6418
6419         return (rc);
6420 }
6421
6422 static int
6423 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
6424 {
6425         struct adapter *sc = arg1;
6426         struct sbuf *sb;
6427         int rc;
6428         struct tp_usm_stats stats;
6429
6430         rc = sysctl_wire_old_buffer(req, 0);
6431         if (rc != 0)
6432                 return(rc);
6433
6434         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6435         if (sb == NULL)
6436                 return (ENOMEM);
6437
6438         t4_get_usm_stats(sc, &stats, 1);
6439
6440         sbuf_printf(sb, "Frames: %u\n", stats.frames);
6441         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
6442         sbuf_printf(sb, "Drops:  %u", stats.drops);
6443
6444         rc = sbuf_finish(sb);
6445         sbuf_delete(sb);
6446
6447         return (rc);
6448 }
6449
/* Names for fw_devlog_e.level values, indexed by the firmware constant. */
static const char * const devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
6458
/* Names for fw_devlog_e.facility values, indexed by the firmware constant. */
static const char * const devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_CF]		= "CF",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
};
6486
/*
 * Dump the firmware device log.  The log lives in adapter memory at
 * params.devlog.addr and is a ring of fixed-size fw_devlog_e entries; the
 * oldest entry is located by scanning for the smallest timestamp and the
 * dump walks forward from there, wrapping around at the end.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;	/* earliest timestamp seen so far */

	if (dparams->addr == 0)
		return (ENXIO);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
	if (rc != 0)
		goto done;

	/* Byte-swap every used entry and find the oldest one (= "first"). */
	nentries = dparams->size / sizeof(struct fw_devlog_e);
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the ring starting at the oldest entry, wrapping at the end. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* e->fmt is a printf-style format string supplied by fw. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
6567
6568 static int
6569 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
6570 {
6571         struct adapter *sc = arg1;
6572         struct sbuf *sb;
6573         int rc;
6574         struct tp_fcoe_stats stats[MAX_NCHAN];
6575         int i, nchan = sc->chip_params->nchan;
6576
6577         rc = sysctl_wire_old_buffer(req, 0);
6578         if (rc != 0)
6579                 return (rc);
6580
6581         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6582         if (sb == NULL)
6583                 return (ENOMEM);
6584
6585         for (i = 0; i < nchan; i++)
6586                 t4_get_fcoe_stats(sc, i, &stats[i], 1);
6587
6588         if (nchan > 2) {
6589                 sbuf_printf(sb, "                   channel 0        channel 1"
6590                     "        channel 2        channel 3");
6591                 sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju %16ju %16ju",
6592                     stats[0].octets_ddp, stats[1].octets_ddp,
6593                     stats[2].octets_ddp, stats[3].octets_ddp);
6594                 sbuf_printf(sb, "\nframesDDP:  %16u %16u %16u %16u",
6595                     stats[0].frames_ddp, stats[1].frames_ddp,
6596                     stats[2].frames_ddp, stats[3].frames_ddp);
6597                 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
6598                     stats[0].frames_drop, stats[1].frames_drop,
6599                     stats[2].frames_drop, stats[3].frames_drop);
6600         } else {
6601                 sbuf_printf(sb, "                   channel 0        channel 1");
6602                 sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju",
6603                     stats[0].octets_ddp, stats[1].octets_ddp);
6604                 sbuf_printf(sb, "\nframesDDP:  %16u %16u",
6605                     stats[0].frames_ddp, stats[1].frames_ddp);
6606                 sbuf_printf(sb, "\nframesDrop: %16u %16u",
6607                     stats[0].frames_drop, stats[1].frames_drop);
6608         }
6609
6610         rc = sbuf_finish(sb);
6611         sbuf_delete(sb);
6612
6613         return (rc);
6614 }
6615
/*
 * Show the hardware Tx scheduler configuration: per-scheduler mode
 * (flow/class), channel mapping, rate, class IPG and flow IPG (pace
 * table).
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	/* Two bits of the queue-request map per scheduler (map >>= 2). */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
		/* Bit i of mode selects flow-mode vs class-mode. */
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		/* A zero value from hardware means the feature is off. */
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6665
/*
 * Dump MPS loopback port statistics, two loopback channels per table
 * (side by side columns).
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Two channels per iteration, one per output column. */
	for (i = 0; i < sc->chip_params->nchan; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		/*
		 * Walk the stats structs field by field; assumes the fields
		 * of struct lb_port_stats are uint64_t counters laid out in
		 * the same order as stat_name[] -- NOTE(review): confirm
		 * against the struct definition in the shared code.
		 */
		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6713
6714 static int
6715 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
6716 {
6717         int rc = 0;
6718         struct port_info *pi = arg1;
6719         struct link_config *lc = &pi->link_cfg;
6720         struct sbuf *sb;
6721
6722         rc = sysctl_wire_old_buffer(req, 0);
6723         if (rc != 0)
6724                 return(rc);
6725         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
6726         if (sb == NULL)
6727                 return (ENOMEM);
6728
6729         if (lc->link_ok || lc->link_down_rc == 255)
6730                 sbuf_printf(sb, "n/a");
6731         else
6732                 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
6733
6734         rc = sbuf_finish(sb);
6735         sbuf_delete(sb);
6736
6737         return (rc);
6738 }
6739
/*
 * A memory range on the adapter.  idx selects the label used when the
 * range is displayed; limit is the last address of the range (0 when it
 * is derived later from the next range's base).
 */
struct mem_desc {
	unsigned int base;
	unsigned int limit;
	unsigned int idx;
};

/*
 * qsort(3) comparator ordering struct mem_desc by base address.
 *
 * Compare explicitly instead of returning a->base - b->base: the bases
 * are unsigned, and the subtraction result converted to int has the
 * wrong sign whenever the wrapped difference exceeds INT_MAX (e.g. two
 * ranges more than 2GB apart), which would break the sort.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
	const unsigned int v1 = ((const struct mem_desc *)a)->base;
	const unsigned int v2 = ((const struct mem_desc *)b)->base;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);
	return (0);
}
6752
/*
 * Append one "name from-to [size]" line to the sbuf.  Empty regions
 * (from == to, meaning nothing was carved out) are skipped, as is the
 * wrap case where to == from - 1 makes the computed size zero.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size;

	if (from == to)
		return;

	size = to - from + 1;
	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
6769
6770 static int
6771 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
6772 {
6773         struct adapter *sc = arg1;
6774         struct sbuf *sb;
6775         int rc, i, n;
6776         uint32_t lo, hi, used, alloc;
6777         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
6778         static const char *region[] = {
6779                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
6780                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
6781                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
6782                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
6783                 "RQUDP region:", "PBL region:", "TXPBL region:",
6784                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
6785                 "On-chip queues:"
6786         };
6787         struct mem_desc avail[4];
6788         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
6789         struct mem_desc *md = mem;
6790
6791         rc = sysctl_wire_old_buffer(req, 0);
6792         if (rc != 0)
6793                 return (rc);
6794
6795         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6796         if (sb == NULL)
6797                 return (ENOMEM);
6798
6799         for (i = 0; i < nitems(mem); i++) {
6800                 mem[i].limit = 0;
6801                 mem[i].idx = i;
6802         }
6803
6804         /* Find and sort the populated memory ranges */
6805         i = 0;
6806         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
6807         if (lo & F_EDRAM0_ENABLE) {
6808                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
6809                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
6810                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
6811                 avail[i].idx = 0;
6812                 i++;
6813         }
6814         if (lo & F_EDRAM1_ENABLE) {
6815                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
6816                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
6817                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
6818                 avail[i].idx = 1;
6819                 i++;
6820         }
6821         if (lo & F_EXT_MEM_ENABLE) {
6822                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
6823                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
6824                 avail[i].limit = avail[i].base +
6825                     (G_EXT_MEM_SIZE(hi) << 20);
6826                 avail[i].idx = is_t5(sc) ? 3 : 2;       /* Call it MC0 for T5 */
6827                 i++;
6828         }
6829         if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
6830                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6831                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6832                 avail[i].limit = avail[i].base +
6833                     (G_EXT_MEM1_SIZE(hi) << 20);
6834                 avail[i].idx = 4;
6835                 i++;
6836         }
6837         if (!i)                                    /* no memory available */
6838                 return 0;
6839         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6840
6841         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6842         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6843         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6844         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6845         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6846         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6847         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6848         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6849         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6850
6851         /* the next few have explicit upper bounds */
6852         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6853         md->limit = md->base - 1 +
6854                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6855                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6856         md++;
6857
6858         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6859         md->limit = md->base - 1 +
6860                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6861                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6862         md++;
6863
6864         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6865                 if (chip_id(sc) <= CHELSIO_T5)
6866                         md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6867                 else
6868                         md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
6869                 md->limit = 0;
6870         } else {
6871                 md->base = 0;
6872                 md->idx = nitems(region);  /* hide it */
6873         }
6874         md++;
6875
6876 #define ulp_region(reg) \
6877         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6878         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6879
6880         ulp_region(RX_ISCSI);
6881         ulp_region(RX_TDDP);
6882         ulp_region(TX_TPT);
6883         ulp_region(RX_STAG);
6884         ulp_region(RX_RQ);
6885         ulp_region(RX_RQUDP);
6886         ulp_region(RX_PBL);
6887         ulp_region(TX_PBL);
6888 #undef ulp_region
6889
6890         md->base = 0;
6891         md->idx = nitems(region);
6892         if (!is_t4(sc)) {
6893                 uint32_t size = 0;
6894                 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
6895                 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
6896
6897                 if (is_t5(sc)) {
6898                         if (sge_ctrl & F_VFIFO_ENABLE)
6899                                 size = G_DBVFIFO_SIZE(fifo_size);
6900                 } else
6901                         size = G_T6_DBVFIFO_SIZE(fifo_size);
6902
6903                 if (size) {
6904                         md->base = G_BASEADDR(t4_read_reg(sc,
6905                             A_SGE_DBVFIFO_BADDR));
6906                         md->limit = md->base + (size << 2) - 1;
6907                 }
6908         }
6909         md++;
6910
6911         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6912         md->limit = 0;
6913         md++;
6914         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6915         md->limit = 0;
6916         md++;
6917
6918         md->base = sc->vres.ocq.start;
6919         if (sc->vres.ocq.size)
6920                 md->limit = md->base + sc->vres.ocq.size - 1;
6921         else
6922                 md->idx = nitems(region);  /* hide it */
6923         md++;
6924
6925         /* add any address-space holes, there can be up to 3 */
6926         for (n = 0; n < i - 1; n++)
6927                 if (avail[n].limit < avail[n + 1].base)
6928                         (md++)->base = avail[n].limit;
6929         if (avail[n].limit)
6930                 (md++)->base = avail[n].limit;
6931
6932         n = md - mem;
6933         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6934
6935         for (lo = 0; lo < i; lo++)
6936                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6937                                 avail[lo].limit - 1);
6938
6939         sbuf_printf(sb, "\n");
6940         for (i = 0; i < n; i++) {
6941                 if (mem[i].idx >= nitems(region))
6942                         continue;                        /* skip holes */
6943                 if (!mem[i].limit)
6944                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6945                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
6946                                 mem[i].limit);
6947         }
6948
6949         sbuf_printf(sb, "\n");
6950         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6951         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6952         mem_region_show(sb, "uP RAM:", lo, hi);
6953
6954         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6955         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6956         mem_region_show(sb, "uP Extmem2:", lo, hi);
6957
6958         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6959         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6960                    G_PMRXMAXPAGE(lo),
6961                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6962                    (lo & F_PMRXNUMCHN) ? 2 : 1);
6963
6964         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6965         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6966         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6967                    G_PMTXMAXPAGE(lo),
6968                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6969                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6970         sbuf_printf(sb, "%u p-structs\n",
6971                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6972
6973         for (i = 0; i < 4; i++) {
6974                 if (chip_id(sc) > CHELSIO_T5)
6975                         lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
6976                 else
6977                         lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6978                 if (is_t5(sc)) {
6979                         used = G_T5_USED(lo);
6980                         alloc = G_T5_ALLOC(lo);
6981                 } else {
6982                         used = G_USED(lo);
6983                         alloc = G_ALLOC(lo);
6984                 }
6985                 /* For T6 these are MAC buffer groups */
6986                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6987                     i, used, alloc);
6988         }
6989         for (i = 0; i < sc->chip_params->nchan; i++) {
6990                 if (chip_id(sc) > CHELSIO_T5)
6991                         lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
6992                 else
6993                         lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6994                 if (is_t5(sc)) {
6995                         used = G_T5_USED(lo);
6996                         alloc = G_T5_ALLOC(lo);
6997                 } else {
6998                         used = G_USED(lo);
6999                         alloc = G_ALLOC(lo);
7000                 }
7001                 /* For T6 these are MAC buffer groups */
7002                 sbuf_printf(sb,
7003                     "\nLoopback %d using %u pages out of %u allocated",
7004                     i, used, alloc);
7005         }
7006
7007         rc = sbuf_finish(sb);
7008         sbuf_delete(sb);
7009
7010         return (rc);
7011 }
7012
/*
 * Decode an MPS TCAM (x, y) register pair: *mask is set to x | y, and
 * bytes 2..7 of the big-endian representation of y (its low 6 bytes) are
 * copied into addr as the Ethernet address.
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	*mask = x | y;
	y = htobe64(y);
	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
7020
/*
 * Dump the MPS MAC-address TCAM (T4/T5 layout; T6 has its own handler).
 * For each valid entry: address, mask, SRAM classification bits, and --
 * when the entry replicates -- the replication bitmap fetched from the
 * firmware with an FW_LDST command.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) <= CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		/* Entries with overlapping x/y bits are unused; skip them. */
		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
			   addr[3], addr[4], addr[5], (uintmax_t)mask,
			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
			   G_PORTMAP(cls_hi), G_PF(cls_lo),
			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/* Ask the firmware for this entry's replication map. */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;	/* abort; partial output still sent */
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Show the error in the column and carry on. */
				sbuf_printf(sb, "%36d", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* On error, still drain what was formatted but report the error. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7107
/*
 * sysctl handler: dump the MPS TCAM (MAC filter table) on T6+ chips.
 * For every valid entry it prints the Ethernet address/mask, VNI or
 * VLAN info, lookup/port attributes, the replication bitmap (fetched
 * from firmware), and the SRAM priority fields.
 */
static int
sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	/* This handler is only registered for T6 and later. */
	MPASS(chip_id(sc) > CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Idx  Ethernet address     Mask       VNI   Mask"
	    "   IVLAN Vld DIP_Hit   Lookup  Port Vld Ports PF  VF"
	    "                           Replication"
	    "                                    P0 P1 P2 P3  ML\n");

	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
		uint16_t ivlan;
		uint64_t tcamx, tcamy, val, mask;
		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
		uint8_t addr[ETHER_ADDR_LEN];

		/*
		 * Indirect read of the TCAM entry: program the control
		 * register, then read the entry back through the RDATA
		 * registers.  Indices >= 256 live in the second TCAM bank.
		 */
		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
		if (i < 256)
			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
		else
			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
		tcamy = G_DMACH(val) << 32;	/* val is 64-bit; shift is safe */
		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		lookup_type = G_DATALKPTYPE(data2);
		port_num = G_DATAPORTNUM(data2);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI */
			vniy = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
			dip_hit = data2 & F_DATADIPHIT;
			vlan_vld = 0;
		} else {
			vniy = 0;
			dip_hit = 0;
			vlan_vld = data2 & F_DATAVIDH2;
			ivlan = G_VIDL(val);
		}

		/* Second pass: read the X half of the same entry. */
		ctl |= V_CTLXYBITSEL(1);
		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
		tcamx = G_DMACH(val) << 32;
		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI mask */
			vnix = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
		} else
			vnix = 0;

		/* X and Y overlapping means the entry is invalid; skip it. */
		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);

		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner-header (VNI) entry. */
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx %06x %06x    -    -   %3c"
			    "      'I'  %4x   %3c   %#x%4u%4d", i, addr[0],
			    addr[1], addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		} else {
			/* Outer-header entry, optionally with an IVLAN. */
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx    -       -   ", i, addr[0], addr[1],
			    addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask);

			if (vlan_vld)
				sbuf_printf(sb, "%4u   Y     ", ivlan);
			else
				sbuf_printf(sb, "  -    N     ");

			sbuf_printf(sb, "-      %3c  %4x   %3c   %#x%4u%4d",
			    lookup_type ? 'I' : 'O', port_num,
			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		}


		if (cls_lo & F_T6_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/*
			 * The replication bitmap is not directly readable;
			 * fetch it from the firmware with an LDST command.
			 */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t6mps");
			if (rc)
				break;	/* rc != 0: sbuf_finish is skipped below */
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Report the error in the column, keep going. */
				sbuf_printf(sb, "%72d", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x"
				    " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%72s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
	}

	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7263
7264 static int
7265 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
7266 {
7267         struct adapter *sc = arg1;
7268         struct sbuf *sb;
7269         int rc;
7270         uint16_t mtus[NMTUS];
7271
7272         rc = sysctl_wire_old_buffer(req, 0);
7273         if (rc != 0)
7274                 return (rc);
7275
7276         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7277         if (sb == NULL)
7278                 return (ENOMEM);
7279
7280         t4_read_mtu_tbl(sc, mtus, NULL);
7281
7282         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
7283             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
7284             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
7285             mtus[14], mtus[15]);
7286
7287         rc = sbuf_finish(sb);
7288         sbuf_delete(sb);
7289
7290         return (rc);
7291 }
7292
/*
 * sysctl handler: report PM TX/RX statistics.  The first four rows of
 * each direction are command/byte counters; on T6+ the remaining table
 * slots additionally hold FIFO-wait and latency statistics.
 */
static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
	/* Row labels; slot 5 is deliberately NULL and skipped below. */
	static const char *tx_stats[MAX_PM_NSTATS] = {
		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
		"Tx FIFO wait", NULL, "Tx latency"
	};
	static const char *rx_stats[MAX_PM_NSTATS] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:",
		"Rx FIFO wait", NULL, "Rx latency"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);

	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
	for (i = 0; i < 4; i++) {
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
	}

	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
	for (i = 0; i < 4; i++) {
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);
	}

	if (chip_id(sc) > CHELSIO_T5) {
		/* i == 4 after the loops above: the "FIFO wait" rows. */
		sbuf_printf(sb,
		    "\n              Total wait      Total occupancy");
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);

		/* Skip the NULL slot (5) and print the latency rows (6). */
		i += 2;
		MPASS(i < nitems(tx_stats));

		sbuf_printf(sb,
		    "\n                   Reads           Total wait");
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7357
7358 static int
7359 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
7360 {
7361         struct adapter *sc = arg1;
7362         struct sbuf *sb;
7363         int rc;
7364         struct tp_rdma_stats stats;
7365
7366         rc = sysctl_wire_old_buffer(req, 0);
7367         if (rc != 0)
7368                 return (rc);
7369
7370         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7371         if (sb == NULL)
7372                 return (ENOMEM);
7373
7374         mtx_lock(&sc->reg_lock);
7375         t4_tp_get_rdma_stats(sc, &stats, 0);
7376         mtx_unlock(&sc->reg_lock);
7377
7378         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
7379         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
7380
7381         rc = sbuf_finish(sb);
7382         sbuf_delete(sb);
7383
7384         return (rc);
7385 }
7386
7387 static int
7388 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
7389 {
7390         struct adapter *sc = arg1;
7391         struct sbuf *sb;
7392         int rc;
7393         struct tp_tcp_stats v4, v6;
7394
7395         rc = sysctl_wire_old_buffer(req, 0);
7396         if (rc != 0)
7397                 return (rc);
7398
7399         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
7400         if (sb == NULL)
7401                 return (ENOMEM);
7402
7403         mtx_lock(&sc->reg_lock);
7404         t4_tp_get_tcp_stats(sc, &v4, &v6, 0);
7405         mtx_unlock(&sc->reg_lock);
7406
7407         sbuf_printf(sb,
7408             "                                IP                 IPv6\n");
7409         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
7410             v4.tcp_out_rsts, v6.tcp_out_rsts);
7411         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
7412             v4.tcp_in_segs, v6.tcp_in_segs);
7413         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
7414             v4.tcp_out_segs, v6.tcp_out_segs);
7415         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
7416             v4.tcp_retrans_segs, v6.tcp_retrans_segs);
7417
7418         rc = sbuf_finish(sb);
7419         sbuf_delete(sb);
7420
7421         return (rc);
7422 }
7423
/*
 * sysctl handler: report the adapter's TID ranges (active, server,
 * filter, ethofld) and current usage counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		sbuf_printf(sb, "TID range: ");
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/*
			 * Hashing is enabled: the TID space is split around
			 * the server/hash regions.  The base registers moved
			 * (and changed units) between T5 and T6.
			 */
			uint32_t b, hb;

			if (chip_id(sc) <= CHELSIO_T5) {
				b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
				hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
			} else {
				b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
				hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
			}

			if (b)
				sbuf_printf(sb, "0-%u, ", b - 1);
			sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
		} else
			sbuf_printf(sb, "0-%u", t->ntids - 1);
		/* tids_in_use is updated lock-free; read with acquire. */
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
		    t->etid_base + t->netids - 1);
	}

	/* Hardware's own view of active IPv4/IPv6 TID usage. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7491
/*
 * sysctl handler: report TP per-channel error and drop counters.  The
 * table is printed with 4 columns on 4-channel chips and 2 columns
 * otherwise; the two channel-independent counters follow at the end.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Snapshot all counters under the register lock. */
	mtx_lock(&sc->reg_lock);
	t4_tp_get_err_stats(sc, &stats, 0);
	mtx_unlock(&sc->reg_lock);

	if (sc->chip_params->nchan > 2) {
		/* 4-channel layout. */
		sbuf_printf(sb, "                 channel 0  channel 1"
		    "  channel 2  channel 3\n");
		sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
		    stats.mac_in_errs[0], stats.mac_in_errs[1],
		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
		sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
		sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
		sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
		sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
		sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
		sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
	} else {
		/* 2-channel layout. */
		sbuf_printf(sb, "                 channel 0  channel 1\n");
		sbuf_printf(sb, "macInErrs:      %10u %10u\n",
		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
		sbuf_printf(sb, "hdrInErrs:      %10u %10u\n",
		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
		sbuf_printf(sb, "tcpInErrs:      %10u %10u\n",
		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
		sbuf_printf(sb, "tcp6InErrs:     %10u %10u\n",
		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
		sbuf_printf(sb, "tnlCongDrops:   %10u %10u\n",
		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
		sbuf_printf(sb, "tnlTxDrops:     %10u %10u\n",
		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u\n",
		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
		sbuf_printf(sb, "ofldChanDrops:  %10u %10u\n\n",
		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
	}

	/* Channel-independent counters. */
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofld_no_neigh, stats.ofld_cong_defer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7567
7568 static int
7569 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
7570 {
7571         struct adapter *sc = arg1;
7572         struct tp_params *tpp = &sc->params.tp;
7573         u_int mask;
7574         int rc;
7575
7576         mask = tpp->la_mask >> 16;
7577         rc = sysctl_handle_int(oidp, &mask, 0, req);
7578         if (rc != 0 || req->newptr == NULL)
7579                 return (rc);
7580         if (mask > 0xffff)
7581                 return (EINVAL);
7582         tpp->la_mask = mask << 16;
7583         t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
7584
7585         return (0);
7586 }
7587
/*
 * Describes one bit-field within a 64-bit logic-analyzer entry, for
 * pretty-printing by field_desc_show().  Tables of these are NULL-name
 * terminated.
 */
struct field_desc {
	const char *name;	/* label printed before the value */
	u_int start;		/* least significant bit of the field */
	u_int width;		/* field width in bits */
};
7593
7594 static void
7595 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
7596 {
7597         char buf[32];
7598         int line_size = 0;
7599
7600         while (f->name) {
7601                 uint64_t mask = (1ULL << f->width) - 1;
7602                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
7603                     ((uintmax_t)v >> f->start) & mask);
7604
7605                 if (line_size + len >= 79) {
7606                         line_size = 8;
7607                         sbuf_printf(sb, "\n        ");
7608                 }
7609                 sbuf_printf(sb, "%s ", buf);
7610                 line_size += len + 1;
7611                 f++;
7612         }
7613         sbuf_printf(sb, "\n");
7614 }
7615
/*
 * Bit layout of a TP debug LA entry (primary word).  Field positions
 * follow the hardware's debug LA format; names are printed verbatim by
 * field_desc_show().  Semantics of individual fields are per Chelsio
 * hardware documentation (not reproduced here).
 */
static const struct field_desc tp_la0[] = {
	{ "RcfOpCodeOut", 60, 4 },
	{ "State", 56, 4 },
	{ "WcfState", 52, 4 },
	{ "RcfOpcSrcOut", 50, 2 },
	{ "CRxError", 49, 1 },
	{ "ERxError", 48, 1 },
	{ "SanityFailed", 47, 1 },
	{ "SpuriousMsg", 46, 1 },
	{ "FlushInputMsg", 45, 1 },
	{ "FlushInputCpl", 44, 1 },
	{ "RssUpBit", 43, 1 },
	{ "RssFilterHit", 42, 1 },
	{ "Tid", 32, 10 },
	{ "InitTcb", 31, 1 },
	{ "LineNumber", 24, 7 },
	{ "Emsg", 23, 1 },
	{ "EdataOut", 22, 1 },
	{ "Cmsg", 21, 1 },
	{ "CdataOut", 20, 1 },
	{ "EreadPdu", 19, 1 },
	{ "CreadPdu", 18, 1 },
	{ "TunnelPkt", 17, 1 },
	{ "RcfPeerFin", 16, 1 },
	{ "RcfReasonOut", 12, 4 },
	{ "TxCchannel", 10, 2 },
	{ "RcfTxChannel", 8, 2 },
	{ "RxEchannel", 6, 2 },
	{ "RcfRxChannel", 5, 1 },
	{ "RcfDataOutSrdy", 4, 1 },
	{ "RxDvld", 3, 1 },
	{ "RxOoDvld", 2, 1 },
	{ "RxCongestion", 1, 1 },
	{ "TxCongestion", 0, 1 },
	{ NULL }
};
7652
/*
 * Bit layout of the secondary TP debug LA word when bit 17 of the
 * primary word is clear (see tp_la_show3).  Differs from tp_la2 only
 * in bits 44-55 (CPL-out/E*Out fields here vs. MPS VF/PF fields there).
 */
static const struct field_desc tp_la1[] = {
	{ "CplCmdIn", 56, 8 },
	{ "CplCmdOut", 48, 8 },
	{ "ESynOut", 47, 1 },
	{ "EAckOut", 46, 1 },
	{ "EFinOut", 45, 1 },
	{ "ERstOut", 44, 1 },
	{ "SynIn", 43, 1 },
	{ "AckIn", 42, 1 },
	{ "FinIn", 41, 1 },
	{ "RstIn", 40, 1 },
	{ "DataIn", 39, 1 },
	{ "DataInVld", 38, 1 },
	{ "PadIn", 37, 1 },
	{ "RxBufEmpty", 36, 1 },
	{ "RxDdp", 35, 1 },
	{ "RxFbCongestion", 34, 1 },
	{ "TxFbCongestion", 33, 1 },
	{ "TxPktSumSrdy", 32, 1 },
	{ "RcfUlpType", 28, 4 },
	{ "Eread", 27, 1 },
	{ "Ebypass", 26, 1 },
	{ "Esave", 25, 1 },
	{ "Static0", 24, 1 },
	{ "Cread", 23, 1 },
	{ "Cbypass", 22, 1 },
	{ "Csave", 21, 1 },
	{ "CPktOut", 20, 1 },
	{ "RxPagePoolFull", 18, 2 },
	{ "RxLpbkPkt", 17, 1 },
	{ "TxLpbkPkt", 16, 1 },
	{ "RxVfValid", 15, 1 },
	{ "SynLearned", 14, 1 },
	{ "SetDelEntry", 13, 1 },
	{ "SetInvEntry", 12, 1 },
	{ "CpcmdDvld", 11, 1 },
	{ "CpcmdSave", 10, 1 },
	{ "RxPstructsFull", 8, 2 },
	{ "EpcmdDvld", 7, 1 },
	{ "EpcmdFlush", 6, 1 },
	{ "EpcmdTrimPrefix", 5, 1 },
	{ "EpcmdTrimPostfix", 4, 1 },
	{ "ERssIp4Pkt", 3, 1 },
	{ "ERssIp6Pkt", 2, 1 },
	{ "ERssTcpUdpPkt", 1, 1 },
	{ "ERssFceFipPkt", 0, 1 },
	{ NULL }
};
7701
/*
 * Bit layout of the secondary TP debug LA word when bit 17 of the
 * primary word is set (see tp_la_show3).  Identical to tp_la1 except
 * bits 44-55 carry MPS VF/PF information instead of CPL-out fields.
 */
static const struct field_desc tp_la2[] = {
	{ "CplCmdIn", 56, 8 },
	{ "MpsVfVld", 55, 1 },
	{ "MpsPf", 52, 3 },
	{ "MpsVf", 44, 8 },
	{ "SynIn", 43, 1 },
	{ "AckIn", 42, 1 },
	{ "FinIn", 41, 1 },
	{ "RstIn", 40, 1 },
	{ "DataIn", 39, 1 },
	{ "DataInVld", 38, 1 },
	{ "PadIn", 37, 1 },
	{ "RxBufEmpty", 36, 1 },
	{ "RxDdp", 35, 1 },
	{ "RxFbCongestion", 34, 1 },
	{ "TxFbCongestion", 33, 1 },
	{ "TxPktSumSrdy", 32, 1 },
	{ "RcfUlpType", 28, 4 },
	{ "Eread", 27, 1 },
	{ "Ebypass", 26, 1 },
	{ "Esave", 25, 1 },
	{ "Static0", 24, 1 },
	{ "Cread", 23, 1 },
	{ "Cbypass", 22, 1 },
	{ "Csave", 21, 1 },
	{ "CPktOut", 20, 1 },
	{ "RxPagePoolFull", 18, 2 },
	{ "RxLpbkPkt", 17, 1 },
	{ "TxLpbkPkt", 16, 1 },
	{ "RxVfValid", 15, 1 },
	{ "SynLearned", 14, 1 },
	{ "SetDelEntry", 13, 1 },
	{ "SetInvEntry", 12, 1 },
	{ "CpcmdDvld", 11, 1 },
	{ "CpcmdSave", 10, 1 },
	{ "RxPstructsFull", 8, 2 },
	{ "EpcmdDvld", 7, 1 },
	{ "EpcmdFlush", 6, 1 },
	{ "EpcmdTrimPrefix", 5, 1 },
	{ "EpcmdTrimPostfix", 4, 1 },
	{ "ERssIp4Pkt", 3, 1 },
	{ "ERssIp6Pkt", 2, 1 },
	{ "ERssTcpUdpPkt", 1, 1 },
	{ "ERssFceFipPkt", 0, 1 },
	{ NULL }
};
7748
/*
 * Show one single-word TP LA entry (default DBGLAMODE).  idx is unused
 * here but keeps the signature uniform with tp_la_show2/tp_la_show3 so
 * all three can be dispatched through the same function pointer.
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

	field_desc_show(sb, *p, tp_la0);
}
7755
7756 static void
7757 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
7758 {
7759
7760         if (idx)
7761                 sbuf_printf(sb, "\n");
7762         field_desc_show(sb, p[0], tp_la0);
7763         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7764                 field_desc_show(sb, p[1], tp_la0);
7765 }
7766
7767 static void
7768 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
7769 {
7770
7771         if (idx)
7772                 sbuf_printf(sb, "\n");
7773         field_desc_show(sb, p[0], tp_la0);
7774         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
7775                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
7776 }
7777
/*
 * sysctl handler: dump the TP debug logic analyzer.  The capture mode
 * (DBGLAMODE in TP_DBG_LA_CONFIG) determines how the captured 64-bit
 * words pair up: modes 2 and 3 consume two words per displayed entry,
 * every other mode one.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint64_t *buf, *p;
	int rc;
	u_int i, inc;
	void (*show_func)(struct sbuf *, uint64_t *, int);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* M_WAITOK: this allocation does not fail. */
	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

	t4_tp_read_la(sc, buf, NULL);
	p = buf;

	/* Pick words-per-entry and the matching formatter for the mode. */
	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
	case 2:
		inc = 2;
		show_func = tp_la_show2;
		break;
	case 3:
		inc = 2;
		show_func = tp_la_show3;
		break;
	default:
		inc = 1;
		show_func = tp_la_show;
	}

	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
		(*show_func)(sb, p, i);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
7823
/*
 * sysctl handler: report per-channel NIC and offload transmit rates
 * (bytes/second), formatted for 4 or 2 channels depending on the chip.
 */
static int
sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];	/* NIC and offload rates */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_chan_txrate(sc, nrate, orate);

	if (sc->chip_params->nchan > 2) {
		sbuf_printf(sb, "              channel 0   channel 1"
		    "   channel 2   channel 3\n");
		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
		    nrate[0], nrate[1], nrate[2], nrate[3]);
		sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
		    orate[0], orate[1], orate[2], orate[3]);
	} else {
		sbuf_printf(sb, "              channel 0   channel 1\n");
		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju\n",
		    nrate[0], nrate[1]);
		sbuf_printf(sb, "Offload B/s: %10ju  %10ju",
		    orate[0], orate[1]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7862
7863 static int
7864 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
7865 {
7866         struct adapter *sc = arg1;
7867         struct sbuf *sb;
7868         uint32_t *buf, *p;
7869         int rc, i;
7870
7871         rc = sysctl_wire_old_buffer(req, 0);
7872         if (rc != 0)
7873                 return (rc);
7874
7875         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7876         if (sb == NULL)
7877                 return (ENOMEM);
7878
7879         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
7880             M_ZERO | M_WAITOK);
7881
7882         t4_ulprx_read_la(sc, buf);
7883         p = buf;
7884
7885         sbuf_printf(sb, "      Pcmd        Type   Message"
7886             "                Data");
7887         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
7888                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
7889                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
7890         }
7891
7892         rc = sbuf_finish(sb);
7893         sbuf_delete(sb);
7894         free(buf, M_CXGBE);
7895         return (rc);
7896 }
7897
/*
 * sysctl handler: report SGE write-combined write statistics (T5+).
 * Output is produced only when the SGE stat source is configured to 7
 * (presumably the WC-write source -- confirm against the SGE register
 * documentation); otherwise an empty string is returned.  The meaning
 * of the STAT_MATCH counter depends on the configured stat mode.
 */
static int
sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, v;

	/* This handler is only registered for T5 and later. */
	MPASS(chip_id(sc) >= CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	v = t4_read_reg(sc, A_SGE_STAT_CFG);
	if (G_STATSOURCE_T5(v) == 7) {
		int mode;

		/* The STATMODE field moved between T5 and T6. */
		mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
		if (mode == 0) {
			sbuf_printf(sb, "total %d, incomplete %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		} else if (mode == 1) {
			sbuf_printf(sb, "total %d, data overflow %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		} else {
			sbuf_printf(sb, "unknown mode %d", mode);
		}
	}
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
7937
/*
 * Sysctl handler: display one traffic class's scheduling parameters.
 * arg2 encodes the port id in the high 16 bits and the class index in the
 * low 16 bits.  The class parameters are snapshotted under tc_lock and
 * formatted without the lock held.
 */
static int
sysctl_tc_params(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct tx_cl_rl_params tc;
	struct sbuf *sb;
	int i, rc, port_id, mbps, gbps;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	port_id = arg2 >> 16;
	MPASS(port_id < sc->params.nports);
	MPASS(sc->port[port_id] != NULL);
	i = arg2 & 0xffff;
	MPASS(i < sc->chip_params->nsched_cls);

	/* Copy the class parameters; don't format while holding the lock. */
	mtx_lock(&sc->tc_lock);
	tc = sc->port[port_id]->sched_params->cl_rl[i];
	mtx_unlock(&sc->tc_lock);

	if (tc.flags & TX_CLRL_ERROR) {
		sbuf_printf(sb, "error");
		goto done;
	}

	if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) {
		/* XXX: top speed or actual link speed? */
		gbps = port_top_speed(sc->port[port_id]);
		sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps);
	} else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) {
		switch (tc.rateunit) {
		case SCHED_CLASS_RATEUNIT_BITS:
			/* maxrate is in Kbps; print the tidiest exact unit. */
			mbps = tc.maxrate / 1000;
			gbps = tc.maxrate / 1000000;
			if (tc.maxrate == gbps * 1000000)
				sbuf_printf(sb, " %uGbps", gbps);
			else if (tc.maxrate == mbps * 1000)
				sbuf_printf(sb, " %uMbps", mbps);
			else
				sbuf_printf(sb, " %uKbps", tc.maxrate);
			break;
		case SCHED_CLASS_RATEUNIT_PKTS:
			sbuf_printf(sb, " %upps", tc.maxrate);
			break;
		default:
			rc = ENXIO;
			goto done;
		}
	}

	switch (tc.mode) {
	case SCHED_CLASS_MODE_CLASS:
		sbuf_printf(sb, " aggregate");
		break;
	case SCHED_CLASS_MODE_FLOW:
		sbuf_printf(sb, " per-flow");
		break;
	default:
		rc = ENXIO;
		goto done;
	}

done:
	/* Don't let sbuf_finish overwrite an error already in rc. */
	if (rc == 0)
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
8013 #endif
8014
8015 #ifdef TCP_OFFLOAD
8016 static void
8017 unit_conv(char *buf, size_t len, u_int val, u_int factor)
8018 {
8019         u_int rem = val % factor;
8020
8021         if (rem == 0)
8022                 snprintf(buf, len, "%u", val / factor);
8023         else {
8024                 while (rem % 10 == 0)
8025                         rem /= 10;
8026                 snprintf(buf, len, "%u.%u", val / factor, rem);
8027         }
8028 }
8029
/*
 * Sysctl handler: report a TP tick duration in microseconds, as a string
 * (fractional values possible).  arg2 selects which tick: 0 = core timer,
 * 1 = TCP timestamp, 2 = delayed ACK.
 */
static int
sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	char buf[16];
	u_int res, re;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;	/* core clock period, ps */

	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	switch (arg2) {
	case 0:
		/* timer_tick */
		re = G_TIMERRESOLUTION(res);
		break;
	case 1:
		/* TCP timestamp tick */
		re = G_TIMESTAMPRESOLUTION(res);
		break;
	case 2:
		/* DACK tick */
		re = G_DELAYEDACKRESOLUTION(res);
		break;
	default:
		return (EDOOFUS);
	}

	/* tick = cclk period * 2^resolution; convert ps -> us. */
	unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);

	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}
8060
/*
 * Sysctl handler: report the delayed-ACK timer in microseconds, computed
 * from the DACK tick duration and the DACK timer register.
 */
static int
sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int res, dack_re, v;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;	/* core clock period, ps */

	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	dack_re = G_DELAYEDACKRESOLUTION(res);
	/* (tick in us) * (timer in ticks) */
	v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);

	return (sysctl_handle_int(oidp, &v, 0, req));
}
8074
/*
 * Sysctl handler: report a TP timer value in microseconds.  arg2 is the
 * address of the TP register holding the timer (in core-timer ticks).
 */
static int
sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int reg = arg2;
	u_int tre;
	u_long tp_tick_us, v;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;	/* core clock period, ps */

	/* Only the timer registers below are valid targets. */
	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
	    reg == A_TP_PERS_MIN  || reg == A_TP_PERS_MAX ||
	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);

	tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
	tp_tick_us = (cclk_ps << tre) / 1000000;

	/* INIT_SRTT stores its tick count in a sub-field of the register. */
	if (reg == A_TP_INIT_SRTT)
		v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
	else
		v = tp_tick_us * t4_read_reg(sc, reg);

	return (sysctl_handle_long(oidp, &v, 0, req));
}
8099 #endif
8100
8101 static uint32_t
8102 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
8103 {
8104         uint32_t mode;
8105
8106         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
8107             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
8108
8109         if (fconf & F_FRAGMENTATION)
8110                 mode |= T4_FILTER_IP_FRAGMENT;
8111
8112         if (fconf & F_MPSHITTYPE)
8113                 mode |= T4_FILTER_MPS_HIT_TYPE;
8114
8115         if (fconf & F_MACMATCH)
8116                 mode |= T4_FILTER_MAC_IDX;
8117
8118         if (fconf & F_ETHERTYPE)
8119                 mode |= T4_FILTER_ETH_TYPE;
8120
8121         if (fconf & F_PROTOCOL)
8122                 mode |= T4_FILTER_IP_PROTO;
8123
8124         if (fconf & F_TOS)
8125                 mode |= T4_FILTER_IP_TOS;
8126
8127         if (fconf & F_VLAN)
8128                 mode |= T4_FILTER_VLAN;
8129
8130         if (fconf & F_VNIC_ID) {
8131                 mode |= T4_FILTER_VNIC;
8132                 if (iconf & F_VNIC)
8133                         mode |= T4_FILTER_IC_VNIC;
8134         }
8135
8136         if (fconf & F_PORT)
8137                 mode |= T4_FILTER_PORT;
8138
8139         if (fconf & F_FCOE)
8140                 mode |= T4_FILTER_FCoE;
8141
8142         return (mode);
8143 }
8144
8145 static uint32_t
8146 mode_to_fconf(uint32_t mode)
8147 {
8148         uint32_t fconf = 0;
8149
8150         if (mode & T4_FILTER_IP_FRAGMENT)
8151                 fconf |= F_FRAGMENTATION;
8152
8153         if (mode & T4_FILTER_MPS_HIT_TYPE)
8154                 fconf |= F_MPSHITTYPE;
8155
8156         if (mode & T4_FILTER_MAC_IDX)
8157                 fconf |= F_MACMATCH;
8158
8159         if (mode & T4_FILTER_ETH_TYPE)
8160                 fconf |= F_ETHERTYPE;
8161
8162         if (mode & T4_FILTER_IP_PROTO)
8163                 fconf |= F_PROTOCOL;
8164
8165         if (mode & T4_FILTER_IP_TOS)
8166                 fconf |= F_TOS;
8167
8168         if (mode & T4_FILTER_VLAN)
8169                 fconf |= F_VLAN;
8170
8171         if (mode & T4_FILTER_VNIC)
8172                 fconf |= F_VNIC_ID;
8173
8174         if (mode & T4_FILTER_PORT)
8175                 fconf |= F_PORT;
8176
8177         if (mode & T4_FILTER_FCoE)
8178                 fconf |= F_FCOE;
8179
8180         return (fconf);
8181 }
8182
8183 static uint32_t
8184 mode_to_iconf(uint32_t mode)
8185 {
8186
8187         if (mode & T4_FILTER_IC_VNIC)
8188                 return (F_VNIC);
8189         return (0);
8190 }
8191
8192 static int check_fspec_against_fconf_iconf(struct adapter *sc,
8193     struct t4_filter_specification *fs)
8194 {
8195         struct tp_params *tpp = &sc->params.tp;
8196         uint32_t fconf = 0;
8197
8198         if (fs->val.frag || fs->mask.frag)
8199                 fconf |= F_FRAGMENTATION;
8200
8201         if (fs->val.matchtype || fs->mask.matchtype)
8202                 fconf |= F_MPSHITTYPE;
8203
8204         if (fs->val.macidx || fs->mask.macidx)
8205                 fconf |= F_MACMATCH;
8206
8207         if (fs->val.ethtype || fs->mask.ethtype)
8208                 fconf |= F_ETHERTYPE;
8209
8210         if (fs->val.proto || fs->mask.proto)
8211                 fconf |= F_PROTOCOL;
8212
8213         if (fs->val.tos || fs->mask.tos)
8214                 fconf |= F_TOS;
8215
8216         if (fs->val.vlan_vld || fs->mask.vlan_vld)
8217                 fconf |= F_VLAN;
8218
8219         if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
8220                 fconf |= F_VNIC_ID;
8221                 if (tpp->ingress_config & F_VNIC)
8222                         return (EINVAL);
8223         }
8224
8225         if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
8226                 fconf |= F_VNIC_ID;
8227                 if ((tpp->ingress_config & F_VNIC) == 0)
8228                         return (EINVAL);
8229         }
8230
8231         if (fs->val.iport || fs->mask.iport)
8232                 fconf |= F_PORT;
8233
8234         if (fs->val.fcoe || fs->mask.fcoe)
8235                 fconf |= F_FCOE;
8236
8237         if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
8238                 return (E2BIG);
8239
8240         return (0);
8241 }
8242
/*
 * Return the current filter mode (T4_FILTER_* flags) derived from the
 * cached TP register values.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tpp = &sc->params.tp;

	/*
	 * We trust the cached values of the relevant TP registers.  This means
	 * things work reliably only if writes to those registers are always via
	 * t4_set_filter_mode.
	 */
	*mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);

	return (0);
}
8257
/*
 * Change the global filter mode.  Fails with EBUSY if the requested mode
 * needs a different ingress configuration, if any filters are in use, or
 * if the TOE is active, since the mode cannot be changed safely then.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	/* Existing filters would be invalidated by a mode change. */
	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	/* t4_set_filter_mode returns a negative errno; flip the sign. */
	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
8300
/*
 * Read the hit count for filter fid directly from its TCB via memory
 * window 0.  On T4 an 8-byte big-endian count is read at TCB offset 16;
 * on T5 and later a 4-byte big-endian count is read at offset 24.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
	    (fid + sc->tids.ftid_base) * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}
8321
/*
 * Return the first valid filter at or after t->idx, filling in its
 * specification and (if hit counts are enabled) its hit count.  t->idx is
 * set to 0xffffffff if there is no such filter.  Always returns 0.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	/* Scan forward for the first valid entry. */
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;	/* hit counting disabled */
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
8360
/*
 * Program a new hardware filter at t->idx from the specification in t->fs.
 * Validates the request, allocates the filter table on first use, sends
 * the work request, and then sleeps until the firmware reply arrives
 * (t4_filter_rpl wakes us).  Returns EINPROGRESS if the sleep was
 * interrupted; the filter setup still completes asynchronously.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		goto done;

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		goto done;

	/* Allocate the filter table (and its lock) on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/* An IPv6 filter occupies 4 consecutive slots; check every one. */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		/* An IPv4 filter only needs its own slot. */
		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/* Wait for the firmware's reply to the work request. */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
8472
/*
 * Delete the hardware filter at t->idx.  Sends the delete work request
 * and sleeps until the firmware confirms (t4_filter_rpl wakes us).
 * Returns EINPROGRESS if the sleep was interrupted; the delete still
 * completes asynchronously.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* Can't delete a filter with an add/delete already in flight. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/* Wait for the firmware's reply to the work request. */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				/* Still valid after a delete means failure. */
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
8540
8541 static void
8542 clear_filter(struct filter_entry *f)
8543 {
8544         if (f->l2t)
8545                 t4_l2t_release(f->l2t);
8546
8547         bzero(f, sizeof (*f));
8548 }
8549
/*
 * Build and submit the FW_FILTER_WR work request that programs filter
 * fidx in hardware from the specification already stored in
 * sc->tids.ftid_tab[fidx].fs.  Allocates a switching L2T entry first if
 * the filter rewrites the dmac or vlan.  The firmware's reply is handled
 * by t4_filter_rpl.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid, vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Already validated against fconf, iconf */
	MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
	MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
	/* The VNIC valid bit covers both the ovlan and PF/VF cases. */
	if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	ftid = sc->tids.ftid_base + fidx;

	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof(*fwr));

	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	/* Have the firmware send its reply to the main firmware queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Mark in-flight before the WR is committed and can generate a reply. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}
8662
/*
 * Build and submit the work request that deletes filter fidx from the
 * hardware.  The firmware's reply (directed at the firmware event queue)
 * is handled by t4_filter_rpl.
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid;
	struct wrq_cookie cookie;

	ftid = sc->tids.ftid_base + fidx;

	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof (*fwr));

	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	/* Mark in-flight before the WR is committed and can generate a reply. */
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}
8684
/*
 * Handle the firmware's reply to a filter work request (delivered as a
 * CPL_SET_TCB_RPL on the firmware event queue).  Completes the pending
 * add or delete for the filter and wakes any thread sleeping in
 * set_filter/del_filter.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));
	MPASS(iq == &sc->sge.fwq);
	MPASS(is_ftid(sc, idx));

	idx -= sc->tids.ftid_base;
	f = &sc->tids.ftid_tab[idx];
	rc = G_COOKIE(rpl->cookie);	/* firmware's status for the request */

	mtx_lock(&sc->tids.ftid_lock);
	if (rc == FW_FILTER_WR_FLT_ADDED) {
		KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
		    __func__, idx));
		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
		f->pending = 0;  /* asynchronous setup completed */
		f->valid = 1;
	} else {
		if (rc != FW_FILTER_WR_FLT_DELETED) {
			/* Add or delete failed, display an error */
			log(LOG_ERR,
			    "filter %u setup failed with error %u\n",
			    idx, rc);
		}

		/* Deleted (or failed to add): release resources and slot. */
		clear_filter(f);
		sc->tids.ftids_in_use--;
	}
	/* Wake any thread sleeping in set_filter/del_filter. */
	wakeup(&sc->tids.ftid_tab);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}
8726
8727 static int
8728 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8729 {
8730
8731         MPASS(iq->set_tcb_rpl != NULL);
8732         return (iq->set_tcb_rpl(iq, rss, m));
8733 }
8734
8735 static int
8736 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
8737 {
8738
8739         MPASS(iq->l2t_write_rpl != NULL);
8740         return (iq->l2t_write_rpl(iq, rss, m));
8741 }
8742
/*
 * Read an SGE context (ioctl backend).  Tries the firmware first and
 * falls back to a direct backdoor register read if the firmware is
 * unavailable or the mailbox read fails.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
8775
/*
 * Flash a firmware image supplied by userland (ioctl backend).  Only
 * allowed before the adapter is fully initialized.
 *
 * NOTE(review): fw->len comes from userland and is passed to malloc
 * unvalidated — a huge value could tie up kernel memory.  Consider
 * bounding it (e.g. against the flash firmware region size) — TODO
 * confirm the right limit.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	/* NOTE(review): M_WAITOK malloc does not return NULL; check is dead. */
	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
8806
/*
 * Flash a configuration file supplied by userland (ioctl backend).  A
 * zero-length request erases the stored config.
 *
 * NOTE(review): cfg->len comes from userland and is passed to malloc
 * unvalidated; and the NULL check after an M_WAITOK malloc is dead code
 * (M_WAITOK does not return NULL).
 */
static int
load_cfg(struct adapter *sc, struct t4_data *cfg)
{
	int rc;
	uint8_t *cfg_data = NULL;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
	if (rc)
		return (rc);

	if (cfg->len == 0) {
		/* clear */
		rc = -t4_load_cfg(sc, NULL, 0);
		goto done;
	}

	cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
	if (cfg_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(cfg->data, cfg_data, cfg->len);
	if (rc == 0)
		rc = -t4_load_cfg(sc, cfg_data, cfg->len);

	free(cfg_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
8838
#define MAX_READ_BUF_SIZE (128 * 1024)
/*
 * Copy a range of card memory out to userland (ioctl backend).  The range
 * is validated first, then read through memory window 2 in chunks of at
 * most MAX_READ_BUF_SIZE bytes.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, remaining, n;
	uint32_t *buf;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;		/* userland destination */

	while (remaining) {
		n = min(remaining, MAX_READ_BUF_SIZE);
		read_via_memwin(sc, 2, addr, buf, n);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
#undef MAX_READ_BUF_SIZE
8874
8875 static int
8876 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
8877 {
8878         int rc;
8879
8880         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
8881                 return (EINVAL);
8882
8883         if (i2cd->len > sizeof(i2cd->data))
8884                 return (EFBIG);
8885
8886         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
8887         if (rc)
8888                 return (rc);
8889         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
8890             i2cd->offset, i2cd->len, &i2cd->data[0]);
8891         end_synchronized_op(sc, 0);
8892
8893         return (rc);
8894 }
8895
8896 int
8897 t4_os_find_pci_capability(struct adapter *sc, int cap)
8898 {
8899         int i;
8900
8901         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
8902 }
8903
8904 int
8905 t4_os_pci_save_state(struct adapter *sc)
8906 {
8907         device_t dev;
8908         struct pci_devinfo *dinfo;
8909
8910         dev = sc->dev;
8911         dinfo = device_get_ivars(dev);
8912
8913         pci_cfg_save(dev, dinfo, 0);
8914         return (0);
8915 }
8916
8917 int
8918 t4_os_pci_restore_state(struct adapter *sc)
8919 {
8920         device_t dev;
8921         struct pci_devinfo *dinfo;
8922
8923         dev = sc->dev;
8924         dinfo = device_get_ivars(dev);
8925
8926         pci_cfg_restore(dev, dinfo);
8927         return (0);
8928 }
8929
/*
 * Called when the transceiver module on a port changes (plug/unplug or a
 * different module inserted).  Rebuilds the port's media list, reapplies
 * the link (L1) configuration, and logs the event on the main VI's ifnet.
 */
void
t4_os_portmod_changed(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	struct ifnet *ifp;
	/* Indexed by mod_type; entries 1..6 match FW module types. */
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	/* Ports with fixed media should never see module change events. */
	MPASS((pi->flags & FIXED_IFMEDIA) == 0);

	vi = &pi->vi[0];
	if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) {
		PORT_LOCK(pi);
		build_medialist(pi, &pi->media);
		apply_l1cfg(pi);
		PORT_UNLOCK(pi);
		end_synchronized_op(sc, LOCK_HELD);
	}

	/* Report what happened; unbraced first branch pairs with the else-if chain. */
	ifp = vi->ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%dGbps %s transceiver inserted.\n",
		    port_top_speed(pi), mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}
8966
/*
 * Called with the port lock held when a port's link state changes.
 * Propagates the new state (and baudrate, when up) to every VI's ifnet.
 */
void
t4_os_link_changed(struct port_info *pi)
{
	struct vi_info *vi;
	struct ifnet *ifp;
	struct link_config *lc;
	int v;

	PORT_LOCK_ASSERT_OWNED(pi);

	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)	/* VI not fully attached yet */
			continue;

		lc = &pi->link_cfg;
		if (lc->link_ok) {
			ifp->if_baudrate = IF_Mbps(lc->speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}
8991
/*
 * Invoke 'func' on every T4/T5 adapter in the system, with the adapter
 * list read-locked for the duration of the walk.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}
9007
/*
 * ioctl handler for the nexus character device.  Every command requires
 * PRIV_DRIVER.  Returns 0 on success or an errno.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register offset must be 4B-aligned and inside BAR0. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4B write must not have bits set above bit 31. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = t4_get_regs_len(sc);
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		/* Window 2 is the one set up for this kind of access. */
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];
		if (pi == NULL)
			return (EIO);

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		mtx_unlock(&sc->reg_lock);

		/*
		 * Since this command accepts a port, clear stats for
		 * all VIs on this port.
		 */
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE) {
				struct sge_rxq *rxq;
				struct sge_txq *txq;
				struct sge_wrq *wrq;

				for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
					rxq->lro.lro_queued = 0;
					rxq->lro.lro_flushed = 0;
#endif
					rxq->rxcsum = 0;
					rxq->vlan_extraction = 0;
				}

				for_each_txq(vi, i, txq) {
					txq->txcsum = 0;
					txq->tso_wrs = 0;
					txq->vlan_insertion = 0;
					txq->imm_wrs = 0;
					txq->sgl_wrs = 0;
					txq->txpkt_wrs = 0;
					txq->txpkts0_wrs = 0;
					txq->txpkts1_wrs = 0;
					txq->txpkts0_pkts = 0;
					txq->txpkts1_pkts = 0;
					mp_ring_reset_stats(txq->r);
				}

#ifdef TCP_OFFLOAD
				/* nothing to clear for each ofld_rxq */

				for_each_ofld_txq(vi, i, wrq) {
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
#endif

				/* The control queue is per-port; clear it once, via the main VI. */
				if (IS_MAIN_VI(vi)) {
					wrq = &sc->sge.ctrlq[pi->port_id];
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
			}
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_LOAD_CFG:
		rc = load_cfg(sc, (struct t4_data *)data);
		break;
	default:
		rc = ENOTTY;
	}

	return (rc);
}
9189
/*
 * NOTE(review): doorbell-full handling is not implemented in this driver;
 * CXGBE_UNIMPLEMENTED presumably panics/logs if this is ever invoked.
 */
void
t4_db_full(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}
9196
/*
 * NOTE(review): dropped-doorbell handling is not implemented here;
 * CXGBE_UNIMPLEMENTED presumably panics/logs if this is ever invoked.
 */
void
t4_db_dropped(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}
9203
9204 #ifdef TCP_OFFLOAD
/*
 * Program the ULP-RX iSCSI tag mask and the four HPZ page-size order
 * registers.  Called by the iSCSI ULD during its setup.
 */
void
t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order)
{

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
		V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
		V_HPZ3(pgsz_order[3]));
}
9214
/*
 * Enable or disable TOE on a VI.  TOE state is tracked per port: uld_vis
 * counts the VIs with TOE on, and offload_map has a bit per port that is
 * using the TOE.  Must be called inside a synchronized operation.
 * Returns 0 or an errno.
 */
static int
toe_capability(struct vi_info *vi, int enable)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
			/* TOE is already enabled. */
			return (0);
		}

		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(vi->flags & VI_INIT_DONE)) {
			rc = vi_full_init(vi);
			if (rc)
				return (rc);
		}
		/* The main VI's queues are needed too (CPLs arrive there). */
		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
			rc = vi_full_init(&pi->vi[0]);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		/* First VI on this port to enable TOE: activate the TOM ULD. */
		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;

		/* Leave the port marked if other VIs still have TOE on. */
		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
9291
9292 /*
9293  * Add an upper layer driver to the global list.
9294  */
9295 int
9296 t4_register_uld(struct uld_info *ui)
9297 {
9298         int rc = 0;
9299         struct uld_info *u;
9300
9301         sx_xlock(&t4_uld_list_lock);
9302         SLIST_FOREACH(u, &t4_uld_list, link) {
9303             if (u->uld_id == ui->uld_id) {
9304                     rc = EEXIST;
9305                     goto done;
9306             }
9307         }
9308
9309         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
9310         ui->refcount = 0;
9311 done:
9312         sx_xunlock(&t4_uld_list_lock);
9313         return (rc);
9314 }
9315
9316 int
9317 t4_unregister_uld(struct uld_info *ui)
9318 {
9319         int rc = EINVAL;
9320         struct uld_info *u;
9321
9322         sx_xlock(&t4_uld_list_lock);
9323
9324         SLIST_FOREACH(u, &t4_uld_list, link) {
9325             if (u == ui) {
9326                     if (ui->refcount > 0) {
9327                             rc = EBUSY;
9328                             goto done;
9329                     }
9330
9331                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
9332                     rc = 0;
9333                     goto done;
9334             }
9335         }
9336 done:
9337         sx_xunlock(&t4_uld_list_lock);
9338         return (rc);
9339 }
9340
/*
 * Activate the ULD with the given id on this adapter, bringing the
 * adapter fully up first if needed.  Returns 0 on success, EAGAIN if the
 * ULD's module isn't loaded, or another errno.  Must be called inside a
 * synchronized operation.
 */
int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			/* ULDs need the adapter fully initialized. */
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}
9376
/*
 * Deactivate the ULD with the given id on this adapter.  Returns 0 on
 * success, ENXIO if the ULD isn't registered, or the ULD's own error.
 * Must be called inside a synchronized operation.
 */
int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}
9406
/* Is the ULD with this id active on this adapter?  Returns non-zero if so. */
int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
9415 #endif
9416
/*
 * Settle a queue-count tunable's final value.
 * t  = ptr to tunable.
 * nc = number of CPUs.
 * c  = compiled in default for that tunable.
 *
 * A positive value is taken as-is (user set it); a negative value means
 * "use -value"; zero means "use the compiled-in default".  In the last
 * two cases the result is additionally capped at the CPU count.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
	int want;

	if (*t > 0)
		return;			/* explicitly set; leave alone */
	want = (*t < 0) ? -*t : c;
	*t = (want < nc) ? want : nc;	/* never more queues than CPUs */
}
9432
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	/* txq/rxq counts: with RSS, match the RSS bucket count instead. */
	if (t4_ntxq10g < 1) {
#ifdef RSS
		t4_ntxq10g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G);
#endif
	}

	if (t4_ntxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_ntxq1g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G);
#endif
	}

	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);

	if (t4_nrxq10g < 1) {
#ifdef RSS
		t4_nrxq10g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G);
#endif
	}

	if (t4_nrxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_nrxq1g = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G);
#endif
	}

	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#ifdef TCP_OFFLOAD
	/* Offload queue counts. */
	calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G);
	calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G);
	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
	calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G);
	calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G);
	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);

	/* -1 means "not set by the user"; pick the full capability set. */
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;

	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}
#else
	/* No TCP_OFFLOAD in this kernel: disable all offload capabilities. */
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;

	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

	/* Interrupt holdoff timer/packet-count indices; clamp to valid range. */
	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	/* Queue sizes: at least 128 entries; rxq size rounded up to 8. */
	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	/* Drop any unknown bits from the interrupt-type mask. */
	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
9539
9540 #ifdef DDB
/*
 * DDB helper: dump the 128-byte TCB (TCP control block) for a tid by
 * temporarily repointing PCIe memory window 2 at it.  The window's
 * original position is saved and restored around the dump.
 */
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);	/* current window position */
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	/* Window alignment requirements differ between T4 and later chips. */
	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);	/* read back to flush the window update */

	/* 4 rows of 8 32-bit words = 128B TCB, printed in network order. */
	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];
		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	/* Restore the window to where it was. */
	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}
9578
/*
 * DDB helper: dump the firmware device log.  The log lives in adapter
 * memory as a circular buffer of fw_devlog_e entries; the entry with the
 * lowest timestamp is the oldest, so start there and wrap around.
 */
static void
t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		/* A zero timestamp marks an unused slot. */
		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)	/* log is empty */
		return;

	/* Print from the oldest entry, wrapping around the circular log. */
	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			return;

		if (e.timestamp == 0)
			return;

		/* Entries are stored big-endian by the firmware. */
		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d  %15ju  %8s  %8s  ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}
9645
/* DDB "show t4" command table; subcommands are attached via DB_FUNC below. */
static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
9648
/* DDB command: "show t4 devlog <nexus>" - dump an adapter's firmware log. */
DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t;
	bool valid;

	/* Parse the nexus device name from the command line. */
	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}
9674
/* DDB command: "show t4 tcb <nexus> <tid>" - dump the TCB for a tid. */
DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int radix, tid, t;
	bool valid;

	/* Parse <nexus> <tid>; force decimal for the tid, then restore radix. */
	valid = false;
	radix = db_radix;
	db_radix = 10;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
		if (t == tNUMBER) {
			tid = db_tok_number;
			valid = true;
		}
	}
	db_radix = radix;
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}
9711 #endif
9712
/* Serializes MOD_LOAD/MOD_UNLOAD handling in mod_event(). */
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
9715
9716 static int
9717 mod_event(module_t mod, int cmd, void *arg)
9718 {
9719         int rc = 0;
9720         static int loaded = 0;
9721
9722         switch (cmd) {
9723         case MOD_LOAD:
9724                 sx_xlock(&mlu);
9725                 if (loaded++ == 0) {
9726                         t4_sge_modload();
9727                         t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
9728                         t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
9729                         t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
9730                         t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
9731                         sx_init(&t4_list_lock, "T4/T5 adapters");
9732                         SLIST_INIT(&t4_list);
9733 #ifdef TCP_OFFLOAD
9734                         sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
9735                         SLIST_INIT(&t4_uld_list);
9736 #endif
9737                         t4_tracer_modload();
9738                         tweak_tunables();
9739                 }
9740                 sx_xunlock(&mlu);
9741                 break;
9742
9743         case MOD_UNLOAD:
9744                 sx_xlock(&mlu);
9745                 if (--loaded == 0) {
9746                         int tries;
9747
9748                         sx_slock(&t4_list_lock);
9749                         if (!SLIST_EMPTY(&t4_list)) {
9750                                 rc = EBUSY;
9751                                 sx_sunlock(&t4_list_lock);
9752                                 goto done_unload;
9753                         }
9754 #ifdef TCP_OFFLOAD
9755                         sx_slock(&t4_uld_list_lock);
9756                         if (!SLIST_EMPTY(&t4_uld_list)) {
9757                                 rc = EBUSY;
9758                                 sx_sunlock(&t4_uld_list_lock);
9759                                 sx_sunlock(&t4_list_lock);
9760                                 goto done_unload;
9761                         }
9762 #endif
9763                         tries = 0;
9764                         while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
9765                                 uprintf("%ju clusters with custom free routine "
9766                                     "still is use.\n", t4_sge_extfree_refs());
9767                                 pause("t4unload", 2 * hz);
9768                         }
9769 #ifdef TCP_OFFLOAD
9770                         sx_sunlock(&t4_uld_list_lock);
9771 #endif
9772                         sx_sunlock(&t4_list_lock);
9773
9774                         if (t4_sge_extfree_refs() == 0) {
9775                                 t4_tracer_modunload();
9776 #ifdef TCP_OFFLOAD
9777                                 sx_destroy(&t4_uld_list_lock);
9778 #endif
9779                                 sx_destroy(&t4_list_lock);
9780                                 t4_sge_modunload();
9781                                 loaded = 0;
9782                         } else {
9783                                 rc = EBUSY;
9784                                 loaded++;       /* undo earlier decrement */
9785                         }
9786                 }
9787 done_unload:
9788                 sx_xunlock(&mlu);
9789                 break;
9790         }
9791
9792         return (rc);
9793 }
9794
/*
 * Device classes: one per nexus chip generation (t4/t5/t6), one per
 * physical port driver (cxgbe/cxl/cc), and one per virtual interface
 * driver (vcxgbe/vcxl/vcc).
 */
static devclass_t t4_devclass, t5_devclass, t6_devclass;
static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
9798
/*
 * Nexus drivers attach to the PCI bus; each registers mod_event so the
 * shared global state above is set up on first load and torn down on
 * last unload.
 */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/* Port drivers attach to their corresponding nexus. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
MODULE_VERSION(cc, 1);

/* Virtual interface drivers attach to their corresponding port driver. */
DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
MODULE_VERSION(vcc, 1);