]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/cxgbe/t4_main.c
MFC r266571, r266757, r268536, r269076, r269364, r269366, r269411,
[FreeBSD/stable/10.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* Newbus methods for the t4nex (T4 adapter nexus) device. */
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
/* The t4nex softc is the full struct adapter. */
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
/* Entry points for the T4 nexus character device. */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/*
 * Character device switch for the T4 nexus.  Exposes open/close/ioctl
 * to userland; the ioctl interface is defined in t4_ioctl.h.
 */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
/* T5 bus driver interface */
static int t5_probe(device_t);
/*
 * T5 shares attach/detach with T4; only the probe routine differs
 * (separate PCI device ID table).
 */
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
/* The t5nex softc is the full struct adapter, same as t4nex. */
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};
131
132
/* T5 port (cxl) interface; reuses the cxgbe device methods wholesale. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 nexus character device; same entry points as t4_cdevsw. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158
159 /*
160  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock.
162  */
163 static struct sx t4_list_lock;
164 SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct sx t4_uld_list_lock;
167 SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
169
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200 static int t4_rsrv_noflowq = 0;
201 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
202
203 #ifdef TCP_OFFLOAD
204 #define NOFLDTXQ_10G 8
205 static int t4_nofldtxq10g = -1;
206 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
207
208 #define NOFLDRXQ_10G 2
209 static int t4_nofldrxq10g = -1;
210 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
211
212 #define NOFLDTXQ_1G 2
213 static int t4_nofldtxq1g = -1;
214 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
215
216 #define NOFLDRXQ_1G 1
217 static int t4_nofldrxq1g = -1;
218 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
219 #endif
220
221 #ifdef DEV_NETMAP
222 #define NNMTXQ_10G 2
223 static int t4_nnmtxq10g = -1;
224 TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
225
226 #define NNMRXQ_10G 2
227 static int t4_nnmrxq10g = -1;
228 TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
229
230 #define NNMTXQ_1G 1
231 static int t4_nnmtxq1g = -1;
232 TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
233
234 #define NNMRXQ_1G 1
235 static int t4_nnmrxq1g = -1;
236 TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
237 #endif
238
239 /*
240  * Holdoff parameters for 10G and 1G ports.
241  */
242 #define TMR_IDX_10G 1
243 static int t4_tmr_idx_10g = TMR_IDX_10G;
244 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
245
246 #define PKTC_IDX_10G (-1)
247 static int t4_pktc_idx_10g = PKTC_IDX_10G;
248 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
249
250 #define TMR_IDX_1G 1
251 static int t4_tmr_idx_1g = TMR_IDX_1G;
252 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
253
254 #define PKTC_IDX_1G (-1)
255 static int t4_pktc_idx_1g = PKTC_IDX_1G;
256 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
257
258 /*
259  * Size (# of entries) of each tx and rx queue.
260  */
261 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
262 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
263
264 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
265 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
266
267 /*
268  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
269  */
270 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
271 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
272
273 /*
274  * Configuration file.
275  */
276 #define DEFAULT_CF      "default"
277 #define FLASH_CF        "flash"
278 #define UWIRE_CF        "uwire"
279 #define FPGA_CF         "fpga"
280 static char t4_cfg_file[32] = DEFAULT_CF;
281 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
282
283 /*
284  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
285  * encouraged respectively).
286  */
287 static unsigned int t4_fw_install = 1;
288 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
289
290 /*
291  * ASIC features that will be used.  Disable the ones you don't want so that the
292  * chip resources aren't wasted on features that will not be used.
293  */
294 static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
295 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
296
297 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
298 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
299
300 static int t4_toecaps_allowed = -1;
301 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
302
303 static int t4_rdmacaps_allowed = 0;
304 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
305
306 static int t4_iscsicaps_allowed = 0;
307 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
308
309 static int t4_fcoecaps_allowed = 0;
310 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
311
312 static int t5_write_combine = 0;
313 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
314
/*
 * Interrupt vector and per-port queue counts decided at attach time.
 * Filled in by cfg_itype_and_nqueues() and consumed by t4_attach().
 */
struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
#endif
};
338
/*
 * Driver-side state for one hardware filter.  The filter's match
 * criteria and action live in fs; the bitfields track its lifecycle.
 */
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};
348
349 static int map_bars_0_and_4(struct adapter *);
350 static int map_bar_2(struct adapter *);
351 static void setup_memwin(struct adapter *);
352 static int validate_mem_range(struct adapter *, uint32_t, int);
353 static int fwmtype_to_hwmtype(int);
354 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
355     uint32_t *);
356 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
357 static uint32_t position_memwin(struct adapter *, int, uint32_t);
358 static int cfg_itype_and_nqueues(struct adapter *, int, int,
359     struct intrs_and_queues *);
360 static int prep_firmware(struct adapter *);
361 static int partition_resources(struct adapter *, const struct firmware *,
362     const char *);
363 static int get_params__pre_init(struct adapter *);
364 static int get_params__post_init(struct adapter *);
365 static int set_params__post_init(struct adapter *);
366 static void t4_set_desc(struct adapter *);
367 static void build_medialist(struct port_info *, struct ifmedia *);
368 static int cxgbe_init_synchronized(struct port_info *);
369 static int cxgbe_uninit_synchronized(struct port_info *);
370 static int setup_intr_handlers(struct adapter *);
371 static void quiesce_eq(struct adapter *, struct sge_eq *);
372 static void quiesce_iq(struct adapter *, struct sge_iq *);
373 static void quiesce_fl(struct adapter *, struct sge_fl *);
374 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
375     driver_intr_t *, void *, char *);
376 static int t4_free_irq(struct adapter *, struct irq *);
377 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
378     unsigned int);
379 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
380 static void cxgbe_tick(void *);
381 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
382 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
383     struct mbuf *);
384 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
385 static int fw_msg_not_handled(struct adapter *, const __be64 *);
386 static int t4_sysctls(struct adapter *);
387 static int cxgbe_sysctls(struct port_info *);
388 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
389 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
390 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
391 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
392 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
393 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
394 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
395 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
396 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
397 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
398 #ifdef SBUF_DRAIN
399 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
400 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
401 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
402 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
403 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
404 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
405 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
406 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
407 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
408 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
409 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
410 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
411 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
412 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
413 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
414 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
415 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
416 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
417 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
418 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
419 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
420 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
421 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
422 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
423 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
424 #endif
425 static inline void txq_start(struct ifnet *, struct sge_txq *);
426 static uint32_t fconf_to_mode(uint32_t);
427 static uint32_t mode_to_fconf(uint32_t);
428 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
429 static int get_filter_mode(struct adapter *, uint32_t *);
430 static int set_filter_mode(struct adapter *, uint32_t);
431 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
432 static int get_filter(struct adapter *, struct t4_filter *);
433 static int set_filter(struct adapter *, struct t4_filter *);
434 static int del_filter(struct adapter *, struct t4_filter *);
435 static void clear_filter(struct filter_entry *);
436 static int set_filter_wr(struct adapter *, int);
437 static int del_filter_wr(struct adapter *, int);
438 static int get_sge_context(struct adapter *, struct t4_sge_context *);
439 static int load_fw(struct adapter *, struct t4_data *);
440 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
441 static int read_i2c(struct adapter *, struct t4_i2c_data *);
442 static int set_sched_class(struct adapter *, struct t4_sched_params *);
443 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
444 #ifdef TCP_OFFLOAD
445 static int toe_capability(struct port_info *, int);
446 #endif
447 static int mod_event(module_t, int, void *);
448
/*
 * PCI device IDs recognized by t4_probe() and t5_probe() respectively,
 * with the description string installed on a match.
 */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};
492
493 #ifdef TCP_OFFLOAD
494 /*
495  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
496  * exactly the same for both rxq and ofld_rxq.
497  */
498 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
499 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
500 #endif
501
502 /* No easy way to include t4_msg.h before adapter.h so we check this way */
503 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
504 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
505
506 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
507
508 static int
509 t4_probe(device_t dev)
510 {
511         int i;
512         uint16_t v = pci_get_vendor(dev);
513         uint16_t d = pci_get_device(dev);
514         uint8_t f = pci_get_function(dev);
515
516         if (v != PCI_VENDOR_ID_CHELSIO)
517                 return (ENXIO);
518
519         /* Attach only to PF0 of the FPGA */
520         if (d == 0xa000 && f != 0)
521                 return (ENXIO);
522
523         for (i = 0; i < nitems(t4_pciids); i++) {
524                 if (d == t4_pciids[i].device) {
525                         device_set_desc(dev, t4_pciids[i].desc);
526                         return (BUS_PROBE_DEFAULT);
527                 }
528         }
529
530         return (ENXIO);
531 }
532
533 static int
534 t5_probe(device_t dev)
535 {
536         int i;
537         uint16_t v = pci_get_vendor(dev);
538         uint16_t d = pci_get_device(dev);
539         uint8_t f = pci_get_function(dev);
540
541         if (v != PCI_VENDOR_ID_CHELSIO)
542                 return (ENXIO);
543
544         /* Attach only to PF0 of the FPGA */
545         if (d == 0xb000 && f != 0)
546                 return (ENXIO);
547
548         for (i = 0; i < nitems(t5_pciids); i++) {
549                 if (d == t5_pciids[i].device) {
550                         device_set_desc(dev, t5_pciids[i].desc);
551                         return (BUS_PROBE_DEFAULT);
552                 }
553         }
554
555         return (ENXIO);
556 }
557
558 static int
559 t4_attach(device_t dev)
560 {
561         struct adapter *sc;
562         int rc = 0, i, n10g, n1g, rqidx, tqidx;
563         struct intrs_and_queues iaq;
564         struct sge *s;
565 #ifdef TCP_OFFLOAD
566         int ofld_rqidx, ofld_tqidx;
567 #endif
568 #ifdef DEV_NETMAP
569         int nm_rqidx, nm_tqidx;
570 #endif
571
572         sc = device_get_softc(dev);
573         sc->dev = dev;
574
575         pci_enable_busmaster(dev);
576         if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
577                 uint32_t v;
578
579                 pci_set_max_read_req(dev, 4096);
580                 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
581                 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
582                 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
583         }
584
585         sc->traceq = -1;
586         mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
587         snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
588             device_get_nameunit(dev));
589
590         snprintf(sc->lockname, sizeof(sc->lockname), "%s",
591             device_get_nameunit(dev));
592         mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
593         sx_xlock(&t4_list_lock);
594         SLIST_INSERT_HEAD(&t4_list, sc, link);
595         sx_xunlock(&t4_list_lock);
596
597         mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
598         TAILQ_INIT(&sc->sfl);
599         callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
600
601         rc = map_bars_0_and_4(sc);
602         if (rc != 0)
603                 goto done; /* error message displayed already */
604
605         /*
606          * This is the real PF# to which we're attaching.  Works from within PCI
607          * passthrough environments too, where pci_get_function() could return a
608          * different PF# depending on the passthrough configuration.  We need to
609          * use the real PF# in all our communication with the firmware.
610          */
611         sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
612         sc->mbox = sc->pf;
613
614         memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
615         sc->an_handler = an_not_handled;
616         for (i = 0; i < nitems(sc->cpl_handler); i++)
617                 sc->cpl_handler[i] = cpl_not_handled;
618         for (i = 0; i < nitems(sc->fw_msg_handler); i++)
619                 sc->fw_msg_handler[i] = fw_msg_not_handled;
620         t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
621         t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
622         t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
623         t4_init_sge_cpl_handlers(sc);
624
625         /* Prepare the adapter for operation */
626         rc = -t4_prep_adapter(sc);
627         if (rc != 0) {
628                 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
629                 goto done;
630         }
631
632         /*
633          * Do this really early, with the memory windows set up even before the
634          * character device.  The userland tool's register i/o and mem read
635          * will work even in "recovery mode".
636          */
637         setup_memwin(sc);
638         sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
639             device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
640             device_get_nameunit(dev));
641         if (sc->cdev == NULL)
642                 device_printf(dev, "failed to create nexus char device.\n");
643         else
644                 sc->cdev->si_drv1 = sc;
645
646         /* Go no further if recovery mode has been requested. */
647         if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
648                 device_printf(dev, "recovery mode.\n");
649                 goto done;
650         }
651
652         /* Prepare the firmware for operation */
653         rc = prep_firmware(sc);
654         if (rc != 0)
655                 goto done; /* error message displayed already */
656
657         rc = get_params__post_init(sc);
658         if (rc != 0)
659                 goto done; /* error message displayed already */
660
661         rc = set_params__post_init(sc);
662         if (rc != 0)
663                 goto done; /* error message displayed already */
664
665         rc = map_bar_2(sc);
666         if (rc != 0)
667                 goto done; /* error message displayed already */
668
669         rc = t4_create_dma_tag(sc);
670         if (rc != 0)
671                 goto done; /* error message displayed already */
672
673         /*
674          * First pass over all the ports - allocate VIs and initialize some
675          * basic parameters like mac address, port type, etc.  We also figure
676          * out whether a port is 10G or 1G and use that information when
677          * calculating how many interrupts to attempt to allocate.
678          */
679         n10g = n1g = 0;
680         for_each_port(sc, i) {
681                 struct port_info *pi;
682
683                 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
684                 sc->port[i] = pi;
685
686                 /* These must be set before t4_port_init */
687                 pi->adapter = sc;
688                 pi->port_id = i;
689
690                 /* Allocate the vi and initialize parameters like mac addr */
691                 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
692                 if (rc != 0) {
693                         device_printf(dev, "unable to initialize port %d: %d\n",
694                             i, rc);
695                         free(pi, M_CXGBE);
696                         sc->port[i] = NULL;
697                         goto done;
698                 }
699                 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
700                 if (rc != 0) {
701                         device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
702                         free(pi, M_CXGBE);
703                         sc->port[i] = NULL;
704                         goto done;
705                 }
706
707                 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
708                     device_get_nameunit(dev), i);
709                 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
710                 sc->chan_map[pi->tx_chan] = i;
711
712                 if (is_10G_port(pi) || is_40G_port(pi)) {
713                         n10g++;
714                         pi->tmr_idx = t4_tmr_idx_10g;
715                         pi->pktc_idx = t4_pktc_idx_10g;
716                 } else {
717                         n1g++;
718                         pi->tmr_idx = t4_tmr_idx_1g;
719                         pi->pktc_idx = t4_pktc_idx_1g;
720                 }
721
722                 pi->xact_addr_filt = -1;
723                 pi->linkdnrc = -1;
724
725                 pi->qsize_rxq = t4_qsize_rxq;
726                 pi->qsize_txq = t4_qsize_txq;
727
728                 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
729                 if (pi->dev == NULL) {
730                         device_printf(dev,
731                             "failed to add device for port %d.\n", i);
732                         rc = ENXIO;
733                         goto done;
734                 }
735                 device_set_softc(pi->dev, pi);
736         }
737
738         /*
739          * Interrupt type, # of interrupts, # of rx/tx queues, etc.
740          */
741         rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
742         if (rc != 0)
743                 goto done; /* error message displayed already */
744
745         sc->intr_type = iaq.intr_type;
746         sc->intr_count = iaq.nirq;
747
748         s = &sc->sge;
749         s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
750         s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
751         s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
752         s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
753         s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
754 #ifdef TCP_OFFLOAD
755         if (is_offload(sc)) {
756                 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
757                 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
758                 s->neq += s->nofldtxq + s->nofldrxq;
759                 s->niq += s->nofldrxq;
760
761                 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
762                     M_CXGBE, M_ZERO | M_WAITOK);
763                 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
764                     M_CXGBE, M_ZERO | M_WAITOK);
765         }
766 #endif
767 #ifdef DEV_NETMAP
768         s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
769         s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
770         s->neq += s->nnmtxq + s->nnmrxq;
771         s->niq += s->nnmrxq;
772
773         s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
774             M_CXGBE, M_ZERO | M_WAITOK);
775         s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
776             M_CXGBE, M_ZERO | M_WAITOK);
777 #endif
778
779         s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
780             M_ZERO | M_WAITOK);
781         s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
782             M_ZERO | M_WAITOK);
783         s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
784             M_ZERO | M_WAITOK);
785         s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
786             M_ZERO | M_WAITOK);
787         s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
788             M_ZERO | M_WAITOK);
789
790         sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
791             M_ZERO | M_WAITOK);
792
793         t4_init_l2t(sc, M_WAITOK);
794
795         /*
796          * Second pass over the ports.  This time we know the number of rx and
797          * tx queues that each port should get.
798          */
799         rqidx = tqidx = 0;
800 #ifdef TCP_OFFLOAD
801         ofld_rqidx = ofld_tqidx = 0;
802 #endif
803 #ifdef DEV_NETMAP
804         nm_rqidx = nm_tqidx = 0;
805 #endif
806         for_each_port(sc, i) {
807                 struct port_info *pi = sc->port[i];
808
809                 if (pi == NULL)
810                         continue;
811
812                 pi->first_rxq = rqidx;
813                 pi->first_txq = tqidx;
814                 if (is_10G_port(pi) || is_40G_port(pi)) {
815                         pi->flags |= iaq.intr_flags_10g;
816                         pi->nrxq = iaq.nrxq10g;
817                         pi->ntxq = iaq.ntxq10g;
818                 } else {
819                         pi->flags |= iaq.intr_flags_1g;
820                         pi->nrxq = iaq.nrxq1g;
821                         pi->ntxq = iaq.ntxq1g;
822                 }
823
824                 if (pi->ntxq > 1)
825                         pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
826                 else
827                         pi->rsrv_noflowq = 0;
828
829                 rqidx += pi->nrxq;
830                 tqidx += pi->ntxq;
831 #ifdef TCP_OFFLOAD
832                 if (is_offload(sc)) {
833                         pi->first_ofld_rxq = ofld_rqidx;
834                         pi->first_ofld_txq = ofld_tqidx;
835                         if (is_10G_port(pi) || is_40G_port(pi)) {
836                                 pi->nofldrxq = iaq.nofldrxq10g;
837                                 pi->nofldtxq = iaq.nofldtxq10g;
838                         } else {
839                                 pi->nofldrxq = iaq.nofldrxq1g;
840                                 pi->nofldtxq = iaq.nofldtxq1g;
841                         }
842                         ofld_rqidx += pi->nofldrxq;
843                         ofld_tqidx += pi->nofldtxq;
844                 }
845 #endif
846 #ifdef DEV_NETMAP
847                 pi->first_nm_rxq = nm_rqidx;
848                 pi->first_nm_txq = nm_tqidx;
849                 if (is_10G_port(pi) || is_40G_port(pi)) {
850                         pi->nnmrxq = iaq.nnmrxq10g;
851                         pi->nnmtxq = iaq.nnmtxq10g;
852                 } else {
853                         pi->nnmrxq = iaq.nnmrxq1g;
854                         pi->nnmtxq = iaq.nnmtxq1g;
855                 }
856                 nm_rqidx += pi->nnmrxq;
857                 nm_tqidx += pi->nnmtxq;
858 #endif
859         }
860
861         rc = setup_intr_handlers(sc);
862         if (rc != 0) {
863                 device_printf(dev,
864                     "failed to setup interrupt handlers: %d\n", rc);
865                 goto done;
866         }
867
868         rc = bus_generic_attach(dev);
869         if (rc != 0) {
870                 device_printf(dev,
871                     "failed to attach all child ports: %d\n", rc);
872                 goto done;
873         }
874
875         device_printf(dev,
876             "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
877             sc->params.pci.width, sc->params.nports, sc->intr_count,
878             sc->intr_type == INTR_MSIX ? "MSI-X" :
879             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
880             sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
881
882         t4_set_desc(sc);
883
884 done:
885         if (rc != 0 && sc->cdev) {
886                 /* cdev was created and so cxgbetool works; recover that way. */
887                 device_printf(dev,
888                     "error during attach, adapter is now in recovery mode.\n");
889                 rc = 0;
890         }
891
892         if (rc != 0)
893                 t4_detach(dev);
894         else
895                 t4_sysctls(sc);
896
897         return (rc);
898 }
899
/*
 * Device detach method for the T4/T5 nexus.  Idempotent: the softc is
 * bzero'd at the end so a repeated call finds nothing left to tear down.
 * Also used as the error-recovery path at the end of t4_attach.
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Quiesce the hardware's interrupts before dismantling anything. */
        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        /* Remove the control device so cxgbetool can no longer reach us. */
        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /*
         * Detach the child port devices first; bail out (and leave the
         * adapter intact) if any of them refuses to go.
         */
        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        /* Release every allocated interrupt vector. */
        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        /*
         * Free the firmware virtual interface for each port, delete the
         * (already detached) child device, and free the port_info itself.
         */
        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Tell the firmware we're going away; releases the mailbox. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        /* Release the PCI BARs that map_bars_0_and_4/map_bar_2 mapped. */
        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

        /* free(NULL) is a no-op, so unallocated queue arrays are fine. */
#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
        free(sc->sge.nm_rxq, M_CXGBE);
        free(sc->sge.nm_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                /* Remove this adapter from the global list of adapters. */
                sx_xlock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                sx_xunlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);
        if (mtx_initialized(&sc->ifp_lock))
                mtx_destroy(&sc->ifp_lock);

        /* Wipe the softc so a second detach call is a clean no-op. */
        bzero(sc, sizeof(*sc));

        return (0);
}
1000
1001 static int
1002 cxgbe_probe(device_t dev)
1003 {
1004         char buf[128];
1005         struct port_info *pi = device_get_softc(dev);
1006
1007         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
1008         device_set_desc_copy(dev, buf);
1009
1010         return (BUS_PROBE_DEFAULT);
1011 }
1012
/* Interface capabilities the driver supports ... */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
/* ... and the subset enabled by default (currently everything). */
#define T4_CAP_ENABLE (T4_CAP)
1017
/*
 * Attach method for a cxgbe port: creates and configures the ifnet, sets
 * up ifmedia, attaches to the ethernet layer, and reports the queue
 * configuration chosen for this port during the nexus attach.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;
        char *s;
        int n, o;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        /* Driver entry points for the network stack. */
        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        /* Advertise TOE only if the adapter is offload capable. */
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        ifp->if_capenable = T4_CAP_ENABLE;
        /* CSUM_TSO is always set; see the note in cxgbe_ioctl (SIOCSIFCAP). */
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi, &pi->media);

        pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, pi->hw_addr);

        /*
         * Build and log a one-line summary of this port's queues.  The
         * MPASS checks (INVARIANTS only) ensure the 128-byte buffer was
         * not truncated by snprintf.
         */
        n = 128;
        s = malloc(n, M_CXGBE, M_WAITOK);
        o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
        MPASS(n > o);
#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
                    pi->nofldtxq, pi->nofldrxq);
                MPASS(n > o);
        }
#endif
#ifdef DEV_NETMAP
        o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
            pi->nnmrxq);
        MPASS(n > o);
#endif
        device_printf(dev, "%s\n", s);
        free(s, M_CXGBE);

#ifdef DEV_NETMAP
        /* nm_media handled here to keep implementation private to this file */
        ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi, &pi->nm_media);
        create_netmap_ifnet(pi);        /* logs errors if something fails */
#endif
        cxgbe_sysctls(pi);

        return (0);
}
1094
/*
 * Detach method for a cxgbe port.  Marks the port DOOMED and takes the
 * adapter BUSY for the duration, so concurrent ioctl/init operations
 * either finish first or see the port going away.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait for any in-progress synchronized operation to finish. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        if (pi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

        /* Stop the tick callout; drain outside the lock it may take. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

#ifdef DEV_NETMAP
        /* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
        destroy_netmap_ifnet(pi);
#endif

        /* Release the BUSY marker and wake up anyone waiting for it. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
1149
1150 static void
1151 cxgbe_init(void *arg)
1152 {
1153         struct port_info *pi = arg;
1154         struct adapter *sc = pi->adapter;
1155
1156         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1157                 return;
1158         cxgbe_init_synchronized(pi);
1159         end_synchronized_op(sc, 0);
1160 }
1161
/*
 * if_ioctl entry point.  Every operation that touches the hardware runs
 * under begin_synchronized_op/end_synchronized_op.  Some ioctls arrive
 * with a mutex held (SIOCADDMULTI/SIOCDELMULTI) and must not sleep;
 * SIOCSIFFLAGS up/downgrades between sleepable and non-sleepable modes
 * via the redo_sifflags retry loop.
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags, can_sleep;
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
                        return (EINVAL);

                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (pi->flags & PORT_INIT_DONE) {
                        /* Resize rx free-list buffers to suit the new MTU. */
                        t4_update_fl_bufsize(ifp);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_MTU);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                /*
                 * Start in non-sleepable (HOLD_LOCK) mode; if the needed
                 * action requires sleeping (init/uninit), drop the op and
                 * retry in sleepable mode (and vice versa for MAC updates).
                 */
                can_sleep = 0;
redo_sifflags:
                rc = begin_synchronized_op(sc, pi,
                    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
                if (rc)
                        return (rc);

                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                flags = pi->if_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        /* MAC update needs HOLD_LOCK mode. */
                                        if (can_sleep == 1) {
                                                end_synchronized_op(sc, 0);
                                                can_sleep = 0;
                                                goto redo_sifflags;
                                        }
                                        rc = update_mac_settings(ifp,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else {
                                /* Bringing the port up may sleep. */
                                if (can_sleep == 0) {
                                        end_synchronized_op(sc, LOCK_HELD);
                                        can_sleep = 1;
                                        goto redo_sifflags;
                                }
                                rc = cxgbe_init_synchronized(pi);
                        }
                        /* Remember the flags we last programmed. */
                        pi->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        /* Taking the port down may sleep too. */
                        if (can_sleep == 0) {
                                end_synchronized_op(sc, LOCK_HELD);
                                can_sleep = 1;
                                goto redo_sifflags;
                        }
                        rc = cxgbe_uninit_synchronized(pi);
                }
                end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI: /* these two are called with a mutex held :-( */
                rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(ifp, XGMAC_MCADDRS);
                end_synchronized_op(sc, LOCK_HELD);
                break;

        case SIOCSIFCAP:
                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
                if (rc)
                        return (rc);

                /* mask has a bit set for each capability being toggled. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        /* TSO4 depends on tx checksumming; drop it too. */
                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        /* Likewise TSO6 depends on IPv6 tx checksumming. */
                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
                 * sending a TSO request our way, so it's sufficient to toggle
                 * IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
                        int i;
                        struct sge_rxq *rxq;

                        /* Propagate the new LRO setting to every rx queue. */
                        ifp->if_capenable ^= IFCAP_LRO;
                        for_each_rxq(pi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_LRO)
                                        rxq->iq.flags |= IQ_LRO_ENABLED;
                                else
                                        rxq->iq.flags &= ~IQ_LRO_ENABLED;
                        }
#endif
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

                        rc = toe_capability(pi, enable);
                        if (rc != 0)
                                goto fail;

                        ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_VLANEX);
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;

                        /* Need to find out how to disable auto-mtu-inflation */
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
fail:
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
                break;

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}
1355
/*
 * if_transmit entry point.  Picks a tx queue for the mbuf (flowid-based
 * when the mbuf carries one, reserving rsrv_noflowq queues for traffic
 * without a flowid), then either enqueues to the queue's buf_ring or
 * transmits directly if the queue lock is available.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        /* No link: drop the packet rather than queue it. */
        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Spread flows across the port's queues, past the reserved ones. */
        if (m->m_flags & M_FLOWID)
                txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
                    + pi->rsrv_noflowq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                /* Schedule the tx callout to pick the mbuf up, unless one
                 * is already pending or the queue is being destroyed. */
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
1423
1424 static void
1425 cxgbe_qflush(struct ifnet *ifp)
1426 {
1427         struct port_info *pi = ifp->if_softc;
1428         struct sge_txq *txq;
1429         int i;
1430         struct mbuf *m;
1431
1432         /* queues do not exist if !PORT_INIT_DONE. */
1433         if (pi->flags & PORT_INIT_DONE) {
1434                 for_each_txq(pi, i, txq) {
1435                         TXQ_LOCK(txq);
1436                         m_freem(txq->m);
1437                         txq->m = NULL;
1438                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1439                                 m_freem(m);
1440                         TXQ_UNLOCK(txq);
1441                 }
1442         }
1443         if_qflush(ifp);
1444 }
1445
1446 static int
1447 cxgbe_media_change(struct ifnet *ifp)
1448 {
1449         struct port_info *pi = ifp->if_softc;
1450
1451         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1452
1453         return (EOPNOTSUPP);
1454 }
1455
/*
 * ifmedia status callback, shared by the regular and (DEV_NETMAP) netmap
 * ifnets.  Rebuilds the media list if the transceiver/module changed,
 * then reports link state and, for autoselect media, the active subtype.
 */
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct port_info *pi = ifp->if_softc;
        struct ifmedia *media = NULL;
        struct ifmedia_entry *cur;
        int speed = pi->link_cfg.speed;
        /* Encodes port type + module type; changes when a module is swapped. */
        int data = (pi->port_type << 8) | pi->mod_type;

        if (ifp == pi->ifp)
                media = &pi->media;
#ifdef DEV_NETMAP
        else if (ifp == pi->nm_ifp)
                media = &pi->nm_media;
#endif
        MPASS(media != NULL);

        cur = media->ifm_cur;
        if (cur->ifm_data != data) {
                /* Module changed since the list was built; rebuild it. */
                build_medialist(pi, media);
                cur = media->ifm_cur;
        }

        ifmr->ifm_status = IFM_AVALID;
        if (!pi->link_cfg.link_ok)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;

        /* active and current will differ iff current media is autoselect. */
        if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
                return;

        /*
         * NOTE(review): there is no mapping here for 40G speeds even though
         * is_40G_port() is used elsewhere in this file; a 40G link on
         * autoselect media would reach the KASSERT below — confirm whether
         * 40G ports always use non-auto media at this revision.
         */
        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
        if (speed == SPEED_10000)
                ifmr->ifm_active |= IFM_10G_T;
        else if (speed == SPEED_1000)
                ifmr->ifm_active |= IFM_1000_T;
        else if (speed == SPEED_100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (speed == SPEED_10)
                ifmr->ifm_active |= IFM_10_T;
        else
                KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
                            speed));
}
1502
/*
 * Fatal-error handler: stop the SGE and mask all interrupts (order
 * matters — silence the hardware first), then log at LOG_EMERG.  The
 * adapter is left stopped; no recovery is attempted here.
 */
void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
1511
/*
 * Map BAR0 (register space) and BAR4 (MSI-X tables).  On failure the
 * resources already allocated are left in the softc for t4_detach to
 * release; nothing is unwound here.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        /* The kernel doorbell (via BAR0) is always available. */
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
1537
/*
 * Map BAR2 (userspace doorbells).  On T5 this also tries to enable
 * write-combining on the BAR (i386/amd64 only) so tx work requests can
 * be submitted through the doorbell region.
 */
static int
map_bar_2(struct adapter *sc)
{

        /*
         * T4: only iWARP driver uses the userspace doorbells.  There is no need
         * to map it if RDMA is disabled.
         */
        if (is_t4(sc) && sc->rdmacaps == 0)
                return (0);

        sc->udbs_rid = PCIR_BAR(2);
        sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE);
        if (sc->udbs_res == NULL) {
                device_printf(sc->dev, "cannot map doorbell BAR.\n");
                return (ENXIO);
        }
        sc->udbs_base = rman_get_virtual(sc->udbs_res);

        if (is_t5(sc)) {
                setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
                if (t5_write_combine) {
                        int rc;

                        /*
                         * Enable write combining on BAR2.  This is the
                         * userspace doorbell BAR and is split into 128B
                         * (UDBS_SEG_SIZE) doorbell regions, each associated
                         * with an egress queue.  The first 64B has the doorbell
                         * and the second 64B can be used to submit a tx work
                         * request with an implicit doorbell.
                         */

                        rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
                            rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
                        if (rc == 0) {
                                /* WC worked: prefer WCWR/UDBWC over plain UDB. */
                                clrbit(&sc->doorbells, DOORBELL_UDB);
                                setbit(&sc->doorbells, DOORBELL_WCWR);
                                setbit(&sc->doorbells, DOORBELL_UDBWC);
                        } else {
                                /* Non-fatal: fall back to the plain doorbell. */
                                device_printf(sc->dev,
                                    "couldn't enable write combining: %d\n",
                                    rc);
                        }

                        t4_write_reg(sc, A_SGE_STAT_CFG,
                            V_STATSOURCE_T5(7) | V_STATMODE(0));
                }
#endif
        }

        return (0);
}
1593
/* PCIe memory-window layouts ({base, aperture} per window) for T4 and T5;
 * window 2 differs between the two chip generations. */
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1605
/*
 * Program the chip's PCIe memory-window decoders from the t4/t5 tables
 * above.  On T4 the window base must be the absolute bus address of
 * BAR0; on T5 it is relative to the BAR.
 */
static void
setup_memwin(struct adapter *sc)
{
        const struct memwin *mw;
        int i, n;
        uint32_t bar0;

        if (is_t4(sc)) {
                /*
                 * Read low 32b of bar0 indirectly via the hardware backdoor
                 * mechanism.  Works from within PCI passthrough environments
                 * too, where rman_get_start() can return a different value.  We
                 * need to program the T4 memory window decoders with the actual
                 * addresses that will be coming across the PCIe link.
                 */
                bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
                bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

                mw = &t4_memwin[0];
                n = nitems(t4_memwin);
        } else {
                /* T5 uses the relative offset inside the PCIe BAR */
                bar0 = 0;

                mw = &t5_memwin[0];
                n = nitems(t5_memwin);
        }

        /* Window size is encoded as log2(aperture) - 10. */
        for (i = 0; i < n; i++, mw++) {
                t4_write_reg(sc,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
                    (mw->base + bar0) | V_BIR(0) |
                    V_WINDOW(ilog2(mw->aperture) - 10));
        }

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1644
1645 /*
1646  * Verify that the memory range specified by the addr/len pair is valid and lies
1647  * entirely within a single region (EDCx or MCx).
1648  */
1649 static int
1650 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1651 {
1652         uint32_t em, addr_len, maddr, mlen;
1653
1654         /* Memory can only be accessed in naturally aligned 4 byte units */
1655         if (addr & 3 || len & 3 || len == 0)
1656                 return (EINVAL);
1657
1658         /* Enabled memories */
1659         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1660         if (em & F_EDRAM0_ENABLE) {
1661                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1662                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1663                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1664                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1665                     addr + len <= maddr + mlen)
1666                         return (0);
1667         }
1668         if (em & F_EDRAM1_ENABLE) {
1669                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1670                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1671                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1672                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1673                     addr + len <= maddr + mlen)
1674                         return (0);
1675         }
1676         if (em & F_EXT_MEM_ENABLE) {
1677                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1678                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1679                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1680                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1681                     addr + len <= maddr + mlen)
1682                         return (0);
1683         }
1684         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1685                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1686                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1687                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1688                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1689                     addr + len <= maddr + mlen)
1690                         return (0);
1691         }
1692
1693         return (EFAULT);
1694 }
1695
1696 static int
1697 fwmtype_to_hwmtype(int mtype)
1698 {
1699
1700         switch (mtype) {
1701         case FW_MEMTYPE_EDC0:
1702                 return (MEM_EDC0);
1703         case FW_MEMTYPE_EDC1:
1704                 return (MEM_EDC1);
1705         case FW_MEMTYPE_EXTMEM:
1706                 return (MEM_MC0);
1707         case FW_MEMTYPE_EXTMEM1:
1708                 return (MEM_MC1);
1709         default:
1710                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1711         }
1712 }
1713
1714 /*
1715  * Verify that the memory range specified by the memtype/offset/len pair is
1716  * valid and lies entirely within the memtype specified.  The global address of
1717  * the start of the range is returned in addr.
1718  */
1719 static int
1720 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1721     uint32_t *addr)
1722 {
1723         uint32_t em, addr_len, maddr, mlen;
1724
1725         /* Memory can only be accessed in naturally aligned 4 byte units */
1726         if (off & 3 || len & 3 || len == 0)
1727                 return (EINVAL);
1728
1729         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1730         switch (fwmtype_to_hwmtype(mtype)) {
1731         case MEM_EDC0:
1732                 if (!(em & F_EDRAM0_ENABLE))
1733                         return (EINVAL);
1734                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1735                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1736                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1737                 break;
1738         case MEM_EDC1:
1739                 if (!(em & F_EDRAM1_ENABLE))
1740                         return (EINVAL);
1741                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1742                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1743                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1744                 break;
1745         case MEM_MC:
1746                 if (!(em & F_EXT_MEM_ENABLE))
1747                         return (EINVAL);
1748                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1749                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1750                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1751                 break;
1752         case MEM_MC1:
1753                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1754                         return (EINVAL);
1755                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1756                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1757                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1758                 break;
1759         default:
1760                 return (EINVAL);
1761         }
1762
1763         if (mlen > 0 && off < mlen && off + len <= mlen) {
1764                 *addr = maddr + off;    /* global address */
1765                 return (0);
1766         }
1767
1768         return (EFAULT);
1769 }
1770
1771 static void
1772 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1773 {
1774         const struct memwin *mw;
1775
1776         if (is_t4(sc)) {
1777                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1778                     ("%s: incorrect memwin# (%d)", __func__, win));
1779                 mw = &t4_memwin[win];
1780         } else {
1781                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1782                     ("%s: incorrect memwin# (%d)", __func__, win));
1783                 mw = &t5_memwin[win];
1784         }
1785
1786         if (base != NULL)
1787                 *base = mw->base;
1788         if (aperture != NULL)
1789                 *aperture = mw->aperture;
1790 }
1791
1792 /*
1793  * Positions the memory window such that it can be used to access the specified
1794  * address in the chip's address space.  The return value is the offset of addr
1795  * from the start of the window.
1796  */
1797 static uint32_t
1798 position_memwin(struct adapter *sc, int n, uint32_t addr)
1799 {
1800         uint32_t start, pf;
1801         uint32_t reg;
1802
1803         KASSERT(n >= 0 && n <= 3,
1804             ("%s: invalid window %d.", __func__, n));
1805         KASSERT((addr & 3) == 0,
1806             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1807
1808         if (is_t4(sc)) {
1809                 pf = 0;
1810                 start = addr & ~0xf;    /* start must be 16B aligned */
1811         } else {
1812                 pf = V_PFNUM(sc->pf);
1813                 start = addr & ~0x7f;   /* start must be 128B aligned */
1814         }
1815         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1816
1817         t4_write_reg(sc, reg, start | pf);
1818         t4_read_reg(sc, reg);
1819
1820         return (addr - start);
1821 }
1822
1823 static int
1824 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1825     struct intrs_and_queues *iaq)
1826 {
1827         int rc, itype, navail, nrxq10g, nrxq1g, n;
1828         int nofldrxq10g = 0, nofldrxq1g = 0;
1829         int nnmrxq10g = 0, nnmrxq1g = 0;
1830
1831         bzero(iaq, sizeof(*iaq));
1832
1833         iaq->ntxq10g = t4_ntxq10g;
1834         iaq->ntxq1g = t4_ntxq1g;
1835         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1836         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1837         iaq->rsrv_noflowq = t4_rsrv_noflowq;
1838 #ifdef TCP_OFFLOAD
1839         if (is_offload(sc)) {
1840                 iaq->nofldtxq10g = t4_nofldtxq10g;
1841                 iaq->nofldtxq1g = t4_nofldtxq1g;
1842                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1843                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1844         }
1845 #endif
1846 #ifdef DEV_NETMAP
1847         iaq->nnmtxq10g = t4_nnmtxq10g;
1848         iaq->nnmtxq1g = t4_nnmtxq1g;
1849         iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
1850         iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
1851 #endif
1852
1853         for (itype = INTR_MSIX; itype; itype >>= 1) {
1854
1855                 if ((itype & t4_intr_types) == 0)
1856                         continue;       /* not allowed */
1857
1858                 if (itype == INTR_MSIX)
1859                         navail = pci_msix_count(sc->dev);
1860                 else if (itype == INTR_MSI)
1861                         navail = pci_msi_count(sc->dev);
1862                 else
1863                         navail = 1;
1864 restart:
1865                 if (navail == 0)
1866                         continue;
1867
1868                 iaq->intr_type = itype;
1869                 iaq->intr_flags_10g = 0;
1870                 iaq->intr_flags_1g = 0;
1871
1872                 /*
1873                  * Best option: an interrupt vector for errors, one for the
1874                  * firmware event queue, and one for every rxq (NIC, TOE, and
1875                  * netmap).
1876                  */
1877                 iaq->nirq = T4_EXTRA_INTR;
1878                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
1879                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
1880                 if (iaq->nirq <= navail &&
1881                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1882                         iaq->intr_flags_10g = INTR_ALL;
1883                         iaq->intr_flags_1g = INTR_ALL;
1884                         goto allocate;
1885                 }
1886
1887                 /*
1888                  * Second best option: a vector for errors, one for the firmware
1889                  * event queue, and vectors for either all the NIC rx queues or
1890                  * all the TOE rx queues.  The queues that don't get vectors
1891                  * will forward their interrupts to those that do.
1892                  *
1893                  * Note: netmap rx queues cannot be created early and so they
1894                  * can't be setup to receive forwarded interrupts for others.
1895                  */
1896                 iaq->nirq = T4_EXTRA_INTR;
1897                 if (nrxq10g >= nofldrxq10g) {
1898                         iaq->intr_flags_10g = INTR_RXQ;
1899                         iaq->nirq += n10g * nrxq10g;
1900 #ifdef DEV_NETMAP
1901                         iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
1902 #endif
1903                 } else {
1904                         iaq->intr_flags_10g = INTR_OFLD_RXQ;
1905                         iaq->nirq += n10g * nofldrxq10g;
1906 #ifdef DEV_NETMAP
1907                         iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
1908 #endif
1909                 }
1910                 if (nrxq1g >= nofldrxq1g) {
1911                         iaq->intr_flags_1g = INTR_RXQ;
1912                         iaq->nirq += n1g * nrxq1g;
1913 #ifdef DEV_NETMAP
1914                         iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
1915 #endif
1916                 } else {
1917                         iaq->intr_flags_1g = INTR_OFLD_RXQ;
1918                         iaq->nirq += n1g * nofldrxq1g;
1919 #ifdef DEV_NETMAP
1920                         iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
1921 #endif
1922                 }
1923                 if (iaq->nirq <= navail &&
1924                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1925                         goto allocate;
1926
1927                 /*
1928                  * Next best option: an interrupt vector for errors, one for the
1929                  * firmware event queue, and at least one per port.  At this
1930                  * point we know we'll have to downsize nrxq and/or nofldrxq
1931                  * and/or nnmrxq to fit what's available to us.
1932                  */
1933                 iaq->nirq = T4_EXTRA_INTR;
1934                 iaq->nirq += n10g + n1g;
1935                 if (iaq->nirq <= navail) {
1936                         int leftover = navail - iaq->nirq;
1937
1938                         if (n10g > 0) {
1939                                 int target = max(nrxq10g, nofldrxq10g);
1940
1941                                 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
1942                                     INTR_RXQ : INTR_OFLD_RXQ;
1943
1944                                 n = 1;
1945                                 while (n < target && leftover >= n10g) {
1946                                         leftover -= n10g;
1947                                         iaq->nirq += n10g;
1948                                         n++;
1949                                 }
1950                                 iaq->nrxq10g = min(n, nrxq10g);
1951 #ifdef TCP_OFFLOAD
1952                                 iaq->nofldrxq10g = min(n, nofldrxq10g);
1953 #endif
1954 #ifdef DEV_NETMAP
1955                                 iaq->nnmrxq10g = min(n, nnmrxq10g);
1956 #endif
1957                         }
1958
1959                         if (n1g > 0) {
1960                                 int target = max(nrxq1g, nofldrxq1g);
1961
1962                                 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
1963                                     INTR_RXQ : INTR_OFLD_RXQ;
1964
1965                                 n = 1;
1966                                 while (n < target && leftover >= n1g) {
1967                                         leftover -= n1g;
1968                                         iaq->nirq += n1g;
1969                                         n++;
1970                                 }
1971                                 iaq->nrxq1g = min(n, nrxq1g);
1972 #ifdef TCP_OFFLOAD
1973                                 iaq->nofldrxq1g = min(n, nofldrxq1g);
1974 #endif
1975 #ifdef DEV_NETMAP
1976                                 iaq->nnmrxq1g = min(n, nnmrxq1g);
1977 #endif
1978                         }
1979
1980                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1981                                 goto allocate;
1982                 }
1983
1984                 /*
1985                  * Least desirable option: one interrupt vector for everything.
1986                  */
1987                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1988                 iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
1989 #ifdef TCP_OFFLOAD
1990                 if (is_offload(sc))
1991                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1992 #endif
1993 #ifdef DEV_NETMAP
1994                 iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
1995 #endif
1996
1997 allocate:
1998                 navail = iaq->nirq;
1999                 rc = 0;
2000                 if (itype == INTR_MSIX)
2001                         rc = pci_alloc_msix(sc->dev, &navail);
2002                 else if (itype == INTR_MSI)
2003                         rc = pci_alloc_msi(sc->dev, &navail);
2004
2005                 if (rc == 0) {
2006                         if (navail == iaq->nirq)
2007                                 return (0);
2008
2009                         /*
2010                          * Didn't get the number requested.  Use whatever number
2011                          * the kernel is willing to allocate (it's in navail).
2012                          */
2013                         device_printf(sc->dev, "fewer vectors than requested, "
2014                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
2015                             itype, iaq->nirq, navail);
2016                         pci_release_msi(sc->dev);
2017                         goto restart;
2018                 }
2019
2020                 device_printf(sc->dev,
2021                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
2022                     itype, rc, iaq->nirq, navail);
2023         }
2024
2025         device_printf(sc->dev,
2026             "failed to find a usable interrupt type.  "
2027             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
2028             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
2029
2030         return (ENXIO);
2031 }
2032
/*
 * Build the 32b firmware version word (major/minor/micro/build fields) for
 * the bundled firmware of the given chip (token-pasted T4/T5 prefix).
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface version constant for a given chip and interface (NIC, OFLD, ...). */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
2039
/*
 * Per-chip firmware information: the KLD names for the config-file and
 * firmware modules, and the firmware header this driver was compiled
 * against (used by fw_compatible() for interface-version checks).
 */
struct fw_info {
	uint8_t chip;		/* CHELSIO_T4 or CHELSIO_T5 */
	char *kld_name;		/* module holding the default config file */
	char *fw_mod_name;	/* module holding the firmware image */
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
2080
2081 static struct fw_info *
2082 find_fw_info(int chip)
2083 {
2084         int i;
2085
2086         for (i = 0; i < nitems(fw_info); i++) {
2087                 if (fw_info[i].chip == chip)
2088                         return (&fw_info[i]);
2089         }
2090         return (NULL);
2091 }
2092
2093 /*
2094  * Is the given firmware API compatible with the one the driver was compiled
2095  * with?
2096  */
2097 static int
2098 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
2099 {
2100
2101         /* short circuit if it's the exact same firmware version */
2102         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
2103                 return (1);
2104
2105         /*
2106          * XXX: Is this too conservative?  Perhaps I should limit this to the
2107          * features that are supported in the driver.
2108          */
2109 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
2110         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
2111             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
2112             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
2113                 return (1);
2114 #undef SAME_INTF
2115
2116         return (0);
2117 }
2118
2119 /*
2120  * The firmware in the KLD is usable, but should it be installed?  This routine
2121  * explains itself in detail if it indicates the KLD firmware should be
2122  * installed.
2123  */
2124 static int
2125 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
2126 {
2127         const char *reason;
2128
2129         if (!card_fw_usable) {
2130                 reason = "incompatible or unusable";
2131                 goto install;
2132         }
2133
2134         if (k > c) {
2135                 reason = "older than the version bundled with this driver";
2136                 goto install;
2137         }
2138
2139         if (t4_fw_install == 2 && k != c) {
2140                 reason = "different than the version bundled with this driver";
2141                 goto install;
2142         }
2143
2144         return (0);
2145
2146 install:
2147         if (t4_fw_install == 0) {
2148                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2149                     "but the driver is prohibited from installing a different "
2150                     "firmware on the card.\n",
2151                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2152                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2153
2154                 return (0);
2155         }
2156
2157         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2158             "installing firmware %u.%u.%u.%u on card.\n",
2159             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2160             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2161             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2162             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2163
2164         return (1);
2165 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	pf = rc;	/* non-negative hello return identifies the master PF */
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		/* header stays zeroed (M_ZERO) so version prints as 0.0.0.0 */
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Someone else configured the chip; just record our PF. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2346
/* Shorthand for a fully-specified device-scope firmware parameter id. */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
/* Shorthand for a fully-specified PF/VF-scope firmware parameter id. */
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2353
/*
 * Partition chip resources for use between various PFs, VFs, etc.
 *
 * Partitioning is driven by a configuration file that is either bundled
 * with the driver as a firmware(9) module or already present in the card's
 * flash.  Unless the flash copy is used, the file is copied into card
 * memory first.  The firmware pre-processes the file (FW_CAPS_CONFIG_CMD
 * READ) and reports the capabilities it would grant; the driver trims
 * those to what it is configured to use and commits them with a WRITE of
 * the same command.
 *
 * default_cfg is the default config module for this card (may be NULL).
 * name_prefix (e.g. "t4fw_cfg") is used to construct the module name for
 * non-default profiles.  Returns 0 or a positive errno.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
        const struct firmware *cfg = NULL;      /* profile-specific module */
        int rc = 0;
        struct fw_caps_config_cmd caps;
        uint32_t mtype, moff, finicsum, cfcsum;

        /*
         * Figure out what configuration file to use.  Pick the default config
         * file for the card if the user hasn't specified one explicitly.
         */
        snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
        if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
                /* Card specific overrides go here. */
                if (pci_get_device(sc->dev) == 0x440a)
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
                if (is_fpga(sc))
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
        }

        /*
         * We need to load another module if the profile is anything except
         * "default" or "flash".
         */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
            strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                char s[32];

                /* Module name is "<name_prefix>_<profile>". */
                snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
                cfg = firmware_get(s);
                if (cfg == NULL) {
                        /*
                         * Module unavailable: fall back to the default config
                         * if the caller provided one, else to the card's
                         * flash.  sc->cfg_file is rewritten so the rest of
                         * this function sees the config actually in use.
                         */
                        if (default_cfg != NULL) {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the default config file instead.\n",
                                    s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", DEFAULT_CF);
                        } else {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the config file on the card's flash "
                                    "instead.\n", s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", FLASH_CF);
                        }
                }
        }

        /* Default requested but not available: fall back to flash. */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
            default_cfg == NULL) {
                device_printf(sc->dev,
                    "default config file not available, will use the config "
                    "file on the card's flash instead.\n");
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
        }

        if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                u_int cflen, i, n;
                const uint32_t *cfdata;
                uint32_t param, val, addr, off, mw_base, mw_aperture;

                KASSERT(cfg != NULL || default_cfg != NULL,
                    ("%s: no config to upload", __func__));

                /*
                 * Ask the firmware where it wants us to upload the config file.
                 */
                param = FW_PARAM_DEV(CF);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* No support for config file?  Shouldn't happen. */
                        device_printf(sc->dev,
                            "failed to query config file location: %d.\n", rc);
                        goto done;
                }
                /* Memory type and offset (in 64KB units) come back packed. */
                mtype = G_FW_PARAMS_PARAM_Y(val);
                moff = G_FW_PARAMS_PARAM_Z(val) << 16;

                /*
                 * XXX: sheer laziness.  We deliberately added 4 bytes of
                 * useless stuffing/comments at the end of the config file so
                 * it's ok to simply throw away the last remaining bytes when
                 * the config file is not an exact multiple of 4.  This also
                 * helps with the validate_mt_off_len check.
                 */
                if (cfg != NULL) {
                        cflen = cfg->datasize & ~3;
                        cfdata = cfg->data;
                } else {
                        cflen = default_cfg->datasize & ~3;
                        cfdata = default_cfg->data;
                }

                if (cflen > FLASH_CFG_MAX_SIZE) {
                        device_printf(sc->dev,
                            "config file too long (%d, max allowed is %d).  "
                            "Will try to use the config on the card, if any.\n",
                            cflen, FLASH_CFG_MAX_SIZE);
                        goto use_config_on_flash;
                }

                rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                            "Will try to use the config on the card, if any.\n",
                            __func__, mtype, moff, cflen, rc);
                        goto use_config_on_flash;
                }

                /*
                 * Copy the file into card memory 32 bits at a time through
                 * PCIe memory window 2, repositioning the window whenever
                 * the remaining data extends past its aperture.
                 */
                memwin_info(sc, 2, &mw_base, &mw_aperture);
                while (cflen) {
                        off = position_memwin(sc, 2, addr);
                        n = min(cflen, mw_aperture - off);
                        for (i = 0; i < n; i += 4)
                                t4_write_reg(sc, mw_base + off + i, *cfdata++);
                        cflen -= n;
                        addr += n;
                }
        } else {
use_config_on_flash:
                /*
                 * Either the flash config was requested explicitly, or the
                 * upload wasn't possible (the gotos above land here); point
                 * the firmware at the config stored in its flash.
                 */
                mtype = FW_MEMTYPE_FLASH;
                moff = t4_flash_cfg_addr(sc);
        }

        /*
         * Have the firmware pre-process the config at (mtype, moff) and
         * report the capabilities it would grant.
         */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to pre-process config file: %d "
                    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
                goto done;
        }

        /* A mismatch between the two reported checksums is not fatal. */
        finicsum = be32toh(caps.finicsum);
        cfcsum = be32toh(caps.cfcsum);
        if (finicsum != cfcsum) {
                device_printf(sc->dev,
                    "WARNING: config file checksum mismatch: %08x %08x\n",
                    finicsum, cfcsum);
        }
        sc->cfcsum = cfcsum;

/* Mask a (big-endian) capability field down to what the driver allows. */
#define LIMIT_CAPS(x) do { \
        caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

        /*
         * Let the firmware know what features will (not) be used so it can tune
         * things accordingly.
         */
        LIMIT_CAPS(linkcaps);
        LIMIT_CAPS(niccaps);
        LIMIT_CAPS(toecaps);
        LIMIT_CAPS(rdmacaps);
        LIMIT_CAPS(iscsicaps);
        LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

        /* Commit the (possibly trimmed) capabilities. */
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to process config file: %d.\n", rc);
        }
done:
        if (cfg != NULL)
                firmware_put(cfg, FIRMWARE_UNLOAD);
        return (rc);
}
2539
2540 /*
2541  * Retrieve parameters that are needed (or nice to have) very early.
2542  */
static int
get_params__pre_init(struct adapter *sc)
{
        int rc;
        uint32_t param[2], val[2];
        struct fw_devlog_cmd cmd;
        struct devlog_params *dlog = &sc->params.devlog;

        /* PORTVEC: bitmap of ports; CCLK: clock parameter stored in vpd. */
        param[0] = FW_PARAM_DEV(PORTVEC);
        param[1] = FW_PARAM_DEV(CCLK);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (pre_init): %d.\n", rc);
                return (rc);
        }

        sc->params.portvec = val[0];
        sc->params.nports = bitcount32(val[0]); /* one bit per port */
        sc->params.vpd.cclk = val[1];

        /* Read device log parameters. */
        bzero(&cmd, sizeof(cmd));
        cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        cmd.retval_len16 = htobe32(FW_LEN16(cmd));
        rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get devlog parameters: %d.\n", rc);
                bzero(dlog, sizeof (*dlog));
                rc = 0; /* devlog isn't critical for device operation */
        } else {
                val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
                dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
                /* Address is reported in 16B units; convert to bytes. */
                dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
                dlog->size = be32toh(cmd.memsize_devlog);
        }

        return (rc);
}
2584
2585 /*
2586  * Retrieve various parameters that are of interest to the driver.  The device
2587  * has been initialized by the firmware at this point.
2588  */
2589 static int
2590 get_params__post_init(struct adapter *sc)
2591 {
2592         int rc;
2593         uint32_t param[7], val[7];
2594         struct fw_caps_config_cmd caps;
2595
2596         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2597         param[1] = FW_PARAM_PFVF(EQ_START);
2598         param[2] = FW_PARAM_PFVF(FILTER_START);
2599         param[3] = FW_PARAM_PFVF(FILTER_END);
2600         param[4] = FW_PARAM_PFVF(L2T_START);
2601         param[5] = FW_PARAM_PFVF(L2T_END);
2602         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2603         if (rc != 0) {
2604                 device_printf(sc->dev,
2605                     "failed to query parameters (post_init): %d.\n", rc);
2606                 return (rc);
2607         }
2608
2609         sc->sge.iq_start = val[0];
2610         sc->sge.eq_start = val[1];
2611         sc->tids.ftid_base = val[2];
2612         sc->tids.nftids = val[3] - val[2] + 1;
2613         sc->params.ftid_min = val[2];
2614         sc->params.ftid_max = val[3];
2615         sc->vres.l2t.start = val[4];
2616         sc->vres.l2t.size = val[5] - val[4] + 1;
2617         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2618             ("%s: L2 table size (%u) larger than expected (%u)",
2619             __func__, sc->vres.l2t.size, L2T_SIZE));
2620
2621         /* get capabilites */
2622         bzero(&caps, sizeof(caps));
2623         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2624             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2625         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2626         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2627         if (rc != 0) {
2628                 device_printf(sc->dev,
2629                     "failed to get card capabilities: %d.\n", rc);
2630                 return (rc);
2631         }
2632
2633 #define READ_CAPS(x) do { \
2634         sc->x = htobe16(caps.x); \
2635 } while (0)
2636         READ_CAPS(linkcaps);
2637         READ_CAPS(niccaps);
2638         READ_CAPS(toecaps);
2639         READ_CAPS(rdmacaps);
2640         READ_CAPS(iscsicaps);
2641         READ_CAPS(fcoecaps);
2642
2643         if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2644                 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2645                 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2646                 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2647                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2648                 if (rc != 0) {
2649                         device_printf(sc->dev,
2650                             "failed to query NIC parameters: %d.\n", rc);
2651                         return (rc);
2652                 }
2653                 sc->tids.etid_base = val[0];
2654                 sc->params.etid_min = val[0];
2655                 sc->tids.netids = val[1] - val[0] + 1;
2656                 sc->params.netids = sc->tids.netids;
2657                 sc->params.eo_wr_cred = val[2];
2658                 sc->params.ethoffload = 1;
2659         }
2660
2661         if (sc->toecaps) {
2662                 /* query offload-related parameters */
2663                 param[0] = FW_PARAM_DEV(NTID);
2664                 param[1] = FW_PARAM_PFVF(SERVER_START);
2665                 param[2] = FW_PARAM_PFVF(SERVER_END);
2666                 param[3] = FW_PARAM_PFVF(TDDP_START);
2667                 param[4] = FW_PARAM_PFVF(TDDP_END);
2668                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2669                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2670                 if (rc != 0) {
2671                         device_printf(sc->dev,
2672                             "failed to query TOE parameters: %d.\n", rc);
2673                         return (rc);
2674                 }
2675                 sc->tids.ntids = val[0];
2676                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2677                 sc->tids.stid_base = val[1];
2678                 sc->tids.nstids = val[2] - val[1] + 1;
2679                 sc->vres.ddp.start = val[3];
2680                 sc->vres.ddp.size = val[4] - val[3] + 1;
2681                 sc->params.ofldq_wr_cred = val[5];
2682                 sc->params.offload = 1;
2683         }
2684         if (sc->rdmacaps) {
2685                 param[0] = FW_PARAM_PFVF(STAG_START);
2686                 param[1] = FW_PARAM_PFVF(STAG_END);
2687                 param[2] = FW_PARAM_PFVF(RQ_START);
2688                 param[3] = FW_PARAM_PFVF(RQ_END);
2689                 param[4] = FW_PARAM_PFVF(PBL_START);
2690                 param[5] = FW_PARAM_PFVF(PBL_END);
2691                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2692                 if (rc != 0) {
2693                         device_printf(sc->dev,
2694                             "failed to query RDMA parameters(1): %d.\n", rc);
2695                         return (rc);
2696                 }
2697                 sc->vres.stag.start = val[0];
2698                 sc->vres.stag.size = val[1] - val[0] + 1;
2699                 sc->vres.rq.start = val[2];
2700                 sc->vres.rq.size = val[3] - val[2] + 1;
2701                 sc->vres.pbl.start = val[4];
2702                 sc->vres.pbl.size = val[5] - val[4] + 1;
2703
2704                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2705                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2706                 param[2] = FW_PARAM_PFVF(CQ_START);
2707                 param[3] = FW_PARAM_PFVF(CQ_END);
2708                 param[4] = FW_PARAM_PFVF(OCQ_START);
2709                 param[5] = FW_PARAM_PFVF(OCQ_END);
2710                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2711                 if (rc != 0) {
2712                         device_printf(sc->dev,
2713                             "failed to query RDMA parameters(2): %d.\n", rc);
2714                         return (rc);
2715                 }
2716                 sc->vres.qp.start = val[0];
2717                 sc->vres.qp.size = val[1] - val[0] + 1;
2718                 sc->vres.cq.start = val[2];
2719                 sc->vres.cq.size = val[3] - val[2] + 1;
2720                 sc->vres.ocq.start = val[4];
2721                 sc->vres.ocq.size = val[5] - val[4] + 1;
2722         }
2723         if (sc->iscsicaps) {
2724                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2725                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2726                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2727                 if (rc != 0) {
2728                         device_printf(sc->dev,
2729                             "failed to query iSCSI parameters: %d.\n", rc);
2730                         return (rc);
2731                 }
2732                 sc->vres.iscsi.start = val[0];
2733                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2734         }
2735
2736         /*
2737          * We've got the params we wanted to query via the firmware.  Now grab
2738          * some others directly from the chip.
2739          */
2740         rc = t4_read_chip_settings(sc);
2741
2742         return (rc);
2743 }
2744
2745 static int
2746 set_params__post_init(struct adapter *sc)
2747 {
2748         uint32_t param, val;
2749
2750         /* ask for encapsulated CPLs */
2751         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2752         val = 1;
2753         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2754
2755         return (0);
2756 }
2757
2758 #undef FW_PARAM_PFVF
2759 #undef FW_PARAM_DEV
2760
2761 static void
2762 t4_set_desc(struct adapter *sc)
2763 {
2764         char buf[128];
2765         struct adapter_params *p = &sc->params;
2766
2767         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2768             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2769             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2770
2771         device_set_desc_copy(sc->dev, buf);
2772 }
2773
/*
 * Rebuild the ifmedia list for a port and select the active media.  What
 * gets added depends on the port (connector) type and, for modular ports,
 * on the transceiver module currently plugged in.  Takes the port lock
 * around the ifmedia updates.
 */
static void
build_medialist(struct port_info *pi, struct ifmedia *media)
{
        int data, m;

        PORT_LOCK(pi);

        /* Start from scratch; everything is re-added below. */
        ifmedia_removeall(media);

        m = IFM_ETHER | IFM_FDX;        /* all entries below are full-duplex */
        data = (pi->port_type << 8) | pi->mod_type;

        switch(pi->port_type) {
        case FW_PORT_TYPE_BT_XFI:
                /*
                 * NOTE(review): unlike every other case this one never calls
                 * ifmedia_set(), leaving no active media selected -- confirm
                 * whether that is intentional.
                 */
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                break;

        case FW_PORT_TYPE_BT_XAUI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                /* fall through */

        case FW_PORT_TYPE_BT_SGMII:
                ifmedia_add(media, m | IFM_1000_T, data, NULL);
                ifmedia_add(media, m | IFM_100_TX, data, NULL);
                ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
                ifmedia_set(media, IFM_ETHER | IFM_AUTO);
                break;

        case FW_PORT_TYPE_CX4:
                ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
                ifmedia_set(media, m | IFM_10G_CX4);
                break;

        /* 10G modular ports: media depends on the inserted module. */
        case FW_PORT_TYPE_QSFP_10G:
        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_10G_LR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LR);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_10G_SR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_SR);
                        break;

                case FW_PORT_MOD_TYPE_LRM:
                        ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LRM);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
                        ifmedia_set(media, m | IFM_10G_TWINAX);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no link, so drop the FDX flag too. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                case FW_PORT_MOD_TYPE_NA:
                case FW_PORT_MOD_TYPE_ER:
                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        /* 40G QSFP: same idea, 40G media words. */
        case FW_PORT_TYPE_QSFP:
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_LR4);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_SR4);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_CR4);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no link, so drop the FDX flag too. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        default:
                device_printf(pi->dev,
                    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
                    pi->mod_type);
                ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                ifmedia_set(media, m | IFM_UNKNOWN);
                break;
        }

        PORT_UNLOCK(pi);
}
2898
2899 #define FW_MAC_EXACT_CHUNK      7
2900
2901 /*
2902  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2903  * indicates which parameters should be programmed (the rest are left alone).
2904  */
int
update_mac_settings(struct ifnet *ifp, int flags)
{
        int rc = 0;
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; /* -1: leave alone */
        uint16_t viid = 0xffff;
        int16_t *xact_addr_filt = NULL;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT(flags, ("%s: not told what to update.", __func__));

        /* Pick the virtual interface (and its MAC filter slot) to program. */
        if (ifp == pi->ifp) {
                viid = pi->viid;
                xact_addr_filt = &pi->xact_addr_filt;
        }
#ifdef DEV_NETMAP
        else if (ifp == pi->nm_ifp) {
                viid = pi->nm_viid;
                xact_addr_filt = &pi->nm_xact_addr_filt;
        }
#endif
        if (flags & XGMAC_MTU)
                mtu = ifp->if_mtu;

        if (flags & XGMAC_PROMISC)
                promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

        if (flags & XGMAC_ALLMULTI)
                allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

        if (flags & XGMAC_VLANEX)
                vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

        /* One firmware call covers MTU, promisc, allmulti and VLAN extract. */
        if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
                rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
                    1, vlanex, false);
                if (rc) {
                        if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
                            rc);
                        return (rc);
                }
        }

        if (flags & XGMAC_UCADDR) {
                uint8_t ucaddr[ETHER_ADDR_LEN];

                bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
                /* On success the return value is the filter index used. */
                rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
                    true, true);
                if (rc < 0) {
                        rc = -rc;
                        if_printf(ifp, "change_mac failed: %d\n", rc);
                        return (rc);
                } else {
                        *xact_addr_filt = rc;
                        rc = 0;
                }
        }

        if (flags & XGMAC_MCADDRS) {
                const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
                int del = 1;    /* presumably: first chunk replaces, rest append */
                uint64_t hash = 0;
                struct ifmultiaddr *ifma;
                int i = 0, j;

                /*
                 * Program multicast addresses in chunks of up to
                 * FW_MAC_EXACT_CHUNK; 'hash' accumulates across calls and is
                 * committed via t4_set_addr_hash below.
                 */
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        mcaddr[i++] =
                            LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

                        if (i == FW_MAC_EXACT_CHUNK) {
                                rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
                                    i, mcaddr, NULL, &hash, 0);
                                if (rc < 0) {
                                        rc = -rc;
                                        for (j = 0; j < i; j++) {
                                                if_printf(ifp,
                                                    "failed to add mc address"
                                                    " %02x:%02x:%02x:"
                                                    "%02x:%02x:%02x rc=%d\n",
                                                    mcaddr[j][0], mcaddr[j][1],
                                                    mcaddr[j][2], mcaddr[j][3],
                                                    mcaddr[j][4], mcaddr[j][5],
                                                    rc);
                                        }
                                        goto mcfail;
                                }
                                del = 0;
                                i = 0;
                        }
                }
                /* Flush the final partial chunk, if any. */
                if (i > 0) {
                        rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
                            mcaddr, NULL, &hash, 0);
                        if (rc < 0) {
                                rc = -rc;
                                for (j = 0; j < i; j++) {
                                        if_printf(ifp,
                                            "failed to add mc address"
                                            " %02x:%02x:%02x:"
                                            "%02x:%02x:%02x rc=%d\n",
                                            mcaddr[j][0], mcaddr[j][1],
                                            mcaddr[j][2], mcaddr[j][3],
                                            mcaddr[j][4], mcaddr[j][5],
                                            rc);
                                }
                                goto mcfail;
                        }
                }

                rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
                if (rc != 0)
                        if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
                if_maddr_runlock(ifp);
        }

        return (rc);
}
3029
3030 int
3031 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
3032     char *wmesg)
3033 {
3034         int rc, pri;
3035
3036 #ifdef WITNESS
3037         /* the caller thinks it's ok to sleep, but is it really? */
3038         if (flags & SLEEP_OK)
3039                 pause("t4slptst", 1);
3040 #endif
3041
3042         if (INTR_OK)
3043                 pri = PCATCH;
3044         else
3045                 pri = 0;
3046
3047         ADAPTER_LOCK(sc);
3048         for (;;) {
3049
3050                 if (pi && IS_DOOMED(pi)) {
3051                         rc = ENXIO;
3052                         goto done;
3053                 }
3054
3055                 if (!IS_BUSY(sc)) {
3056                         rc = 0;
3057                         break;
3058                 }
3059
3060                 if (!(flags & SLEEP_OK)) {
3061                         rc = EBUSY;
3062                         goto done;
3063                 }
3064
3065                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
3066                         rc = EINTR;
3067                         goto done;
3068                 }
3069         }
3070
3071         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
3072         SET_BUSY(sc);
3073 #ifdef INVARIANTS
3074         sc->last_op = wmesg;
3075         sc->last_op_thr = curthread;
3076 #endif
3077
3078 done:
3079         if (!(flags & HOLD_LOCK) || rc)
3080                 ADAPTER_UNLOCK(sc);
3081
3082         return (rc);
3083 }
3084
3085 void
3086 end_synchronized_op(struct adapter *sc, int flags)
3087 {
3088
3089         if (flags & LOCK_HELD)
3090                 ADAPTER_LOCK_ASSERT_OWNED(sc);
3091         else
3092                 ADAPTER_LOCK(sc);
3093
3094         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
3095         CLR_BUSY(sc);
3096         wakeup(&sc->flags);
3097         ADAPTER_UNLOCK(sc);
3098 }
3099
/*
 * Bring the port up: full adapter/port init if not yet done, program the
 * MAC, enable the virtual interface, claim the trace queue if unclaimed,
 * then mark the interface running and start the 1Hz tick.  Idempotent for
 * an already-running port.  Caller must hold the synchronized-op interlock.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* One-time adapter-wide and per-port initialization. */
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        if (!(pi->flags & PORT_INIT_DONE) &&
            ((rc = port_full_init(pi)) != 0))
                return (rc); /* error message displayed already */

        /* Program the MAC before enabling the VI. */
        rc = update_mac_settings(ifp, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /*
         * The first iq of the first port to come up is used for tracing.
         */
        if (sc->traceq < 0) {
                sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
                t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
                    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
                    V_QUEUENUMBER(sc->traceq));
                pi->flags |= HAS_TRACEQ;
        }

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);  /* undo partial bring-up */

        return (rc);
}
3157
/*
 * Idempotent.
 *
 * Takes the interface out of the RUNNING state.  Safe to call even if the
 * port never came up fully, which is why cxgbe_init_synchronized uses it as
 * its error-path cleanup.
 */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	/* Mark the port closed and clear RUNNING under the port lock. */
	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Report the link as down; -1 means "no link down reason recorded". */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->linkdnrc = -1;
	t4_os_link_changed(sc, pi->port_id, 0, -1);

	return (0);
}
3195
/*
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];	/* short per-queue name used by bus_describe_intr */
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* INTx uses rid 0; MSI/MSI-X resource ids start at 1. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	/* Single vector: one handler services everything. */
	if (sc->intr_count == 1)
		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));

	/* Multiple interrupts. */
	KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
	    ("%s: too few intr.", __func__));

	/* The first one is always error intr */
	rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
	if (rc != 0)
		return (rc);
	irq++;
	rid++;

	/* The second one is always the firmware event queue */
	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
	if (rc != 0)
		return (rc);
	irq++;
	rid++;

	/*
	 * One vector per rx queue of each kind, per port.  The separator in
	 * the description ('.', ',', '-') distinguishes NIC, offload, and
	 * netmap queues respectively.
	 */
	for_each_port(sc, p) {
		pi = sc->port[p];

		if (pi->flags & INTR_RXQ) {
			for_each_rxq(pi, q, rxq) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
		}
#ifdef TCP_OFFLOAD
		if (pi->flags & INTR_OFLD_RXQ) {
			for_each_ofld_rxq(pi, q, ofld_rxq) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
		}
#endif
#ifdef DEV_NETMAP
		if (pi->flags & INTR_NM_RXQ) {
			for_each_nm_rxq(pi, q, nm_rxq) {
				snprintf(s, sizeof(s), "%d-%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr,
				    nm_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
		}
#endif
	}
	/* Every allocated vector must have been consumed exactly once. */
	MPASS(irq == &sc->irq[sc->intr_count]);

	return (0);
}
3286
/*
 * Set up adapter-wide (not per-port) resources: the SGE queues that belong to
 * the adapter itself and one taskqueue per entry in sc->tq.  Interrupts are
 * enabled last, once everything is in place.  On failure, everything done
 * here is undone by adapter_full_uninit.
 */
int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		/* One thread per taskqueue, named after the device. */
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);

	return (rc);
}
3324
3325 int
3326 adapter_full_uninit(struct adapter *sc)
3327 {
3328         int i;
3329
3330         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3331
3332         t4_teardown_adapter_queues(sc);
3333
3334         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3335                 taskqueue_free(sc->tq[i]);
3336                 sc->tq[i] = NULL;
3337         }
3338
3339         sc->flags &= ~FULL_INIT_DONE;
3340
3341         return (0);
3342 }
3343
/*
 * Set up this port's tx/rx/fl queues and program its RSS table.  A copy of
 * the RSS table is kept in pi->rss.  On failure, everything done here is
 * undone by port_full_uninit.
 */
int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i, j;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
	 *
	 * The table is filled by cycling through the port's rxqs until all
	 * rss_size slots are used.  NOTE(review): this assumes the port has at
	 * least one rxq; with zero rxqs the outer loop would never terminate —
	 * confirm callers guarantee nrxq > 0.
	 */
	rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
	for (i = 0; i < pi->rss_size;) {
		for_each_rxq(pi, j, rxq) {
			rss[i++] = rxq->iq.abs_id;
			if (i == pi->rss_size)
				break;
		}
	}

	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
	    pi->rss_size);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->rss = rss;
	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);

	return (rc);
}
3394
/*
 * Idempotent.
 *
 * Quiesces and tears down this port's queues and frees the saved RSS table.
 * The quiesce work and the free of pi->rss happen only when PORT_INIT_DONE is
 * set, and the flag is cleared at the end, which is what makes repeated calls
 * safe.
 */
int
port_full_uninit(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif

	if (pi->flags & PORT_INIT_DONE) {

		/* Need to quiesce queues.  XXX: ctrl queues? */

		/* Tx side first, then rx iqs and their freelists. */
		for_each_txq(pi, i, txq) {
			quiesce_eq(sc, &txq->eq);
		}

#ifdef TCP_OFFLOAD
		for_each_ofld_txq(pi, i, ofld_txq) {
			quiesce_eq(sc, &ofld_txq->eq);
		}
#endif

		for_each_rxq(pi, i, rxq) {
			quiesce_iq(sc, &rxq->iq);
			quiesce_fl(sc, &rxq->fl);
		}

#ifdef TCP_OFFLOAD
		for_each_ofld_rxq(pi, i, ofld_rxq) {
			quiesce_iq(sc, &ofld_rxq->iq);
			quiesce_fl(sc, &ofld_rxq->fl);
		}
#endif
		free(pi->rss, M_CXGBE);
	}

	t4_teardown_port_queues(pi);
	pi->flags &= ~PORT_INIT_DONE;

	return (0);
}
3443
/*
 * Stop all activity on an egress queue before it is torn down: mark it
 * doomed, wait out any pending credit flush, then drain the tx callout and
 * tx task so no deferred work references the queue afterwards.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	/* Author-acknowledged race here; the pause papers over it. */
	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3463
3464 static void
3465 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3466 {
3467         (void) sc;      /* unused */
3468
3469         /* Synchronize with the interrupt handler */
3470         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3471                 pause("iqfree", 1);
3472 }
3473
/*
 * Stop starvation-relief activity on a freelist before teardown.  FL_DOOMED
 * is set with both sfl_lock and the fl lock held — presumably so the fl
 * cannot be (re)queued for starvation relief; confirm against the handling
 * in t4_sge.c.  Draining sfl_callout then ensures no relief run is in flight.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3487
3488 static int
3489 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3490     driver_intr_t *handler, void *arg, char *name)
3491 {
3492         int rc;
3493
3494         irq->rid = rid;
3495         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3496             RF_SHAREABLE | RF_ACTIVE);
3497         if (irq->res == NULL) {
3498                 device_printf(sc->dev,
3499                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3500                 return (ENOMEM);
3501         }
3502
3503         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3504             NULL, handler, arg, &irq->tag);
3505         if (rc != 0) {
3506                 device_printf(sc->dev,
3507                     "failed to setup interrupt for rid %d, name %s: %d\n",
3508                     rid, name, rc);
3509         } else if (name)
3510                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3511
3512         return (rc);
3513 }
3514
3515 static int
3516 t4_free_irq(struct adapter *sc, struct irq *irq)
3517 {
3518         if (irq->tag)
3519                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3520         if (irq->res)
3521                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3522
3523         bzero(irq, sizeof(*irq));
3524
3525         return (0);
3526 }
3527
3528 static void
3529 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3530     unsigned int end)
3531 {
3532         uint32_t *p = (uint32_t *)(buf + start);
3533
3534         for ( ; start <= end; start += sizeof(uint32_t))
3535                 *p++ = t4_read_reg(sc, start);
3536 }
3537
3538 static void
3539 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3540 {
3541         int i, n;
3542         const unsigned int *reg_ranges;
3543         static const unsigned int t4_reg_ranges[] = {
3544                 0x1008, 0x1108,
3545                 0x1180, 0x11b4,
3546                 0x11fc, 0x123c,
3547                 0x1300, 0x173c,
3548                 0x1800, 0x18fc,
3549                 0x3000, 0x30d8,
3550                 0x30e0, 0x5924,
3551                 0x5960, 0x59d4,
3552                 0x5a00, 0x5af8,
3553                 0x6000, 0x6098,
3554                 0x6100, 0x6150,
3555                 0x6200, 0x6208,
3556                 0x6240, 0x6248,
3557                 0x6280, 0x6338,
3558                 0x6370, 0x638c,
3559                 0x6400, 0x643c,
3560                 0x6500, 0x6524,
3561                 0x6a00, 0x6a38,
3562                 0x6a60, 0x6a78,
3563                 0x6b00, 0x6b84,
3564                 0x6bf0, 0x6c84,
3565                 0x6cf0, 0x6d84,
3566                 0x6df0, 0x6e84,
3567                 0x6ef0, 0x6f84,
3568                 0x6ff0, 0x7084,
3569                 0x70f0, 0x7184,
3570                 0x71f0, 0x7284,
3571                 0x72f0, 0x7384,
3572                 0x73f0, 0x7450,
3573                 0x7500, 0x7530,
3574                 0x7600, 0x761c,
3575                 0x7680, 0x76cc,
3576                 0x7700, 0x7798,
3577                 0x77c0, 0x77fc,
3578                 0x7900, 0x79fc,
3579                 0x7b00, 0x7c38,
3580                 0x7d00, 0x7efc,
3581                 0x8dc0, 0x8e1c,
3582                 0x8e30, 0x8e78,
3583                 0x8ea0, 0x8f6c,
3584                 0x8fc0, 0x9074,
3585                 0x90fc, 0x90fc,
3586                 0x9400, 0x9458,
3587                 0x9600, 0x96bc,
3588                 0x9800, 0x9808,
3589                 0x9820, 0x983c,
3590                 0x9850, 0x9864,
3591                 0x9c00, 0x9c6c,
3592                 0x9c80, 0x9cec,
3593                 0x9d00, 0x9d6c,
3594                 0x9d80, 0x9dec,
3595                 0x9e00, 0x9e6c,
3596                 0x9e80, 0x9eec,
3597                 0x9f00, 0x9f6c,
3598                 0x9f80, 0x9fec,
3599                 0xd004, 0xd03c,
3600                 0xdfc0, 0xdfe0,
3601                 0xe000, 0xea7c,
3602                 0xf000, 0x11110,
3603                 0x11118, 0x11190,
3604                 0x19040, 0x1906c,
3605                 0x19078, 0x19080,
3606                 0x1908c, 0x19124,
3607                 0x19150, 0x191b0,
3608                 0x191d0, 0x191e8,
3609                 0x19238, 0x1924c,
3610                 0x193f8, 0x19474,
3611                 0x19490, 0x194f8,
3612                 0x19800, 0x19f30,
3613                 0x1a000, 0x1a06c,
3614                 0x1a0b0, 0x1a120,
3615                 0x1a128, 0x1a138,
3616                 0x1a190, 0x1a1c4,
3617                 0x1a1fc, 0x1a1fc,
3618                 0x1e040, 0x1e04c,
3619                 0x1e284, 0x1e28c,
3620                 0x1e2c0, 0x1e2c0,
3621                 0x1e2e0, 0x1e2e0,
3622                 0x1e300, 0x1e384,
3623                 0x1e3c0, 0x1e3c8,
3624                 0x1e440, 0x1e44c,
3625                 0x1e684, 0x1e68c,
3626                 0x1e6c0, 0x1e6c0,
3627                 0x1e6e0, 0x1e6e0,
3628                 0x1e700, 0x1e784,
3629                 0x1e7c0, 0x1e7c8,
3630                 0x1e840, 0x1e84c,
3631                 0x1ea84, 0x1ea8c,
3632                 0x1eac0, 0x1eac0,
3633                 0x1eae0, 0x1eae0,
3634                 0x1eb00, 0x1eb84,
3635                 0x1ebc0, 0x1ebc8,
3636                 0x1ec40, 0x1ec4c,
3637                 0x1ee84, 0x1ee8c,
3638                 0x1eec0, 0x1eec0,
3639                 0x1eee0, 0x1eee0,
3640                 0x1ef00, 0x1ef84,
3641                 0x1efc0, 0x1efc8,
3642                 0x1f040, 0x1f04c,
3643                 0x1f284, 0x1f28c,
3644                 0x1f2c0, 0x1f2c0,
3645                 0x1f2e0, 0x1f2e0,
3646                 0x1f300, 0x1f384,
3647                 0x1f3c0, 0x1f3c8,
3648                 0x1f440, 0x1f44c,
3649                 0x1f684, 0x1f68c,
3650                 0x1f6c0, 0x1f6c0,
3651                 0x1f6e0, 0x1f6e0,
3652                 0x1f700, 0x1f784,
3653                 0x1f7c0, 0x1f7c8,
3654                 0x1f840, 0x1f84c,
3655                 0x1fa84, 0x1fa8c,
3656                 0x1fac0, 0x1fac0,
3657                 0x1fae0, 0x1fae0,
3658                 0x1fb00, 0x1fb84,
3659                 0x1fbc0, 0x1fbc8,
3660                 0x1fc40, 0x1fc4c,
3661                 0x1fe84, 0x1fe8c,
3662                 0x1fec0, 0x1fec0,
3663                 0x1fee0, 0x1fee0,
3664                 0x1ff00, 0x1ff84,
3665                 0x1ffc0, 0x1ffc8,
3666                 0x20000, 0x2002c,
3667                 0x20100, 0x2013c,
3668                 0x20190, 0x201c8,
3669                 0x20200, 0x20318,
3670                 0x20400, 0x20528,
3671                 0x20540, 0x20614,
3672                 0x21000, 0x21040,
3673                 0x2104c, 0x21060,
3674                 0x210c0, 0x210ec,
3675                 0x21200, 0x21268,
3676                 0x21270, 0x21284,
3677                 0x212fc, 0x21388,
3678                 0x21400, 0x21404,
3679                 0x21500, 0x21518,
3680                 0x2152c, 0x2153c,
3681                 0x21550, 0x21554,
3682                 0x21600, 0x21600,
3683                 0x21608, 0x21628,
3684                 0x21630, 0x2163c,
3685                 0x21700, 0x2171c,
3686                 0x21780, 0x2178c,
3687                 0x21800, 0x21c38,
3688                 0x21c80, 0x21d7c,
3689                 0x21e00, 0x21e04,
3690                 0x22000, 0x2202c,
3691                 0x22100, 0x2213c,
3692                 0x22190, 0x221c8,
3693                 0x22200, 0x22318,
3694                 0x22400, 0x22528,
3695                 0x22540, 0x22614,
3696                 0x23000, 0x23040,
3697                 0x2304c, 0x23060,
3698                 0x230c0, 0x230ec,
3699                 0x23200, 0x23268,
3700                 0x23270, 0x23284,
3701                 0x232fc, 0x23388,
3702                 0x23400, 0x23404,
3703                 0x23500, 0x23518,
3704                 0x2352c, 0x2353c,
3705                 0x23550, 0x23554,
3706                 0x23600, 0x23600,
3707                 0x23608, 0x23628,
3708                 0x23630, 0x2363c,
3709                 0x23700, 0x2371c,
3710                 0x23780, 0x2378c,
3711                 0x23800, 0x23c38,
3712                 0x23c80, 0x23d7c,
3713                 0x23e00, 0x23e04,
3714                 0x24000, 0x2402c,
3715                 0x24100, 0x2413c,
3716                 0x24190, 0x241c8,
3717                 0x24200, 0x24318,
3718                 0x24400, 0x24528,
3719                 0x24540, 0x24614,
3720                 0x25000, 0x25040,
3721                 0x2504c, 0x25060,
3722                 0x250c0, 0x250ec,
3723                 0x25200, 0x25268,
3724                 0x25270, 0x25284,
3725                 0x252fc, 0x25388,
3726                 0x25400, 0x25404,
3727                 0x25500, 0x25518,
3728                 0x2552c, 0x2553c,
3729                 0x25550, 0x25554,
3730                 0x25600, 0x25600,
3731                 0x25608, 0x25628,
3732                 0x25630, 0x2563c,
3733                 0x25700, 0x2571c,
3734                 0x25780, 0x2578c,
3735                 0x25800, 0x25c38,
3736                 0x25c80, 0x25d7c,
3737                 0x25e00, 0x25e04,
3738                 0x26000, 0x2602c,
3739                 0x26100, 0x2613c,
3740                 0x26190, 0x261c8,
3741                 0x26200, 0x26318,
3742                 0x26400, 0x26528,
3743                 0x26540, 0x26614,
3744                 0x27000, 0x27040,
3745                 0x2704c, 0x27060,
3746                 0x270c0, 0x270ec,
3747                 0x27200, 0x27268,
3748                 0x27270, 0x27284,
3749                 0x272fc, 0x27388,
3750                 0x27400, 0x27404,
3751                 0x27500, 0x27518,
3752                 0x2752c, 0x2753c,
3753                 0x27550, 0x27554,
3754                 0x27600, 0x27600,
3755                 0x27608, 0x27628,
3756                 0x27630, 0x2763c,
3757                 0x27700, 0x2771c,
3758                 0x27780, 0x2778c,
3759                 0x27800, 0x27c38,
3760                 0x27c80, 0x27d7c,
3761                 0x27e00, 0x27e04
3762         };
3763         static const unsigned int t5_reg_ranges[] = {
3764                 0x1008, 0x1148,
3765                 0x1180, 0x11b4,
3766                 0x11fc, 0x123c,
3767                 0x1280, 0x173c,
3768                 0x1800, 0x18fc,
3769                 0x3000, 0x3028,
3770                 0x3060, 0x30d8,
3771                 0x30e0, 0x30fc,
3772                 0x3140, 0x357c,
3773                 0x35a8, 0x35cc,
3774                 0x35ec, 0x35ec,
3775                 0x3600, 0x5624,
3776                 0x56cc, 0x575c,
3777                 0x580c, 0x5814,
3778                 0x5890, 0x58bc,
3779                 0x5940, 0x59dc,
3780                 0x59fc, 0x5a18,
3781                 0x5a60, 0x5a9c,
3782                 0x5b94, 0x5bfc,
3783                 0x6000, 0x6040,
3784                 0x6058, 0x614c,
3785                 0x7700, 0x7798,
3786                 0x77c0, 0x78fc,
3787                 0x7b00, 0x7c54,
3788                 0x7d00, 0x7efc,
3789                 0x8dc0, 0x8de0,
3790                 0x8df8, 0x8e84,
3791                 0x8ea0, 0x8f84,
3792                 0x8fc0, 0x90f8,
3793                 0x9400, 0x9470,
3794                 0x9600, 0x96f4,
3795                 0x9800, 0x9808,
3796                 0x9820, 0x983c,
3797                 0x9850, 0x9864,
3798                 0x9c00, 0x9c6c,
3799                 0x9c80, 0x9cec,
3800                 0x9d00, 0x9d6c,
3801                 0x9d80, 0x9dec,
3802                 0x9e00, 0x9e6c,
3803                 0x9e80, 0x9eec,
3804                 0x9f00, 0x9f6c,
3805                 0x9f80, 0xa020,
3806                 0xd004, 0xd03c,
3807                 0xdfc0, 0xdfe0,
3808                 0xe000, 0x11088,
3809                 0x1109c, 0x11110,
3810                 0x11118, 0x1117c,
3811                 0x11190, 0x11204,
3812                 0x19040, 0x1906c,
3813                 0x19078, 0x19080,
3814                 0x1908c, 0x19124,
3815                 0x19150, 0x191b0,
3816                 0x191d0, 0x191e8,
3817                 0x19238, 0x19290,
3818                 0x193f8, 0x19474,
3819                 0x19490, 0x194cc,
3820                 0x194f0, 0x194f8,
3821                 0x19c00, 0x19c60,
3822                 0x19c94, 0x19e10,
3823                 0x19e50, 0x19f34,
3824                 0x19f40, 0x19f50,
3825                 0x19f90, 0x19fe4,
3826                 0x1a000, 0x1a06c,
3827                 0x1a0b0, 0x1a120,
3828                 0x1a128, 0x1a138,
3829                 0x1a190, 0x1a1c4,
3830                 0x1a1fc, 0x1a1fc,
3831                 0x1e008, 0x1e00c,
3832                 0x1e040, 0x1e04c,
3833                 0x1e284, 0x1e290,
3834                 0x1e2c0, 0x1e2c0,
3835                 0x1e2e0, 0x1e2e0,
3836                 0x1e300, 0x1e384,
3837                 0x1e3c0, 0x1e3c8,
3838                 0x1e408, 0x1e40c,
3839                 0x1e440, 0x1e44c,
3840                 0x1e684, 0x1e690,
3841                 0x1e6c0, 0x1e6c0,
3842                 0x1e6e0, 0x1e6e0,
3843                 0x1e700, 0x1e784,
3844                 0x1e7c0, 0x1e7c8,
3845                 0x1e808, 0x1e80c,
3846                 0x1e840, 0x1e84c,
3847                 0x1ea84, 0x1ea90,
3848                 0x1eac0, 0x1eac0,
3849                 0x1eae0, 0x1eae0,
3850                 0x1eb00, 0x1eb84,
3851                 0x1ebc0, 0x1ebc8,
3852                 0x1ec08, 0x1ec0c,
3853                 0x1ec40, 0x1ec4c,
3854                 0x1ee84, 0x1ee90,
3855                 0x1eec0, 0x1eec0,
3856                 0x1eee0, 0x1eee0,
3857                 0x1ef00, 0x1ef84,
3858                 0x1efc0, 0x1efc8,
3859                 0x1f008, 0x1f00c,
3860                 0x1f040, 0x1f04c,
3861                 0x1f284, 0x1f290,
3862                 0x1f2c0, 0x1f2c0,
3863                 0x1f2e0, 0x1f2e0,
3864                 0x1f300, 0x1f384,
3865                 0x1f3c0, 0x1f3c8,
3866                 0x1f408, 0x1f40c,
3867                 0x1f440, 0x1f44c,
3868                 0x1f684, 0x1f690,
3869                 0x1f6c0, 0x1f6c0,
3870                 0x1f6e0, 0x1f6e0,
3871                 0x1f700, 0x1f784,
3872                 0x1f7c0, 0x1f7c8,
3873                 0x1f808, 0x1f80c,
3874                 0x1f840, 0x1f84c,
3875                 0x1fa84, 0x1fa90,
3876                 0x1fac0, 0x1fac0,
3877                 0x1fae0, 0x1fae0,
3878                 0x1fb00, 0x1fb84,
3879                 0x1fbc0, 0x1fbc8,
3880                 0x1fc08, 0x1fc0c,
3881                 0x1fc40, 0x1fc4c,
3882                 0x1fe84, 0x1fe90,
3883                 0x1fec0, 0x1fec0,
3884                 0x1fee0, 0x1fee0,
3885                 0x1ff00, 0x1ff84,
3886                 0x1ffc0, 0x1ffc8,
3887                 0x30000, 0x30030,
3888                 0x30100, 0x30144,
3889                 0x30190, 0x301d0,
3890                 0x30200, 0x30318,
3891                 0x30400, 0x3052c,
3892                 0x30540, 0x3061c,
3893                 0x30800, 0x30834,
3894                 0x308c0, 0x30908,
3895                 0x30910, 0x309ac,
3896                 0x30a00, 0x30a2c,
3897                 0x30a44, 0x30a50,
3898                 0x30a74, 0x30c24,
3899                 0x30d00, 0x30d00,
3900                 0x30d08, 0x30d14,
3901                 0x30d1c, 0x30d20,
3902                 0x30d3c, 0x30d50,
3903                 0x31200, 0x3120c,
3904                 0x31220, 0x31220,
3905                 0x31240, 0x31240,
3906                 0x31600, 0x3160c,
3907                 0x31a00, 0x31a1c,
3908                 0x31e00, 0x31e20,
3909                 0x31e38, 0x31e3c,
3910                 0x31e80, 0x31e80,
3911                 0x31e88, 0x31ea8,
3912                 0x31eb0, 0x31eb4,
3913                 0x31ec8, 0x31ed4,
3914                 0x31fb8, 0x32004,
3915                 0x32200, 0x32200,
3916                 0x32208, 0x32240,
3917                 0x32248, 0x32280,
3918                 0x32288, 0x322c0,
3919                 0x322c8, 0x322fc,
3920                 0x32600, 0x32630,
3921                 0x32a00, 0x32abc,
3922                 0x32b00, 0x32b70,
3923                 0x33000, 0x33048,
3924                 0x33060, 0x3309c,
3925                 0x330f0, 0x33148,
3926                 0x33160, 0x3319c,
3927                 0x331f0, 0x332e4,
3928                 0x332f8, 0x333e4,
3929                 0x333f8, 0x33448,
3930                 0x33460, 0x3349c,
3931                 0x334f0, 0x33548,
3932                 0x33560, 0x3359c,
3933                 0x335f0, 0x336e4,
3934                 0x336f8, 0x337e4,
3935                 0x337f8, 0x337fc,
3936                 0x33814, 0x33814,
3937                 0x3382c, 0x3382c,
3938                 0x33880, 0x3388c,
3939                 0x338e8, 0x338ec,
3940                 0x33900, 0x33948,
3941                 0x33960, 0x3399c,
3942                 0x339f0, 0x33ae4,
3943                 0x33af8, 0x33b10,
3944                 0x33b28, 0x33b28,
3945                 0x33b3c, 0x33b50,
3946                 0x33bf0, 0x33c10,
3947                 0x33c28, 0x33c28,
3948                 0x33c3c, 0x33c50,
3949                 0x33cf0, 0x33cfc,
3950                 0x34000, 0x34030,
3951                 0x34100, 0x34144,
3952                 0x34190, 0x341d0,
3953                 0x34200, 0x34318,
3954                 0x34400, 0x3452c,
3955                 0x34540, 0x3461c,
3956                 0x34800, 0x34834,
3957                 0x348c0, 0x34908,
3958                 0x34910, 0x349ac,
3959                 0x34a00, 0x34a2c,
3960                 0x34a44, 0x34a50,
3961                 0x34a74, 0x34c24,
3962                 0x34d00, 0x34d00,
3963                 0x34d08, 0x34d14,
3964                 0x34d1c, 0x34d20,
3965                 0x34d3c, 0x34d50,
3966                 0x35200, 0x3520c,
3967                 0x35220, 0x35220,
3968                 0x35240, 0x35240,
3969                 0x35600, 0x3560c,
3970                 0x35a00, 0x35a1c,
3971                 0x35e00, 0x35e20,
3972                 0x35e38, 0x35e3c,
3973                 0x35e80, 0x35e80,
3974                 0x35e88, 0x35ea8,
3975                 0x35eb0, 0x35eb4,
3976                 0x35ec8, 0x35ed4,
3977                 0x35fb8, 0x36004,
3978                 0x36200, 0x36200,
3979                 0x36208, 0x36240,
3980                 0x36248, 0x36280,
3981                 0x36288, 0x362c0,
3982                 0x362c8, 0x362fc,
3983                 0x36600, 0x36630,
3984                 0x36a00, 0x36abc,
3985                 0x36b00, 0x36b70,
3986                 0x37000, 0x37048,
3987                 0x37060, 0x3709c,
3988                 0x370f0, 0x37148,
3989                 0x37160, 0x3719c,
3990                 0x371f0, 0x372e4,
3991                 0x372f8, 0x373e4,
3992                 0x373f8, 0x37448,
3993                 0x37460, 0x3749c,
3994                 0x374f0, 0x37548,
3995                 0x37560, 0x3759c,
3996                 0x375f0, 0x376e4,
3997                 0x376f8, 0x377e4,
3998                 0x377f8, 0x377fc,
3999                 0x37814, 0x37814,
4000                 0x3782c, 0x3782c,
4001                 0x37880, 0x3788c,
4002                 0x378e8, 0x378ec,
4003                 0x37900, 0x37948,
4004                 0x37960, 0x3799c,
4005                 0x379f0, 0x37ae4,
4006                 0x37af8, 0x37b10,
4007                 0x37b28, 0x37b28,
4008                 0x37b3c, 0x37b50,
4009                 0x37bf0, 0x37c10,
4010                 0x37c28, 0x37c28,
4011                 0x37c3c, 0x37c50,
4012                 0x37cf0, 0x37cfc,
4013                 0x38000, 0x38030,
4014                 0x38100, 0x38144,
4015                 0x38190, 0x381d0,
4016                 0x38200, 0x38318,
4017                 0x38400, 0x3852c,
4018                 0x38540, 0x3861c,
4019                 0x38800, 0x38834,
4020                 0x388c0, 0x38908,
4021                 0x38910, 0x389ac,
4022                 0x38a00, 0x38a2c,
4023                 0x38a44, 0x38a50,
4024                 0x38a74, 0x38c24,
4025                 0x38d00, 0x38d00,
4026                 0x38d08, 0x38d14,
4027                 0x38d1c, 0x38d20,
4028                 0x38d3c, 0x38d50,
4029                 0x39200, 0x3920c,
4030                 0x39220, 0x39220,
4031                 0x39240, 0x39240,
4032                 0x39600, 0x3960c,
4033                 0x39a00, 0x39a1c,
4034                 0x39e00, 0x39e20,
4035                 0x39e38, 0x39e3c,
4036                 0x39e80, 0x39e80,
4037                 0x39e88, 0x39ea8,
4038                 0x39eb0, 0x39eb4,
4039                 0x39ec8, 0x39ed4,
4040                 0x39fb8, 0x3a004,
4041                 0x3a200, 0x3a200,
4042                 0x3a208, 0x3a240,
4043                 0x3a248, 0x3a280,
4044                 0x3a288, 0x3a2c0,
4045                 0x3a2c8, 0x3a2fc,
4046                 0x3a600, 0x3a630,
4047                 0x3aa00, 0x3aabc,
4048                 0x3ab00, 0x3ab70,
4049                 0x3b000, 0x3b048,
4050                 0x3b060, 0x3b09c,
4051                 0x3b0f0, 0x3b148,
4052                 0x3b160, 0x3b19c,
4053                 0x3b1f0, 0x3b2e4,
4054                 0x3b2f8, 0x3b3e4,
4055                 0x3b3f8, 0x3b448,
4056                 0x3b460, 0x3b49c,
4057                 0x3b4f0, 0x3b548,
4058                 0x3b560, 0x3b59c,
4059                 0x3b5f0, 0x3b6e4,
4060                 0x3b6f8, 0x3b7e4,
4061                 0x3b7f8, 0x3b7fc,
4062                 0x3b814, 0x3b814,
4063                 0x3b82c, 0x3b82c,
4064                 0x3b880, 0x3b88c,
4065                 0x3b8e8, 0x3b8ec,
4066                 0x3b900, 0x3b948,
4067                 0x3b960, 0x3b99c,
4068                 0x3b9f0, 0x3bae4,
4069                 0x3baf8, 0x3bb10,
4070                 0x3bb28, 0x3bb28,
4071                 0x3bb3c, 0x3bb50,
4072                 0x3bbf0, 0x3bc10,
4073                 0x3bc28, 0x3bc28,
4074                 0x3bc3c, 0x3bc50,
4075                 0x3bcf0, 0x3bcfc,
4076                 0x3c000, 0x3c030,
4077                 0x3c100, 0x3c144,
4078                 0x3c190, 0x3c1d0,
4079                 0x3c200, 0x3c318,
4080                 0x3c400, 0x3c52c,
4081                 0x3c540, 0x3c61c,
4082                 0x3c800, 0x3c834,
4083                 0x3c8c0, 0x3c908,
4084                 0x3c910, 0x3c9ac,
4085                 0x3ca00, 0x3ca2c,
4086                 0x3ca44, 0x3ca50,
4087                 0x3ca74, 0x3cc24,
4088                 0x3cd00, 0x3cd00,
4089                 0x3cd08, 0x3cd14,
4090                 0x3cd1c, 0x3cd20,
4091                 0x3cd3c, 0x3cd50,
4092                 0x3d200, 0x3d20c,
4093                 0x3d220, 0x3d220,
4094                 0x3d240, 0x3d240,
4095                 0x3d600, 0x3d60c,
4096                 0x3da00, 0x3da1c,
4097                 0x3de00, 0x3de20,
4098                 0x3de38, 0x3de3c,
4099                 0x3de80, 0x3de80,
4100                 0x3de88, 0x3dea8,
4101                 0x3deb0, 0x3deb4,
4102                 0x3dec8, 0x3ded4,
4103                 0x3dfb8, 0x3e004,
4104                 0x3e200, 0x3e200,
4105                 0x3e208, 0x3e240,
4106                 0x3e248, 0x3e280,
4107                 0x3e288, 0x3e2c0,
4108                 0x3e2c8, 0x3e2fc,
4109                 0x3e600, 0x3e630,
4110                 0x3ea00, 0x3eabc,
4111                 0x3eb00, 0x3eb70,
4112                 0x3f000, 0x3f048,
4113                 0x3f060, 0x3f09c,
4114                 0x3f0f0, 0x3f148,
4115                 0x3f160, 0x3f19c,
4116                 0x3f1f0, 0x3f2e4,
4117                 0x3f2f8, 0x3f3e4,
4118                 0x3f3f8, 0x3f448,
4119                 0x3f460, 0x3f49c,
4120                 0x3f4f0, 0x3f548,
4121                 0x3f560, 0x3f59c,
4122                 0x3f5f0, 0x3f6e4,
4123                 0x3f6f8, 0x3f7e4,
4124                 0x3f7f8, 0x3f7fc,
4125                 0x3f814, 0x3f814,
4126                 0x3f82c, 0x3f82c,
4127                 0x3f880, 0x3f88c,
4128                 0x3f8e8, 0x3f8ec,
4129                 0x3f900, 0x3f948,
4130                 0x3f960, 0x3f99c,
4131                 0x3f9f0, 0x3fae4,
4132                 0x3faf8, 0x3fb10,
4133                 0x3fb28, 0x3fb28,
4134                 0x3fb3c, 0x3fb50,
4135                 0x3fbf0, 0x3fc10,
4136                 0x3fc28, 0x3fc28,
4137                 0x3fc3c, 0x3fc50,
4138                 0x3fcf0, 0x3fcfc,
4139                 0x40000, 0x4000c,
4140                 0x40040, 0x40068,
4141                 0x4007c, 0x40144,
4142                 0x40180, 0x4018c,
4143                 0x40200, 0x40298,
4144                 0x402ac, 0x4033c,
4145                 0x403f8, 0x403fc,
4146                 0x41304, 0x413c4,
4147                 0x41400, 0x4141c,
4148                 0x41480, 0x414d0,
4149                 0x44000, 0x44078,
4150                 0x440c0, 0x44278,
4151                 0x442c0, 0x44478,
4152                 0x444c0, 0x44678,
4153                 0x446c0, 0x44878,
4154                 0x448c0, 0x449fc,
4155                 0x45000, 0x45068,
4156                 0x45080, 0x45084,
4157                 0x450a0, 0x450b0,
4158                 0x45200, 0x45268,
4159                 0x45280, 0x45284,
4160                 0x452a0, 0x452b0,
4161                 0x460c0, 0x460e4,
4162                 0x47000, 0x4708c,
4163                 0x47200, 0x47250,
4164                 0x47400, 0x47420,
4165                 0x47600, 0x47618,
4166                 0x47800, 0x47814,
4167                 0x48000, 0x4800c,
4168                 0x48040, 0x48068,
4169                 0x4807c, 0x48144,
4170                 0x48180, 0x4818c,
4171                 0x48200, 0x48298,
4172                 0x482ac, 0x4833c,
4173                 0x483f8, 0x483fc,
4174                 0x49304, 0x493c4,
4175                 0x49400, 0x4941c,
4176                 0x49480, 0x494d0,
4177                 0x4c000, 0x4c078,
4178                 0x4c0c0, 0x4c278,
4179                 0x4c2c0, 0x4c478,
4180                 0x4c4c0, 0x4c678,
4181                 0x4c6c0, 0x4c878,
4182                 0x4c8c0, 0x4c9fc,
4183                 0x4d000, 0x4d068,
4184                 0x4d080, 0x4d084,
4185                 0x4d0a0, 0x4d0b0,
4186                 0x4d200, 0x4d268,
4187                 0x4d280, 0x4d284,
4188                 0x4d2a0, 0x4d2b0,
4189                 0x4e0c0, 0x4e0e4,
4190                 0x4f000, 0x4f08c,
4191                 0x4f200, 0x4f250,
4192                 0x4f400, 0x4f420,
4193                 0x4f600, 0x4f618,
4194                 0x4f800, 0x4f814,
4195                 0x50000, 0x500cc,
4196                 0x50400, 0x50400,
4197                 0x50800, 0x508cc,
4198                 0x50c00, 0x50c00,
4199                 0x51000, 0x5101c,
4200                 0x51300, 0x51308,
4201         };
4202
4203         if (is_t4(sc)) {
4204                 reg_ranges = &t4_reg_ranges[0];
4205                 n = nitems(t4_reg_ranges);
4206         } else {
4207                 reg_ranges = &t5_reg_ranges[0];
4208                 n = nitems(t5_reg_ranges);
4209         }
4210
4211         regs->version = chip_id(sc) | chip_rev(sc) << 10;
4212         for (i = 0; i < n; i += 2)
4213                 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4214 }
4215
4216 static void
4217 cxgbe_tick(void *arg)
4218 {
4219         struct port_info *pi = arg;
4220         struct adapter *sc = pi->adapter;
4221         struct ifnet *ifp = pi->ifp;
4222         struct sge_txq *txq;
4223         int i, drops;
4224         struct port_stats *s = &pi->stats;
4225
4226         PORT_LOCK(pi);
4227         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4228                 PORT_UNLOCK(pi);
4229                 return; /* without scheduling another callout */
4230         }
4231
4232         t4_get_port_stats(sc, pi->tx_chan, s);
4233
4234         ifp->if_opackets = s->tx_frames - s->tx_pause;
4235         ifp->if_ipackets = s->rx_frames - s->rx_pause;
4236         ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4237         ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4238         ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4239         ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4240         ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4241             s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4242             s->rx_trunc3;
4243         for (i = 0; i < 4; i++) {
4244                 if (pi->rx_chan_map & (1 << i)) {
4245                         uint32_t v;
4246
4247                         /*
4248                          * XXX: indirect reads from the same ADDR/DATA pair can
4249                          * race with each other.
4250                          */
4251                         t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4252                             1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4253                         ifp->if_iqdrops += v;
4254                 }
4255         }
4256
4257         drops = s->tx_drop;
4258         for_each_txq(pi, i, txq)
4259                 drops += txq->br->br_drops;
4260         ifp->if_snd.ifq_drops = drops;
4261
4262         ifp->if_oerrors = s->tx_error_frames;
4263         ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4264             s->rx_fcs_err + s->rx_len_err;
4265
4266         callout_schedule(&pi->tick, hz);
4267         PORT_UNLOCK(pi);
4268 }
4269
4270 static void
4271 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4272 {
4273         struct ifnet *vlan;
4274
4275         if (arg != ifp || ifp->if_type != IFT_ETHER)
4276                 return;
4277
4278         vlan = VLAN_DEVAT(ifp, vid);
4279         VLAN_SETCOOKIE(vlan, ifp);
4280 }
4281
/*
 * Default CPL message handler, installed for every opcode that has no
 * registered handler (see t4_register_cpl_handler).  Receiving a CPL here
 * means the driver asked the hardware for messages it isn't prepared to
 * process: panic under INVARIANTS, otherwise log the event, free the
 * payload mbuf (if any), and return EDOOFUS.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);	/* nobody will consume the payload; free it here */
#endif
	return (EDOOFUS);
}
4296
4297 int
4298 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4299 {
4300         uintptr_t *loc, new;
4301
4302         if (opcode >= nitems(sc->cpl_handler))
4303                 return (EINVAL);
4304
4305         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4306         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4307         atomic_store_rel_ptr(loc, new);
4308
4309         return (0);
4310 }
4311
/*
 * Default handler for ingress-queue async notifications, installed when no
 * component has registered one (see t4_register_an_handler).  Panic under
 * INVARIANTS, otherwise log and return EDOOFUS.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4324
4325 int
4326 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4327 {
4328         uintptr_t *loc, new;
4329
4330         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4331         loc = (uintptr_t *) &sc->an_handler;
4332         atomic_store_rel_ptr(loc, new);
4333
4334         return (0);
4335 }
4336
/*
 * Default handler for firmware messages with no registered handler (see
 * t4_register_fw_msg_handler).  The enclosing CPL_FW6_MSG is recovered
 * from the payload pointer with __containerof so the message type can be
 * reported.  Panic under INVARIANTS, otherwise log and return EDOOFUS.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4350
4351 int
4352 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4353 {
4354         uintptr_t *loc, new;
4355
4356         if (type >= nitems(sc->fw_msg_handler))
4357                 return (EINVAL);
4358
4359         /*
4360          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4361          * handler dispatch table.  Reject any attempt to install a handler for
4362          * this subtype.
4363          */
4364         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4365                 return (EINVAL);
4366
4367         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4368         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4369         atomic_store_rel_ptr(loc, new);
4370
4371         return (0);
4372 }
4373
4374 static int
4375 t4_sysctls(struct adapter *sc)
4376 {
4377         struct sysctl_ctx_list *ctx;
4378         struct sysctl_oid *oid;
4379         struct sysctl_oid_list *children, *c0;
4380         static char *caps[] = {
4381                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4382                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4383                     "\6HASHFILTER\7ETHOFLD",
4384                 "\20\1TOE",                             /* caps[2] toecaps */
4385                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4386                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4387                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4388                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4389                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4390                     "\4PO_INITIAOR\5PO_TARGET"
4391         };
4392         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4393
4394         ctx = device_get_sysctl_ctx(sc->dev);
4395
4396         /*
4397          * dev.t4nex.X.
4398          */
4399         oid = device_get_sysctl_tree(sc->dev);
4400         c0 = children = SYSCTL_CHILDREN(oid);
4401
4402         sc->sc_do_rxcopy = 1;
4403         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4404             &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4405
4406         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4407             sc->params.nports, "# of ports");
4408
4409         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4410             NULL, chip_rev(sc), "chip hardware revision");
4411
4412         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4413             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4414
4415         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4416             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4417
4418         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4419             sc->cfcsum, "config file checksum");
4420
4421         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4422             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4423             sysctl_bitfield, "A", "available doorbells");
4424
4425         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4426             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4427             sysctl_bitfield, "A", "available link capabilities");
4428
4429         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4430             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4431             sysctl_bitfield, "A", "available NIC capabilities");
4432
4433         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4434             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4435             sysctl_bitfield, "A", "available TCP offload capabilities");
4436
4437         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4438             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4439             sysctl_bitfield, "A", "available RDMA capabilities");
4440
4441         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4442             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4443             sysctl_bitfield, "A", "available iSCSI capabilities");
4444
4445         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4446             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4447             sysctl_bitfield, "A", "available FCoE capabilities");
4448
4449         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4450             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4451
4452         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4453             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4454             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4455             "interrupt holdoff timer values (us)");
4456
4457         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4458             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4459             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4460             "interrupt holdoff packet counter values");
4461
4462         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4463             NULL, sc->tids.nftids, "number of filters");
4464
4465         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4466             CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4467             "chip temperature (in Celsius)");
4468
4469         t4_sge_sysctls(sc, ctx, children);
4470
4471         sc->lro_timeout = 100;
4472         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4473             &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4474
4475 #ifdef SBUF_DRAIN
4476         /*
4477          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4478          */
4479         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4480             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4481             "logs and miscellaneous information");
4482         children = SYSCTL_CHILDREN(oid);
4483
4484         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4485             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4486             sysctl_cctrl, "A", "congestion control");
4487
4488         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4489             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4490             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4491
4492         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4493             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4494             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4495
4496         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4497             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4498             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4499
4500         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4501             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4502             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4503
4504         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4505             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4506             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4507
4508         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4509             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4510             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4511
4512         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4513             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4514             sysctl_cim_la, "A", "CIM logic analyzer");
4515
4516         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4517             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4518             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4519
4520         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4521             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4522             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4523
4524         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4525             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4526             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4527
4528         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4529             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4530             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4531
4532         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4533             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4534             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4535
4536         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4537             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4538             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4539
4540         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4541             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4542             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4543
4544         if (is_t5(sc)) {
4545                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4546                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4547                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4548
4549                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4550                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4551                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4552         }
4553
4554         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4555             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4556             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4557
4558         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4559             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4560             sysctl_cim_qcfg, "A", "CIM queue configuration");
4561
4562         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4563             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4564             sysctl_cpl_stats, "A", "CPL statistics");
4565
4566         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4567             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4568             sysctl_ddp_stats, "A", "DDP statistics");
4569
4570         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4571             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4572             sysctl_devlog, "A", "firmware's device log");
4573
4574         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4575             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4576             sysctl_fcoe_stats, "A", "FCoE statistics");
4577
4578         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4579             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4580             sysctl_hw_sched, "A", "hardware scheduler ");
4581
4582         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4583             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4584             sysctl_l2t, "A", "hardware L2 table");
4585
4586         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4587             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4588             sysctl_lb_stats, "A", "loopback statistics");
4589
4590         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4591             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4592             sysctl_meminfo, "A", "memory regions");
4593
4594         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4595             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4596             sysctl_mps_tcam, "A", "MPS TCAM entries");
4597
4598         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4599             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4600             sysctl_path_mtus, "A", "path MTUs");
4601
4602         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4603             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4604             sysctl_pm_stats, "A", "PM statistics");
4605
4606         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4607             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4608             sysctl_rdma_stats, "A", "RDMA statistics");
4609
4610         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4611             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4612             sysctl_tcp_stats, "A", "TCP statistics");
4613
4614         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4615             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4616             sysctl_tids, "A", "TID information");
4617
4618         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4619             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4620             sysctl_tp_err_stats, "A", "TP error statistics");
4621
4622         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4623             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4624             sysctl_tp_la, "A", "TP logic analyzer");
4625
4626         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4627             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4628             sysctl_tx_rate, "A", "Tx rate");
4629
4630         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4631             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4632             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4633
4634         if (is_t5(sc)) {
4635                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4636                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4637                     sysctl_wcwr_stats, "A", "write combined work requests");
4638         }
4639 #endif
4640
4641 #ifdef TCP_OFFLOAD
4642         if (is_offload(sc)) {
4643                 /*
4644                  * dev.t4nex.X.toe.
4645                  */
4646                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4647                     NULL, "TOE parameters");
4648                 children = SYSCTL_CHILDREN(oid);
4649
4650                 sc->tt.sndbuf = 256 * 1024;
4651                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4652                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4653
4654                 sc->tt.ddp = 0;
4655                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4656                     &sc->tt.ddp, 0, "DDP allowed");
4657
4658                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4659                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4660                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4661
4662                 sc->tt.ddp_thres =
4663                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4664                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4665                     &sc->tt.ddp_thres, 0, "DDP threshold");
4666
4667                 sc->tt.rx_coalesce = 1;
4668                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4669                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4670         }
4671 #endif
4672
4673
4674         return (0);
4675 }
4676
4677 static int
4678 cxgbe_sysctls(struct port_info *pi)
4679 {
4680         struct sysctl_ctx_list *ctx;
4681         struct sysctl_oid *oid;
4682         struct sysctl_oid_list *children;
4683         struct adapter *sc = pi->adapter;
4684
4685         ctx = device_get_sysctl_ctx(pi->dev);
4686
4687         /*
4688          * dev.cxgbe.X.
4689          */
4690         oid = device_get_sysctl_tree(pi->dev);
4691         children = SYSCTL_CHILDREN(oid);
4692
4693         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4694            CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4695         if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4696                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4697                     CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4698                     "PHY temperature (in Celsius)");
4699                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4700                     CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4701                     "PHY firmware version");
4702         }
4703         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4704             &pi->nrxq, 0, "# of rx queues");
4705         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4706             &pi->ntxq, 0, "# of tx queues");
4707         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4708             &pi->first_rxq, 0, "index of first rx queue");
4709         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4710             &pi->first_txq, 0, "index of first tx queue");
4711         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
4712             CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
4713             "Reserve queue 0 for non-flowid packets");
4714
4715 #ifdef TCP_OFFLOAD
4716         if (is_offload(sc)) {
4717                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4718                     &pi->nofldrxq, 0,
4719                     "# of rx queues for offloaded TCP connections");
4720                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4721                     &pi->nofldtxq, 0,
4722                     "# of tx queues for offloaded TCP connections");
4723                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4724                     CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4725                     "index of first TOE rx queue");
4726                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4727                     CTLFLAG_RD, &pi->first_ofld_txq, 0,
4728                     "index of first TOE tx queue");
4729         }
4730 #endif
4731 #ifdef DEV_NETMAP
4732         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
4733             &pi->nnmrxq, 0, "# of rx queues for netmap");
4734         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
4735             &pi->nnmtxq, 0, "# of tx queues for netmap");
4736         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
4737             CTLFLAG_RD, &pi->first_nm_rxq, 0,
4738             "index of first netmap rx queue");
4739         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
4740             CTLFLAG_RD, &pi->first_nm_txq, 0,
4741             "index of first netmap tx queue");
4742 #endif
4743
4744         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4745             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4746             "holdoff timer index");
4747         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4748             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4749             "holdoff packet counter index");
4750
4751         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4752             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4753             "rx queue size");
4754         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4755             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4756             "tx queue size");
4757
4758         /*
4759          * dev.cxgbe.X.stats.
4760          */
4761         oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4762             NULL, "port statistics");
4763         children = SYSCTL_CHILDREN(oid);
4764
4765 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4766         SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4767             CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
4768             sysctl_handle_t4_reg64, "QU", desc)
4769
4770         SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4771             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4772         SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4773             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4774         SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4775             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4776         SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4777             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4778         SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4779             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4780         SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4781             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4782         SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4783             "# of tx frames in this range",
4784             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4785         SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4786             "# of tx frames in this range",
4787             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4788         SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4789             "# of tx frames in this range",
4790             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4791         SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4792             "# of tx frames in this range",
4793             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4794         SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4795             "# of tx frames in this range",
4796             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4797         SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4798             "# of tx frames in this range",
4799             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4800         SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4801             "# of tx frames in this range",
4802             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4803         SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4804             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4805         SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4806             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4807         SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4808             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4809         SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4810             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4811         SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4812             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4813         SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4814             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4815         SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4816             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4817         SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4818             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4819         SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4820             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4821         SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4822             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4823
4824         SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4825             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4826         SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4827             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4828         SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4829             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4830         SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4831             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4832         SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4833             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4834         SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4835             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4836         SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4837             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4838         SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4839             "# of frames received with bad FCS",
4840             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4841         SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4842             "# of frames received with length error",
4843             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4844         SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4845             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4846         SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4847             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4848         SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4849             "# of rx frames in this range",
4850             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4851         SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4852             "# of rx frames in this range",
4853             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4854         SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4855             "# of rx frames in this range",
4856             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4857         SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4858             "# of rx frames in this range",
4859             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4860         SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4861             "# of rx frames in this range",
4862             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4863         SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4864             "# of rx frames in this range",
4865             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4866         SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4867             "# of rx frames in this range",
4868             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4869         SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4870             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4871         SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4872             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4873         SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4874             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4875         SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4876             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4877         SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4878             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4879         SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4880             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4881         SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4882             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4883         SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4884             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4885         SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4886             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4887
4888 #undef SYSCTL_ADD_T4_REG64
4889
4890 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4891         SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4892             &pi->stats.name, desc)
4893
4894         /* We get these from port_stats and they may be stale by upto 1s */
4895         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4896             "# drops due to buffer-group 0 overflows");
4897         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4898             "# drops due to buffer-group 1 overflows");
4899         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4900             "# drops due to buffer-group 2 overflows");
4901         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4902             "# drops due to buffer-group 3 overflows");
4903         SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4904             "# of buffer-group 0 truncated packets");
4905         SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4906             "# of buffer-group 1 truncated packets");
4907         SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4908             "# of buffer-group 2 truncated packets");
4909         SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4910             "# of buffer-group 3 truncated packets");
4911
4912 #undef SYSCTL_ADD_T4_PORTSTAT
4913
4914         return (0);
4915 }
4916
4917 static int
4918 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4919 {
4920         int rc, *i;
4921         struct sbuf sb;
4922
4923         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4924         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4925                 sbuf_printf(&sb, "%d ", *i);
4926         sbuf_trim(&sb);
4927         sbuf_finish(&sb);
4928         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4929         sbuf_delete(&sb);
4930         return (rc);
4931 }
4932
4933 static int
4934 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4935 {
4936         int rc;
4937         struct sbuf *sb;
4938
4939         rc = sysctl_wire_old_buffer(req, 0);
4940         if (rc != 0)
4941                 return(rc);
4942
4943         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4944         if (sb == NULL)
4945                 return (ENOMEM);
4946
4947         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4948         rc = sbuf_finish(sb);
4949         sbuf_delete(sb);
4950
4951         return (rc);
4952 }
4953
/*
 * Read a value from the port's BASE-T PHY over MDIO.  arg2 selects which
 * quantity is read (0 or 1 as registered by the caller); the raw register
 * value is reported via a plain int sysctl.  Read-only.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	/* Serialize against other mailbox users of this adapter. */
	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	/*
	 * NOTE(review): 0x1e is the MMD and 0x20/0xc820 are device-specific
	 * register addresses on this PHY — confirm against the PHY datasheet.
	 */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;	/* the value of interest is in the high byte */

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
4978
4979 static int
4980 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4981 {
4982         struct port_info *pi = arg1;
4983         int rc, val;
4984
4985         val = pi->rsrv_noflowq;
4986         rc = sysctl_handle_int(oidp, &val, 0, req);
4987         if (rc != 0 || req->newptr == NULL)
4988                 return (rc);
4989
4990         if ((val >= 1) && (pi->ntxq > 1))
4991                 pi->rsrv_noflowq = 1;
4992         else
4993                 pi->rsrv_noflowq = 0;
4994
4995         return (rc);
4996 }
4997
/*
 * Get/set the interrupt holdoff timer index for all of the port's rx
 * queues (including offload rx queues when TCP_OFFLOAD is configured).
 * Unlike the packet-count index this can be changed while the queues are
 * live: the new value is pushed into each queue's intr_params.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);	/* error, or a plain read */

	/* Must be the index of one of the SGE's holdoff timers. */
	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Preserve the packet-count enable bit when updating the timer. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		/* Release store so the interrupt path sees a coherent value. */
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
5046
5047 static int
5048 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
5049 {
5050         struct port_info *pi = arg1;
5051         struct adapter *sc = pi->adapter;
5052         int idx, rc;
5053
5054         idx = pi->pktc_idx;
5055
5056         rc = sysctl_handle_int(oidp, &idx, 0, req);
5057         if (rc != 0 || req->newptr == NULL)
5058                 return (rc);
5059
5060         if (idx < -1 || idx >= SGE_NCOUNTERS)
5061                 return (EINVAL);
5062
5063         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5064             "t4pktc");
5065         if (rc)
5066                 return (rc);
5067
5068         if (pi->flags & PORT_INIT_DONE)
5069                 rc = EBUSY; /* cannot be changed once the queues are created */
5070         else
5071                 pi->pktc_idx = idx;
5072
5073         end_synchronized_op(sc, LOCK_HELD);
5074         return (rc);
5075 }
5076
5077 static int
5078 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
5079 {
5080         struct port_info *pi = arg1;
5081         struct adapter *sc = pi->adapter;
5082         int qsize, rc;
5083
5084         qsize = pi->qsize_rxq;
5085
5086         rc = sysctl_handle_int(oidp, &qsize, 0, req);
5087         if (rc != 0 || req->newptr == NULL)
5088                 return (rc);
5089
5090         if (qsize < 128 || (qsize & 7))
5091                 return (EINVAL);
5092
5093         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5094             "t4rxqs");
5095         if (rc)
5096                 return (rc);
5097
5098         if (pi->flags & PORT_INIT_DONE)
5099                 rc = EBUSY; /* cannot be changed once the queues are created */
5100         else
5101                 pi->qsize_rxq = qsize;
5102
5103         end_synchronized_op(sc, LOCK_HELD);
5104         return (rc);
5105 }
5106
5107 static int
5108 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
5109 {
5110         struct port_info *pi = arg1;
5111         struct adapter *sc = pi->adapter;
5112         int qsize, rc;
5113
5114         qsize = pi->qsize_txq;
5115
5116         rc = sysctl_handle_int(oidp, &qsize, 0, req);
5117         if (rc != 0 || req->newptr == NULL)
5118                 return (rc);
5119
5120         /* bufring size must be powerof2 */
5121         if (qsize < 128 || !powerof2(qsize))
5122                 return (EINVAL);
5123
5124         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
5125             "t4txqs");
5126         if (rc)
5127                 return (rc);
5128
5129         if (pi->flags & PORT_INIT_DONE)
5130                 rc = EBUSY; /* cannot be changed once the queues are created */
5131         else
5132                 pi->qsize_txq = qsize;
5133
5134         end_synchronized_op(sc, LOCK_HELD);
5135         return (rc);
5136 }
5137
5138 static int
5139 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
5140 {
5141         struct adapter *sc = arg1;
5142         int reg = arg2;
5143         uint64_t val;
5144
5145         val = t4_read_reg64(sc, reg);
5146
5147         return (sysctl_handle_64(oidp, &val, 0, req));
5148 }
5149
/*
 * Report the chip temperature as obtained from the firmware's diagnostic
 * temperature parameter.  Read-only.
 */
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	/* Serialize mailbox access with other adapter operations. */
	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	/* DEV/DIAG/TMP: ask the firmware for the diag temperature reading. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}
5174
5175 #ifdef SBUF_DRAIN
/*
 * Dump the TP congestion control table: for each congestion control
 * window, the 16 per-MTU additive increments followed by the window's
 * parameters (a_wnd and the decrement factor).  Read-only.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* Human readable forms of the 3-bit decrement factor encoding. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	/* Two output lines per window: increments for MTUs 0-7, then 8-15. */
	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5213
/*
 * Names of the CIM queues, indexed the same way sysctl_cim_ibq_obq's arg2
 * is: the 6 IBQs first, then the OBQs (T5 and later have two extra).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
5219
5220 static int
5221 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5222 {
5223         struct adapter *sc = arg1;
5224         struct sbuf *sb;
5225         int rc, i, n, qid = arg2;
5226         uint32_t *buf, *p;
5227         char *qtype;
5228         u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5229
5230         KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5231             ("%s: bad qid %d\n", __func__, qid));
5232
5233         if (qid < CIM_NUM_IBQ) {
5234                 /* inbound queue */
5235                 qtype = "IBQ";
5236                 n = 4 * CIM_IBQ_SIZE;
5237                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5238                 rc = t4_read_cim_ibq(sc, qid, buf, n);
5239         } else {
5240                 /* outbound queue */
5241                 qtype = "OBQ";
5242                 qid -= CIM_NUM_IBQ;
5243                 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5244                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5245                 rc = t4_read_cim_obq(sc, qid, buf, n);
5246         }
5247
5248         if (rc < 0) {
5249                 rc = -rc;
5250                 goto done;
5251         }
5252         n = rc * sizeof(uint32_t);      /* rc has # of words actually read */
5253
5254         rc = sysctl_wire_old_buffer(req, 0);
5255         if (rc != 0)
5256                 goto done;
5257
5258         sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5259         if (sb == NULL) {
5260                 rc = ENOMEM;
5261                 goto done;
5262         }
5263
5264         sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
5265         for (i = 0, p = buf; i < n; i += 16, p += 4)
5266                 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5267                     p[2], p[3]);
5268
5269         rc = sbuf_finish(sb);
5270         sbuf_delete(sb);
5271 done:
5272         free(buf, M_CXGBE);
5273         return (rc);
5274 }
5275
5276 static int
5277 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5278 {
5279         struct adapter *sc = arg1;
5280         u_int cfg;
5281         struct sbuf *sb;
5282         uint32_t *buf, *p;
5283         int rc;
5284
5285         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5286         if (rc != 0)
5287                 return (rc);
5288
5289         rc = sysctl_wire_old_buffer(req, 0);
5290         if (rc != 0)
5291                 return (rc);
5292
5293         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5294         if (sb == NULL)
5295                 return (ENOMEM);
5296
5297         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5298             M_ZERO | M_WAITOK);
5299
5300         rc = -t4_cim_read_la(sc, buf, NULL);
5301         if (rc != 0)
5302                 goto done;
5303
5304         sbuf_printf(sb, "Status   Data      PC%s",
5305             cfg & F_UPDBGLACAPTPCONLY ? "" :
5306             "     LS0Stat  LS0Addr             LS0Data");
5307
5308         KASSERT((sc->params.cim_la_size & 7) == 0,
5309             ("%s: p will walk off the end of buf", __func__));
5310
5311         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5312                 if (cfg & F_UPDBGLACAPTPCONLY) {
5313                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5314                             p[6], p[7]);
5315                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5316                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5317                             p[4] & 0xff, p[5] >> 8);
5318                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5319                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5320                             p[1] & 0xf, p[2] >> 4);
5321                 } else {
5322                         sbuf_printf(sb,
5323                             "\n  %02x   %x%07x %x%07x %08x %08x "
5324                             "%08x%08x%08x%08x",
5325                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5326                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5327                             p[6], p[7]);
5328                 }
5329         }
5330
5331         rc = sbuf_finish(sb);
5332         sbuf_delete(sb);
5333 done:
5334         free(buf, M_CXGBE);
5335         return (rc);
5336 }
5337
/*
 * Dump the CIM MA logic analyzer.  The hardware provides two captures of
 * CIM_MALA_SIZE entries each, 5 words per entry; the first is printed as
 * raw words and the second is decoded field by field.  Read-only.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Room for both captures: 2 x CIM_MALA_SIZE entries of 5 words. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	/* First capture: raw 160-bit entries, most significant word first. */
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Second capture: decoded fields (p now points at its start). */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5381
5382 static int
5383 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5384 {
5385         struct adapter *sc = arg1;
5386         u_int i;
5387         struct sbuf *sb;
5388         uint32_t *buf, *p;
5389         int rc;
5390
5391         rc = sysctl_wire_old_buffer(req, 0);
5392         if (rc != 0)
5393                 return (rc);
5394
5395         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5396         if (sb == NULL)
5397                 return (ENOMEM);
5398
5399         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5400             M_ZERO | M_WAITOK);
5401
5402         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5403         p = buf;
5404
5405         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5406         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5407                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5408                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5409                     p[4], p[3], p[2], p[1], p[0]);
5410         }
5411
5412         sbuf_printf(sb, "\n\nCntl ID               Data");
5413         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5414                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5415                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5416         }
5417
5418         rc = sbuf_finish(sb);
5419         sbuf_delete(sb);
5420         free(buf, M_CXGBE);
5421         return (rc);
5422 }
5423
/*
 * Dump the configuration and status of every CIM queue: base address,
 * size, threshold (IBQs only), read/write pointers and SOP/EOP counters.
 * T4 and T5 expose the pointers at different register addresses and T5
 * has two extra OBQs.  Read-only.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	/* 2 words of write-pointer state per OBQ. */
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	/* 4 words of pointer/counter state per queue. */
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* IBQs first (they have a threshold), then the OBQs. */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5482
5483 static int
5484 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5485 {
5486         struct adapter *sc = arg1;
5487         struct sbuf *sb;
5488         int rc;
5489         struct tp_cpl_stats stats;
5490
5491         rc = sysctl_wire_old_buffer(req, 0);
5492         if (rc != 0)
5493                 return (rc);
5494
5495         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5496         if (sb == NULL)
5497                 return (ENOMEM);
5498
5499         t4_tp_get_cpl_stats(sc, &stats);
5500
5501         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5502             "channel 3\n");
5503         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5504                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5505         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5506                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5507
5508         rc = sbuf_finish(sb);
5509         sbuf_delete(sb);
5510
5511         return (rc);
5512 }
5513
5514 static int
5515 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5516 {
5517         struct adapter *sc = arg1;
5518         struct sbuf *sb;
5519         int rc;
5520         struct tp_usm_stats stats;
5521
5522         rc = sysctl_wire_old_buffer(req, 0);
5523         if (rc != 0)
5524                 return(rc);
5525
5526         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5527         if (sb == NULL)
5528                 return (ENOMEM);
5529
5530         t4_get_usm_stats(sc, &stats);
5531
5532         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5533         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5534         sbuf_printf(sb, "Drops:  %u", stats.drops);
5535
5536         rc = sbuf_finish(sb);
5537         sbuf_delete(sb);
5538
5539         return (rc);
5540 }
5541
/*
 * Human readable names for the firmware devlog severity levels, indexed
 * by the FW_DEVLOG_LEVEL_* values found in each log entry.
 */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5550
/*
 * Human readable names for the firmware devlog facilities, indexed by
 * the FW_DEVLOG_FACILITY_* values found in each log entry.
 */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_CF]		= "CF",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5577
5578 static int
5579 sysctl_devlog(SYSCTL_HANDLER_ARGS)
5580 {
5581         struct adapter *sc = arg1;
5582         struct devlog_params *dparams = &sc->params.devlog;
5583         struct fw_devlog_e *buf, *e;
5584         int i, j, rc, nentries, first = 0, m;
5585         struct sbuf *sb;
5586         uint64_t ftstamp = UINT64_MAX;
5587
5588         if (dparams->start == 0) {
5589                 dparams->memtype = FW_MEMTYPE_EDC0;
5590                 dparams->start = 0x84000;
5591                 dparams->size = 32768;
5592         }
5593
5594         nentries = dparams->size / sizeof(struct fw_devlog_e);
5595
5596         buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5597         if (buf == NULL)
5598                 return (ENOMEM);
5599
5600         m = fwmtype_to_hwmtype(dparams->memtype);
5601         rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5602         if (rc != 0)
5603                 goto done;
5604
5605         for (i = 0; i < nentries; i++) {
5606                 e = &buf[i];
5607
5608                 if (e->timestamp == 0)
5609                         break;  /* end */
5610
5611                 e->timestamp = be64toh(e->timestamp);
5612                 e->seqno = be32toh(e->seqno);
5613                 for (j = 0; j < 8; j++)
5614                         e->params[j] = be32toh(e->params[j]);
5615
5616                 if (e->timestamp < ftstamp) {
5617                         ftstamp = e->timestamp;
5618                         first = i;
5619                 }
5620         }
5621
5622         if (buf[first].timestamp == 0)
5623                 goto done;      /* nothing in the log */
5624
5625         rc = sysctl_wire_old_buffer(req, 0);
5626         if (rc != 0)
5627                 goto done;
5628
5629         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5630         if (sb == NULL) {
5631                 rc = ENOMEM;
5632                 goto done;
5633         }
5634         sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5635             "Seq#", "Tstamp", "Level", "Facility", "Message");
5636
5637         i = first;
5638         do {
5639                 e = &buf[i];
5640                 if (e->timestamp == 0)
5641                         break;  /* end */
5642
5643                 sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5644                     e->seqno, e->timestamp,
5645                     (e->level < nitems(devlog_level_strings) ?
5646                         devlog_level_strings[e->level] : "UNKNOWN"),
5647                     (e->facility < nitems(devlog_facility_strings) ?
5648                         devlog_facility_strings[e->facility] : "UNKNOWN"));
5649                 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5650                     e->params[2], e->params[3], e->params[4],
5651                     e->params[5], e->params[6], e->params[7]);
5652
5653                 if (++i == nentries)
5654                         i = 0;
5655         } while (i != first);
5656
5657         rc = sbuf_finish(sb);
5658         sbuf_delete(sb);
5659 done:
5660         free(buf, M_CXGBE);
5661         return (rc);
5662 }
5663
5664 static int
5665 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5666 {
5667         struct adapter *sc = arg1;
5668         struct sbuf *sb;
5669         int rc;
5670         struct tp_fcoe_stats stats[4];
5671
5672         rc = sysctl_wire_old_buffer(req, 0);
5673         if (rc != 0)
5674                 return (rc);
5675
5676         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5677         if (sb == NULL)
5678                 return (ENOMEM);
5679
5680         t4_get_fcoe_stats(sc, 0, &stats[0]);
5681         t4_get_fcoe_stats(sc, 1, &stats[1]);
5682         t4_get_fcoe_stats(sc, 2, &stats[2]);
5683         t4_get_fcoe_stats(sc, 3, &stats[3]);
5684
5685         sbuf_printf(sb, "                   channel 0        channel 1        "
5686             "channel 2        channel 3\n");
5687         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5688             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5689             stats[3].octetsDDP);
5690         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5691             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5692         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5693             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5694             stats[3].framesDrop);
5695
5696         rc = sbuf_finish(sb);
5697         sbuf_delete(sb);
5698
5699         return (rc);
5700 }
5701
5702 static int
5703 sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5704 {
5705         struct adapter *sc = arg1;
5706         struct sbuf *sb;
5707         int rc, i;
5708         unsigned int map, kbps, ipg, mode;
5709         unsigned int pace_tab[NTX_SCHED];
5710
5711         rc = sysctl_wire_old_buffer(req, 0);
5712         if (rc != 0)
5713                 return (rc);
5714
5715         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5716         if (sb == NULL)
5717                 return (ENOMEM);
5718
5719         map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5720         mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5721         t4_read_pace_tbl(sc, pace_tab);
5722
5723         sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5724             "Class IPG (0.1 ns)   Flow IPG (us)");
5725
5726         for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5727                 t4_get_tx_sched(sc, i, &kbps, &ipg);
5728                 sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5729                     (mode & (1 << i)) ? "flow" : "class", map & 3);
5730                 if (kbps)
5731                         sbuf_printf(sb, "%9u     ", kbps);
5732                 else
5733                         sbuf_printf(sb, " disabled     ");
5734
5735                 if (ipg)
5736                         sbuf_printf(sb, "%13u        ", ipg);
5737                 else
5738                         sbuf_printf(sb, "     disabled        ");
5739
5740                 if (pace_tab[i])
5741                         sbuf_printf(sb, "%10u", pace_tab[i]);
5742                 else
5743                         sbuf_printf(sb, "  disabled");
5744         }
5745
5746         rc = sbuf_finish(sb);
5747         sbuf_delete(sb);
5748
5749         return (rc);
5750 }
5751
5752 static int
5753 sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5754 {
5755         struct adapter *sc = arg1;
5756         struct sbuf *sb;
5757         int rc, i, j;
5758         uint64_t *p0, *p1;
5759         struct lb_port_stats s[2];
5760         static const char *stat_name[] = {
5761                 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5762                 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5763                 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
5764                 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5765                 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5766                 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5767                 "BG2FramesTrunc:", "BG3FramesTrunc:"
5768         };
5769
5770         rc = sysctl_wire_old_buffer(req, 0);
5771         if (rc != 0)
5772                 return (rc);
5773
5774         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5775         if (sb == NULL)
5776                 return (ENOMEM);
5777
5778         memset(s, 0, sizeof(s));
5779
5780         for (i = 0; i < 4; i += 2) {
5781                 t4_get_lb_stats(sc, i, &s[0]);
5782                 t4_get_lb_stats(sc, i + 1, &s[1]);
5783
5784                 p0 = &s[0].octets;
5785                 p1 = &s[1].octets;
5786                 sbuf_printf(sb, "%s                       Loopback %u"
5787                     "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5788
5789                 for (j = 0; j < nitems(stat_name); j++)
5790                         sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5791                                    *p0++, *p1++);
5792         }
5793
5794         rc = sbuf_finish(sb);
5795         sbuf_delete(sb);
5796
5797         return (rc);
5798 }
5799
5800 static int
5801 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5802 {
5803         int rc = 0;
5804         struct port_info *pi = arg1;
5805         struct sbuf *sb;
5806         static const char *linkdnreasons[] = {
5807                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5808                 "PHY overheated", "unknown", "rx los", "reserved7"
5809         };
5810
5811         rc = sysctl_wire_old_buffer(req, 0);
5812         if (rc != 0)
5813                 return(rc);
5814         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5815         if (sb == NULL)
5816                 return (ENOMEM);
5817
5818         if (pi->linkdnrc < 0)
5819                 sbuf_printf(sb, "n/a");
5820         else if (pi->linkdnrc < nitems(linkdnreasons))
5821                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5822         else
5823                 sbuf_printf(sb, "%d", pi->linkdnrc);
5824
5825         rc = sbuf_finish(sb);
5826         sbuf_delete(sb);
5827
5828         return (rc);
5829 }
5830
/*
 * One contiguous range in the adapter's memory map.  'idx' indexes a name
 * table (memory[] / region[] in sysctl_meminfo); an idx past the end of the
 * table marks the entry as hidden.  A limit of 0 means "unknown, extends to
 * the next region" and is patched up before display.
 */
struct mem_desc {
        unsigned int base;      /* first address of the range */
        unsigned int limit;     /* last address (inclusive), or 0 */
        unsigned int idx;       /* index into the name table */
};
5836
5837 static int
5838 mem_desc_cmp(const void *a, const void *b)
5839 {
5840         return ((const struct mem_desc *)a)->base -
5841                ((const struct mem_desc *)b)->base;
5842 }
5843
/*
 * Print one named memory region as "name  from-to [size]".  Empty regions
 * (size 0) are suppressed.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        unsigned int len;

        len = to - from + 1;
        if (len == 0)
                return;

        /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
        sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, len);
}
5857
5858 static int
5859 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5860 {
5861         struct adapter *sc = arg1;
5862         struct sbuf *sb;
5863         int rc, i, n;
5864         uint32_t lo, hi, used, alloc;
5865         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5866         static const char *region[] = {
5867                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5868                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5869                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5870                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5871                 "RQUDP region:", "PBL region:", "TXPBL region:",
5872                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5873                 "On-chip queues:"
5874         };
5875         struct mem_desc avail[4];
5876         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5877         struct mem_desc *md = mem;
5878
5879         rc = sysctl_wire_old_buffer(req, 0);
5880         if (rc != 0)
5881                 return (rc);
5882
5883         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5884         if (sb == NULL)
5885                 return (ENOMEM);
5886
5887         for (i = 0; i < nitems(mem); i++) {
5888                 mem[i].limit = 0;
5889                 mem[i].idx = i;
5890         }
5891
5892         /* Find and sort the populated memory ranges */
5893         i = 0;
5894         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5895         if (lo & F_EDRAM0_ENABLE) {
5896                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5897                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5898                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5899                 avail[i].idx = 0;
5900                 i++;
5901         }
5902         if (lo & F_EDRAM1_ENABLE) {
5903                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5904                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5905                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5906                 avail[i].idx = 1;
5907                 i++;
5908         }
5909         if (lo & F_EXT_MEM_ENABLE) {
5910                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5911                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5912                 avail[i].limit = avail[i].base +
5913                     (G_EXT_MEM_SIZE(hi) << 20);
5914                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5915                 i++;
5916         }
5917         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5918                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5919                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5920                 avail[i].limit = avail[i].base +
5921                     (G_EXT_MEM1_SIZE(hi) << 20);
5922                 avail[i].idx = 4;
5923                 i++;
5924         }
5925         if (!i)                                    /* no memory available */
5926                 return 0;
5927         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5928
5929         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5930         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5931         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5932         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5933         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5934         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5935         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5936         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5937         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5938
5939         /* the next few have explicit upper bounds */
5940         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5941         md->limit = md->base - 1 +
5942                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5943                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5944         md++;
5945
5946         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5947         md->limit = md->base - 1 +
5948                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5949                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5950         md++;
5951
5952         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5953                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5954                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5955                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5956         } else {
5957                 md->base = 0;
5958                 md->idx = nitems(region);  /* hide it */
5959         }
5960         md++;
5961
5962 #define ulp_region(reg) \
5963         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5964         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5965
5966         ulp_region(RX_ISCSI);
5967         ulp_region(RX_TDDP);
5968         ulp_region(TX_TPT);
5969         ulp_region(RX_STAG);
5970         ulp_region(RX_RQ);
5971         ulp_region(RX_RQUDP);
5972         ulp_region(RX_PBL);
5973         ulp_region(TX_PBL);
5974 #undef ulp_region
5975
5976         md->base = 0;
5977         md->idx = nitems(region);
5978         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5979                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5980                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5981                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5982         }
5983         md++;
5984
5985         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5986         md->limit = md->base + sc->tids.ntids - 1;
5987         md++;
5988         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5989         md->limit = md->base + sc->tids.ntids - 1;
5990         md++;
5991
5992         md->base = sc->vres.ocq.start;
5993         if (sc->vres.ocq.size)
5994                 md->limit = md->base + sc->vres.ocq.size - 1;
5995         else
5996                 md->idx = nitems(region);  /* hide it */
5997         md++;
5998
5999         /* add any address-space holes, there can be up to 3 */
6000         for (n = 0; n < i - 1; n++)
6001                 if (avail[n].limit < avail[n + 1].base)
6002                         (md++)->base = avail[n].limit;
6003         if (avail[n].limit)
6004                 (md++)->base = avail[n].limit;
6005
6006         n = md - mem;
6007         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6008
6009         for (lo = 0; lo < i; lo++)
6010                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
6011                                 avail[lo].limit - 1);
6012
6013         sbuf_printf(sb, "\n");
6014         for (i = 0; i < n; i++) {
6015                 if (mem[i].idx >= nitems(region))
6016                         continue;                        /* skip holes */
6017                 if (!mem[i].limit)
6018                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
6019                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
6020                                 mem[i].limit);
6021         }
6022
6023         sbuf_printf(sb, "\n");
6024         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
6025         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
6026         mem_region_show(sb, "uP RAM:", lo, hi);
6027
6028         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
6029         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
6030         mem_region_show(sb, "uP Extmem2:", lo, hi);
6031
6032         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
6033         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
6034                    G_PMRXMAXPAGE(lo),
6035                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
6036                    (lo & F_PMRXNUMCHN) ? 2 : 1);
6037
6038         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
6039         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
6040         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
6041                    G_PMTXMAXPAGE(lo),
6042                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
6043                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
6044         sbuf_printf(sb, "%u p-structs\n",
6045                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
6046
6047         for (i = 0; i < 4; i++) {
6048                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
6049                 if (is_t4(sc)) {
6050                         used = G_USED(lo);
6051                         alloc = G_ALLOC(lo);
6052                 } else {
6053                         used = G_T5_USED(lo);
6054                         alloc = G_T5_ALLOC(lo);
6055                 }
6056                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
6057                            i, used, alloc);
6058         }
6059         for (i = 0; i < 4; i++) {
6060                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
6061                 if (is_t4(sc)) {
6062                         used = G_USED(lo);
6063                         alloc = G_ALLOC(lo);
6064                 } else {
6065                         used = G_T5_USED(lo);
6066                         alloc = G_T5_ALLOC(lo);
6067                 }
6068                 sbuf_printf(sb,
6069                            "\nLoopback %d using %u pages out of %u allocated",
6070                            i, used, alloc);
6071         }
6072
6073         rc = sbuf_finish(sb);
6074         sbuf_delete(sb);
6075
6076         return (rc);
6077 }
6078
/*
 * Decode an MPS TCAM entry's (x, y) register pair into an Ethernet address
 * and a mask.  The address is the low 6 bytes of y in network byte order;
 * presumably x|y marks the significant/don't-care bits of the entry --
 * TODO confirm against the T4/T5 MPS TCAM documentation.
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
        *mask = x | y;
        y = htobe64(y);
        /* Skip the 2 high-order bytes; a MAC address is only 6 bytes. */
        memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
6086
/*
 * Dump the MPS TCAM: one line per valid entry with its MAC address, mask,
 * classification SRAM fields, and (via a firmware LDST command) the
 * replication bitmap for entries that have replication enabled.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* T4 and T5 have different TCAM sizes. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* Skip entries where x and y overlap (not a usable entry). */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /*
                         * The replication map isn't in a directly readable
                         * register; ask the firmware for it with an LDST
                         * read of the MPS address space.
                         */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;  /* interrupted; bail out with rc set */
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        /*
                         * A mailbox failure is reported inline for this
                         * entry only; it does not abort the whole dump.
                         */
                        if (rc != 0) {
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /* If we broke out early, report rc but still flush the sbuf. */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6176
6177 static int
6178 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6179 {
6180         struct adapter *sc = arg1;
6181         struct sbuf *sb;
6182         int rc;
6183         uint16_t mtus[NMTUS];
6184
6185         rc = sysctl_wire_old_buffer(req, 0);
6186         if (rc != 0)
6187                 return (rc);
6188
6189         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6190         if (sb == NULL)
6191                 return (ENOMEM);
6192
6193         t4_read_mtu_tbl(sc, mtus, NULL);
6194
6195         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6196             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6197             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6198             mtus[14], mtus[15]);
6199
6200         rc = sbuf_finish(sb);
6201         sbuf_delete(sb);
6202
6203         return (rc);
6204 }
6205
6206 static int
6207 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6208 {
6209         struct adapter *sc = arg1;
6210         struct sbuf *sb;
6211         int rc, i;
6212         uint32_t cnt[PM_NSTATS];
6213         uint64_t cyc[PM_NSTATS];
6214         static const char *rx_stats[] = {
6215                 "Read:", "Write bypass:", "Write mem:", "Flush:"
6216         };
6217         static const char *tx_stats[] = {
6218                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6219         };
6220
6221         rc = sysctl_wire_old_buffer(req, 0);
6222         if (rc != 0)
6223                 return (rc);
6224
6225         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6226         if (sb == NULL)
6227                 return (ENOMEM);
6228
6229         t4_pmtx_get_stats(sc, cnt, cyc);
6230         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6231         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6232                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6233                     cyc[i]);
6234
6235         t4_pmrx_get_stats(sc, cnt, cyc);
6236         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6237         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6238                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6239                     cyc[i]);
6240
6241         rc = sbuf_finish(sb);
6242         sbuf_delete(sb);
6243
6244         return (rc);
6245 }
6246
6247 static int
6248 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6249 {
6250         struct adapter *sc = arg1;
6251         struct sbuf *sb;
6252         int rc;
6253         struct tp_rdma_stats stats;
6254
6255         rc = sysctl_wire_old_buffer(req, 0);
6256         if (rc != 0)
6257                 return (rc);
6258
6259         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6260         if (sb == NULL)
6261                 return (ENOMEM);
6262
6263         t4_tp_get_rdma_stats(sc, &stats);
6264         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6265         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6266
6267         rc = sbuf_finish(sb);
6268         sbuf_delete(sb);
6269
6270         return (rc);
6271 }
6272
6273 static int
6274 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6275 {
6276         struct adapter *sc = arg1;
6277         struct sbuf *sb;
6278         int rc;
6279         struct tp_tcp_stats v4, v6;
6280
6281         rc = sysctl_wire_old_buffer(req, 0);
6282         if (rc != 0)
6283                 return (rc);
6284
6285         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6286         if (sb == NULL)
6287                 return (ENOMEM);
6288
6289         t4_tp_get_tcp_stats(sc, &v4, &v6);
6290         sbuf_printf(sb,
6291             "                                IP                 IPv6\n");
6292         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6293             v4.tcpOutRsts, v6.tcpOutRsts);
6294         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6295             v4.tcpInSegs, v6.tcpInSegs);
6296         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6297             v4.tcpOutSegs, v6.tcpOutSegs);
6298         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6299             v4.tcpRetransSegs, v6.tcpRetransSegs);
6300
6301         rc = sbuf_finish(sb);
6302         sbuf_delete(sb);
6303
6304         return (rc);
6305 }
6306
/*
 * Describe the adapter's TID (tag identifier) ranges -- ATIDs, TIDs, STIDs,
 * FTIDs, ETIDs -- and current usage where tracked.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        if (t->ntids) {
                /*
                 * With the LE hash enabled the TID space is split: a low
                 * range below the server index and a high range starting
                 * at the hash base.
                 */
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        if (t->netids) {
                sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
                    t->etid_base + t->netids - 1);
        }

        /* Hardware's own view of active IPv4/IPv6 TID users. */
        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6371
/*
 * Report TP error statistics: per-channel MAC/header/TCP input errors and
 * drop counters, followed by the two global offload counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_err_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_err_stats(sc, &stats);

        sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
                      "channel 3\n");
        sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
            stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
            stats.macInErrs[3]);
        sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
            stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
            stats.hdrInErrs[3]);
        sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
            stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
            stats.tcpInErrs[3]);
        sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
            stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
            stats.tcp6InErrs[3]);
        sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
            stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
            stats.tnlCongDrops[3]);
        sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
            stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
            stats.tnlTxDrops[3]);
        sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
            stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
            stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
        sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
            stats.ofldChanDrops[0], stats.ofldChanDrops[1],
            stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
        /* These two counters are adapter-wide, not per-channel. */
        sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
            stats.ofldNoNeigh, stats.ofldCongDefer);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6424
/*
 * Describes one named bit-field within a 64-bit logic-analyzer sample.
 * Tables of these are NULL-name terminated (see tp_la0/tp_la1/tp_la2).
 */
struct field_desc {
        const char *name;       /* display name; NULL terminates a table */
        u_int start;            /* bit position of the field's LSB */
        u_int width;            /* field width in bits */
};
6430
6431 static void
6432 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6433 {
6434         char buf[32];
6435         int line_size = 0;
6436
6437         while (f->name) {
6438                 uint64_t mask = (1ULL << f->width) - 1;
6439                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6440                     ((uintmax_t)v >> f->start) & mask);
6441
6442                 if (line_size + len >= 79) {
6443                         line_size = 8;
6444                         sbuf_printf(sb, "\n        ");
6445                 }
6446                 sbuf_printf(sb, "%s ", buf);
6447                 line_size += len + 1;
6448                 f++;
6449         }
6450         sbuf_printf(sb, "\n");
6451 }
6452
6453 static struct field_desc tp_la0[] = {
6454         { "RcfOpCodeOut", 60, 4 },
6455         { "State", 56, 4 },
6456         { "WcfState", 52, 4 },
6457         { "RcfOpcSrcOut", 50, 2 },
6458         { "CRxError", 49, 1 },
6459         { "ERxError", 48, 1 },
6460         { "SanityFailed", 47, 1 },
6461         { "SpuriousMsg", 46, 1 },
6462         { "FlushInputMsg", 45, 1 },
6463         { "FlushInputCpl", 44, 1 },
6464         { "RssUpBit", 43, 1 },
6465         { "RssFilterHit", 42, 1 },
6466         { "Tid", 32, 10 },
6467         { "InitTcb", 31, 1 },
6468         { "LineNumber", 24, 7 },
6469         { "Emsg", 23, 1 },
6470         { "EdataOut", 22, 1 },
6471         { "Cmsg", 21, 1 },
6472         { "CdataOut", 20, 1 },
6473         { "EreadPdu", 19, 1 },
6474         { "CreadPdu", 18, 1 },
6475         { "TunnelPkt", 17, 1 },
6476         { "RcfPeerFin", 16, 1 },
6477         { "RcfReasonOut", 12, 4 },
6478         { "TxCchannel", 10, 2 },
6479         { "RcfTxChannel", 8, 2 },
6480         { "RxEchannel", 6, 2 },
6481         { "RcfRxChannel", 5, 1 },
6482         { "RcfDataOutSrdy", 4, 1 },
6483         { "RxDvld", 3, 1 },
6484         { "RxOoDvld", 2, 1 },
6485         { "RxCongestion", 1, 1 },
6486         { "TxCongestion", 0, 1 },
6487         { NULL }
6488 };
6489
6490 static struct field_desc tp_la1[] = {
6491         { "CplCmdIn", 56, 8 },
6492         { "CplCmdOut", 48, 8 },
6493         { "ESynOut", 47, 1 },
6494         { "EAckOut", 46, 1 },
6495         { "EFinOut", 45, 1 },
6496         { "ERstOut", 44, 1 },
6497         { "SynIn", 43, 1 },
6498         { "AckIn", 42, 1 },
6499         { "FinIn", 41, 1 },
6500         { "RstIn", 40, 1 },
6501         { "DataIn", 39, 1 },
6502         { "DataInVld", 38, 1 },
6503         { "PadIn", 37, 1 },
6504         { "RxBufEmpty", 36, 1 },
6505         { "RxDdp", 35, 1 },
6506         { "RxFbCongestion", 34, 1 },
6507         { "TxFbCongestion", 33, 1 },
6508         { "TxPktSumSrdy", 32, 1 },
6509         { "RcfUlpType", 28, 4 },
6510         { "Eread", 27, 1 },
6511         { "Ebypass", 26, 1 },
6512         { "Esave", 25, 1 },
6513         { "Static0", 24, 1 },
6514         { "Cread", 23, 1 },
6515         { "Cbypass", 22, 1 },
6516         { "Csave", 21, 1 },
6517         { "CPktOut", 20, 1 },
6518         { "RxPagePoolFull", 18, 2 },
6519         { "RxLpbkPkt", 17, 1 },
6520         { "TxLpbkPkt", 16, 1 },
6521         { "RxVfValid", 15, 1 },
6522         { "SynLearned", 14, 1 },
6523         { "SetDelEntry", 13, 1 },
6524         { "SetInvEntry", 12, 1 },
6525         { "CpcmdDvld", 11, 1 },
6526         { "CpcmdSave", 10, 1 },
6527         { "RxPstructsFull", 8, 2 },
6528         { "EpcmdDvld", 7, 1 },
6529         { "EpcmdFlush", 6, 1 },
6530         { "EpcmdTrimPrefix", 5, 1 },
6531         { "EpcmdTrimPostfix", 4, 1 },
6532         { "ERssIp4Pkt", 3, 1 },
6533         { "ERssIp6Pkt", 2, 1 },
6534         { "ERssTcpUdpPkt", 1, 1 },
6535         { "ERssFceFipPkt", 0, 1 },
6536         { NULL }
6537 };
6538
6539 static struct field_desc tp_la2[] = {
6540         { "CplCmdIn", 56, 8 },
6541         { "MpsVfVld", 55, 1 },
6542         { "MpsPf", 52, 3 },
6543         { "MpsVf", 44, 8 },
6544         { "SynIn", 43, 1 },
6545         { "AckIn", 42, 1 },
6546         { "FinIn", 41, 1 },
6547         { "RstIn", 40, 1 },
6548         { "DataIn", 39, 1 },
6549         { "DataInVld", 38, 1 },
6550         { "PadIn", 37, 1 },
6551         { "RxBufEmpty", 36, 1 },
6552         { "RxDdp", 35, 1 },
6553         { "RxFbCongestion", 34, 1 },
6554         { "TxFbCongestion", 33, 1 },
6555         { "TxPktSumSrdy", 32, 1 },
6556         { "RcfUlpType", 28, 4 },
6557         { "Eread", 27, 1 },
6558         { "Ebypass", 26, 1 },
6559         { "Esave", 25, 1 },
6560         { "Static0", 24, 1 },
6561         { "Cread", 23, 1 },
6562         { "Cbypass", 22, 1 },
6563         { "Csave", 21, 1 },
6564         { "CPktOut", 20, 1 },
6565         { "RxPagePoolFull", 18, 2 },
6566         { "RxLpbkPkt", 17, 1 },
6567         { "TxLpbkPkt", 16, 1 },
6568         { "RxVfValid", 15, 1 },
6569         { "SynLearned", 14, 1 },
6570         { "SetDelEntry", 13, 1 },
6571         { "SetInvEntry", 12, 1 },
6572         { "CpcmdDvld", 11, 1 },
6573         { "CpcmdSave", 10, 1 },
6574         { "RxPstructsFull", 8, 2 },
6575         { "EpcmdDvld", 7, 1 },
6576         { "EpcmdFlush", 6, 1 },
6577         { "EpcmdTrimPrefix", 5, 1 },
6578         { "EpcmdTrimPostfix", 4, 1 },
6579         { "ERssIp4Pkt", 3, 1 },
6580         { "ERssIp6Pkt", 2, 1 },
6581         { "ERssTcpUdpPkt", 1, 1 },
6582         { "ERssFceFipPkt", 0, 1 },
6583         { NULL }
6584 };
6585
6586 static void
6587 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6588 {
6589
6590         field_desc_show(sb, *p, tp_la0);
6591 }
6592
6593 static void
6594 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6595 {
6596
6597         if (idx)
6598                 sbuf_printf(sb, "\n");
6599         field_desc_show(sb, p[0], tp_la0);
6600         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6601                 field_desc_show(sb, p[1], tp_la0);
6602 }
6603
6604 static void
6605 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6606 {
6607
6608         if (idx)
6609                 sbuf_printf(sb, "\n");
6610         field_desc_show(sb, p[0], tp_la0);
6611         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6612                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6613 }
6614
/*
 * Sysctl handler: dump the TP logic analyzer capture.  The capture mode
 * (DBGLAMODE in TP_DBG_LA_CONFIG) determines whether samples are shown
 * individually or in pairs, and which decode tables apply.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        uint64_t *buf, *p;
        int rc;
        u_int i, inc;
        void (*show_func)(struct sbuf *, uint64_t *, int);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        /* M_WAITOK allocations never fail; no NULL check needed. */
        buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

        t4_tp_read_la(sc, buf, NULL);
        p = buf;

        /* Pick stride and decoder based on the hardware capture mode. */
        switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
        case 2:
                inc = 2;
                show_func = tp_la_show2;
                break;
        case 3:
                inc = 2;
                show_func = tp_la_show3;
                break;
        default:
                inc = 1;
                show_func = tp_la_show;
        }

        for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
                (*show_func)(sb, p, i);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
6660
/*
 * Sysctl handler: report per-channel NIC and offload transmit rates
 * (bytes/second) as read from the hardware by t4_get_chan_txrate().
 */
static int
sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        u64 nrate[NCHAN], orate[NCHAN];

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        /* nrate = NIC traffic, orate = offloaded traffic, per channel. */
        t4_get_chan_txrate(sc, nrate, orate);
        sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
                 "channel 3\n");
        sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
            nrate[0], nrate[1], nrate[2], nrate[3]);
        sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
            orate[0], orate[1], orate[2], orate[3]);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6690
/*
 * Sysctl handler: dump the ULP RX logic analyzer.  Each of the
 * ULPRX_LA_SIZE entries is eight 32-bit words; they are printed as a
 * 64-bit Pcmd, a type, a message word, and a 128-bit data field
 * (note the word order in the format below: p[1]p[0] ... p[7..4]).
 */
static int
sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        uint32_t *buf, *p;
        int rc, i;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        /* M_WAITOK allocations never fail; no NULL check needed. */
        buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_ulprx_read_la(sc, buf);
        p = buf;

        sbuf_printf(sb, "      Pcmd        Type   Message"
            "                Data");
        for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
                sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
                    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
6725
/*
 * Sysctl handler: report SGE write-combined work request statistics.
 * Only meaningful when SGE_STAT_CFG selects stat source 7; the STATMODE
 * field selects which pair of counters the TOTAL/MATCH registers hold.
 * If neither condition matches, an empty string is returned.
 */
static int
sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, v;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        v = t4_read_reg(sc, A_SGE_STAT_CFG);
        /* NOTE(review): source 7 appears to be the WC-WR stat source on
         * T5 (G_STATSOURCE_T5) — confirm against the SGE documentation. */
        if (G_STATSOURCE_T5(v) == 7) {
                if (G_STATMODE(v) == 0) {
                        sbuf_printf(sb, "total %d, incomplete %d",
                            t4_read_reg(sc, A_SGE_STAT_TOTAL),
                            t4_read_reg(sc, A_SGE_STAT_MATCH));
                } else if (G_STATMODE(v) == 1) {
                        sbuf_printf(sb, "total %d, data overflow %d",
                            t4_read_reg(sc, A_SGE_STAT_TOTAL),
                            t4_read_reg(sc, A_SGE_STAT_MATCH));
                }
        }
        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6758 #endif
6759
6760 static inline void
6761 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6762 {
6763         struct buf_ring *br;
6764         struct mbuf *m;
6765
6766         TXQ_LOCK_ASSERT_OWNED(txq);
6767
6768         br = txq->br;
6769         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6770         if (m)
6771                 t4_eth_tx(ifp, txq, m);
6772 }
6773
6774 void
6775 t4_tx_callout(void *arg)
6776 {
6777         struct sge_eq *eq = arg;
6778         struct adapter *sc;
6779
6780         if (EQ_TRYLOCK(eq) == 0)
6781                 goto reschedule;
6782
6783         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6784                 EQ_UNLOCK(eq);
6785 reschedule:
6786                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6787                         callout_schedule(&eq->tx_callout, 1);
6788                 return;
6789         }
6790
6791         EQ_LOCK_ASSERT_OWNED(eq);
6792
6793         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6794
6795                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6796                         struct sge_txq *txq = arg;
6797                         struct port_info *pi = txq->ifp->if_softc;
6798
6799                         sc = pi->adapter;
6800                 } else {
6801                         struct sge_wrq *wrq = arg;
6802
6803                         sc = wrq->adapter;
6804                 }
6805
6806                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6807         }
6808
6809         EQ_UNLOCK(eq);
6810 }
6811
6812 void
6813 t4_tx_task(void *arg, int count)
6814 {
6815         struct sge_eq *eq = arg;
6816
6817         EQ_LOCK(eq);
6818         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6819                 struct sge_txq *txq = arg;
6820                 txq_start(txq->ifp, txq);
6821         } else {
6822                 struct sge_wrq *wrq = arg;
6823                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6824         }
6825         EQ_UNLOCK(eq);
6826 }
6827
6828 static uint32_t
6829 fconf_to_mode(uint32_t fconf)
6830 {
6831         uint32_t mode;
6832
6833         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6834             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6835
6836         if (fconf & F_FRAGMENTATION)
6837                 mode |= T4_FILTER_IP_FRAGMENT;
6838
6839         if (fconf & F_MPSHITTYPE)
6840                 mode |= T4_FILTER_MPS_HIT_TYPE;
6841
6842         if (fconf & F_MACMATCH)
6843                 mode |= T4_FILTER_MAC_IDX;
6844
6845         if (fconf & F_ETHERTYPE)
6846                 mode |= T4_FILTER_ETH_TYPE;
6847
6848         if (fconf & F_PROTOCOL)
6849                 mode |= T4_FILTER_IP_PROTO;
6850
6851         if (fconf & F_TOS)
6852                 mode |= T4_FILTER_IP_TOS;
6853
6854         if (fconf & F_VLAN)
6855                 mode |= T4_FILTER_VLAN;
6856
6857         if (fconf & F_VNIC_ID)
6858                 mode |= T4_FILTER_VNIC;
6859
6860         if (fconf & F_PORT)
6861                 mode |= T4_FILTER_PORT;
6862
6863         if (fconf & F_FCOE)
6864                 mode |= T4_FILTER_FCoE;
6865
6866         return (mode);
6867 }
6868
6869 static uint32_t
6870 mode_to_fconf(uint32_t mode)
6871 {
6872         uint32_t fconf = 0;
6873
6874         if (mode & T4_FILTER_IP_FRAGMENT)
6875                 fconf |= F_FRAGMENTATION;
6876
6877         if (mode & T4_FILTER_MPS_HIT_TYPE)
6878                 fconf |= F_MPSHITTYPE;
6879
6880         if (mode & T4_FILTER_MAC_IDX)
6881                 fconf |= F_MACMATCH;
6882
6883         if (mode & T4_FILTER_ETH_TYPE)
6884                 fconf |= F_ETHERTYPE;
6885
6886         if (mode & T4_FILTER_IP_PROTO)
6887                 fconf |= F_PROTOCOL;
6888
6889         if (mode & T4_FILTER_IP_TOS)
6890                 fconf |= F_TOS;
6891
6892         if (mode & T4_FILTER_VLAN)
6893                 fconf |= F_VLAN;
6894
6895         if (mode & T4_FILTER_VNIC)
6896                 fconf |= F_VNIC_ID;
6897
6898         if (mode & T4_FILTER_PORT)
6899                 fconf |= F_PORT;
6900
6901         if (mode & T4_FILTER_FCoE)
6902                 fconf |= F_FCOE;
6903
6904         return (fconf);
6905 }
6906
6907 static uint32_t
6908 fspec_to_fconf(struct t4_filter_specification *fs)
6909 {
6910         uint32_t fconf = 0;
6911
6912         if (fs->val.frag || fs->mask.frag)
6913                 fconf |= F_FRAGMENTATION;
6914
6915         if (fs->val.matchtype || fs->mask.matchtype)
6916                 fconf |= F_MPSHITTYPE;
6917
6918         if (fs->val.macidx || fs->mask.macidx)
6919                 fconf |= F_MACMATCH;
6920
6921         if (fs->val.ethtype || fs->mask.ethtype)
6922                 fconf |= F_ETHERTYPE;
6923
6924         if (fs->val.proto || fs->mask.proto)
6925                 fconf |= F_PROTOCOL;
6926
6927         if (fs->val.tos || fs->mask.tos)
6928                 fconf |= F_TOS;
6929
6930         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6931                 fconf |= F_VLAN;
6932
6933         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6934                 fconf |= F_VNIC_ID;
6935
6936         if (fs->val.iport || fs->mask.iport)
6937                 fconf |= F_PORT;
6938
6939         if (fs->val.fcoe || fs->mask.fcoe)
6940                 fconf |= F_FCOE;
6941
6942         return (fconf);
6943 }
6944
/*
 * Return the global filter mode as T4_FILTER_* flags.  Reads
 * TP_VLAN_PRI_MAP through the indirect TP_PIO interface and resyncs the
 * driver's cached copy if it has drifted from the hardware.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        int rc;
        uint32_t fconf;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getfm");
        if (rc)
                return (rc);

        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
            A_TP_VLAN_PRI_MAP);

        /* Cached copy should match the hardware; warn and resync if not. */
        if (sc->params.tp.vlan_pri_map != fconf) {
                log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
                    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
                    fconf);
                sc->params.tp.vlan_pri_map = fconf;
        }

        *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6971
/*
 * Change the global filter mode.  Refused (EBUSY) while any filter or
 * offloaded connection exists, because the mode change would invalidate
 * them.  The actual hardware update is not implemented yet, so this
 * currently always fails with ENOTSUP after validation.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        uint32_t fconf;
        int rc;

        fconf = mode_to_fconf(mode);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4setfm");
        if (rc)
                return (rc);

        /* Existing filters would be interpreted wrongly under a new mode. */
        if (sc->tids.ftids_in_use > 0) {
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        /* Active offload also depends on the current filter mode. */
        if (sc->offload_map) {
                rc = EBUSY;
                goto done;
        }
#endif

#ifdef notyet
        rc = -t4_set_filter_mode(sc, fconf);
        if (rc == 0)
                sc->filter_mode = fconf;
#else
        rc = ENOTSUP;
#endif

done:
        end_synchronized_op(sc, LOCK_HELD);
        return (rc);
}
7009
/*
 * Read a filter's hit count out of its TCB through memory window 0.
 * On T4 the count is a big-endian 64-bit value at TCB offset 16; on
 * later chips it is a big-endian 32-bit value at offset 24.
 * NOTE(review): offsets come from the hardware TCB layout — confirm
 * against the TCB documentation if it changes.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
        uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
        uint64_t hits;

        /* Aim window 0 at this filter's TCB. */
        memwin_info(sc, 0, &mw_base, NULL);
        off = position_memwin(sc, 0,
            tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
        if (is_t4(sc)) {
                hits = t4_read_reg64(sc, mw_base + off + 16);
                hits = be64toh(hits);
        } else {
                hits = t4_read_reg(sc, mw_base + off + 24);
                hits = be32toh(hits);
        }

        return (hits);
}
7029
/*
 * Return the first valid filter at or after t->idx.  On success t is
 * filled in with the filter's index, L2T/SMT indices, hit count (or
 * UINT64_MAX when hit counting is disabled for that filter), and spec.
 * t->idx is set to 0xffffffff when no valid filter is found.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        int i, rc, nfilters = sc->tids.nftids;
        struct filter_entry *f;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getf");
        if (rc)
                return (rc);

        /* Nothing to report if no filters exist or idx is out of range. */
        if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                goto done;
        }

        /* Linear scan from t->idx for the next valid entry. */
        f = &sc->tids.ftid_tab[t->idx];
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        t->idx = i;
                        t->l2tidx = f->l2t ? f->l2t->idx : 0;
                        t->smtidx = f->smtidx;
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, t->idx);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }

        t->idx = 0xffffffff;
done:
        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
7068
/*
 * Validate and install a new hardware filter at t->idx.  Sends the
 * filter work request and then sleeps until the firmware reply (handled
 * by t4_filter_rpl) marks the entry valid, the reply reports failure
 * (EIO), or the sleep is interrupted (EINPROGRESS - the request is
 * still outstanding).
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters, nports;
        struct filter_entry *f;
        int i, rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;
        nports = sc->params.nports;

        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        if (t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        /* Validate against the global filter mode */
        if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
            sc->params.tp.vlan_pri_map) {
                rc = E2BIG;
                goto done;
        }

        /* A switch filter must egress through a real port. */
        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
                rc = EINVAL;
                goto done;
        }

        if (t->fs.val.iport >= nports) {
                rc = EINVAL;
                goto done;
        }

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq) {
                rc = EINVAL;
                goto done;
        }

        /* IPv6 filter idx must be 4 aligned */
        if (t->fs.type == 1 &&
            ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
                rc = EINVAL;
                goto done;
        }

        /* Lazily allocate the filter table on first use. */
        if (sc->tids.ftid_tab == NULL) {
                KASSERT(sc->tids.ftids_in_use == 0,
                    ("%s: no memory allocated but filters_in_use > 0",
                    __func__));

                sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
                    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
                if (sc->tids.ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
        }

        /*
         * An IPv6 filter occupies 4 consecutive slots; check all of them.
         * An IPv4 filter (fs.type == 0) occupies one, so break after i == 0.
         */
        for (i = 0; i < 4; i++) {
                f = &sc->tids.ftid_tab[t->idx + i];

                if (f->pending || f->valid) {
                        rc = EBUSY;
                        goto done;
                }
                if (f->locked) {
                        rc = EPERM;
                        goto done;
                }

                if (t->fs.type == 0)
                        break;
        }

        f = &sc->tids.ftid_tab[t->idx];
        f->fs = t->fs;

        rc = set_filter_wr(sc, t->idx);
done:
        end_synchronized_op(sc, 0);

        /* Work request sent; wait for the firmware reply to settle it. */
        if (rc == 0) {
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? 0 : EIO;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4setfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }
        return (rc);
}
7183
/*
 * Delete the hardware filter at t->idx.  Sends the delete work request
 * and then sleeps until the firmware reply clears the entry, the entry
 * remains valid (EIO), or the sleep is interrupted (EINPROGRESS - the
 * request is still outstanding).  t->fs is filled in with the deleted
 * filter's spec as a courtesy to the caller.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters;
        struct filter_entry *f;
        int rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;

        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
            t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        f = &sc->tids.ftid_tab[t->idx];

        /* A set or delete is already in flight for this entry. */
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        /* Locked entries are owned by the driver and can't be deleted. */
        if (f->locked) {
                rc = EPERM;
                goto done;
        }

        if (f->valid) {
                t->fs = f->fs;  /* extra info for the caller */
                rc = del_filter_wr(sc, t->idx);
        }

done:
        end_synchronized_op(sc, 0);

        /* Work request sent; wait for the firmware reply to settle it. */
        if (rc == 0) {
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? EIO : 0;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4delfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (rc);
}
7251
7252 static void
7253 clear_filter(struct filter_entry *f)
7254 {
7255         if (f->l2t)
7256                 t4_l2t_release(f->l2t);
7257
7258         bzero(f, sizeof (*f));
7259 }
7260
/*
 * Build and send the FW_FILTER_WR work request that programs the filter
 * at index fidx into the hardware.  Allocates an L2T switching entry
 * first if the filter rewrites the dmac or vlan.  Marks the entry
 * pending; the firmware reply (see t4_filter_rpl) completes it.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
        struct filter_entry *f = &sc->tids.ftid_tab[fidx];
        struct wrqe *wr;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (f->fs.newdmac || f->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                f->l2t = t4_l2t_alloc_switching(sc->l2t);
                if (f->l2t == NULL)
                        return (EAGAIN);
                if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
                    f->fs.dmac)) {
                        t4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return (ENOMEM);
                }
        }

        /* Filter TIDs start at ftid_base in the global TID space. */
        ftid = sc->tids.ftid_base + fidx;

        wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
        if (wr == NULL)
                return (ENOMEM);

        fwr = wrtod(wr);
        bzero(fwr, sizeof (*fwr));

        /* Pack the filter spec into the work request, field by field. */
        fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(ftid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
        fwr->smac_sel = 0;
        /* Replies are steered to the firmware event queue. */
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        if (f->fs.newsmac)
                bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

        /* Completed asynchronously by t4_filter_rpl. */
        f->pending = 1;
        sc->tids.ftids_in_use++;

        t4_wrq_tx(sc, wr);
        return (0);
}
7363
/*
 * Build and send the work request that deletes the filter at index
 * fidx.  Marks the entry pending; the firmware reply (routed to the
 * firmware event queue, see t4_filter_rpl) completes the deletion.
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
        struct filter_entry *f = &sc->tids.ftid_tab[fidx];
        struct wrqe *wr;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* Filter TIDs start at ftid_base in the global TID space. */
        ftid = sc->tids.ftid_base + fidx;

        wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
        if (wr == NULL)
                return (ENOMEM);
        fwr = wrtod(wr);
        bzero(fwr, sizeof (*fwr));

        t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

        f->pending = 1;
        t4_wrq_tx(sc, wr);
        return (0);
}
7386
/*
 * Handle the firmware's reply to an asynchronous hardware-filter add or
 * delete.  Updates the driver's filter_entry state under the ftid lock and
 * wakes up any thread sleeping on the filter table.  Always returns 0.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        unsigned int idx = GET_TID(rpl);
        unsigned int rc;
        struct filter_entry *f;

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));

        if (is_ftid(sc, idx)) {

                idx -= sc->tids.ftid_base;      /* tid -> filter table index */
                f = &sc->tids.ftid_tab[idx];
                rc = G_COOKIE(rpl->cookie);     /* result is in the cookie */

                mtx_lock(&sc->tids.ftid_lock);
                if (rc == FW_FILTER_WR_FLT_ADDED) {
                        KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
                            __func__, idx));
                        /* SMT index assigned by hw is in bits 24-31 of oldval. */
                        f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        if (rc != FW_FILTER_WR_FLT_DELETED) {
                                /* Add or delete failed, display an error */
                                log(LOG_ERR,
                                    "filter %u setup failed with error %u\n",
                                    idx, rc);
                        }

                        /* Filter deleted (or never installed); free the slot. */
                        clear_filter(f);
                        sc->tids.ftids_in_use--;
                }
                /* Wake up anyone sleeping on this filter table. */
                wakeup(&sc->tids.ftid_tab);
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (0);
}
7429
/*
 * Handler for the CHELSIO_T4_GET_SGE_CONTEXT ioctl: read one SGE context
 * (egress, ingress, freelist manager, or congestion manager) into
 * cntxt->data.  Prefers the firmware mailbox and falls back to a direct
 * register-level ("backdoor") read.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
        int rc;

        /* Validate the context id and memory type before doing anything. */
        if (cntxt->cid > M_CTXTQID)
                return (EINVAL);

        if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
            cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
                return (EINVAL);

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
        if (rc)
                return (rc);

        /* Try the firmware mailbox only if the firmware is known-good. */
        if (sc->flags & FW_OK) {
                rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
                    &cntxt->data[0]);
                if (rc == 0)
                        goto done;
        }

        /*
         * Read via firmware failed or wasn't even attempted.  Read directly via
         * the backdoor.
         */
        rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7462
7463 static int
7464 load_fw(struct adapter *sc, struct t4_data *fw)
7465 {
7466         int rc;
7467         uint8_t *fw_data;
7468
7469         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7470         if (rc)
7471                 return (rc);
7472
7473         if (sc->flags & FULL_INIT_DONE) {
7474                 rc = EBUSY;
7475                 goto done;
7476         }
7477
7478         fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7479         if (fw_data == NULL) {
7480                 rc = ENOMEM;
7481                 goto done;
7482         }
7483
7484         rc = copyin(fw->data, fw_data, fw->len);
7485         if (rc == 0)
7486                 rc = -t4_load_fw(sc, fw_data, fw->len);
7487
7488         free(fw_data, M_CXGBE);
7489 done:
7490         end_synchronized_op(sc, 0);
7491         return (rc);
7492 }
7493
/*
 * Handler for the CHELSIO_T4_GET_MEM ioctl: copy mr->len bytes of adapter
 * memory starting at mr->addr out to the user buffer mr->data.  The range is
 * read 32 bits at a time through PCIe memory window 'win', repositioning the
 * window as it goes and copying out one window-sized chunk at a time.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
        uint32_t addr, off, remaining, i, n;
        uint32_t *buf, *b;
        uint32_t mw_base, mw_aperture;
        int rc;
        uint8_t *dst;

        rc = validate_mem_range(sc, mr->addr, mr->len);
        if (rc != 0)
                return (rc);

        memwin_info(sc, win, &mw_base, &mw_aperture);
        /* Bounce buffer: at most one full window aperture at a time. */
        buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
        addr = mr->addr;
        remaining = mr->len;
        dst = (void *)mr->data;

        while (remaining) {
                /* Point the window at addr; off is addr's offset within it. */
                off = position_memwin(sc, win, addr);

                /* number of bytes that we'll copy in the inner loop */
                n = min(remaining, mw_aperture - off);
                /*
                 * NOTE(review): reads in 4-byte units; assumes addr/len are
                 * 4-byte aligned (presumably enforced by validate_mem_range
                 * — confirm).
                 */
                for (i = 0; i < n; i += 4)
                        *b++ = t4_read_reg(sc, mw_base + off + i);

                rc = copyout(buf, dst, n);
                if (rc != 0)
                        break;

                b = buf;        /* rewind bounce buffer for the next chunk */
                dst += n;
                remaining -= n;
                addr += n;
        }

        free(buf, M_CXGBE);
        return (rc);
}
7534
/*
 * Handler for the CHELSIO_T4_GET_I2C ioctl: have the firmware read
 * i2cd->len bytes from the i2c device at i2cd->dev_addr behind port
 * i2cd->port_id, starting at i2cd->offset, into i2cd->data.
 *
 * Returns EINVAL for a zero-length read or invalid port, EFBIG if the
 * request exceeds the reply buffer.
 */
static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
        int rc;

        if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
                return (EINVAL);

        if (i2cd->len > sizeof(i2cd->data))
                return (EFBIG);

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
        if (rc)
                return (rc);
        rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
            i2cd->offset, i2cd->len, &i2cd->data[0]);
        end_synchronized_op(sc, 0);

        return (rc);
}
7555
/*
 * Range check used by the scheduler ioctls.  A negative val means the
 * caller left the parameter unset and is treated as "in range"; otherwise
 * val must lie within [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{

        if (val < 0)
                return (1);
        return (lo <= val && val <= hi);
}
7562
/*
 * Handler for the CHELSIO_T4_SCHED_CLASS ioctl: configure a TX scheduler or
 * set the parameters of one scheduling class.  Translates the ioctl's
 * SCHED_CLASS_* constants into the firmware's FW_SCHED_* nomenclature,
 * validates the request, and hands it to the firmware.  Requires the
 * adapter to be fully initialized (EAGAIN otherwise).
 *
 * Note: negative parameter values mean "unset" throughout (see in_range()).
 */
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
        int fw_subcmd, fw_type, rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
        if (rc)
                return (rc);

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        /*
         * Translate the cxgbetool parameters into T4 firmware parameters.  (The
         * sub-command and type are in common locations.)
         */
        if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
                fw_subcmd = FW_SCHED_SC_CONFIG;
        else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
                fw_subcmd = FW_SCHED_SC_PARAMS;
        else {
                rc = EINVAL;
                goto done;
        }
        if (p->type == SCHED_CLASS_TYPE_PACKET)
                fw_type = FW_SCHED_TYPE_PKTSCHED;
        else {
                rc = EINVAL;
                goto done;
        }

        if (fw_subcmd == FW_SCHED_SC_CONFIG) {
                /* Vet our parameters ..*/
                if (p->u.config.minmax < 0) {
                        rc = EINVAL;
                        goto done;
                }

                /* And pass the request to the firmware ...*/
                rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
                goto done;
        }

        if (fw_subcmd == FW_SCHED_SC_PARAMS) {
                int fw_level;
                int fw_mode;
                int fw_rateunit;
                int fw_ratemode;

                /* Scheduling hierarchy level: class or channel rate limit,
                 * or class weighted round-robin. */
                if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
                        fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
                else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
                        fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
                else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
                        fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
                else {
                        rc = EINVAL;
                        goto done;
                }

                if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
                        fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
                else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
                        fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
                else {
                        rc = EINVAL;
                        goto done;
                }

                if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
                        fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
                else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
                        fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
                else {
                        rc = EINVAL;
                        goto done;
                }

                if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
                        fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
                else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
                        fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
                else {
                        rc = EINVAL;
                        goto done;
                }

                /* Vet our parameters ... */
                if (!in_range(p->u.params.channel, 0, 3) ||
                    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
                    !in_range(p->u.params.minrate, 0, 10000000) ||
                    !in_range(p->u.params.maxrate, 0, 10000000) ||
                    !in_range(p->u.params.weight, 0, 100)) {
                        rc = ERANGE;
                        goto done;
                }

                /*
                 * Translate any unset parameters into the firmware's
                 * nomenclature and/or fail the call if the parameters
                 * are required ...
                 */
                if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
                    p->u.params.channel < 0 || p->u.params.cl < 0) {
                        rc = EINVAL;
                        goto done;
                }
                if (p->u.params.minrate < 0)
                        p->u.params.minrate = 0;
                if (p->u.params.maxrate < 0) {
                        /* maxrate is mandatory for rate-limit levels. */
                        if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
                            p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
                                rc = EINVAL;
                                goto done;
                        } else
                                p->u.params.maxrate = 0;
                }
                if (p->u.params.weight < 0) {
                        /* weight is mandatory for weighted round-robin. */
                        if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
                                rc = EINVAL;
                                goto done;
                        } else
                                p->u.params.weight = 0;
                }
                if (p->u.params.pktsize < 0) {
                        /* pktsize is mandatory for rate-limit levels. */
                        if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
                            p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
                                rc = EINVAL;
                                goto done;
                        } else
                                p->u.params.pktsize = 0;
                }

                /* See what the firmware thinks of the request ... */
                rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
                    fw_rateunit, fw_ratemode, p->u.params.channel,
                    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
                    p->u.params.weight, p->u.params.pktsize, 1);
                goto done;
        }

        rc = EINVAL;
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7711
/*
 * Handler for the CHELSIO_T4_SCHED_QUEUE ioctl: bind one TX queue (or, when
 * p->queue < 0, every TX queue of the port) to TX scheduling class p->cl.
 * A negative p->cl maps to 0xffffffff, which the firmware treats as
 * "unbind" -- TODO confirm against the firmware interface.
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
        struct port_info *pi = NULL;
        struct sge_txq *txq;
        uint32_t fw_mnem, fw_queue, fw_class;
        int i, rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
        if (rc)
                return (rc);

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        if (p->port >= sc->params.nports) {
                rc = EINVAL;
                goto done;
        }

        pi = sc->port[p->port];
        /* Negative values mean "unset"; see in_range(). */
        if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
                rc = EINVAL;
                goto done;
        }

        /*
         * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
         * Scheduling Class in this case).
         */
        fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
        fw_class = p->cl < 0 ? 0xffffffff : p->cl;

        /*
         * If op.queue is non-negative, then we're only changing the scheduling
         * on a single specified TX queue.
         */
        if (p->queue >= 0) {
                txq = &sc->sge.txq[pi->first_txq + p->queue];
                fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
                    &fw_class);
                goto done;
        }

        /*
         * Change the scheduling on all the TX queues for the
         * interface.
         */
        for_each_txq(pi, i, txq) {
                fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
                    &fw_class);
                if (rc)
                        goto done;
        }

        rc = 0;
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7777
7778 int
7779 t4_os_find_pci_capability(struct adapter *sc, int cap)
7780 {
7781         int i;
7782
7783         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7784 }
7785
7786 int
7787 t4_os_pci_save_state(struct adapter *sc)
7788 {
7789         device_t dev;
7790         struct pci_devinfo *dinfo;
7791
7792         dev = sc->dev;
7793         dinfo = device_get_ivars(dev);
7794
7795         pci_cfg_save(dev, dinfo, 0);
7796         return (0);
7797 }
7798
7799 int
7800 t4_os_pci_restore_state(struct adapter *sc)
7801 {
7802         device_t dev;
7803         struct pci_devinfo *dinfo;
7804
7805         dev = sc->dev;
7806         dinfo = device_get_ivars(dev);
7807
7808         pci_cfg_restore(dev, dinfo);
7809         return (0);
7810 }
7811
7812 void
7813 t4_os_portmod_changed(const struct adapter *sc, int idx)
7814 {
7815         struct port_info *pi = sc->port[idx];
7816         static const char *mod_str[] = {
7817                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7818         };
7819
7820         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7821                 if_printf(pi->ifp, "transceiver unplugged.\n");
7822         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7823                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7824         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7825                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7826         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7827                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7828                     mod_str[pi->mod_type]);
7829         } else {
7830                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7831                     pi->mod_type);
7832         }
7833 }
7834
7835 void
7836 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7837 {
7838         struct port_info *pi = sc->port[idx];
7839         struct ifnet *ifp = pi->ifp;
7840
7841         if (link_stat) {
7842                 pi->linkdnrc = -1;
7843                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7844                 if_link_state_change(ifp, LINK_STATE_UP);
7845         } else {
7846                 if (reason >= 0)
7847                         pi->linkdnrc = reason;
7848                 if_link_state_change(ifp, LINK_STATE_DOWN);
7849         }
7850 }
7851
7852 void
7853 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7854 {
7855         struct adapter *sc;
7856
7857         sx_slock(&t4_list_lock);
7858         SLIST_FOREACH(sc, &t4_list, link) {
7859                 /*
7860                  * func should not make any assumptions about what state sc is
7861                  * in - the only guarantee is that sc->sc_lock is a valid lock.
7862                  */
7863                 func(sc, arg);
7864         }
7865         sx_sunlock(&t4_list_lock);
7866 }
7867
/* cdev open: nothing to set up per-open; all state hangs off the softc. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7873
/* cdev close: nothing to tear down. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7879
/*
 * ioctl entry point for the adapter's nexus character device (used by
 * cxgbetool and similar management utilities).  All commands require
 * PRIV_DRIVER.  Note that several cases return directly on validation
 * failure rather than setting rc and breaking out of the switch.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        int rc;
        struct adapter *sc = dev->si_drv1;

        rc = priv_check(td, PRIV_DRIVER);
        if (rc != 0)
                return (rc);

        switch (cmd) {
        case CHELSIO_T4_GETREG: {
                struct t4_reg *edata = (struct t4_reg *)data;

                /* Register offset must be 4-byte aligned and within BAR0. */
                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4)
                        edata->val = t4_read_reg(sc, edata->addr);
                else if (edata->size == 8)
                        edata->val = t4_read_reg64(sc, edata->addr);
                else
                        return (EINVAL);

                break;
        }
        case CHELSIO_T4_SETREG: {
                struct t4_reg *edata = (struct t4_reg *)data;

                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4) {
                        /* A 32-bit write must not carry high-order bits. */
                        if (edata->val & 0xffffffff00000000)
                                return (EINVAL);
                        t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
                } else if (edata->size == 8)
                        t4_write_reg64(sc, edata->addr, edata->val);
                else
                        return (EINVAL);
                break;
        }
        case CHELSIO_T4_REGDUMP: {
                struct t4_regdump *regs = (struct t4_regdump *)data;
                int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
                uint8_t *buf;

                if (regs->len < reglen) {
                        regs->len = reglen; /* hint to the caller */
                        return (ENOBUFS);
                }

                regs->len = reglen;
                buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
                t4_get_regs(sc, regs, buf);
                rc = copyout(buf, regs->data, reglen);
                free(buf, M_CXGBE);
                break;
        }
        case CHELSIO_T4_GET_FILTER_MODE:
                rc = get_filter_mode(sc, (uint32_t *)data);
                break;
        case CHELSIO_T4_SET_FILTER_MODE:
                rc = set_filter_mode(sc, *(uint32_t *)data);
                break;
        case CHELSIO_T4_GET_FILTER:
                rc = get_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_SET_FILTER:
                rc = set_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_DEL_FILTER:
                rc = del_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_GET_SGE_CONTEXT:
                rc = get_sge_context(sc, (struct t4_sge_context *)data);
                break;
        case CHELSIO_T4_LOAD_FW:
                rc = load_fw(sc, (struct t4_data *)data);
                break;
        case CHELSIO_T4_GET_MEM:
                /* Adapter memory is always read through window 2 here. */
                rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
                break;
        case CHELSIO_T4_GET_I2C:
                rc = read_i2c(sc, (struct t4_i2c_data *)data);
                break;
        case CHELSIO_T4_CLEAR_STATS: {
                int i;
                u_int port_id = *(uint32_t *)data;
                struct port_info *pi;

                if (port_id >= sc->params.nports)
                        return (EINVAL);
                pi = sc->port[port_id];

                /* MAC stats */
                t4_clr_port_stats(sc, pi->tx_chan);

                /* Software (per-queue) stats, only if the queues exist. */
                if (pi->flags & PORT_INIT_DONE) {
                        struct sge_rxq *rxq;
                        struct sge_txq *txq;
                        struct sge_wrq *wrq;

                        for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
                                rxq->lro.lro_queued = 0;
                                rxq->lro.lro_flushed = 0;
#endif
                                rxq->rxcsum = 0;
                                rxq->vlan_extraction = 0;
                        }

                        for_each_txq(pi, i, txq) {
                                txq->txcsum = 0;
                                txq->tso_wrs = 0;
                                txq->vlan_insertion = 0;
                                txq->imm_wrs = 0;
                                txq->sgl_wrs = 0;
                                txq->txpkt_wrs = 0;
                                txq->txpkts_wrs = 0;
                                txq->txpkts_pkts = 0;
                                txq->br->br_drops = 0;
                                txq->no_dmamap = 0;
                                txq->no_desc = 0;
                        }

#ifdef TCP_OFFLOAD
                        /* nothing to clear for each ofld_rxq */

                        for_each_ofld_txq(pi, i, wrq) {
                                wrq->tx_wrs = 0;
                                wrq->no_desc = 0;
                        }
#endif
                        wrq = &sc->sge.ctrlq[pi->port_id];
                        wrq->tx_wrs = 0;
                        wrq->no_desc = 0;
                }
                break;
        }
        case CHELSIO_T4_SCHED_CLASS:
                rc = set_sched_class(sc, (struct t4_sched_params *)data);
                break;
        case CHELSIO_T4_SCHED_QUEUE:
                rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
                break;
        case CHELSIO_T4_GET_TRACER:
                rc = t4_get_tracer(sc, (struct t4_tracer *)data);
                break;
        case CHELSIO_T4_SET_TRACER:
                rc = t4_set_tracer(sc, (struct t4_tracer *)data);
                break;
        default:
                rc = EINVAL;
        }

        return (rc);
}
8039
8040 #ifdef TCP_OFFLOAD
/*
 * Program the ULP RX iSCSI tag mask and the four huge-page-size-order (HPZ)
 * fields for this adapter.  Exposed to the TOE/iSCSI upper-layer drivers.
 */
void
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
    const unsigned int *pgsz_order)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;

        t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
        t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
                V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
                V_HPZ3(pgsz_order[3]));
}
8053
/*
 * Enable or disable TOE (TCP offload) on a port.  Must be called with the
 * adapter's synchronized op held.  Enabling brings the adapter fully up if
 * needed, and activates the TOM upper-layer driver (t4_tom.ko) the first
 * time any port enables TOE.  Per-port state lives in sc->offload_map.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
        int rc;
        struct adapter *sc = pi->adapter;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (!is_offload(sc))
                return (ENODEV);

        if (enable) {
                /* The adapter must be fully initialized first. */
                if (!(sc->flags & FULL_INIT_DONE)) {
                        rc = cxgbe_init_synchronized(pi);
                        if (rc)
                                return (rc);
                }

                if (isset(&sc->offload_map, pi->port_id))
                        return (0);     /* already enabled on this port */

                if (!(sc->flags & TOM_INIT_DONE)) {
                        rc = t4_activate_uld(sc, ULD_TOM);
                        if (rc == EAGAIN) {
                                /* EAGAIN here means the TOM ULD isn't registered. */
                                log(LOG_WARNING,
                                    "You must kldload t4_tom.ko before trying "
                                    "to enable TOE on a cxgbe interface.\n");
                        }
                        if (rc != 0)
                                return (rc);
                        KASSERT(sc->tom_softc != NULL,
                            ("%s: TOM activated but softc NULL", __func__));
                        KASSERT(sc->flags & TOM_INIT_DONE,
                            ("%s: TOM activated but flag not set", __func__));
                }

                setbit(&sc->offload_map, pi->port_id);
        } else {
                if (!isset(&sc->offload_map, pi->port_id))
                        return (0);     /* already disabled on this port */

                KASSERT(sc->flags & TOM_INIT_DONE,
                    ("%s: TOM never initialized?", __func__));
                clrbit(&sc->offload_map, pi->port_id);
        }

        return (0);
}
8102
8103 /*
8104  * Add an upper layer driver to the global list.
8105  */
8106 int
8107 t4_register_uld(struct uld_info *ui)
8108 {
8109         int rc = 0;
8110         struct uld_info *u;
8111
8112         sx_xlock(&t4_uld_list_lock);
8113         SLIST_FOREACH(u, &t4_uld_list, link) {
8114             if (u->uld_id == ui->uld_id) {
8115                     rc = EEXIST;
8116                     goto done;
8117             }
8118         }
8119
8120         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
8121         ui->refcount = 0;
8122 done:
8123         sx_xunlock(&t4_uld_list_lock);
8124         return (rc);
8125 }
8126
8127 int
8128 t4_unregister_uld(struct uld_info *ui)
8129 {
8130         int rc = EINVAL;
8131         struct uld_info *u;
8132
8133         sx_xlock(&t4_uld_list_lock);
8134
8135         SLIST_FOREACH(u, &t4_uld_list, link) {
8136             if (u == ui) {
8137                     if (ui->refcount > 0) {
8138                             rc = EBUSY;
8139                             goto done;
8140                     }
8141
8142                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
8143                     rc = 0;
8144                     goto done;
8145             }
8146         }
8147 done:
8148         sx_xunlock(&t4_uld_list_lock);
8149         return (rc);
8150 }
8151
8152 int
8153 t4_activate_uld(struct adapter *sc, int id)
8154 {
8155         int rc = EAGAIN;
8156         struct uld_info *ui;
8157
8158         ASSERT_SYNCHRONIZED_OP(sc);
8159
8160         sx_slock(&t4_uld_list_lock);
8161
8162         SLIST_FOREACH(ui, &t4_uld_list, link) {
8163                 if (ui->uld_id == id) {
8164                         rc = ui->activate(sc);
8165                         if (rc == 0)
8166                                 ui->refcount++;
8167                         goto done;
8168                 }
8169         }
8170 done:
8171         sx_sunlock(&t4_uld_list_lock);
8172
8173         return (rc);
8174 }
8175
8176 int
8177 t4_deactivate_uld(struct adapter *sc, int id)
8178 {
8179         int rc = EINVAL;
8180         struct uld_info *ui;
8181
8182         ASSERT_SYNCHRONIZED_OP(sc);
8183
8184         sx_slock(&t4_uld_list_lock);
8185
8186         SLIST_FOREACH(ui, &t4_uld_list, link) {
8187                 if (ui->uld_id == id) {
8188                         rc = ui->deactivate(sc);
8189                         if (rc == 0)
8190                                 ui->refcount--;
8191                         goto done;
8192                 }
8193         }
8194 done:
8195         sx_sunlock(&t4_uld_list_lock);
8196
8197         return (rc);
8198 }
8199 #endif
8200
8201 /*
8202  * Come up with reasonable defaults for some of the tunables, provided they're
8203  * not set by the user (in which case we'll use the values as is).
8204  */
static void
tweak_tunables(void)
{
        int nc = mp_ncpus;      /* our snapshot of the number of CPUs */

        /* Queue counts default to min(ncpus, per-port-speed constant). */
        if (t4_ntxq10g < 1)
                t4_ntxq10g = min(nc, NTXQ_10G);

        if (t4_ntxq1g < 1)
                t4_ntxq1g = min(nc, NTXQ_1G);

        if (t4_nrxq10g < 1)
                t4_nrxq10g = min(nc, NRXQ_10G);

        if (t4_nrxq1g < 1)
                t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
        /* Offload queue counts, same scheme as above. */
        if (t4_nofldtxq10g < 1)
                t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

        if (t4_nofldtxq1g < 1)
                t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

        if (t4_nofldrxq10g < 1)
                t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

        if (t4_nofldrxq1g < 1)
                t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

        /* TOE capability is allowed only when TCP_OFFLOAD is compiled in. */
        if (t4_toecaps_allowed == -1)
                t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
        if (t4_toecaps_allowed == -1)
                t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
        /* netmap queue counts, same scheme as above. */
        if (t4_nnmtxq10g < 1)
                t4_nnmtxq10g = min(nc, NNMTXQ_10G);

        if (t4_nnmtxq1g < 1)
                t4_nnmtxq1g = min(nc, NNMTXQ_1G);

        if (t4_nnmrxq10g < 1)
                t4_nnmrxq10g = min(nc, NNMRXQ_10G);

        if (t4_nnmrxq1g < 1)
                t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

        /* Interrupt holdoff timer / packet-count indices must be in range. */
        if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
                t4_tmr_idx_10g = TMR_IDX_10G;

        if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
                t4_pktc_idx_10g = PKTC_IDX_10G;

        if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
                t4_tmr_idx_1g = TMR_IDX_1G;

        if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
                t4_pktc_idx_1g = PKTC_IDX_1G;

        /* Queue sizes have a floor of 128 descriptors. */
        if (t4_qsize_txq < 128)
                t4_qsize_txq = 128;

        if (t4_qsize_rxq < 128)
                t4_qsize_rxq = 128;
        /* Round the rx queue size up to a multiple of 8. */
        while (t4_qsize_rxq & 7)
                t4_qsize_rxq++;

        /* Drop any unknown bits from the interrupt-type mask. */
        t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
8278
/* Serializes MOD_LOAD/MOD_UNLOAD handling across t4nex and t5nex. */
static struct sx mlu;   /* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
8281
8282 static int
8283 mod_event(module_t mod, int cmd, void *arg)
8284 {
8285         int rc = 0;
8286         static int loaded = 0;
8287
8288         switch (cmd) {
8289         case MOD_LOAD:
8290                 sx_xlock(&mlu);
8291                 if (loaded++ == 0) {
8292                         t4_sge_modload();
8293                         sx_init(&t4_list_lock, "T4/T5 adapters");
8294                         SLIST_INIT(&t4_list);
8295 #ifdef TCP_OFFLOAD
8296                         sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
8297                         SLIST_INIT(&t4_uld_list);
8298 #endif
8299                         t4_tracer_modload();
8300                         tweak_tunables();
8301                 }
8302                 sx_xunlock(&mlu);
8303                 break;
8304
8305         case MOD_UNLOAD:
8306                 sx_xlock(&mlu);
8307                 if (--loaded == 0) {
8308                         int tries;
8309
8310                         sx_slock(&t4_list_lock);
8311                         if (!SLIST_EMPTY(&t4_list)) {
8312                                 rc = EBUSY;
8313                                 sx_sunlock(&t4_list_lock);
8314                                 goto done_unload;
8315                         }
8316 #ifdef TCP_OFFLOAD
8317                         sx_slock(&t4_uld_list_lock);
8318                         if (!SLIST_EMPTY(&t4_uld_list)) {
8319                                 rc = EBUSY;
8320                                 sx_sunlock(&t4_uld_list_lock);
8321                                 sx_sunlock(&t4_list_lock);
8322                                 goto done_unload;
8323                         }
8324 #endif
8325                         tries = 0;
8326                         while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
8327                                 uprintf("%ju clusters with custom free routine "
8328                                     "still is use.\n", t4_sge_extfree_refs());
8329                                 pause("t4unload", 2 * hz);
8330                         }
8331 #ifdef TCP_OFFLOAD
8332                         sx_sunlock(&t4_uld_list_lock);
8333 #endif
8334                         sx_sunlock(&t4_list_lock);
8335
8336                         if (t4_sge_extfree_refs() == 0) {
8337                                 t4_tracer_modunload();
8338 #ifdef TCP_OFFLOAD
8339                                 sx_destroy(&t4_uld_list_lock);
8340 #endif
8341                                 sx_destroy(&t4_list_lock);
8342                                 t4_sge_modunload();
8343                                 loaded = 0;
8344                         } else {
8345                                 rc = EBUSY;
8346                                 loaded++;       /* undo earlier decrement */
8347                         }
8348                 }
8349 done_unload:
8350                 sx_xunlock(&mlu);
8351                 break;
8352         }
8353
8354         return (rc);
8355 }
8356
/*
 * Module glue: t4nex/t5nex are the PCI nexus drivers (sharing mod_event
 * above); cxgbe/cxl are the per-port drivers that attach beneath them.
 * Both nexus drivers depend on firmware(9) for on-demand firmware loading.
 */
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);