/*
 * sys/dev/cxgbe/t4_main.c — Chelsio T4/T5 nexus driver (FreeBSD releng/9.2).
 */
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* attach/detach are shared with the T5 driver below; only probe differs. */
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
/* Nexus driver for the T4 ASIC; softc is the per-adapter state. */
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/*
 * Character device for the T4 nexus (/dev/t4nex*).  Gives userland (the
 * cxgbetool utility — see the recovery-mode note in t4_attach) register,
 * memory, and filter access even when full attach fails.
 */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),     /* shared with T4 */
        DEVMETHOD(device_detach,        t4_detach),     /* shared with T4 */

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};


/* T5 port (cxl) interface — reuses the cxgbe method table. */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* Same entry points as t4_cdevsw; only the device name differs. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

/* malloc(9) type used for all of this driver's allocations. */
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;                 /* protects t4_list */
static SLIST_HEAD(, adapter) t4_list;           /* every attached adapter */
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;             /* protects t4_uld_list */
static SLIST_HEAD(, uld_info) t4_uld_list;      /* registered ULDs (e.g. TOE) */
#endif
169
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
/*
 * NOTE(review): TUNABLE_INT takes an int *, but these two variables are
 * unsigned int — confirm the intended signedness.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF      "default"
#define FLASH_CF        "flash"
#define UWIRE_CF        "uwire"
#define FPGA_CF         "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;     /* -1: tweak_tunables() picks default */
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-only knob (hw.cxl); presumably enables write-combined BAR mappings —
 * confirm against map_bar_2(). */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293
/*
 * Interrupt/queue sizing computed by cfg_itype_and_nqueues() at attach time
 * and consumed by t4_attach() when carving up SGE queues among the ports.
 */
struct intrs_and_queues {
        int intr_type;          /* INTx, MSI, or MSI-X */
        int nirq;               /* Number of vectors */
        int intr_flags;
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};
309
/* Software state for one hardware filter (see the filter ioctls below). */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* User-supplied match/action specification for this filter. */
        struct t4_filter_specification fs;
};
319
/*
 * Flag bits selecting which MAC settings to apply; presumably the "flags"
 * argument of update_mac_settings() — confirm at its definition.
 */
enum {
        XGMAC_MTU       = (1 << 0),
        XGMAC_PROMISC   = (1 << 1),
        XGMAC_ALLMULTI  = (1 << 2),
        XGMAC_VLANEX    = (1 << 3),
        XGMAC_UCADDR    = (1 << 4),
        XGMAC_MCADDRS   = (1 << 5),

        XGMAC_ALL       = 0xffff        /* apply everything */
};
330
/* Attach-time hardware/firmware setup helpers. */
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
/* Port bring-up/teardown and queue lifecycle. */
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
/* Default (catch-all) message handlers installed in t4_attach(). */
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
/* sysctl plumbing. */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
/* Datapath helper and filter-mode/ioctl support. */
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);
429
/* PCI device-id tables consulted by t4_probe()/t5_probe(). */
struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401,  "Chelsio T520-CR"},           /* 2 x 10G */
        {0x5402,  "Chelsio T522-CR"},           /* 2 x 10G, 2 X 1G */
        {0x5403,  "Chelsio T540-CR"},           /* 4 x 10G */
        {0x5407,  "Chelsio T520-SO"},           /* 2 x 10G, nomem */
        {0x5409,  "Chelsio T520-BT"},           /* 2 x 10GBaseT */
        {0x540a,  "Chelsio T504-BT"},           /* 4 x 1G */
        {0x540d,  "Chelsio T580-CR"},           /* 2 x 40G */
        {0x540e,  "Chelsio T540-LP-CR"},        /* 4 x 10G */
        {0x5410,  "Chelsio T580-LP-CR"},        /* 2 x 40G */
        {0x5411,  "Chelsio T520-LL-CR"},        /* 2 x 10G */
        {0x5412,  "Chelsio T560-CR"},           /* 1 x 40G, 2 x 10G */
        {0x5414,  "Chelsio T580-LP-SO-CR"},     /* 2 x 40G, nomem */
#ifdef notyet
        {0x5404,  "Chelsio T520-BCH"},
        {0x5405,  "Chelsio T540-BCH"},
        {0x5406,  "Chelsio T540-CH"},
        {0x5408,  "Chelsio T520-CX"},
        {0x540b,  "Chelsio B520-SR"},
        {0x540c,  "Chelsio B504-BT"},
        {0x540f,  "Chelsio Amsterdam"},
        {0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
486
487 static int
488 t4_probe(device_t dev)
489 {
490         int i;
491         uint16_t v = pci_get_vendor(dev);
492         uint16_t d = pci_get_device(dev);
493         uint8_t f = pci_get_function(dev);
494
495         if (v != PCI_VENDOR_ID_CHELSIO)
496                 return (ENXIO);
497
498         /* Attach only to PF0 of the FPGA */
499         if (d == 0xa000 && f != 0)
500                 return (ENXIO);
501
502         for (i = 0; i < nitems(t4_pciids); i++) {
503                 if (d == t4_pciids[i].device) {
504                         device_set_desc(dev, t4_pciids[i].desc);
505                         return (BUS_PROBE_DEFAULT);
506                 }
507         }
508
509         return (ENXIO);
510 }
511
512 static int
513 t5_probe(device_t dev)
514 {
515         int i;
516         uint16_t v = pci_get_vendor(dev);
517         uint16_t d = pci_get_device(dev);
518         uint8_t f = pci_get_function(dev);
519
520         if (v != PCI_VENDOR_ID_CHELSIO)
521                 return (ENXIO);
522
523         /* Attach only to PF0 of the FPGA */
524         if (d == 0xb000 && f != 0)
525                 return (ENXIO);
526
527         for (i = 0; i < nitems(t5_pciids); i++) {
528                 if (d == t5_pciids[i].device) {
529                         device_set_desc(dev, t5_pciids[i].desc);
530                         return (BUS_PROBE_DEFAULT);
531                 }
532         }
533
534         return (ENXIO);
535 }
536
/*
 * device_attach method shared by the T4 and T5 nexus drivers.  Brings the
 * adapter from cold to operational: PCI setup, register BARs, firmware,
 * per-port child devices (cxgbe/cxl), interrupt vectors, and SGE queue
 * bookkeeping.  On most failures after the char device exists the adapter is
 * left in "recovery mode" (attach reports success) so cxgbetool still works.
 */
static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, n10g, n1g, rqidx, tqidx;
        struct intrs_and_queues iaq;
        struct sge *s;
#ifdef TCP_OFFLOAD
        int ofld_rqidx, ofld_tqidx;
#endif

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Enable bus mastering and tune PCIe: 4K MRRS + relaxed ordering. */
        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                uint32_t v;

                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
                v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
                pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
        }

        /* Adapter lock, and membership in the global adapter list. */
        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
        mtx_lock(&t4_list_lock);
        SLIST_INSERT_HEAD(&t4_list, sc, link);
        mtx_unlock(&t4_list_lock);

        /* State for refilling starving freelists from a callout. */
        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

        rc = map_bars_0_and_4(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * This is the real PF# to which we're attaching.  Works from within PCI
         * passthrough environments too, where pci_get_function() could return a
         * different PF# depending on the passthrough configuration.  We need to
         * use the real PF# in all our communication with the firmware.
         */
        sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
        sc->mbox = sc->pf;

        /* Install catch-all handlers, then the real ones we know about. */
        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
        sc->an_handler = an_not_handled;
        for (i = 0; i < nitems(sc->cpl_handler); i++)
                sc->cpl_handler[i] = cpl_not_handled;
        for (i = 0; i < nitems(sc->fw_msg_handler); i++)
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
        t4_init_sge_cpl_handlers(sc);

        /* Prepare the adapter for operation */
        rc = -t4_prep_adapter(sc);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /*
         * Do this really early, with the memory windows set up even before the
         * character device.  The userland tool's register i/o and mem read
         * will work even in "recovery mode".
         */
        setup_memwin(sc);
        sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
            device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
            device_get_nameunit(dev));
        if (sc->cdev == NULL)
                device_printf(dev, "failed to create nexus char device.\n");
        else
                sc->cdev->si_drv1 = sc;

        /* Go no further if recovery mode has been requested. */
        if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
                device_printf(dev, "recovery mode.\n");
                goto done;
        }

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = map_bar_2(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;

                /* Allocate the vi and initialize parameters like mac addr */
                rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

                /* 40G ports use the 10G holdoff/queue settings. */
                if (is_10G_port(pi) || is_40G_port(pi)) {
                        n10g++;
                        pi->tmr_idx = t4_tmr_idx_10g;
                        pi->pktc_idx = t4_pktc_idx_10g;
                } else {
                        n1g++;
                        pi->tmr_idx = t4_tmr_idx_1g;
                        pi->pktc_idx = t4_pktc_idx_1g;
                }

                pi->xact_addr_filt = -1;        /* no exact-match MAC entry yet */
                pi->linkdnrc = -1;              /* no link-down reason recorded */

                pi->qsize_rxq = t4_qsize_rxq;
                pi->qsize_txq = t4_qsize_txq;

                /* Child device name depends on the chip: cxgbe (T4) or cxl (T5). */
                pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                device_set_softc(pi->dev, pi);
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
        rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;
        sc->flags |= iaq.intr_flags;

        /* Total up the SGE queue counts across all ports. */
        s = &sc->sge;
        s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {

                s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
                    M_CXGBE, M_ZERO | M_WAITOK);
                s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif

        /* Queue arrays and iq/eq lookup maps (M_WAITOK: cannot fail). */
        s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_init_l2t(sc, M_WAITOK);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
        ofld_rqidx = ofld_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                if (pi == NULL)
                        continue;

                /* Hand each port a contiguous slice of the queue arrays. */
                pi->first_rxq = rqidx;
                pi->first_txq = tqidx;
                if (is_10G_port(pi) || is_40G_port(pi)) {
                        pi->nrxq = iaq.nrxq10g;
                        pi->ntxq = iaq.ntxq10g;
                } else {
                        pi->nrxq = iaq.nrxq1g;
                        pi->ntxq = iaq.ntxq1g;
                }

                rqidx += pi->nrxq;
                tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
                if (is_offload(sc)) {
                        pi->first_ofld_rxq = ofld_rqidx;
                        pi->first_ofld_txq = ofld_tqidx;
                        if (is_10G_port(pi) || is_40G_port(pi)) {
                                pi->nofldrxq = iaq.nofldrxq10g;
                                pi->nofldtxq = iaq.nofldtxq10g;
                        } else {
                                pi->nofldrxq = iaq.nofldrxq1g;
                                pi->nofldtxq = iaq.nofldtxq1g;
                        }
                        ofld_rqidx += pi->nofldrxq;
                        ofld_tqidx += pi->nofldtxq;
                }
#endif
        }

        rc = setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        /* Attach the per-port children added in the first pass. */
        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.pci.width, sc->params.nports, sc->intr_count,
            sc->intr_type == INTR_MSIX ? "MSI-X" :
            (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
            sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

        t4_set_desc(sc);

done:
        if (rc != 0 && sc->cdev) {
                /* cdev was created and so cxgbetool works; recover that way. */
                device_printf(dev,
                    "error during attach, adapter is now in recovery mode.\n");
                rc = 0;
        }

        /* t4_detach is idempotent and unwinds a partial attach. */
        if (rc != 0)
                t4_detach(dev);
        else
                t4_sysctls(sc);

        return (rc);
}
830
/*
 * Idempotent: safe to call on a partially attached adapter (t4_attach's error
 * path calls this directly).  Releases everything t4_attach may have
 * acquired, in roughly the reverse order of acquisition.
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Quiesce interrupts before tearing anything down. */
        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        /* Remove the /dev node so cxgbetool can no longer reach us. */
        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /* Detach the child (cxgbe port) devices; the only step that can fail. */
        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        /* Free per-port state (the child devices themselves are gone). */
        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Tell the firmware this PF is done with the card. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        /* Drop this adapter from the global list of all T4/T5 adapters. */
        if (mtx_initialized(&sc->sc_lock)) {
                mtx_lock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                mtx_unlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);

        /* Wipe the softc so a repeat call finds everything NULL/0. */
        bzero(sc, sizeof(*sc));

        return (0);
}
925
926
927 static int
928 cxgbe_probe(device_t dev)
929 {
930         char buf[128];
931         struct port_info *pi = device_get_softc(dev);
932
933         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
934         device_set_desc_copy(dev, buf);
935
936         return (BUS_PROBE_DEFAULT);
937 }
938
/* ifnet capabilities advertised by a cxgbe port; all are enabled by default. */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define T4_CAP_ENABLE (T4_CAP)
943
/*
 * Attach a cxgbe port: allocate and configure its ifnet, register it with the
 * network stack, and create the port's sysctl nodes.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        /* Driver entry points for the stack. */
        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        /* TOE is advertised only when the adapter has offload capability. */
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        /* All capabilities start out enabled; hwassist matches them. */
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi);

        /* Get notified of VLAN config changes so VLANEX can be updated. */
        pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                device_printf(dev,
                    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
                    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
        } else
#endif
                device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

        cxgbe_sysctls(pi);

        return (0);
}
1001
/*
 * Undo cxgbe_attach.  Marks the port DOOMED and takes exclusive ownership of
 * the adapter (BUSY) so that no ioctl or init can race with the teardown.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait out any in-progress synchronized op, then claim BUSY. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        if (pi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

        /* Stop the tick callout under the port lock; drain it outside. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release BUSY and wake up anyone waiting on the adapter. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
1046
/*
 * if_init handler.  The real work is in cxgbe_init_synchronized; this wrapper
 * just serializes it against other adapter operations.
 */
static void
cxgbe_init(void *arg)
{
        struct port_info *pi = arg;
        struct adapter *sc = pi->adapter;

        if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
                return;
        cxgbe_init_synchronized(pi);
        end_synchronized_op(sc, 0);
}
1058
1059 static int
1060 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1061 {
1062         int rc = 0, mtu, flags;
1063         struct port_info *pi = ifp->if_softc;
1064         struct adapter *sc = pi->adapter;
1065         struct ifreq *ifr = (struct ifreq *)data;
1066         uint32_t mask;
1067
1068         switch (cmd) {
1069         case SIOCSIFMTU:
1070                 mtu = ifr->ifr_mtu;
1071                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1072                         return (EINVAL);
1073
1074                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1075                 if (rc)
1076                         return (rc);
1077                 ifp->if_mtu = mtu;
1078                 if (pi->flags & PORT_INIT_DONE) {
1079                         t4_update_fl_bufsize(ifp);
1080                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1081                                 rc = update_mac_settings(pi, XGMAC_MTU);
1082                 }
1083                 end_synchronized_op(sc, 0);
1084                 break;
1085
1086         case SIOCSIFFLAGS:
1087                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1088                 if (rc)
1089                         return (rc);
1090
1091                 if (ifp->if_flags & IFF_UP) {
1092                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1093                                 flags = pi->if_flags;
1094                                 if ((ifp->if_flags ^ flags) &
1095                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1096                                         rc = update_mac_settings(pi,
1097                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1098                                 }
1099                         } else
1100                                 rc = cxgbe_init_synchronized(pi);
1101                         pi->if_flags = ifp->if_flags;
1102                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1103                         rc = cxgbe_uninit_synchronized(pi);
1104                 end_synchronized_op(sc, 0);
1105                 break;
1106
1107         case SIOCADDMULTI:      
1108         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1109                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1110                 if (rc)
1111                         return (rc);
1112                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1113                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
1114                 end_synchronized_op(sc, LOCK_HELD);
1115                 break;
1116
1117         case SIOCSIFCAP:
1118                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1119                 if (rc)
1120                         return (rc);
1121
1122                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1123                 if (mask & IFCAP_TXCSUM) {
1124                         ifp->if_capenable ^= IFCAP_TXCSUM;
1125                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1126
1127                         if (IFCAP_TSO4 & ifp->if_capenable &&
1128                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1129                                 ifp->if_capenable &= ~IFCAP_TSO4;
1130                                 if_printf(ifp,
1131                                     "tso4 disabled due to -txcsum.\n");
1132                         }
1133                 }
1134                 if (mask & IFCAP_TXCSUM_IPV6) {
1135                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1136                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1137
1138                         if (IFCAP_TSO6 & ifp->if_capenable &&
1139                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1140                                 ifp->if_capenable &= ~IFCAP_TSO6;
1141                                 if_printf(ifp,
1142                                     "tso6 disabled due to -txcsum6.\n");
1143                         }
1144                 }
1145                 if (mask & IFCAP_RXCSUM)
1146                         ifp->if_capenable ^= IFCAP_RXCSUM;
1147                 if (mask & IFCAP_RXCSUM_IPV6)
1148                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1149
1150                 /*
1151                  * Note that we leave CSUM_TSO alone (it is always set).  The
1152                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1153                  * sending a TSO request our way, so it's sufficient to toggle
1154                  * IFCAP_TSOx only.
1155                  */
1156                 if (mask & IFCAP_TSO4) {
1157                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1158                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1159                                 if_printf(ifp, "enable txcsum first.\n");
1160                                 rc = EAGAIN;
1161                                 goto fail;
1162                         }
1163                         ifp->if_capenable ^= IFCAP_TSO4;
1164                 }
1165                 if (mask & IFCAP_TSO6) {
1166                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1167                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1168                                 if_printf(ifp, "enable txcsum6 first.\n");
1169                                 rc = EAGAIN;
1170                                 goto fail;
1171                         }
1172                         ifp->if_capenable ^= IFCAP_TSO6;
1173                 }
1174                 if (mask & IFCAP_LRO) {
1175 #if defined(INET) || defined(INET6)
1176                         int i;
1177                         struct sge_rxq *rxq;
1178
1179                         ifp->if_capenable ^= IFCAP_LRO;
1180                         for_each_rxq(pi, i, rxq) {
1181                                 if (ifp->if_capenable & IFCAP_LRO)
1182                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1183                                 else
1184                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1185                         }
1186 #endif
1187                 }
1188 #ifdef TCP_OFFLOAD
1189                 if (mask & IFCAP_TOE) {
1190                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1191
1192                         rc = toe_capability(pi, enable);
1193                         if (rc != 0)
1194                                 goto fail;
1195
1196                         ifp->if_capenable ^= mask;
1197                 }
1198 #endif
1199                 if (mask & IFCAP_VLAN_HWTAGGING) {
1200                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1201                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1202                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
1203                 }
1204                 if (mask & IFCAP_VLAN_MTU) {
1205                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1206
1207                         /* Need to find out how to disable auto-mtu-inflation */
1208                 }
1209                 if (mask & IFCAP_VLAN_HWTSO)
1210                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1211                 if (mask & IFCAP_VLAN_HWCSUM)
1212                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1213
1214 #ifdef VLAN_CAPABILITIES
1215                 VLAN_CAPABILITIES(ifp);
1216 #endif
1217 fail:
1218                 end_synchronized_op(sc, 0);
1219                 break;
1220
1221         case SIOCSIFMEDIA:
1222         case SIOCGIFMEDIA:
1223                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1224                 break;
1225
1226         default:
1227                 rc = ether_ioctl(ifp, cmd, data);
1228         }
1229
1230         return (rc);
1231 }
1232
/*
 * if_transmit handler.  Picks a tx queue by flowid and either transmits
 * directly (queue lock acquired) or enqueues on the queue's buf_ring.
 * Returns 0 if the mbuf was transmitted or queued, else an errno (the mbuf
 * is freed on ENETDOWN, owned by the drbr on a successful enqueue).
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Spread flows across this port's tx queues by flowid. */
        if (m->m_flags & M_FLOWID)
                txq += (m->m_pkthdr.flowid % pi->ntxq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                /* Schedule the tx callout to service the drbr if needed. */
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
1299
1300 static void
1301 cxgbe_qflush(struct ifnet *ifp)
1302 {
1303         struct port_info *pi = ifp->if_softc;
1304         struct sge_txq *txq;
1305         int i;
1306         struct mbuf *m;
1307
1308         /* queues do not exist if !PORT_INIT_DONE. */
1309         if (pi->flags & PORT_INIT_DONE) {
1310                 for_each_txq(pi, i, txq) {
1311                         TXQ_LOCK(txq);
1312                         m_freem(txq->m);
1313                         txq->m = NULL;
1314                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1315                                 m_freem(m);
1316                         TXQ_UNLOCK(txq);
1317                 }
1318         }
1319         if_qflush(ifp);
1320 }
1321
1322 static int
1323 cxgbe_media_change(struct ifnet *ifp)
1324 {
1325         struct port_info *pi = ifp->if_softc;
1326
1327         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1328
1329         return (EOPNOTSUPP);
1330 }
1331
1332 static void
1333 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1334 {
1335         struct port_info *pi = ifp->if_softc;
1336         struct ifmedia_entry *cur = pi->media.ifm_cur;
1337         int speed = pi->link_cfg.speed;
1338         int data = (pi->port_type << 8) | pi->mod_type;
1339
1340         if (cur->ifm_data != data) {
1341                 build_medialist(pi);
1342                 cur = pi->media.ifm_cur;
1343         }
1344
1345         ifmr->ifm_status = IFM_AVALID;
1346         if (!pi->link_cfg.link_ok)
1347                 return;
1348
1349         ifmr->ifm_status |= IFM_ACTIVE;
1350
1351         /* active and current will differ iff current media is autoselect. */
1352         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1353                 return;
1354
1355         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1356         if (speed == SPEED_10000)
1357                 ifmr->ifm_active |= IFM_10G_T;
1358         else if (speed == SPEED_1000)
1359                 ifmr->ifm_active |= IFM_1000_T;
1360         else if (speed == SPEED_100)
1361                 ifmr->ifm_active |= IFM_100_TX;
1362         else if (speed == SPEED_10)
1363                 ifmr->ifm_active |= IFM_10_T;
1364         else
1365                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1366                             speed));
1367 }
1368
/*
 * Stop the adapter after a fatal error: clear the SGE global enable bit and
 * mask all further interrupts, then log the event at LOG_EMERG.
 */
void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
1377
/*
 * Map BAR0 (device registers) and BAR4 (MSI-X BAR), saving the register bus
 * tag/handle in the softc.  BAR2 is handled separately by map_bar_2.
 * Returns 0 or ENXIO; resources are released by t4_detach on failure.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        /* The kernel doorbell (via BAR0) is always available. */
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
1403
/*
 * Map BAR2, the userspace doorbell BAR, and on T5 (i386/amd64 only) try to
 * enable write combining on it.  Records the available doorbell mechanisms
 * in sc->doorbells.
 */
static int
map_bar_2(struct adapter *sc)
{

        /*
         * T4: only iWARP driver uses the userspace doorbells.  There is no need
         * to map it if RDMA is disabled.
         */
        if (is_t4(sc) && sc->rdmacaps == 0)
                return (0);

        sc->udbs_rid = PCIR_BAR(2);
        sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE);
        if (sc->udbs_res == NULL) {
                device_printf(sc->dev, "cannot map doorbell BAR.\n");
                return (ENXIO);
        }
        sc->udbs_base = rman_get_virtual(sc->udbs_res);

        if (is_t5(sc)) {
                setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
                if (t5_write_combine) {
                        int rc;

                        /*
                         * Enable write combining on BAR2.  This is the
                         * userspace doorbell BAR and is split into 128B
                         * (UDBS_SEG_SIZE) doorbell regions, each associated
                         * with an egress queue.  The first 64B has the doorbell
                         * and the second 64B can be used to submit a tx work
                         * request with an implicit doorbell.
                         */

                        rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
                            rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
                        if (rc == 0) {
                                /* WC enabled: prefer the WC doorbell modes. */
                                clrbit(&sc->doorbells, DOORBELL_UDB);
                                setbit(&sc->doorbells, DOORBELL_WCWR);
                                setbit(&sc->doorbells, DOORBELL_UDBWC);
                        } else {
                                device_printf(sc->dev,
                                    "couldn't enable write combining: %d\n",
                                    rc);
                        }

                        t4_write_reg(sc, A_SGE_STAT_CFG,
                            V_STATSOURCE_T5(7) | V_STATMODE(0));
                }
#endif
        }

        return (0);
}
1459
/* Base address and aperture size of each memory window (see setup_memwin). */
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

/* T5 uses a different base/aperture for memory window 2. */
static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1471
/*
 * Program the chip's PCIe memory window decoders from the appropriate
 * t4_memwin/t5_memwin table.
 */
static void
setup_memwin(struct adapter *sc)
{
        const struct memwin *mw;
        int i, n;
        uint32_t bar0;

        if (is_t4(sc)) {
                /*
                 * Read low 32b of bar0 indirectly via the hardware backdoor
                 * mechanism.  Works from within PCI passthrough environments
                 * too, where rman_get_start() can return a different value.  We
                 * need to program the T4 memory window decoders with the actual
                 * addresses that will be coming across the PCIe link.
                 */
                bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
                bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

                mw = &t4_memwin[0];
                n = nitems(t4_memwin);
        } else {
                /* T5 uses the relative offset inside the PCIe BAR */
                bar0 = 0;

                mw = &t5_memwin[0];
                n = nitems(t5_memwin);
        }

        /* One BASE_WIN register per window; aperture is encoded as log2-10. */
        for (i = 0; i < n; i++, mw++) {
                t4_write_reg(sc,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
                    (mw->base + bar0) | V_BIR(0) |
                    V_WINDOW(ilog2(mw->aperture) - 10));
        }

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1510
1511 /*
1512  * Verify that the memory range specified by the addr/len pair is valid and lies
1513  * entirely within a single region (EDCx or MCx).
1514  */
1515 static int
1516 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1517 {
1518         uint32_t em, addr_len, maddr, mlen;
1519
1520         /* Memory can only be accessed in naturally aligned 4 byte units */
1521         if (addr & 3 || len & 3 || len == 0)
1522                 return (EINVAL);
1523
1524         /* Enabled memories */
1525         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1526         if (em & F_EDRAM0_ENABLE) {
1527                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1528                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1529                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1530                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1531                     addr + len <= maddr + mlen)
1532                         return (0);
1533         }
1534         if (em & F_EDRAM1_ENABLE) {
1535                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1536                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1537                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1538                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1539                     addr + len <= maddr + mlen)
1540                         return (0);
1541         }
1542         if (em & F_EXT_MEM_ENABLE) {
1543                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1544                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1545                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1546                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1547                     addr + len <= maddr + mlen)
1548                         return (0);
1549         }
1550         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1551                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1552                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1553                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1554                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1555                     addr + len <= maddr + mlen)
1556                         return (0);
1557         }
1558
1559         return (EFAULT);
1560 }
1561
1562 /*
1563  * Verify that the memory range specified by the memtype/offset/len pair is
1564  * valid and lies entirely within the memtype specified.  The global address of
1565  * the start of the range is returned in addr.
1566  */
1567 static int
1568 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1569     uint32_t *addr)
1570 {
1571         uint32_t em, addr_len, maddr, mlen;
1572
1573         /* Memory can only be accessed in naturally aligned 4 byte units */
1574         if (off & 3 || len & 3 || len == 0)
1575                 return (EINVAL);
1576
1577         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1578         switch (mtype) {
1579         case MEM_EDC0:
1580                 if (!(em & F_EDRAM0_ENABLE))
1581                         return (EINVAL);
1582                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1583                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1584                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1585                 break;
1586         case MEM_EDC1:
1587                 if (!(em & F_EDRAM1_ENABLE))
1588                         return (EINVAL);
1589                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1590                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1591                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1592                 break;
1593         case MEM_MC:
1594                 if (!(em & F_EXT_MEM_ENABLE))
1595                         return (EINVAL);
1596                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1597                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1598                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1599                 break;
1600         case MEM_MC1:
1601                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1602                         return (EINVAL);
1603                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1604                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1605                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1606                 break;
1607         default:
1608                 return (EINVAL);
1609         }
1610
1611         if (mlen > 0 && off < mlen && off + len <= mlen) {
1612                 *addr = maddr + off;    /* global address */
1613                 return (0);
1614         }
1615
1616         return (EFAULT);
1617 }
1618
1619 static void
1620 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1621 {
1622         const struct memwin *mw;
1623
1624         if (is_t4(sc)) {
1625                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1626                     ("%s: incorrect memwin# (%d)", __func__, win));
1627                 mw = &t4_memwin[win];
1628         } else {
1629                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1630                     ("%s: incorrect memwin# (%d)", __func__, win));
1631                 mw = &t5_memwin[win];
1632         }
1633
1634         if (base != NULL)
1635                 *base = mw->base;
1636         if (aperture != NULL)
1637                 *aperture = mw->aperture;
1638 }
1639
1640 /*
1641  * Positions the memory window such that it can be used to access the specified
1642  * address in the chip's address space.  The return value is the offset of addr
1643  * from the start of the window.
1644  */
1645 static uint32_t
1646 position_memwin(struct adapter *sc, int n, uint32_t addr)
1647 {
1648         uint32_t start, pf;
1649         uint32_t reg;
1650
1651         KASSERT(n >= 0 && n <= 3,
1652             ("%s: invalid window %d.", __func__, n));
1653         KASSERT((addr & 3) == 0,
1654             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1655
1656         if (is_t4(sc)) {
1657                 pf = 0;
1658                 start = addr & ~0xf;    /* start must be 16B aligned */
1659         } else {
1660                 pf = V_PFNUM(sc->pf);
1661                 start = addr & ~0x7f;   /* start must be 128B aligned */
1662         }
1663         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1664
1665         t4_write_reg(sc, reg, start | pf);
1666         t4_read_reg(sc, reg);
1667
1668         return (addr - start);
1669 }
1670
1671 static int
1672 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1673     struct intrs_and_queues *iaq)
1674 {
1675         int rc, itype, navail, nrxq10g, nrxq1g, n;
1676         int nofldrxq10g = 0, nofldrxq1g = 0;
1677
1678         bzero(iaq, sizeof(*iaq));
1679
1680         iaq->ntxq10g = t4_ntxq10g;
1681         iaq->ntxq1g = t4_ntxq1g;
1682         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1683         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1684 #ifdef TCP_OFFLOAD
1685         if (is_offload(sc)) {
1686                 iaq->nofldtxq10g = t4_nofldtxq10g;
1687                 iaq->nofldtxq1g = t4_nofldtxq1g;
1688                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1689                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1690         }
1691 #endif
1692
1693         for (itype = INTR_MSIX; itype; itype >>= 1) {
1694
1695                 if ((itype & t4_intr_types) == 0)
1696                         continue;       /* not allowed */
1697
1698                 if (itype == INTR_MSIX)
1699                         navail = pci_msix_count(sc->dev);
1700                 else if (itype == INTR_MSI)
1701                         navail = pci_msi_count(sc->dev);
1702                 else
1703                         navail = 1;
1704 restart:
1705                 if (navail == 0)
1706                         continue;
1707
1708                 iaq->intr_type = itype;
1709                 iaq->intr_flags = 0;
1710
1711                 /*
1712                  * Best option: an interrupt vector for errors, one for the
1713                  * firmware event queue, and one each for each rxq (NIC as well
1714                  * as offload).
1715                  */
1716                 iaq->nirq = T4_EXTRA_INTR;
1717                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1718                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1719                 if (iaq->nirq <= navail &&
1720                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1721                         iaq->intr_flags |= INTR_DIRECT;
1722                         goto allocate;
1723                 }
1724
1725                 /*
1726                  * Second best option: an interrupt vector for errors, one for
1727                  * the firmware event queue, and one each for either NIC or
1728                  * offload rxq's.
1729                  */
1730                 iaq->nirq = T4_EXTRA_INTR;
1731                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1732                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1733                 if (iaq->nirq <= navail &&
1734                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1735                         goto allocate;
1736
1737                 /*
1738                  * Next best option: an interrupt vector for errors, one for the
1739                  * firmware event queue, and at least one per port.  At this
1740                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1741                  * what's available to us.
1742                  */
1743                 iaq->nirq = T4_EXTRA_INTR;
1744                 iaq->nirq += n10g + n1g;
1745                 if (iaq->nirq <= navail) {
1746                         int leftover = navail - iaq->nirq;
1747
1748                         if (n10g > 0) {
1749                                 int target = max(nrxq10g, nofldrxq10g);
1750
1751                                 n = 1;
1752                                 while (n < target && leftover >= n10g) {
1753                                         leftover -= n10g;
1754                                         iaq->nirq += n10g;
1755                                         n++;
1756                                 }
1757                                 iaq->nrxq10g = min(n, nrxq10g);
1758 #ifdef TCP_OFFLOAD
1759                                 if (is_offload(sc))
1760                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1761 #endif
1762                         }
1763
1764                         if (n1g > 0) {
1765                                 int target = max(nrxq1g, nofldrxq1g);
1766
1767                                 n = 1;
1768                                 while (n < target && leftover >= n1g) {
1769                                         leftover -= n1g;
1770                                         iaq->nirq += n1g;
1771                                         n++;
1772                                 }
1773                                 iaq->nrxq1g = min(n, nrxq1g);
1774 #ifdef TCP_OFFLOAD
1775                                 if (is_offload(sc))
1776                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1777 #endif
1778                         }
1779
1780                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1781                                 goto allocate;
1782                 }
1783
1784                 /*
1785                  * Least desirable option: one interrupt vector for everything.
1786                  */
1787                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1788 #ifdef TCP_OFFLOAD
1789                 if (is_offload(sc))
1790                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1791 #endif
1792
1793 allocate:
1794                 navail = iaq->nirq;
1795                 rc = 0;
1796                 if (itype == INTR_MSIX)
1797                         rc = pci_alloc_msix(sc->dev, &navail);
1798                 else if (itype == INTR_MSI)
1799                         rc = pci_alloc_msi(sc->dev, &navail);
1800
1801                 if (rc == 0) {
1802                         if (navail == iaq->nirq)
1803                                 return (0);
1804
1805                         /*
1806                          * Didn't get the number requested.  Use whatever number
1807                          * the kernel is willing to allocate (it's in navail).
1808                          */
1809                         device_printf(sc->dev, "fewer vectors than requested, "
1810                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1811                             itype, iaq->nirq, navail);
1812                         pci_release_msi(sc->dev);
1813                         goto restart;
1814                 }
1815
1816                 device_printf(sc->dev,
1817                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1818                     itype, rc, iaq->nirq, navail);
1819         }
1820
1821         device_printf(sc->dev,
1822             "failed to find a usable interrupt type.  "
1823             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1824             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1825
1826         return (ENXIO);
1827 }
1828
/*
 * Pack the version number of the firmware bundled with the driver (the
 * chip-specific T4FW_VERSION_* / T5FW_VERSION_* constants) into the 32-bit
 * format used by the fw_hdr fw_ver field.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface (API) version constant for a given chip and uld/interface name. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)

/*
 * Per-chip firmware information: the KLD names to load and the firmware
 * header (version + interface versions) the driver was compiled against.
 * Looked up by chip id via find_fw_info().
 */
struct fw_info {
        uint8_t chip;           /* chip id (CHELSIO_T4/T5) */
        char *kld_name;         /* KLD with the default config file */
        char *fw_mod_name;      /* firmware module name */
        struct fw_hdr fw_hdr;   /* XXX: waste of space, need a sparse struct */
} fw_info[] = {
        {
                .chip = CHELSIO_T4,
                .kld_name = "t4fw_cfg",
                .fw_mod_name = "t4fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T4,
                        .fw_ver = htobe32_const(FW_VERSION(T4)),
                        .intfver_nic = FW_INTFVER(T4, NIC),
                        .intfver_vnic = FW_INTFVER(T4, VNIC),
                        .intfver_ofld = FW_INTFVER(T4, OFLD),
                        .intfver_ri = FW_INTFVER(T4, RI),
                        .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
                },
        }, {
                .chip = CHELSIO_T5,
                .kld_name = "t5fw_cfg",
                .fw_mod_name = "t5fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T5,
                        .fw_ver = htobe32_const(FW_VERSION(T5)),
                        .intfver_nic = FW_INTFVER(T5, NIC),
                        .intfver_vnic = FW_INTFVER(T5, VNIC),
                        .intfver_ofld = FW_INTFVER(T5, OFLD),
                        .intfver_ri = FW_INTFVER(T5, RI),
                        .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
                },
        }
};
1876
1877 static struct fw_info *
1878 find_fw_info(int chip)
1879 {
1880         int i;
1881
1882         for (i = 0; i < nitems(fw_info); i++) {
1883                 if (fw_info[i].chip == chip)
1884                         return (&fw_info[i]);
1885         }
1886         return (NULL);
1887 }
1888
1889 /*
1890  * Is the given firmware API compatible with the one the driver was compiled
1891  * with?
1892  */
1893 static int
1894 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1895 {
1896
1897         /* short circuit if it's the exact same firmware version */
1898         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1899                 return (1);
1900
1901         /*
1902          * XXX: Is this too conservative?  Perhaps I should limit this to the
1903          * features that are supported in the driver.
1904          */
1905 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1906         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1907             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1908             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1909                 return (1);
1910 #undef SAME_INTF
1911
1912         return (0);
1913 }
1914
1915 /*
1916  * The firmware in the KLD is usable, but should it be installed?  This routine
1917  * explains itself in detail if it indicates the KLD firmware should be
1918  * installed.
1919  */
1920 static int
1921 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1922 {
1923         const char *reason;
1924
1925         if (!card_fw_usable) {
1926                 reason = "incompatible or unusable";
1927                 goto install;
1928         }
1929
1930         if (k > c) {
1931                 reason = "older than the version bundled with this driver";
1932                 goto install;
1933         }
1934
1935         if (t4_fw_install == 2 && k != c) {
1936                 reason = "different than the version bundled with this driver";
1937                 goto install;
1938         }
1939
1940         return (0);
1941
1942 install:
1943         if (t4_fw_install == 0) {
1944                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1945                     "but the driver is prohibited from installing a different "
1946                     "firmware on the card.\n",
1947                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1948                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1949
1950                 return (0);
1951         }
1952
1953         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1954             "installing firmware %u.%u.%u.%u on card.\n",
1955             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1956             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1957             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1958             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1959
1960         return (1);
1961 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 */
static int
prep_firmware(struct adapter *sc)
{
        const struct firmware *fw = NULL, *default_cfg;
        int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
        enum dev_state state;
        struct fw_info *fw_info;
        struct fw_hdr *card_fw;         /* fw on the card */
        const struct fw_hdr *kld_fw;    /* fw in the KLD */
        const struct fw_hdr *drv_fw;    /* fw header the driver was compiled
                                           against */

        /* Contact firmware. */
        rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
        if (rc < 0 || state == DEV_STATE_ERR) {
                rc = -rc;
                device_printf(sc->dev,
                    "failed to connect to the firmware: %d, %d.\n", rc, state);
                return (rc);
        }
        /* On success t4_fw_hello returns the mbox of the master PF. */
        pf = rc;
        if (pf == sc->mbox)
                sc->flags |= MASTER_PF;
        else if (state == DEV_STATE_UNINIT) {
                /*
                 * We didn't get to be the master so we definitely won't be
                 * configuring the chip.  It's a bug if someone else hasn't
                 * configured it already.
                 */
                device_printf(sc->dev, "couldn't be master(%d), "
                    "device not already initialized either(%d).\n", rc, state);
                return (EDOOFUS);
        }

        /* This is the firmware whose headers the driver was compiled against */
        fw_info = find_fw_info(chip_id(sc));
        if (fw_info == NULL) {
                device_printf(sc->dev,
                    "unable to look up firmware information for chip %d.\n",
                    chip_id(sc));
                return (EINVAL);
        }
        drv_fw = &fw_info->fw_hdr;

        /*
         * The firmware KLD contains many modules.  The KLD name is also the
         * name of the module that contains the default config file.
         */
        default_cfg = firmware_get(fw_info->kld_name);

        /* Read the header of the firmware on the card */
        card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_read_flash(sc, FLASH_FW_START,
            sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
        if (rc == 0)
                card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
        else {
                device_printf(sc->dev,
                    "Unable to read card's firmware header: %d\n", rc);
                /* card_fw stays zeroed (M_ZERO) so later reads are safe. */
                card_fw_usable = 0;
        }

        /* This is the firmware in the KLD */
        fw = firmware_get(fw_info->fw_mod_name);
        if (fw != NULL) {
                kld_fw = (const void *)fw->data;
                kld_fw_usable = fw_compatible(drv_fw, kld_fw);
        } else {
                kld_fw = NULL;
                kld_fw_usable = 0;
        }

        if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
            (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
                /*
                 * Common case: the firmware on the card is an exact match and
                 * the KLD is an exact match too, or the KLD is
                 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
                 * here -- use cxgbetool loadfw if you want to reinstall the
                 * same firmware as the one on the card.
                 */
        } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
            should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
            be32toh(card_fw->fw_ver))) {

                /* Flash the KLD firmware onto the card. */
                rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to install firmware: %d\n", rc);
                        goto done;
                }

                /* Installed successfully, update the cached header too. */
                memcpy(card_fw, kld_fw, sizeof(*card_fw));
                card_fw_usable = 1;
                need_fw_reset = 0;      /* already reset as part of load_fw */
        }

        if (!card_fw_usable) {
                uint32_t d, c, k;

                d = ntohl(drv_fw->fw_ver);
                c = ntohl(card_fw->fw_ver);
                k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

                device_printf(sc->dev, "Cannot find a usable firmware: "
                    "fw_install %d, chip state %d, "
                    "driver compiled with %d.%d.%d.%d, "
                    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
                    t4_fw_install, state,
                    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
                    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
                    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
                    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
                    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
                    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
                rc = EINVAL;
                goto done;
        }

        /* We're using whatever's on the card and it's known to be good. */
        sc->params.fw_vers = ntohl(card_fw->fw_ver);
        snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
            G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
            G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
        t4_get_tp_version(sc, &sc->params.tp_vers);

        /* Reset device */
        if (need_fw_reset &&
            (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
                device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
                /* On a fatal mailbox error, don't attempt a clean goodbye. */
                if (rc != ETIMEDOUT && rc != EIO)
                        t4_fw_bye(sc, sc->mbox);
                goto done;
        }
        sc->flags |= FW_OK;

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /* Partition adapter resources as specified in the config file. */
        if (state == DEV_STATE_UNINIT) {

                KASSERT(sc->flags & MASTER_PF,
                    ("%s: trying to change chip settings when not master.",
                    __func__));

                rc = partition_resources(sc, default_cfg, fw_info->kld_name);
                if (rc != 0)
                        goto done;      /* error message displayed already */

                t4_tweak_chip_settings(sc);

                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        device_printf(sc->dev, "fw init failed: %d.\n", rc);
                        goto done;
                }
        } else {
                /* Someone else configured the chip; record which PF did. */
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
                sc->cfcsum = 0;
        }

done:
        free(card_fw, M_CXGBE);
        if (fw != NULL)
                firmware_put(fw, FIRMWARE_UNLOAD);
        if (default_cfg != NULL)
                firmware_put(default_cfg, FIRMWARE_UNLOAD);

        return (rc);
}
2142
/*
 * Shorthand for building firmware parameter identifiers: device-wide (DEV)
 * and per-PF/VF (PFVF) mnemonics, for use with t4_query_params and friends.
 */
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2149
/*
 * Partition chip resources for use between various PFs, VFs, etc.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
        const struct firmware *cfg = NULL;
        int rc = 0;
        struct fw_caps_config_cmd caps;
        uint32_t mtype, moff, finicsum, cfcsum;

        /*
         * Figure out what configuration file to use.  Pick the default config
         * file for the card if the user hasn't specified one explicitly.
         */
        snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
        if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
                /* Card specific overrides go here. */
                if (pci_get_device(sc->dev) == 0x440a)
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
                if (is_fpga(sc))
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
        }

        /*
         * We need to load another module if the profile is anything except
         * "default" or "flash".
         */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
            strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                char s[32];

                /* Module name is "<kld_name>_<profile>", e.g. t4fw_cfg_uwire */
                snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
                cfg = firmware_get(s);
                if (cfg == NULL) {
                        /* Fall back to default cfg, or the flash copy. */
                        if (default_cfg != NULL) {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the default config file instead.\n",
                                    s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", DEFAULT_CF);
                        } else {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the config file on the card's flash "
                                    "instead.\n", s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", FLASH_CF);
                        }
                }
        }

        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
            default_cfg == NULL) {
                device_printf(sc->dev,
                    "default config file not available, will use the config "
                    "file on the card's flash instead.\n");
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
        }

        /* Upload the config file to card memory, unless using the flash copy */
        if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                u_int cflen, i, n;
                const uint32_t *cfdata;
                uint32_t param, val, addr, off, mw_base, mw_aperture;

                KASSERT(cfg != NULL || default_cfg != NULL,
                    ("%s: no config to upload", __func__));

                /*
                 * Ask the firmware where it wants us to upload the config file.
                 */
                param = FW_PARAM_DEV(CF);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* No support for config file?  Shouldn't happen. */
                        device_printf(sc->dev,
                            "failed to query config file location: %d.\n", rc);
                        goto done;
                }
                mtype = G_FW_PARAMS_PARAM_Y(val);
                moff = G_FW_PARAMS_PARAM_Z(val) << 16;

                /*
                 * XXX: sheer laziness.  We deliberately added 4 bytes of
                 * useless stuffing/comments at the end of the config file so
                 * it's ok to simply throw away the last remaining bytes when
                 * the config file is not an exact multiple of 4.  This also
                 * helps with the validate_mt_off_len check.
                 */
                if (cfg != NULL) {
                        cflen = cfg->datasize & ~3;
                        cfdata = cfg->data;
                } else {
                        cflen = default_cfg->datasize & ~3;
                        cfdata = default_cfg->data;
                }

                if (cflen > FLASH_CFG_MAX_SIZE) {
                        device_printf(sc->dev,
                            "config file too long (%d, max allowed is %d).  "
                            "Will try to use the config on the card, if any.\n",
                            cflen, FLASH_CFG_MAX_SIZE);
                        goto use_config_on_flash;
                }

                rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                            "Will try to use the config on the card, if any.\n",
                            __func__, mtype, moff, cflen, rc);
                        goto use_config_on_flash;
                }

                /* Copy the file through memory window 2, 4 bytes at a time. */
                memwin_info(sc, 2, &mw_base, &mw_aperture);
                while (cflen) {
                        off = position_memwin(sc, 2, addr);
                        n = min(cflen, mw_aperture - off);
                        for (i = 0; i < n; i += 4)
                                t4_write_reg(sc, mw_base + off + i, *cfdata++);
                        cflen -= n;
                        addr += n;
                }
        } else {
use_config_on_flash:
                mtype = FW_MEMTYPE_CF_FLASH;
                moff = t4_flash_cfg_addr(sc);
        }

        /* Tell the firmware to parse the config file at (mtype, moff). */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to pre-process config file: %d "
                    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
                goto done;
        }

        finicsum = be32toh(caps.finicsum);
        cfcsum = be32toh(caps.cfcsum);
        if (finicsum != cfcsum) {
                device_printf(sc->dev,
                    "WARNING: config file checksum mismatch: %08x %08x\n",
                    finicsum, cfcsum);
        }
        sc->cfcsum = cfcsum;

/*
 * Mask each big-endian capability field with its tunable and cache the result.
 * NOTE(review): sc->x is assigned htobe16(caps.x); be16toh would be the
 * semantically correct macro, but a 16-bit byte swap is symmetric so the
 * result is identical -- confirm intent before changing.
 */
#define LIMIT_CAPS(x) do { \
        caps.x &= htobe16(t4_##x##_allowed); \
        sc->x = htobe16(caps.x); \
} while (0)

        /*
         * Let the firmware know what features will (not) be used so it can tune
         * things accordingly.
         */
        LIMIT_CAPS(linkcaps);
        LIMIT_CAPS(niccaps);
        LIMIT_CAPS(toecaps);
        LIMIT_CAPS(rdmacaps);
        LIMIT_CAPS(iscsicaps);
        LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

        /* Write the (possibly reduced) capabilities back to the firmware. */
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to process config file: %d.\n", rc);
        }
done:
        if (cfg != NULL)
                firmware_put(cfg, FIRMWARE_UNLOAD);
        return (rc);
}
2336
/*
 * Retrieve parameters that are needed (or nice to have) very early.
 */
static int
get_params__pre_init(struct adapter *sc)
{
        int rc;
        uint32_t param[2], val[2];
        struct fw_devlog_cmd cmd;
        struct devlog_params *dlog = &sc->params.devlog;

        /* Query the port vector and core clock in one mailbox round trip. */
        param[0] = FW_PARAM_DEV(PORTVEC);
        param[1] = FW_PARAM_DEV(CCLK);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (pre_init): %d.\n", rc);
                return (rc);
        }

        sc->params.portvec = val[0];
        /* Number of ports = number of bits set in the port vector. */
        sc->params.nports = bitcount32(val[0]);
        sc->params.vpd.cclk = val[1];

        /* Read device log parameters. */
        bzero(&cmd, sizeof(cmd));
        cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        cmd.retval_len16 = htobe32(FW_LEN16(cmd));
        rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get devlog parameters: %d.\n", rc);
                bzero(dlog, sizeof (*dlog));
                rc = 0; /* devlog isn't critical for device operation */
        } else {
                val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
                dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
                /* Address is stored in 16-byte units; convert to bytes. */
                dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
                dlog->size = be32toh(cmd.memsize_devlog);
        }

        return (rc);
}
2381
2382 /*
2383  * Retrieve various parameters that are of interest to the driver.  The device
2384  * has been initialized by the firmware at this point.
2385  */
2386 static int
2387 get_params__post_init(struct adapter *sc)
2388 {
2389         int rc;
2390         uint32_t param[7], val[7];
2391         struct fw_caps_config_cmd caps;
2392
2393         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2394         param[1] = FW_PARAM_PFVF(EQ_START);
2395         param[2] = FW_PARAM_PFVF(FILTER_START);
2396         param[3] = FW_PARAM_PFVF(FILTER_END);
2397         param[4] = FW_PARAM_PFVF(L2T_START);
2398         param[5] = FW_PARAM_PFVF(L2T_END);
2399         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2400         if (rc != 0) {
2401                 device_printf(sc->dev,
2402                     "failed to query parameters (post_init): %d.\n", rc);
2403                 return (rc);
2404         }
2405
2406         sc->sge.iq_start = val[0];
2407         sc->sge.eq_start = val[1];
2408         sc->tids.ftid_base = val[2];
2409         sc->tids.nftids = val[3] - val[2] + 1;
2410         sc->vres.l2t.start = val[4];
2411         sc->vres.l2t.size = val[5] - val[4] + 1;
2412         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2413             ("%s: L2 table size (%u) larger than expected (%u)",
2414             __func__, sc->vres.l2t.size, L2T_SIZE));
2415
2416         /* get capabilites */
2417         bzero(&caps, sizeof(caps));
2418         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2419             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2420         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2421         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2422         if (rc != 0) {
2423                 device_printf(sc->dev,
2424                     "failed to get card capabilities: %d.\n", rc);
2425                 return (rc);
2426         }
2427
2428         if (caps.toecaps) {
2429                 /* query offload-related parameters */
2430                 param[0] = FW_PARAM_DEV(NTID);
2431                 param[1] = FW_PARAM_PFVF(SERVER_START);
2432                 param[2] = FW_PARAM_PFVF(SERVER_END);
2433                 param[3] = FW_PARAM_PFVF(TDDP_START);
2434                 param[4] = FW_PARAM_PFVF(TDDP_END);
2435                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2436                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2437                 if (rc != 0) {
2438                         device_printf(sc->dev,
2439                             "failed to query TOE parameters: %d.\n", rc);
2440                         return (rc);
2441                 }
2442                 sc->tids.ntids = val[0];
2443                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2444                 sc->tids.stid_base = val[1];
2445                 sc->tids.nstids = val[2] - val[1] + 1;
2446                 sc->vres.ddp.start = val[3];
2447                 sc->vres.ddp.size = val[4] - val[3] + 1;
2448                 sc->params.ofldq_wr_cred = val[5];
2449                 sc->params.offload = 1;
2450         }
2451         if (caps.rdmacaps) {
2452                 param[0] = FW_PARAM_PFVF(STAG_START);
2453                 param[1] = FW_PARAM_PFVF(STAG_END);
2454                 param[2] = FW_PARAM_PFVF(RQ_START);
2455                 param[3] = FW_PARAM_PFVF(RQ_END);
2456                 param[4] = FW_PARAM_PFVF(PBL_START);
2457                 param[5] = FW_PARAM_PFVF(PBL_END);
2458                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2459                 if (rc != 0) {
2460                         device_printf(sc->dev,
2461                             "failed to query RDMA parameters(1): %d.\n", rc);
2462                         return (rc);
2463                 }
2464                 sc->vres.stag.start = val[0];
2465                 sc->vres.stag.size = val[1] - val[0] + 1;
2466                 sc->vres.rq.start = val[2];
2467                 sc->vres.rq.size = val[3] - val[2] + 1;
2468                 sc->vres.pbl.start = val[4];
2469                 sc->vres.pbl.size = val[5] - val[4] + 1;
2470
2471                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2472                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2473                 param[2] = FW_PARAM_PFVF(CQ_START);
2474                 param[3] = FW_PARAM_PFVF(CQ_END);
2475                 param[4] = FW_PARAM_PFVF(OCQ_START);
2476                 param[5] = FW_PARAM_PFVF(OCQ_END);
2477                 rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
2478                 if (rc != 0) {
2479                         device_printf(sc->dev,
2480                             "failed to query RDMA parameters(2): %d.\n", rc);
2481                         return (rc);
2482                 }
2483                 sc->vres.qp.start = val[0];
2484                 sc->vres.qp.size = val[1] - val[0] + 1;
2485                 sc->vres.cq.start = val[2];
2486                 sc->vres.cq.size = val[3] - val[2] + 1;
2487                 sc->vres.ocq.start = val[4];
2488                 sc->vres.ocq.size = val[5] - val[4] + 1;
2489         }
2490         if (caps.iscsicaps) {
2491                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2492                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2493                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2494                 if (rc != 0) {
2495                         device_printf(sc->dev,
2496                             "failed to query iSCSI parameters: %d.\n", rc);
2497                         return (rc);
2498                 }
2499                 sc->vres.iscsi.start = val[0];
2500                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2501         }
2502
2503         /*
2504          * We've got the params we wanted to query via the firmware.  Now grab
2505          * some others directly from the chip.
2506          */
2507         rc = t4_read_chip_settings(sc);
2508
2509         return (rc);
2510 }
2511
2512 static int
2513 set_params__post_init(struct adapter *sc)
2514 {
2515         uint32_t param, val;
2516
2517         /* ask for encapsulated CPLs */
2518         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2519         val = 1;
2520         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2521
2522         return (0);
2523 }
2524
2525 #undef FW_PARAM_PFVF
2526 #undef FW_PARAM_DEV
2527
2528 static void
2529 t4_set_desc(struct adapter *sc)
2530 {
2531         char buf[128];
2532         struct adapter_params *p = &sc->params;
2533
2534         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
2535             p->vpd.id, is_offload(sc) ? "R" : "", chip_rev(sc), p->vpd.sn,
2536             p->vpd.ec);
2537
2538         device_set_desc_copy(sc->dev, buf);
2539 }
2540
static void
build_medialist(struct port_info *pi)
{
        struct ifmedia *media = &pi->media;
        int data, m;

        PORT_LOCK(pi);

        /* Rebuild from scratch; the transceiver module may have changed. */
        ifmedia_removeall(media);

        /* All media added below are full duplex. */
        m = IFM_ETHER | IFM_FDX;
        /* Encode port and module type into the ifmedia data word. */
        data = (pi->port_type << 8) | pi->mod_type;

        switch(pi->port_type) {
        case FW_PORT_TYPE_BT_XFI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                break;

        case FW_PORT_TYPE_BT_XAUI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                /* fall through */

        case FW_PORT_TYPE_BT_SGMII:
                ifmedia_add(media, m | IFM_1000_T, data, NULL);
                ifmedia_add(media, m | IFM_100_TX, data, NULL);
                ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
                /* BaseT ports autonegotiate; default to auto. */
                ifmedia_set(media, IFM_ETHER | IFM_AUTO);
                break;

        case FW_PORT_TYPE_CX4:
                ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
                ifmedia_set(media, m | IFM_10G_CX4);
                break;

        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
                /* 10G media depends on the module that is plugged in. */
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_10G_LR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LR);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_10G_SR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_SR);
                        break;

                case FW_PORT_MOD_TYPE_LRM:
                        ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LRM);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
                        ifmedia_set(media, m | IFM_10G_TWINAX);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no link, no duplex. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                case FW_PORT_MOD_TYPE_NA:
                case FW_PORT_MOD_TYPE_ER:
                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        case FW_PORT_TYPE_QSFP:
                /* 40G media also depends on the plugged-in module. */
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_LR4);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_SR4);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_CR4);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no link, no duplex. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        default:
                device_printf(pi->dev,
                    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
                    pi->mod_type);
                ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                ifmedia_set(media, m | IFM_UNKNOWN);
                break;
        }

        PORT_UNLOCK(pi);
}
2665
2666 #define FW_MAC_EXACT_CHUNK      7
2667
2668 /*
2669  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2670  * indicates which parameters should be programmed (the rest are left alone).
2671  */
static int
update_mac_settings(struct port_info *pi, int flags)
{
        int rc;
        struct ifnet *ifp = pi->ifp;
        struct adapter *sc = pi->adapter;
        /* -1 means "leave this setting unchanged" for t4_set_rxmode. */
        int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT(flags, ("%s: not told what to update.", __func__));

        if (flags & XGMAC_MTU)
                mtu = ifp->if_mtu;

        if (flags & XGMAC_PROMISC)
                promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

        if (flags & XGMAC_ALLMULTI)
                allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

        if (flags & XGMAC_VLANEX)
                vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

        /* bcast (the literal 1) is always enabled. */
        rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
            vlanex, false);
        if (rc) {
                if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
                return (rc);
        }

        if (flags & XGMAC_UCADDR) {
                uint8_t ucaddr[ETHER_ADDR_LEN];

                bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
                /*
                 * t4_change_mac returns the index of the exact-match filter
                 * used (>= 0) on success, or a negative errno on failure.
                 */
                rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
                    ucaddr, true, true);
                if (rc < 0) {
                        rc = -rc;
                        if_printf(ifp, "change_mac failed: %d\n", rc);
                        return (rc);
                } else {
                        pi->xact_addr_filt = rc;
                        rc = 0;
                }
        }

        if (flags & XGMAC_MCADDRS) {
                const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
                int del = 1;    /* free old filters on the first chunk only */
                uint64_t hash = 0;
                struct ifmultiaddr *ifma;
                int i = 0, j;

                /*
                 * Walk the multicast list and program the exact-match
                 * filters FW_MAC_EXACT_CHUNK addresses at a time; any
                 * overflow goes into the hash filter.
                 */
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        mcaddr[i++] =
                            LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

                        if (i == FW_MAC_EXACT_CHUNK) {
                                rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
                                    del, i, mcaddr, NULL, &hash, 0);
                                if (rc < 0) {
                                        rc = -rc;
                                        for (j = 0; j < i; j++) {
                                                if_printf(ifp,
                                                    "failed to add mc address"
                                                    " %02x:%02x:%02x:"
                                                    "%02x:%02x:%02x rc=%d\n",
                                                    mcaddr[j][0], mcaddr[j][1],
                                                    mcaddr[j][2], mcaddr[j][3],
                                                    mcaddr[j][4], mcaddr[j][5],
                                                    rc);
                                        }
                                        goto mcfail;
                                }
                                del = 0;
                                i = 0;
                        }
                }
                /* Program the final partial chunk, if any. */
                if (i > 0) {
                        rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
                            del, i, mcaddr, NULL, &hash, 0);
                        if (rc < 0) {
                                rc = -rc;
                                for (j = 0; j < i; j++) {
                                        if_printf(ifp,
                                            "failed to add mc address"
                                            " %02x:%02x:%02x:"
                                            "%02x:%02x:%02x rc=%d\n",
                                            mcaddr[j][0], mcaddr[j][1],
                                            mcaddr[j][2], mcaddr[j][3],
                                            mcaddr[j][4], mcaddr[j][5],
                                            rc);
                                }
                                goto mcfail;
                        }
                }

                rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
                if (rc != 0)
                        if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
                if_maddr_runlock(ifp);
        }

        return (rc);
}
2781
2782 int
2783 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2784     char *wmesg)
2785 {
2786         int rc, pri;
2787
2788 #ifdef WITNESS
2789         /* the caller thinks it's ok to sleep, but is it really? */
2790         if (flags & SLEEP_OK)
2791                 pause("t4slptst", 1);
2792 #endif
2793
2794         if (INTR_OK)
2795                 pri = PCATCH;
2796         else
2797                 pri = 0;
2798
2799         ADAPTER_LOCK(sc);
2800         for (;;) {
2801
2802                 if (pi && IS_DOOMED(pi)) {
2803                         rc = ENXIO;
2804                         goto done;
2805                 }
2806
2807                 if (!IS_BUSY(sc)) {
2808                         rc = 0;
2809                         break;
2810                 }
2811
2812                 if (!(flags & SLEEP_OK)) {
2813                         rc = EBUSY;
2814                         goto done;
2815                 }
2816
2817                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2818                         rc = EINTR;
2819                         goto done;
2820                 }
2821         }
2822
2823         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2824         SET_BUSY(sc);
2825 #ifdef INVARIANTS
2826         sc->last_op = wmesg;
2827         sc->last_op_thr = curthread;
2828 #endif
2829
2830 done:
2831         if (!(flags & HOLD_LOCK) || rc)
2832                 ADAPTER_UNLOCK(sc);
2833
2834         return (rc);
2835 }
2836
2837 void
2838 end_synchronized_op(struct adapter *sc, int flags)
2839 {
2840
2841         if (flags & LOCK_HELD)
2842                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2843         else
2844                 ADAPTER_LOCK(sc);
2845
2846         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2847         CLR_BUSY(sc);
2848         wakeup(&sc->flags);
2849         ADAPTER_UNLOCK(sc);
2850 }
2851
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* Lazily bring up adapter-wide and then per-port resources. */
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        if (!(pi->flags & PORT_INIT_DONE) &&
            ((rc = port_full_init(pi)) != 0))
                return (rc); /* error message displayed already */

        rc = update_mac_settings(pi, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
        if (rc != 0) {
                if_printf(ifp, "start_link failed: %d\n", rc);
                goto done;
        }

        /* rx_en = tx_en = true: let traffic flow through the VI. */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Start the 1Hz housekeeping tick for this port. */
        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);

        return (rc);
}
2904
2905 /*
2906  * Idempotent.
2907  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc;

        ASSERT_SYNCHRONIZED_OP(sc);

        /*
         * Disable the VI so that all its data in either direction is discarded
         * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
         * tick) intact as the TP can deliver negative advice or data that it's
         * holding in its RAM (for an offloaded connection) even after the VI is
         * disabled.
         */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
        if (rc) {
                if_printf(ifp, "disable_vi failed: %d\n", rc);
                return (rc);
        }

        clrbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Report the link as down; -1 linkdnrc means "no reason recorded". */
        pi->link_cfg.link_ok = 0;
        pi->link_cfg.speed = 0;
        pi->linkdnrc = -1;
        t4_os_link_changed(sc, pi->port_id, 0, -1);

        return (0);
}
2942
2943 /*
2944  * It is ok for this function to fail midway and return right away.  t4_detach
2945  * will walk the entire sc->irq list and clean up whatever is valid.
2946  */
static int
setup_intr_handlers(struct adapter *sc)
{
        int rc, rid, p, q;
        char s[8];
        struct irq *irq;
        struct port_info *pi;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif

        /*
         * Setup interrupts.
         */
        irq = &sc->irq[0];
        /* INTx uses rid 0; MSI/MSI-X resource ids start at 1. */
        rid = sc->intr_type == INTR_INTX ? 0 : 1;
        if (sc->intr_count == 1) {
                KASSERT(!(sc->flags & INTR_DIRECT),
                    ("%s: single interrupt && INTR_DIRECT?", __func__));

                /* One vector services everything. */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
                if (rc != 0)
                        return (rc);
        } else {
                /* Multiple interrupts. */
                KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));

                /* The first one is always error intr */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /* The second one is always the firmware event queue */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
                    "evt");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /*
                 * Note that if INTR_DIRECT is not set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queues will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, p) {
                        pi = sc->port[p];

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        /* One vector per NIC rx queue, described "port.queue". */
                        rxq = &sc->sge.rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                snprintf(s, sizeof(s), "%d.%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
                                    s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT))
                                continue;
ofld_queues:
                        /* Offload rx queues use "port,queue" (comma, not dot). */
                        ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                snprintf(s, sizeof(s), "%d,%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr,
                                    ofld_rxq, s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }
#endif
                }
        }

        return (0);
}
3046
3047 static int
3048 adapter_full_init(struct adapter *sc)
3049 {
3050         int rc, i;
3051
3052         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3053         KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3054             ("%s: FULL_INIT_DONE already", __func__));
3055
3056         /*
3057          * queues that belong to the adapter (not any particular port).
3058          */
3059         rc = t4_setup_adapter_queues(sc);
3060         if (rc != 0)
3061                 goto done;
3062
3063         for (i = 0; i < nitems(sc->tq); i++) {
3064                 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3065                     taskqueue_thread_enqueue, &sc->tq[i]);
3066                 if (sc->tq[i] == NULL) {
3067                         device_printf(sc->dev,
3068                             "failed to allocate task queue %d\n", i);
3069                         rc = ENOMEM;
3070                         goto done;
3071                 }
3072                 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3073                     device_get_nameunit(sc->dev), i);
3074         }
3075
3076         t4_intr_enable(sc);
3077         sc->flags |= FULL_INIT_DONE;
3078 done:
3079         if (rc != 0)
3080                 adapter_full_uninit(sc);
3081
3082         return (rc);
3083 }
3084
3085 static int
3086 adapter_full_uninit(struct adapter *sc)
3087 {
3088         int i;
3089
3090         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3091
3092         t4_teardown_adapter_queues(sc);
3093
3094         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3095                 taskqueue_free(sc->tq[i]);
3096                 sc->tq[i] = NULL;
3097         }
3098
3099         sc->flags &= ~FULL_INIT_DONE;
3100
3101         return (0);
3102 }
3103
static int
port_full_init(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        uint16_t *rss;
        struct sge_rxq *rxq;
        int rc, i;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT((pi->flags & PORT_INIT_DONE) == 0,
            ("%s: PORT_INIT_DONE already", __func__));

        sysctl_ctx_init(&pi->ctx);
        pi->flags |= PORT_SYSCTL_CTX;

        /*
         * Allocate tx/rx/fl queues for this port.
         */
        rc = t4_setup_port_queues(pi);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * Setup RSS for this port.  The indirection table is filled with the
         * absolute ids of this port's rx queues.  M_WAITOK means the malloc
         * cannot fail.
         */
        rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
            M_ZERO | M_WAITOK);
        for_each_rxq(pi, i, rxq) {
                rss[i] = rxq->iq.abs_id;
        }
        rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
            pi->rss_size, rss, pi->nrxq);
        free(rss, M_CXGBE);
        if (rc != 0) {
                if_printf(ifp, "rss_config failed: %d\n", rc);
                goto done;
        }

        pi->flags |= PORT_INIT_DONE;
done:
        if (rc != 0)
                port_full_uninit(pi);

        return (rc);
}
3150
3151 /*
3152  * Idempotent.
3153  */
static int
port_full_uninit(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        int i;
        struct sge_rxq *rxq;
        struct sge_txq *txq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
        struct sge_wrq *ofld_txq;
#endif

        if (pi->flags & PORT_INIT_DONE) {

                /* Need to quiesce queues.  XXX: ctrl queues? */

                /* Tx side first: stop egress before draining ingress. */
                for_each_txq(pi, i, txq) {
                        quiesce_eq(sc, &txq->eq);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_txq(pi, i, ofld_txq) {
                        quiesce_eq(sc, &ofld_txq->eq);
                }
#endif

                for_each_rxq(pi, i, rxq) {
                        quiesce_iq(sc, &rxq->iq);
                        quiesce_fl(sc, &rxq->fl);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_rxq(pi, i, ofld_rxq) {
                        quiesce_iq(sc, &ofld_rxq->iq);
                        quiesce_fl(sc, &ofld_rxq->fl);
                }
#endif
        }

        t4_teardown_port_queues(pi);
        pi->flags &= ~PORT_INIT_DONE;

        return (0);
}
3198
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
        /* Doom the queue so no new work is accepted. */
        EQ_LOCK(eq);
        eq->flags |= EQ_DOOMED;

        /*
         * Wait for the response to a credit flush if one's
         * pending.
         */
        while (eq->flags & EQ_CRFLUSHED)
                mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
        EQ_UNLOCK(eq);

        callout_drain(&eq->tx_callout); /* XXX: iffy */
        pause("callout", 10);           /* Still iffy */

        /* Let any queued tx task run to completion. */
        taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3218
3219 static void
3220 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3221 {
3222         (void) sc;      /* unused */
3223
3224         /* Synchronize with the interrupt handler */
3225         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3226                 pause("iqfree", 1);
3227 }
3228
/*
 * Quiesce a freelist: mark it doomed -- taking the adapter-wide sfl_lock
 * first so the starving-freelist machinery won't pick it up again -- and
 * then drain the adapter's starving-freelist callout.  By the time the
 * callout is drained the fl must no longer be marked FL_STARVING.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        /* sfl_lock before the fl lock: same order as the starvation handler,
         * presumably -- TODO confirm against t4_sge.c. */
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        mtx_unlock(&sc->sfl_lock);

        callout_drain(&sc->sfl_callout);
        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
3242
3243 static int
3244 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3245     driver_intr_t *handler, void *arg, char *name)
3246 {
3247         int rc;
3248
3249         irq->rid = rid;
3250         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3251             RF_SHAREABLE | RF_ACTIVE);
3252         if (irq->res == NULL) {
3253                 device_printf(sc->dev,
3254                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3255                 return (ENOMEM);
3256         }
3257
3258         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3259             NULL, handler, arg, &irq->tag);
3260         if (rc != 0) {
3261                 device_printf(sc->dev,
3262                     "failed to setup interrupt for rid %d, name %s: %d\n",
3263                     rid, name, rc);
3264         } else if (name)
3265                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3266
3267         return (rc);
3268 }
3269
3270 static int
3271 t4_free_irq(struct adapter *sc, struct irq *irq)
3272 {
3273         if (irq->tag)
3274                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3275         if (irq->res)
3276                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3277
3278         bzero(irq, sizeof(*irq));
3279
3280         return (0);
3281 }
3282
/*
 * Copy the adapter registers in [start, end] (inclusive, 4-byte stride)
 * into buf.  The destination is buf + start, i.e. buf is indexed by
 * register address so the caller can hand in one buffer for many ranges.
 */
static void
reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
    unsigned int end)
{
        unsigned int reg;
        uint32_t *p;

        p = (uint32_t *)(buf + start);
        for (reg = start; reg <= end; reg += sizeof(uint32_t))
                *p++ = t4_read_reg(sc, reg);
}
3292
/*
 * Fill in a register dump for the T4_REGDUMP ioctl.  Picks the register
 * range table that matches the chip (T4 vs T5), records the chip id/rev in
 * regs->version, and copies every listed range into buf via
 * reg_block_dump().  Each table entry is a {first, last} pair of register
 * addresses, both inclusive.  buf is assumed to be large enough for the
 * highest address in the selected table -- TODO confirm against the caller.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i, n;
        const unsigned int *reg_ranges;
        /* T4 register ranges: pairs of inclusive {start, end} addresses. */
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };
        /* T5 register ranges: same {start, end} pair layout as above. */
        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b94, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e408, 0x1e40c,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e690,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e808, 0x1e80c,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea90,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec08, 0x1ec0c,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee90,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f008, 0x1f00c,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f290,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f408, 0x1f40c,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f690,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f808, 0x1f80c,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa90,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc08, 0x1fc0c,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe90,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30030,
                0x30100, 0x30144,
                0x30190, 0x301d0,
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
                0x30a00, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
                0x30d00, 0x30d00,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
                0x31600, 0x3160c,
                0x31a00, 0x31a1c,
                0x31e00, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
                0x32200, 0x32200,
                0x32208, 0x32240,
                0x32248, 0x32280,
                0x32288, 0x322c0,
                0x322c8, 0x322fc,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
                0x33000, 0x33048,
                0x33060, 0x3309c,
                0x330f0, 0x33148,
                0x33160, 0x3319c,
                0x331f0, 0x332e4,
                0x332f8, 0x333e4,
                0x333f8, 0x33448,
                0x33460, 0x3349c,
                0x334f0, 0x33548,
                0x33560, 0x3359c,
                0x335f0, 0x336e4,
                0x336f8, 0x337e4,
                0x337f8, 0x337fc,
                0x33814, 0x33814,
                0x3382c, 0x3382c,
                0x33880, 0x3388c,
                0x338e8, 0x338ec,
                0x33900, 0x33948,
                0x33960, 0x3399c,
                0x339f0, 0x33ae4,
                0x33af8, 0x33b10,
                0x33b28, 0x33b28,
                0x33b3c, 0x33b50,
                0x33bf0, 0x33c10,
                0x33c28, 0x33c28,
                0x33c3c, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34030,
                0x34100, 0x34144,
                0x34190, 0x341d0,
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
                0x34a00, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
                0x34d00, 0x34d00,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
                0x35600, 0x3560c,
                0x35a00, 0x35a1c,
                0x35e00, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
                0x36200, 0x36200,
                0x36208, 0x36240,
                0x36248, 0x36280,
                0x36288, 0x362c0,
                0x362c8, 0x362fc,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
                0x36b00, 0x36b70,
                0x37000, 0x37048,
                0x37060, 0x3709c,
                0x370f0, 0x37148,
                0x37160, 0x3719c,
                0x371f0, 0x372e4,
                0x372f8, 0x373e4,
                0x373f8, 0x37448,
                0x37460, 0x3749c,
                0x374f0, 0x37548,
                0x37560, 0x3759c,
                0x375f0, 0x376e4,
                0x376f8, 0x377e4,
                0x377f8, 0x377fc,
                0x37814, 0x37814,
                0x3782c, 0x3782c,
                0x37880, 0x3788c,
                0x378e8, 0x378ec,
                0x37900, 0x37948,
                0x37960, 0x3799c,
                0x379f0, 0x37ae4,
                0x37af8, 0x37b10,
                0x37b28, 0x37b28,
                0x37b3c, 0x37b50,
                0x37bf0, 0x37c10,
                0x37c28, 0x37c28,
                0x37c3c, 0x37c50,
                0x37cf0, 0x37cfc,
                0x38000, 0x38030,
                0x38100, 0x38144,
                0x38190, 0x381d0,
                0x38200, 0x38318,
                0x38400, 0x3852c,
                0x38540, 0x3861c,
                0x38800, 0x38834,
                0x388c0, 0x38908,
                0x38910, 0x389ac,
                0x38a00, 0x38a2c,
                0x38a44, 0x38a50,
                0x38a74, 0x38c24,
                0x38d00, 0x38d00,
                0x38d08, 0x38d14,
                0x38d1c, 0x38d20,
                0x38d3c, 0x38d50,
                0x39200, 0x3920c,
                0x39220, 0x39220,
                0x39240, 0x39240,
                0x39600, 0x3960c,
                0x39a00, 0x39a1c,
                0x39e00, 0x39e20,
                0x39e38, 0x39e3c,
                0x39e80, 0x39e80,
                0x39e88, 0x39ea8,
                0x39eb0, 0x39eb4,
                0x39ec8, 0x39ed4,
                0x39fb8, 0x3a004,
                0x3a200, 0x3a200,
                0x3a208, 0x3a240,
                0x3a248, 0x3a280,
                0x3a288, 0x3a2c0,
                0x3a2c8, 0x3a2fc,
                0x3a600, 0x3a630,
                0x3aa00, 0x3aabc,
                0x3ab00, 0x3ab70,
                0x3b000, 0x3b048,
                0x3b060, 0x3b09c,
                0x3b0f0, 0x3b148,
                0x3b160, 0x3b19c,
                0x3b1f0, 0x3b2e4,
                0x3b2f8, 0x3b3e4,
                0x3b3f8, 0x3b448,
                0x3b460, 0x3b49c,
                0x3b4f0, 0x3b548,
                0x3b560, 0x3b59c,
                0x3b5f0, 0x3b6e4,
                0x3b6f8, 0x3b7e4,
                0x3b7f8, 0x3b7fc,
                0x3b814, 0x3b814,
                0x3b82c, 0x3b82c,
                0x3b880, 0x3b88c,
                0x3b8e8, 0x3b8ec,
                0x3b900, 0x3b948,
                0x3b960, 0x3b99c,
                0x3b9f0, 0x3bae4,
                0x3baf8, 0x3bb10,
                0x3bb28, 0x3bb28,
                0x3bb3c, 0x3bb50,
                0x3bbf0, 0x3bc10,
                0x3bc28, 0x3bc28,
                0x3bc3c, 0x3bc50,
                0x3bcf0, 0x3bcfc,
                0x3c000, 0x3c030,
                0x3c100, 0x3c144,
                0x3c190, 0x3c1d0,
                0x3c200, 0x3c318,
                0x3c400, 0x3c52c,
                0x3c540, 0x3c61c,
                0x3c800, 0x3c834,
                0x3c8c0, 0x3c908,
                0x3c910, 0x3c9ac,
                0x3ca00, 0x3ca2c,
                0x3ca44, 0x3ca50,
                0x3ca74, 0x3cc24,
                0x3cd00, 0x3cd00,
                0x3cd08, 0x3cd14,
                0x3cd1c, 0x3cd20,
                0x3cd3c, 0x3cd50,
                0x3d200, 0x3d20c,
                0x3d220, 0x3d220,
                0x3d240, 0x3d240,
                0x3d600, 0x3d60c,
                0x3da00, 0x3da1c,
                0x3de00, 0x3de20,
                0x3de38, 0x3de3c,
                0x3de80, 0x3de80,
                0x3de88, 0x3dea8,
                0x3deb0, 0x3deb4,
                0x3dec8, 0x3ded4,
                0x3dfb8, 0x3e004,
                0x3e200, 0x3e200,
                0x3e208, 0x3e240,
                0x3e248, 0x3e280,
                0x3e288, 0x3e2c0,
                0x3e2c8, 0x3e2fc,
                0x3e600, 0x3e630,
                0x3ea00, 0x3eabc,
                0x3eb00, 0x3eb70,
                0x3f000, 0x3f048,
                0x3f060, 0x3f09c,
                0x3f0f0, 0x3f148,
                0x3f160, 0x3f19c,
                0x3f1f0, 0x3f2e4,
                0x3f2f8, 0x3f3e4,
                0x3f3f8, 0x3f448,
                0x3f460, 0x3f49c,
                0x3f4f0, 0x3f548,
                0x3f560, 0x3f59c,
                0x3f5f0, 0x3f6e4,
                0x3f6f8, 0x3f7e4,
                0x3f7f8, 0x3f7fc,
                0x3f814, 0x3f814,
                0x3f82c, 0x3f82c,
                0x3f880, 0x3f88c,
                0x3f8e8, 0x3f8ec,
                0x3f900, 0x3f948,
                0x3f960, 0x3f99c,
                0x3f9f0, 0x3fae4,
                0x3faf8, 0x3fb10,
                0x3fb28, 0x3fb28,
                0x3fb3c, 0x3fb50,
                0x3fbf0, 0x3fc10,
                0x3fc28, 0x3fc28,
                0x3fc3c, 0x3fc50,
                0x3fcf0, 0x3fcfc,
                0x40000, 0x4000c,
                0x40040, 0x40068,
                0x4007c, 0x40144,
                0x40180, 0x4018c,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
                0x403f8, 0x403fc,
                0x41304, 0x413c4,
                0x41400, 0x4141c,
                0x41480, 0x414d0,
                0x44000, 0x44078,
                0x440c0, 0x44278,
                0x442c0, 0x44478,
                0x444c0, 0x44678,
                0x446c0, 0x44878,
                0x448c0, 0x449fc,
                0x45000, 0x45068,
                0x45080, 0x45084,
                0x450a0, 0x450b0,
                0x45200, 0x45268,
                0x45280, 0x45284,
                0x452a0, 0x452b0,
                0x460c0, 0x460e4,
                0x47000, 0x4708c,
                0x47200, 0x47250,
                0x47400, 0x47420,
                0x47600, 0x47618,
                0x47800, 0x47814,
                0x48000, 0x4800c,
                0x48040, 0x48068,
                0x4807c, 0x48144,
                0x48180, 0x4818c,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
                0x483f8, 0x483fc,
                0x49304, 0x493c4,
                0x49400, 0x4941c,
                0x49480, 0x494d0,
                0x4c000, 0x4c078,
                0x4c0c0, 0x4c278,
                0x4c2c0, 0x4c478,
                0x4c4c0, 0x4c678,
                0x4c6c0, 0x4c878,
                0x4c8c0, 0x4c9fc,
                0x4d000, 0x4d068,
                0x4d080, 0x4d084,
                0x4d0a0, 0x4d0b0,
                0x4d200, 0x4d268,
                0x4d280, 0x4d284,
                0x4d2a0, 0x4d2b0,
                0x4e0c0, 0x4e0e4,
                0x4f000, 0x4f08c,
                0x4f200, 0x4f250,
                0x4f400, 0x4f420,
                0x4f600, 0x4f618,
                0x4f800, 0x4f814,
                0x50000, 0x500cc,
                0x50400, 0x50400,
                0x50800, 0x508cc,
                0x50c00, 0x50c00,
                0x51000, 0x5101c,
                0x51300, 0x51308,
        };

        /* Select the table that matches the chip generation. */
        if (is_t4(sc)) {
                reg_ranges = &t4_reg_ranges[0];
                n = nitems(t4_reg_ranges);
        } else {
                reg_ranges = &t5_reg_ranges[0];
                n = nitems(t5_reg_ranges);
        }

        /* Chip id in the low bits, revision starting at bit 10. */
        regs->version = chip_id(sc) | chip_rev(sc) << 10;
        for (i = 0; i < n; i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
3968
/*
 * Periodic (once per second) per-port callout: pulls the hardware MAC
 * statistics for this port's tx channel and publishes them in the ifnet
 * counters, then reschedules itself.  Stops rescheduling once the
 * interface is no longer IFF_DRV_RUNNING.
 */
static void
cxgbe_tick(void *arg)
{
        struct port_info *pi = arg;
        struct ifnet *ifp = pi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;

        PORT_LOCK(pi);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return; /* without scheduling another callout */
        }

        /* Refresh pi->stats from the hardware. */
        t4_get_port_stats(pi->adapter, pi->tx_chan, s);

        /*
         * Pause frames are excluded from the packet/byte/mcast counters;
         * each pause frame is counted as 64 octets.
         */
        ifp->if_opackets = s->tx_frames - s->tx_pause;
        ifp->if_ipackets = s->rx_frames - s->rx_pause;
        ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
        ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
        ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
        ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
        /* Rx drops: per-channel FIFO overflows and truncations. */
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
            s->rx_trunc3;

        /* Tx drops: hardware drops plus each txq's buf_ring drops. */
        drops = s->tx_drop;
        for_each_txq(pi, i, txq)
                drops += txq->br->br_drops;
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        callout_schedule(&pi->tick, hz);        /* run again in 1 second */
        PORT_UNLOCK(pi);
}
4008
4009 static void
4010 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4011 {
4012         struct ifnet *vlan;
4013
4014         if (arg != ifp || ifp->if_type != IFT_ETHER)
4015                 return;
4016
4017         vlan = VLAN_DEVAT(ifp, vid);
4018         VLAN_SETCOOKIE(vlan, ifp);
4019 }
4020
/*
 * Default CPL handler, installed for any opcode with no registered handler.
 * Panics under INVARIANTS; otherwise logs the unexpected opcode and frees
 * the payload mbuf (m_freem(NULL) is a no-op).  Returns EDOOFUS to flag
 * the programming error.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
        panic("%s: opcode 0x%02x on iq %p with payload %p",
            __func__, rss->opcode, iq, m);
#else
        log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
            __func__, rss->opcode, iq, m);
        m_freem(m);
#endif
        return (EDOOFUS);
}
4035
4036 int
4037 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4038 {
4039         uintptr_t *loc, new;
4040
4041         if (opcode >= nitems(sc->cpl_handler))
4042                 return (EINVAL);
4043
4044         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4045         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4046         atomic_store_rel_ptr(loc, new);
4047
4048         return (0);
4049 }
4050
/*
 * Default async-notification handler, installed when no real handler is
 * registered.  Panics under INVARIANTS; otherwise logs the stray
 * notification.  Returns EDOOFUS to flag the programming error.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
        panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
        log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
            __func__, iq, ctrl);
#endif
        return (EDOOFUS);
}
4063
4064 int
4065 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4066 {
4067         uintptr_t *loc, new;
4068
4069         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4070         loc = (uintptr_t *) &sc->an_handler;
4071         atomic_store_rel_ptr(loc, new);
4072
4073         return (0);
4074 }
4075
/*
 * Default firmware-message handler, installed when no real handler is
 * registered for a message type.  Recovers the enclosing cpl_fw6_msg from
 * the payload pointer to report its type.  Panics under INVARIANTS;
 * otherwise logs.  Returns EDOOFUS to flag the programming error.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
        const struct cpl_fw6_msg *cpl =
            __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
        panic("%s: fw_msg type %d", __func__, cpl->type);
#else
        log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
        return (EDOOFUS);
}
4089
4090 int
4091 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4092 {
4093         uintptr_t *loc, new;
4094
4095         if (type >= nitems(sc->fw_msg_handler))
4096                 return (EINVAL);
4097
4098         /*
4099          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4100          * handler dispatch table.  Reject any attempt to install a handler for
4101          * this subtype.
4102          */
4103         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4104                 return (EINVAL);
4105
4106         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4107         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4108         atomic_store_rel_ptr(loc, new);
4109
4110         return (0);
4111 }
4112
/*
 * Create the dev.t4nex.X.* sysctl tree for the adapter: basic device
 * information and capability bitfields, holdoff parameter lists, an
 * optional "misc" subtree of diagnostic nodes (SBUF_DRAIN builds only),
 * and TOE tunables (TCP_OFFLOAD builds only).  Always returns 0.
 */
static int
t4_sysctls(struct adapter *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children, *c0;
        /* %b-style format strings used to decode the capability words. */
        static char *caps[] = {
                "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
                "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",       /* caps[1] niccaps */
                "\20\1TOE",                             /* caps[2] toecaps */
                "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
                "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
                    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
                    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
                "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
        };
        /* %b-style format string for the sc->doorbells bitfield. */
        static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};

        ctx = device_get_sysctl_ctx(sc->dev);

        /*
         * dev.t4nex.X.
         */
        oid = device_get_sysctl_tree(sc->dev);
        /* c0 keeps the top level so subtrees can be added after reuse. */
        c0 = children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
            sc->params.nports, "# of ports");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
            NULL, chip_rev(sc), "chip hardware revision");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
            CTLFLAG_RD, &sc->fw_version, 0, "firmware version");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
            CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
            sc->cfcsum, "config file checksum");

        /* Bitfields decoded on the fly by sysctl_bitfield. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
            CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
            sysctl_bitfield, "A", "available doorbells");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
            sysctl_bitfield, "A", "available link capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
            sysctl_bitfield, "A", "available NIC capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
            sysctl_bitfield, "A", "available TCP offload capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
            sysctl_bitfield, "A", "available RDMA capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
            sysctl_bitfield, "A", "available iSCSI capabilities");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
            CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
            sysctl_bitfield, "A", "available FCoE capabilities");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
            sc->params.vpd.cclk, "core clock frequency (in KHz)");

        /* Holdoff values are rendered as int arrays by sysctl_int_array. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
            CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
            sizeof(sc->sge.timer_val), sysctl_int_array, "A",
            "interrupt holdoff timer values (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
            CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
            sizeof(sc->sge.counter_val), sysctl_int_array, "A",
            "interrupt holdoff packet counter values");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
            NULL, sc->tids.nftids, "number of filters");

#ifdef SBUF_DRAIN
        /*
         * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
         */
        oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
            CTLFLAG_RD | CTLFLAG_SKIP, NULL,
            "logs and miscellaneous information");
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cctrl, "A", "congestion control");

        /*
         * CIM inbound queues: arg2 is the IBQ index passed to
         * sysctl_cim_ibq_obq.
         */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
            sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_la, "A", "CIM logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_ma_la, "A", "CIM MA logic analyzer");

        /*
         * CIM outbound queues share the same handler; arg2 values are
         * offset by CIM_NUM_IBQ to select OBQ rather than IBQ.
         */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
            sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");

        /* T5 has two extra OBQs. */
        if (is_t5(sc)) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
                    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");

                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
                    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
        }

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cim_qcfg, "A", "CIM queue configuration");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_cpl_stats, "A", "CPL statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_ddp_stats, "A", "DDP statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_devlog, "A", "firmware's device log");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_fcoe_stats, "A", "FCoE statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_hw_sched, "A", "hardware scheduler ");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_l2t, "A", "hardware L2 table");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_lb_stats, "A", "loopback statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_meminfo, "A", "memory regions");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_mps_tcam, "A", "MPS TCAM entries");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_path_mtus, "A", "path MTUs");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_pm_stats, "A", "PM statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_rdma_stats, "A", "RDMA statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tcp_stats, "A", "TCP statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tids, "A", "TID information");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tp_err_stats, "A", "TP error statistics");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tp_la, "A", "TP logic analyzer");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_tx_rate, "A", "Tx rate");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_ulprx_la, "A", "ULPRX logic analyzer");

        /* Write-combined work request stats exist on T5 only. */
        if (is_t5(sc)) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
                    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
                    sysctl_wcwr_stats, "A", "write combined work requests");
        }
#endif

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                /*
                 * dev.t4nex.X.toe.
                 */
                oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
                    NULL, "TOE parameters");
                children = SYSCTL_CHILDREN(oid);

                /* Defaults are set here, then exposed as RW tunables. */
                sc->tt.sndbuf = 256 * 1024;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
                    &sc->tt.sndbuf, 0, "max hardware send buffer size");

                sc->tt.ddp = 0;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
                    &sc->tt.ddp, 0, "DDP allowed");

                sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
                    &sc->tt.indsz, 0, "DDP max indicate size allowed");

                sc->tt.ddp_thres =
                    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
                    &sc->tt.ddp_thres, 0, "DDP threshold");

                sc->tt.rx_coalesce = 1;
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
                    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
        }
#endif


        return (0);
}
4399
/*
 * Create the dev.cxgbe.X.* sysctl tree for a port: link/queue settings,
 * optional TOE queue info (TCP_OFFLOAD builds), and a "stats" subtree of
 * MAC counters read directly from hardware registers.  Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "linkdnrc", CTLFLAG_RD,
            &pi->linkdnrc, 0, "reason why link is down");
        /* BT (10GBASE-T) PHYs expose temperature and firmware version. */
        if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
                    "PHY temperature (in Celsius)");
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
                    "PHY firmware version");
        }
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &pi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &pi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &pi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif

        /* Writable knobs; validation is done in the handlers. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Register a 64-bit counter read straight from the given MPS register
 * (arg2 carries the register address; sysctl_handle_t4_reg64 reads it).
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        /*
         * Rx counters below also index by pi->tx_chan; presumably the MPS
         * port statistics block is addressed by channel for both
         * directions — NOTE(review): confirm against the register map.
         */
        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Register a counter kept in software in pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /* We get these from port_stats and they may be stale by upto 1s */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
4623
4624 static int
4625 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4626 {
4627         int rc, *i;
4628         struct sbuf sb;
4629
4630         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4631         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4632                 sbuf_printf(&sb, "%d ", *i);
4633         sbuf_trim(&sb);
4634         sbuf_finish(&sb);
4635         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4636         sbuf_delete(&sb);
4637         return (rc);
4638 }
4639
4640 static int
4641 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4642 {
4643         int rc;
4644         struct sbuf *sb;
4645
4646         rc = sysctl_wire_old_buffer(req, 0);
4647         if (rc != 0)
4648                 return(rc);
4649
4650         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4651         if (sb == NULL)
4652                 return (ENOMEM);
4653
4654         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4655         rc = sbuf_finish(sb);
4656         sbuf_delete(sb);
4657
4658         return (rc);
4659 }
4660
/*
 * Read a value from the BT PHY over MDIO and report it as an int sysctl.
 * arg2 selects what is read: 0 = temperature, 1 = firmware version
 * (matches how cxgbe_sysctls registers the two nodes).
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        int op = arg2;
        struct adapter *sc = pi->adapter;
        u_int v;
        int rc;

        /* Serialize against other operations on this adapter/port. */
        rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
        if (rc)
                return (rc);
        /* XXX: magic numbers */
        rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
            &v);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);
        if (op == 0)
                v /= 256;       /* raw reading appears to be in 1/256 units — TODO confirm */

        rc = sysctl_handle_int(oidp, &v, 0, req);
        return (rc);
}
4685
/*
 * Get/set the port's interrupt holdoff timer index.  A write validates the
 * new index and then pushes the combined timer/counter setting into every
 * rx queue (NIC and, if built with TCP_OFFLOAD, TOE) on the port.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        struct adapter *sc = pi->adapter;
        int idx, rc, i;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif
        uint8_t v;

        idx = pi->tmr_idx;

        rc = sysctl_handle_int(oidp, &idx, 0, req);
        if (rc != 0 || req->newptr == NULL)
                return (rc);    /* error, or this was only a read */

        if (idx < 0 || idx >= SGE_NTIMERS)
                return (EINVAL);

        rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4tmr");
        if (rc)
                return (rc);

        /* Counter remains enabled iff a packet-count index is configured. */
        v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
        for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
                /* No 8-bit release store on this arch; plain store instead. */
                rxq->iq.intr_params = v;
#endif
        }
#ifdef TCP_OFFLOAD
        for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
                ofld_rxq->iq.intr_params = v;
#endif
        }
#endif
        pi->tmr_idx = idx;

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
4734
4735 static int
4736 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4737 {
4738         struct port_info *pi = arg1;
4739         struct adapter *sc = pi->adapter;
4740         int idx, rc;
4741
4742         idx = pi->pktc_idx;
4743
4744         rc = sysctl_handle_int(oidp, &idx, 0, req);
4745         if (rc != 0 || req->newptr == NULL)
4746                 return (rc);
4747
4748         if (idx < -1 || idx >= SGE_NCOUNTERS)
4749                 return (EINVAL);
4750
4751         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4752             "t4pktc");
4753         if (rc)
4754                 return (rc);
4755
4756         if (pi->flags & PORT_INIT_DONE)
4757                 rc = EBUSY; /* cannot be changed once the queues are created */
4758         else
4759                 pi->pktc_idx = idx;
4760
4761         end_synchronized_op(sc, LOCK_HELD);
4762         return (rc);
4763 }
4764
4765 static int
4766 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4767 {
4768         struct port_info *pi = arg1;
4769         struct adapter *sc = pi->adapter;
4770         int qsize, rc;
4771
4772         qsize = pi->qsize_rxq;
4773
4774         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4775         if (rc != 0 || req->newptr == NULL)
4776                 return (rc);
4777
4778         if (qsize < 128 || (qsize & 7))
4779                 return (EINVAL);
4780
4781         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4782             "t4rxqs");
4783         if (rc)
4784                 return (rc);
4785
4786         if (pi->flags & PORT_INIT_DONE)
4787                 rc = EBUSY; /* cannot be changed once the queues are created */
4788         else
4789                 pi->qsize_rxq = qsize;
4790
4791         end_synchronized_op(sc, LOCK_HELD);
4792         return (rc);
4793 }
4794
4795 static int
4796 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4797 {
4798         struct port_info *pi = arg1;
4799         struct adapter *sc = pi->adapter;
4800         int qsize, rc;
4801
4802         qsize = pi->qsize_txq;
4803
4804         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4805         if (rc != 0 || req->newptr == NULL)
4806                 return (rc);
4807
4808         /* bufring size must be powerof2 */
4809         if (qsize < 128 || !powerof2(qsize))
4810                 return (EINVAL);
4811
4812         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4813             "t4txqs");
4814         if (rc)
4815                 return (rc);
4816
4817         if (pi->flags & PORT_INIT_DONE)
4818                 rc = EBUSY; /* cannot be changed once the queues are created */
4819         else
4820                 pi->qsize_txq = qsize;
4821
4822         end_synchronized_op(sc, LOCK_HELD);
4823         return (rc);
4824 }
4825
4826 static int
4827 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4828 {
4829         struct adapter *sc = arg1;
4830         int reg = arg2;
4831         uint64_t val;
4832
4833         val = t4_read_reg64(sc, reg);
4834
4835         return (sysctl_handle_64(oidp, &val, 0, req));
4836 }
4837
4838 #ifdef SBUF_DRAIN
/*
 * Dump the TP congestion control table: for each of the NCCTRL_WIN windows,
 * the per-MTU additive increments plus the window's additive size (a_wnd)
 * and multiplicative decrement factor (b_wnd, decoded via dec_fac[]).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];	/* increment per (MTU, window) */
	/* Display strings for the 3-bit b_wnd decrement factor. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		/* Increments for MTUs 0-7, then MTUs 8-15 on the next line. */
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
4876
/*
 * Names of the CIM queues, indexed the way sysctl_cim_ibq_obq's arg2 is:
 * the CIM_NUM_IBQ inbound queues first, then the outbound queues (with
 * two extra OBQs that exist only on T5 and later chips).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
4882
4883 static int
4884 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4885 {
4886         struct adapter *sc = arg1;
4887         struct sbuf *sb;
4888         int rc, i, n, qid = arg2;
4889         uint32_t *buf, *p;
4890         char *qtype;
4891         u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4892
4893         KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4894             ("%s: bad qid %d\n", __func__, qid));
4895
4896         if (qid < CIM_NUM_IBQ) {
4897                 /* inbound queue */
4898                 qtype = "IBQ";
4899                 n = 4 * CIM_IBQ_SIZE;
4900                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4901                 rc = t4_read_cim_ibq(sc, qid, buf, n);
4902         } else {
4903                 /* outbound queue */
4904                 qtype = "OBQ";
4905                 qid -= CIM_NUM_IBQ;
4906                 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4907                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4908                 rc = t4_read_cim_obq(sc, qid, buf, n);
4909         }
4910
4911         if (rc < 0) {
4912                 rc = -rc;
4913                 goto done;
4914         }
4915         n = rc * sizeof(uint32_t);      /* rc has # of words actually read */
4916
4917         rc = sysctl_wire_old_buffer(req, 0);
4918         if (rc != 0)
4919                 goto done;
4920
4921         sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4922         if (sb == NULL) {
4923                 rc = ENOMEM;
4924                 goto done;
4925         }
4926
4927         sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
4928         for (i = 0, p = buf; i < n; i += 16, p += 4)
4929                 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4930                     p[2], p[3]);
4931
4932         rc = sbuf_finish(sb);
4933         sbuf_delete(sb);
4934 done:
4935         free(buf, M_CXGBE);
4936         return (rc);
4937 }
4938
4939 static int
4940 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
4941 {
4942         struct adapter *sc = arg1;
4943         u_int cfg;
4944         struct sbuf *sb;
4945         uint32_t *buf, *p;
4946         int rc;
4947
4948         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
4949         if (rc != 0)
4950                 return (rc);
4951
4952         rc = sysctl_wire_old_buffer(req, 0);
4953         if (rc != 0)
4954                 return (rc);
4955
4956         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4957         if (sb == NULL)
4958                 return (ENOMEM);
4959
4960         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
4961             M_ZERO | M_WAITOK);
4962
4963         rc = -t4_cim_read_la(sc, buf, NULL);
4964         if (rc != 0)
4965                 goto done;
4966
4967         sbuf_printf(sb, "Status   Data      PC%s",
4968             cfg & F_UPDBGLACAPTPCONLY ? "" :
4969             "     LS0Stat  LS0Addr             LS0Data");
4970
4971         KASSERT((sc->params.cim_la_size & 7) == 0,
4972             ("%s: p will walk off the end of buf", __func__));
4973
4974         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
4975                 if (cfg & F_UPDBGLACAPTPCONLY) {
4976                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
4977                             p[6], p[7]);
4978                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
4979                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
4980                             p[4] & 0xff, p[5] >> 8);
4981                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
4982                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4983                             p[1] & 0xf, p[2] >> 4);
4984                 } else {
4985                         sbuf_printf(sb,
4986                             "\n  %02x   %x%07x %x%07x %08x %08x "
4987                             "%08x%08x%08x%08x",
4988                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
4989                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
4990                             p[6], p[7]);
4991                 }
4992         }
4993
4994         rc = sbuf_finish(sb);
4995         sbuf_delete(sb);
4996 done:
4997         free(buf, M_CXGBE);
4998         return (rc);
4999 }
5000
/*
 * Dump the CIM MA logic analyzer.  t4_cim_read_ma_la() fills two halves:
 * the first CIM_MALA_SIZE entries (5 words each) are raw trace data, the
 * second half is decoded below into Cnt/ID/Tag/UE/Data/RDY/VLD fields.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Two groups of CIM_MALA_SIZE entries, 5 words per entry. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	/* First group: raw 160-bit entries, most significant word first. */
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Second group: bit-sliced decode of each entry. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5044
5045 static int
5046 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5047 {
5048         struct adapter *sc = arg1;
5049         u_int i;
5050         struct sbuf *sb;
5051         uint32_t *buf, *p;
5052         int rc;
5053
5054         rc = sysctl_wire_old_buffer(req, 0);
5055         if (rc != 0)
5056                 return (rc);
5057
5058         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5059         if (sb == NULL)
5060                 return (ENOMEM);
5061
5062         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5063             M_ZERO | M_WAITOK);
5064
5065         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5066         p = buf;
5067
5068         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5069         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5070                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5071                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5072                     p[4], p[3], p[2], p[1], p[0]);
5073         }
5074
5075         sbuf_printf(sb, "\n\nCntl ID               Data");
5076         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5077                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5078                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5079         }
5080
5081         rc = sbuf_finish(sb);
5082         sbuf_delete(sb);
5083         free(buf, M_CXGBE);
5084         return (rc);
5085 }
5086
/*
 * Show the configuration and state of all CIM queues: base, size,
 * threshold (IBQs only), read/write pointers and SOP/EOP counts.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	/* T5 has more OBQs and exposes shadow copies of the pointers. */
	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* 4 status words per queue, plus 2 write-pointer words per OBQ. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* Inbound queues first (these have a threshold)... */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	/* ...then outbound queues (write pointer comes from obq_wr). */
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5145
5146 static int
5147 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5148 {
5149         struct adapter *sc = arg1;
5150         struct sbuf *sb;
5151         int rc;
5152         struct tp_cpl_stats stats;
5153
5154         rc = sysctl_wire_old_buffer(req, 0);
5155         if (rc != 0)
5156                 return (rc);
5157
5158         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5159         if (sb == NULL)
5160                 return (ENOMEM);
5161
5162         t4_tp_get_cpl_stats(sc, &stats);
5163
5164         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5165             "channel 3\n");
5166         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5167                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5168         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5169                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5170
5171         rc = sbuf_finish(sb);
5172         sbuf_delete(sb);
5173
5174         return (rc);
5175 }
5176
5177 static int
5178 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5179 {
5180         struct adapter *sc = arg1;
5181         struct sbuf *sb;
5182         int rc;
5183         struct tp_usm_stats stats;
5184
5185         rc = sysctl_wire_old_buffer(req, 0);
5186         if (rc != 0)
5187                 return(rc);
5188
5189         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5190         if (sb == NULL)
5191                 return (ENOMEM);
5192
5193         t4_get_usm_stats(sc, &stats);
5194
5195         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5196         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5197         sbuf_printf(sb, "Drops:  %u", stats.drops);
5198
5199         rc = sbuf_finish(sb);
5200         sbuf_delete(sb);
5201
5202         return (rc);
5203 }
5204
/* Display strings for firmware devlog severity levels (FW_DEVLOG_LEVEL_*). */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5213
/* Display strings for firmware devlog facilities (FW_DEVLOG_FACILITY_*). */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5239
/*
 * Dump the firmware device log.  The log lives in adapter memory as a ring
 * of fw_devlog_e entries; this handler reads the whole region, finds the
 * oldest entry (smallest timestamp), and prints the ring in order from
 * there.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	/*
	 * Hard-coded fallback when the firmware didn't report devlog
	 * parameters — presumably the historical location/size; confirm
	 * against the firmware in use before relying on it.
	 */
	if (dparams->start == 0) {
		dparams->memtype = 0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
	    (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * Byte-swap every entry and remember the one with the smallest
	 * timestamp: that's the oldest and hence the start of the ring.
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the ring from the oldest entry, wrapping at nentries. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* NOTE(review): e->fmt comes from the firmware image. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5325
5326 static int
5327 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5328 {
5329         struct adapter *sc = arg1;
5330         struct sbuf *sb;
5331         int rc;
5332         struct tp_fcoe_stats stats[4];
5333
5334         rc = sysctl_wire_old_buffer(req, 0);
5335         if (rc != 0)
5336                 return (rc);
5337
5338         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5339         if (sb == NULL)
5340                 return (ENOMEM);
5341
5342         t4_get_fcoe_stats(sc, 0, &stats[0]);
5343         t4_get_fcoe_stats(sc, 1, &stats[1]);
5344         t4_get_fcoe_stats(sc, 2, &stats[2]);
5345         t4_get_fcoe_stats(sc, 3, &stats[3]);
5346
5347         sbuf_printf(sb, "                   channel 0        channel 1        "
5348             "channel 2        channel 3\n");
5349         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5350             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5351             stats[3].octetsDDP);
5352         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5353             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5354         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5355             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5356             stats[3].framesDrop);
5357
5358         rc = sbuf_finish(sb);
5359         sbuf_delete(sb);
5360
5361         return (rc);
5362 }
5363
/*
 * Show the state of the hardware tx schedulers: mode (flow/class),
 * channel mapping, rate, and the class/flow inter-packet gaps.
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* 2 bits of channel map per scheduler; mode bit per scheduler. */
	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		/* A zero rate/IPG/pace value means that field is disabled. */
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5413
/*
 * Show the loopback port statistics, two loopback ports per column pair.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	/*
	 * NOTE(review): printed by walking uint64_t fields of struct
	 * lb_port_stats starting at .octets — assumes the struct's field
	 * order matches this table exactly; verify when the struct changes.
	 */
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Loopback ports are printed two at a time, side by side. */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5461
/* A memory range used by sysctl_meminfo: [base, limit] plus a name index. */
struct mem_desc {
	unsigned int base;	/* start address */
	unsigned int limit;	/* last address (inclusive); 0 = open-ended */
	unsigned int idx;	/* index into a name table; >= table size hides it */
};
5467
5468 static int
5469 mem_desc_cmp(const void *a, const void *b)
5470 {
5471         return ((const struct mem_desc *)a)->base -
5472                ((const struct mem_desc *)b)->base;
5473 }
5474
/*
 * Append one line describing the memory region [from, to] to the sbuf.
 * A zero-sized region (to == from - 1) produces no output.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int nbytes = to - from + 1;

	if (nbytes != 0) {
		/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
		sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, nbytes);
	}
}
5488
5489 static int
5490 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5491 {
5492         struct adapter *sc = arg1;
5493         struct sbuf *sb;
5494         int rc, i, n;
5495         uint32_t lo, hi, used, alloc;
5496         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5497         static const char *region[] = {
5498                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5499                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5500                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5501                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5502                 "RQUDP region:", "PBL region:", "TXPBL region:",
5503                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5504                 "On-chip queues:"
5505         };
5506         struct mem_desc avail[4];
5507         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5508         struct mem_desc *md = mem;
5509
5510         rc = sysctl_wire_old_buffer(req, 0);
5511         if (rc != 0)
5512                 return (rc);
5513
5514         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5515         if (sb == NULL)
5516                 return (ENOMEM);
5517
5518         for (i = 0; i < nitems(mem); i++) {
5519                 mem[i].limit = 0;
5520                 mem[i].idx = i;
5521         }
5522
5523         /* Find and sort the populated memory ranges */
5524         i = 0;
5525         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5526         if (lo & F_EDRAM0_ENABLE) {
5527                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5528                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5529                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5530                 avail[i].idx = 0;
5531                 i++;
5532         }
5533         if (lo & F_EDRAM1_ENABLE) {
5534                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5535                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5536                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5537                 avail[i].idx = 1;
5538                 i++;
5539         }
5540         if (lo & F_EXT_MEM_ENABLE) {
5541                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5542                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5543                 avail[i].limit = avail[i].base +
5544                     (G_EXT_MEM_SIZE(hi) << 20);
5545                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5546                 i++;
5547         }
5548         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5549                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5550                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5551                 avail[i].limit = avail[i].base +
5552                     (G_EXT_MEM1_SIZE(hi) << 20);
5553                 avail[i].idx = 4;
5554                 i++;
5555         }
5556         if (!i)                                    /* no memory available */
5557                 return 0;
5558         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5559
5560         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5561         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5562         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5563         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5564         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5565         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5566         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5567         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5568         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5569
5570         /* the next few have explicit upper bounds */
5571         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5572         md->limit = md->base - 1 +
5573                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5574                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5575         md++;
5576
5577         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5578         md->limit = md->base - 1 +
5579                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5580                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5581         md++;
5582
5583         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5584                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5585                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5586                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5587         } else {
5588                 md->base = 0;
5589                 md->idx = nitems(region);  /* hide it */
5590         }
5591         md++;
5592
5593 #define ulp_region(reg) \
5594         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5595         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5596
5597         ulp_region(RX_ISCSI);
5598         ulp_region(RX_TDDP);
5599         ulp_region(TX_TPT);
5600         ulp_region(RX_STAG);
5601         ulp_region(RX_RQ);
5602         ulp_region(RX_RQUDP);
5603         ulp_region(RX_PBL);
5604         ulp_region(TX_PBL);
5605 #undef ulp_region
5606
5607         md->base = 0;
5608         md->idx = nitems(region);
5609         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5610                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5611                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5612                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5613         }
5614         md++;
5615
5616         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5617         md->limit = md->base + sc->tids.ntids - 1;
5618         md++;
5619         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5620         md->limit = md->base + sc->tids.ntids - 1;
5621         md++;
5622
5623         md->base = sc->vres.ocq.start;
5624         if (sc->vres.ocq.size)
5625                 md->limit = md->base + sc->vres.ocq.size - 1;
5626         else
5627                 md->idx = nitems(region);  /* hide it */
5628         md++;
5629
5630         /* add any address-space holes, there can be up to 3 */
5631         for (n = 0; n < i - 1; n++)
5632                 if (avail[n].limit < avail[n + 1].base)
5633                         (md++)->base = avail[n].limit;
5634         if (avail[n].limit)
5635                 (md++)->base = avail[n].limit;
5636
5637         n = md - mem;
5638         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5639
5640         for (lo = 0; lo < i; lo++)
5641                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5642                                 avail[lo].limit - 1);
5643
5644         sbuf_printf(sb, "\n");
5645         for (i = 0; i < n; i++) {
5646                 if (mem[i].idx >= nitems(region))
5647                         continue;                        /* skip holes */
5648                 if (!mem[i].limit)
5649                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5650                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5651                                 mem[i].limit);
5652         }
5653
5654         sbuf_printf(sb, "\n");
5655         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5656         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5657         mem_region_show(sb, "uP RAM:", lo, hi);
5658
5659         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5660         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5661         mem_region_show(sb, "uP Extmem2:", lo, hi);
5662
5663         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5664         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5665                    G_PMRXMAXPAGE(lo),
5666                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5667                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5668
5669         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5670         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5671         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5672                    G_PMTXMAXPAGE(lo),
5673                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5674                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5675         sbuf_printf(sb, "%u p-structs\n",
5676                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5677
5678         for (i = 0; i < 4; i++) {
5679                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5680                 if (is_t4(sc)) {
5681                         used = G_USED(lo);
5682                         alloc = G_ALLOC(lo);
5683                 } else {
5684                         used = G_T5_USED(lo);
5685                         alloc = G_T5_ALLOC(lo);
5686                 }
5687                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5688                            i, used, alloc);
5689         }
5690         for (i = 0; i < 4; i++) {
5691                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5692                 if (is_t4(sc)) {
5693                         used = G_USED(lo);
5694                         alloc = G_ALLOC(lo);
5695                 } else {
5696                         used = G_T5_USED(lo);
5697                         alloc = G_T5_ALLOC(lo);
5698                 }
5699                 sbuf_printf(sb,
5700                            "\nLoopback %d using %u pages out of %u allocated",
5701                            i, used, alloc);
5702         }
5703
5704         rc = sbuf_finish(sb);
5705         sbuf_delete(sb);
5706
5707         return (rc);
5708 }
5709
5710 static inline void
5711 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5712 {
5713         *mask = x | y;
5714         y = htobe64(y);
5715         memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5716 }
5717
/*
 * Sysctl handler that dumps the MPS TCAM: one line per valid entry with
 * the Ethernet address, match mask, lookup result from the classifier
 * SRAM, and, for entries with replication enabled, the replication map
 * obtained from the firmware with an LDST command.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* T5 has a larger TCAM than T4. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* A bit set in both x and y means the entry holds no
                 * valid address -- skip it. */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /* Ask the firmware for this entry's replication
                         * map (128 bits, returned in 4 words). */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        if (rc != 0) {
                                /* Note the error inline and keep going
                                 * with the remaining entries. */
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /* Non-zero rc here means begin_synchronized_op failed mid-loop. */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5807
5808 static int
5809 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5810 {
5811         struct adapter *sc = arg1;
5812         struct sbuf *sb;
5813         int rc;
5814         uint16_t mtus[NMTUS];
5815
5816         rc = sysctl_wire_old_buffer(req, 0);
5817         if (rc != 0)
5818                 return (rc);
5819
5820         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5821         if (sb == NULL)
5822                 return (ENOMEM);
5823
5824         t4_read_mtu_tbl(sc, mtus, NULL);
5825
5826         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5827             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5828             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5829             mtus[14], mtus[15]);
5830
5831         rc = sbuf_finish(sb);
5832         sbuf_delete(sb);
5833
5834         return (rc);
5835 }
5836
5837 static int
5838 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5839 {
5840         struct adapter *sc = arg1;
5841         struct sbuf *sb;
5842         int rc, i;
5843         uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5844         uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5845         static const char *pm_stats[] = {
5846                 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5847         };
5848
5849         rc = sysctl_wire_old_buffer(req, 0);
5850         if (rc != 0)
5851                 return (rc);
5852
5853         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5854         if (sb == NULL)
5855                 return (ENOMEM);
5856
5857         t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5858         t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5859
5860         sbuf_printf(sb, "                Tx count            Tx cycles    "
5861             "Rx count            Rx cycles");
5862         for (i = 0; i < PM_NSTATS; i++)
5863                 sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5864                     pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5865
5866         rc = sbuf_finish(sb);
5867         sbuf_delete(sb);
5868
5869         return (rc);
5870 }
5871
5872 static int
5873 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5874 {
5875         struct adapter *sc = arg1;
5876         struct sbuf *sb;
5877         int rc;
5878         struct tp_rdma_stats stats;
5879
5880         rc = sysctl_wire_old_buffer(req, 0);
5881         if (rc != 0)
5882                 return (rc);
5883
5884         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5885         if (sb == NULL)
5886                 return (ENOMEM);
5887
5888         t4_tp_get_rdma_stats(sc, &stats);
5889         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5890         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5891
5892         rc = sbuf_finish(sb);
5893         sbuf_delete(sb);
5894
5895         return (rc);
5896 }
5897
5898 static int
5899 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5900 {
5901         struct adapter *sc = arg1;
5902         struct sbuf *sb;
5903         int rc;
5904         struct tp_tcp_stats v4, v6;
5905
5906         rc = sysctl_wire_old_buffer(req, 0);
5907         if (rc != 0)
5908                 return (rc);
5909
5910         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5911         if (sb == NULL)
5912                 return (ENOMEM);
5913
5914         t4_tp_get_tcp_stats(sc, &v4, &v6);
5915         sbuf_printf(sb,
5916             "                                IP                 IPv6\n");
5917         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
5918             v4.tcpOutRsts, v6.tcpOutRsts);
5919         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
5920             v4.tcpInSegs, v6.tcpInSegs);
5921         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
5922             v4.tcpOutSegs, v6.tcpOutSegs);
5923         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
5924             v4.tcpRetransSegs, v6.tcpRetransSegs);
5925
5926         rc = sbuf_finish(sb);
5927         sbuf_delete(sb);
5928
5929         return (rc);
5930 }
5931
/*
 * Sysctl handler that reports TID usage: the ATID, TID, STID, and FTID
 * ranges carved out of the LE database, their in-use counts, and the
 * hardware's active IPv4/IPv6 TID counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        if (t->ntids) {
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        /*
                         * Hash filtering enabled: the TID space is split
                         * around the hash region.  NOTE(review): the
                         * registers appear to hold values in units of 4,
                         * hence the division -- confirm against the LE
                         * register documentation.
                         */
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5991
/*
 * Sysctl handler for TP error statistics: per-channel MAC/header/TCP
 * input error and drop counters, followed by the two global offload
 * counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_err_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        /* Snapshot all counters, then render one row per statistic. */
        t4_tp_get_err_stats(sc, &stats);

        sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
                      "channel 3\n");
        sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
            stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
            stats.macInErrs[3]);
        sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
            stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
            stats.hdrInErrs[3]);
        sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
            stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
            stats.tcpInErrs[3]);
        sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
            stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
            stats.tcp6InErrs[3]);
        sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
            stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
            stats.tnlCongDrops[3]);
        sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
            stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
            stats.tnlTxDrops[3]);
        sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
            stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
            stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
        sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
            stats.ofldChanDrops[0], stats.ofldChanDrops[1],
            stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
        /* These two are adapter-wide, not per-channel. */
        sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
            stats.ofldNoNeigh, stats.ofldCongDefer);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6044
/*
 * Describes one bit field inside a 64-bit register/logic-analyzer word:
 * the field's name, the bit position of its LSB, and its width in bits.
 * Tables of these are terminated by an entry with a NULL name (see
 * field_desc_show).
 */
struct field_desc {
        const char *name;       /* field name; NULL terminates a table */
        u_int start;            /* LSB position within the 64-bit word */
        u_int width;            /* field width in bits */
};
6050
6051 static void
6052 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6053 {
6054         char buf[32];
6055         int line_size = 0;
6056
6057         while (f->name) {
6058                 uint64_t mask = (1ULL << f->width) - 1;
6059                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6060                     ((uintmax_t)v >> f->start) & mask);
6061
6062                 if (line_size + len >= 79) {
6063                         line_size = 8;
6064                         sbuf_printf(sb, "\n        ");
6065                 }
6066                 sbuf_printf(sb, "%s ", buf);
6067                 line_size += len + 1;
6068                 f++;
6069         }
6070         sbuf_printf(sb, "\n");
6071 }
6072
6073 static struct field_desc tp_la0[] = {
6074         { "RcfOpCodeOut", 60, 4 },
6075         { "State", 56, 4 },
6076         { "WcfState", 52, 4 },
6077         { "RcfOpcSrcOut", 50, 2 },
6078         { "CRxError", 49, 1 },
6079         { "ERxError", 48, 1 },
6080         { "SanityFailed", 47, 1 },
6081         { "SpuriousMsg", 46, 1 },
6082         { "FlushInputMsg", 45, 1 },
6083         { "FlushInputCpl", 44, 1 },
6084         { "RssUpBit", 43, 1 },
6085         { "RssFilterHit", 42, 1 },
6086         { "Tid", 32, 10 },
6087         { "InitTcb", 31, 1 },
6088         { "LineNumber", 24, 7 },
6089         { "Emsg", 23, 1 },
6090         { "EdataOut", 22, 1 },
6091         { "Cmsg", 21, 1 },
6092         { "CdataOut", 20, 1 },
6093         { "EreadPdu", 19, 1 },
6094         { "CreadPdu", 18, 1 },
6095         { "TunnelPkt", 17, 1 },
6096         { "RcfPeerFin", 16, 1 },
6097         { "RcfReasonOut", 12, 4 },
6098         { "TxCchannel", 10, 2 },
6099         { "RcfTxChannel", 8, 2 },
6100         { "RxEchannel", 6, 2 },
6101         { "RcfRxChannel", 5, 1 },
6102         { "RcfDataOutSrdy", 4, 1 },
6103         { "RxDvld", 3, 1 },
6104         { "RxOoDvld", 2, 1 },
6105         { "RxCongestion", 1, 1 },
6106         { "TxCongestion", 0, 1 },
6107         { NULL }
6108 };
6109
6110 static struct field_desc tp_la1[] = {
6111         { "CplCmdIn", 56, 8 },
6112         { "CplCmdOut", 48, 8 },
6113         { "ESynOut", 47, 1 },
6114         { "EAckOut", 46, 1 },
6115         { "EFinOut", 45, 1 },
6116         { "ERstOut", 44, 1 },
6117         { "SynIn", 43, 1 },
6118         { "AckIn", 42, 1 },
6119         { "FinIn", 41, 1 },
6120         { "RstIn", 40, 1 },
6121         { "DataIn", 39, 1 },
6122         { "DataInVld", 38, 1 },
6123         { "PadIn", 37, 1 },
6124         { "RxBufEmpty", 36, 1 },
6125         { "RxDdp", 35, 1 },
6126         { "RxFbCongestion", 34, 1 },
6127         { "TxFbCongestion", 33, 1 },
6128         { "TxPktSumSrdy", 32, 1 },
6129         { "RcfUlpType", 28, 4 },
6130         { "Eread", 27, 1 },
6131         { "Ebypass", 26, 1 },
6132         { "Esave", 25, 1 },
6133         { "Static0", 24, 1 },
6134         { "Cread", 23, 1 },
6135         { "Cbypass", 22, 1 },
6136         { "Csave", 21, 1 },
6137         { "CPktOut", 20, 1 },
6138         { "RxPagePoolFull", 18, 2 },
6139         { "RxLpbkPkt", 17, 1 },
6140         { "TxLpbkPkt", 16, 1 },
6141         { "RxVfValid", 15, 1 },
6142         { "SynLearned", 14, 1 },
6143         { "SetDelEntry", 13, 1 },
6144         { "SetInvEntry", 12, 1 },
6145         { "CpcmdDvld", 11, 1 },
6146         { "CpcmdSave", 10, 1 },
6147         { "RxPstructsFull", 8, 2 },
6148         { "EpcmdDvld", 7, 1 },
6149         { "EpcmdFlush", 6, 1 },
6150         { "EpcmdTrimPrefix", 5, 1 },
6151         { "EpcmdTrimPostfix", 4, 1 },
6152         { "ERssIp4Pkt", 3, 1 },
6153         { "ERssIp6Pkt", 2, 1 },
6154         { "ERssTcpUdpPkt", 1, 1 },
6155         { "ERssFceFipPkt", 0, 1 },
6156         { NULL }
6157 };
6158
6159 static struct field_desc tp_la2[] = {
6160         { "CplCmdIn", 56, 8 },
6161         { "MpsVfVld", 55, 1 },
6162         { "MpsPf", 52, 3 },
6163         { "MpsVf", 44, 8 },
6164         { "SynIn", 43, 1 },
6165         { "AckIn", 42, 1 },
6166         { "FinIn", 41, 1 },
6167         { "RstIn", 40, 1 },
6168         { "DataIn", 39, 1 },
6169         { "DataInVld", 38, 1 },
6170         { "PadIn", 37, 1 },
6171         { "RxBufEmpty", 36, 1 },
6172         { "RxDdp", 35, 1 },
6173         { "RxFbCongestion", 34, 1 },
6174         { "TxFbCongestion", 33, 1 },
6175         { "TxPktSumSrdy", 32, 1 },
6176         { "RcfUlpType", 28, 4 },
6177         { "Eread", 27, 1 },
6178         { "Ebypass", 26, 1 },
6179         { "Esave", 25, 1 },
6180         { "Static0", 24, 1 },
6181         { "Cread", 23, 1 },
6182         { "Cbypass", 22, 1 },
6183         { "Csave", 21, 1 },
6184         { "CPktOut", 20, 1 },
6185         { "RxPagePoolFull", 18, 2 },
6186         { "RxLpbkPkt", 17, 1 },
6187         { "TxLpbkPkt", 16, 1 },
6188         { "RxVfValid", 15, 1 },
6189         { "SynLearned", 14, 1 },
6190         { "SetDelEntry", 13, 1 },
6191         { "SetInvEntry", 12, 1 },
6192         { "CpcmdDvld", 11, 1 },
6193         { "CpcmdSave", 10, 1 },
6194         { "RxPstructsFull", 8, 2 },
6195         { "EpcmdDvld", 7, 1 },
6196         { "EpcmdFlush", 6, 1 },
6197         { "EpcmdTrimPrefix", 5, 1 },
6198         { "EpcmdTrimPostfix", 4, 1 },
6199         { "ERssIp4Pkt", 3, 1 },
6200         { "ERssIp6Pkt", 2, 1 },
6201         { "ERssTcpUdpPkt", 1, 1 },
6202         { "ERssFceFipPkt", 0, 1 },
6203         { NULL }
6204 };
6205
6206 static void
6207 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6208 {
6209
6210         field_desc_show(sb, *p, tp_la0);
6211 }
6212
6213 static void
6214 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6215 {
6216
6217         if (idx)
6218                 sbuf_printf(sb, "\n");
6219         field_desc_show(sb, p[0], tp_la0);
6220         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6221                 field_desc_show(sb, p[1], tp_la0);
6222 }
6223
6224 static void
6225 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6226 {
6227
6228         if (idx)
6229                 sbuf_printf(sb, "\n");
6230         field_desc_show(sb, p[0], tp_la0);
6231         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6232                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6233 }
6234
6235 static int
6236 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6237 {
6238         struct adapter *sc = arg1;
6239         struct sbuf *sb;
6240         uint64_t *buf, *p;
6241         int rc;
6242         u_int i, inc;
6243         void (*show_func)(struct sbuf *, uint64_t *, int);
6244
6245         rc = sysctl_wire_old_buffer(req, 0);
6246         if (rc != 0)
6247                 return (rc);
6248
6249         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6250         if (sb == NULL)
6251                 return (ENOMEM);
6252
6253         buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6254
6255         t4_tp_read_la(sc, buf, NULL);
6256         p = buf;
6257
6258         switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6259         case 2:
6260                 inc = 2;
6261                 show_func = tp_la_show2;
6262                 break;
6263         case 3:
6264                 inc = 2;
6265                 show_func = tp_la_show3;
6266                 break;
6267         default:
6268                 inc = 1;
6269                 show_func = tp_la_show;
6270         }
6271
6272         for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6273                 (*show_func)(sb, p, i);
6274
6275         rc = sbuf_finish(sb);
6276         sbuf_delete(sb);
6277         free(buf, M_CXGBE);
6278         return (rc);
6279 }
6280
6281 static int
6282 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6283 {
6284         struct adapter *sc = arg1;
6285         struct sbuf *sb;
6286         int rc;
6287         u64 nrate[NCHAN], orate[NCHAN];
6288
6289         rc = sysctl_wire_old_buffer(req, 0);
6290         if (rc != 0)
6291                 return (rc);
6292
6293         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6294         if (sb == NULL)
6295                 return (ENOMEM);
6296
6297         t4_get_chan_txrate(sc, nrate, orate);
6298         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6299                  "channel 3\n");
6300         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6301             nrate[0], nrate[1], nrate[2], nrate[3]);
6302         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6303             orate[0], orate[1], orate[2], orate[3]);
6304
6305         rc = sbuf_finish(sb);
6306         sbuf_delete(sb);
6307
6308         return (rc);
6309 }
6310
6311 static int
6312 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6313 {
6314         struct adapter *sc = arg1;
6315         struct sbuf *sb;
6316         uint32_t *buf, *p;
6317         int rc, i;
6318
6319         rc = sysctl_wire_old_buffer(req, 0);
6320         if (rc != 0)
6321                 return (rc);
6322
6323         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6324         if (sb == NULL)
6325                 return (ENOMEM);
6326
6327         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6328             M_ZERO | M_WAITOK);
6329
6330         t4_ulprx_read_la(sc, buf);
6331         p = buf;
6332
6333         sbuf_printf(sb, "      Pcmd        Type   Message"
6334             "                Data");
6335         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6336                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6337                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6338         }
6339
6340         rc = sbuf_finish(sb);
6341         sbuf_delete(sb);
6342         free(buf, M_CXGBE);
6343         return (rc);
6344 }
6345
6346 static int
6347 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6348 {
6349         struct adapter *sc = arg1;
6350         struct sbuf *sb;
6351         int rc, v;
6352
6353         rc = sysctl_wire_old_buffer(req, 0);
6354         if (rc != 0)
6355                 return (rc);
6356
6357         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6358         if (sb == NULL)
6359                 return (ENOMEM);
6360
6361         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6362         if (G_STATSOURCE_T5(v) == 7) {
6363                 if (G_STATMODE(v) == 0) {
6364                         sbuf_printf(sb, "total %d, incomplete %d",
6365                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6366                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6367                 } else if (G_STATMODE(v) == 1) {
6368                         sbuf_printf(sb, "total %d, data overflow %d",
6369                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6370                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6371                 }
6372         }
6373         rc = sbuf_finish(sb);
6374         sbuf_delete(sb);
6375
6376         return (rc);
6377 }
6378 #endif
6379
6380 static inline void
6381 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6382 {
6383         struct buf_ring *br;
6384         struct mbuf *m;
6385
6386         TXQ_LOCK_ASSERT_OWNED(txq);
6387
6388         br = txq->br;
6389         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6390         if (m)
6391                 t4_eth_tx(ifp, txq, m);
6392 }
6393
6394 void
6395 t4_tx_callout(void *arg)
6396 {
6397         struct sge_eq *eq = arg;
6398         struct adapter *sc;
6399
6400         if (EQ_TRYLOCK(eq) == 0)
6401                 goto reschedule;
6402
6403         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6404                 EQ_UNLOCK(eq);
6405 reschedule:
6406                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6407                         callout_schedule(&eq->tx_callout, 1);
6408                 return;
6409         }
6410
6411         EQ_LOCK_ASSERT_OWNED(eq);
6412
6413         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6414
6415                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6416                         struct sge_txq *txq = arg;
6417                         struct port_info *pi = txq->ifp->if_softc;
6418
6419                         sc = pi->adapter;
6420                 } else {
6421                         struct sge_wrq *wrq = arg;
6422
6423                         sc = wrq->adapter;
6424                 }
6425
6426                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6427         }
6428
6429         EQ_UNLOCK(eq);
6430 }
6431
6432 void
6433 t4_tx_task(void *arg, int count)
6434 {
6435         struct sge_eq *eq = arg;
6436
6437         EQ_LOCK(eq);
6438         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6439                 struct sge_txq *txq = arg;
6440                 txq_start(txq->ifp, txq);
6441         } else {
6442                 struct sge_wrq *wrq = arg;
6443                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6444         }
6445         EQ_UNLOCK(eq);
6446 }
6447
6448 static uint32_t
6449 fconf_to_mode(uint32_t fconf)
6450 {
6451         uint32_t mode;
6452
6453         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6454             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6455
6456         if (fconf & F_FRAGMENTATION)
6457                 mode |= T4_FILTER_IP_FRAGMENT;
6458
6459         if (fconf & F_MPSHITTYPE)
6460                 mode |= T4_FILTER_MPS_HIT_TYPE;
6461
6462         if (fconf & F_MACMATCH)
6463                 mode |= T4_FILTER_MAC_IDX;
6464
6465         if (fconf & F_ETHERTYPE)
6466                 mode |= T4_FILTER_ETH_TYPE;
6467
6468         if (fconf & F_PROTOCOL)
6469                 mode |= T4_FILTER_IP_PROTO;
6470
6471         if (fconf & F_TOS)
6472                 mode |= T4_FILTER_IP_TOS;
6473
6474         if (fconf & F_VLAN)
6475                 mode |= T4_FILTER_VLAN;
6476
6477         if (fconf & F_VNIC_ID)
6478                 mode |= T4_FILTER_VNIC;
6479
6480         if (fconf & F_PORT)
6481                 mode |= T4_FILTER_PORT;
6482
6483         if (fconf & F_FCOE)
6484                 mode |= T4_FILTER_FCoE;
6485
6486         return (mode);
6487 }
6488
6489 static uint32_t
6490 mode_to_fconf(uint32_t mode)
6491 {
6492         uint32_t fconf = 0;
6493
6494         if (mode & T4_FILTER_IP_FRAGMENT)
6495                 fconf |= F_FRAGMENTATION;
6496
6497         if (mode & T4_FILTER_MPS_HIT_TYPE)
6498                 fconf |= F_MPSHITTYPE;
6499
6500         if (mode & T4_FILTER_MAC_IDX)
6501                 fconf |= F_MACMATCH;
6502
6503         if (mode & T4_FILTER_ETH_TYPE)
6504                 fconf |= F_ETHERTYPE;
6505
6506         if (mode & T4_FILTER_IP_PROTO)
6507                 fconf |= F_PROTOCOL;
6508
6509         if (mode & T4_FILTER_IP_TOS)
6510                 fconf |= F_TOS;
6511
6512         if (mode & T4_FILTER_VLAN)
6513                 fconf |= F_VLAN;
6514
6515         if (mode & T4_FILTER_VNIC)
6516                 fconf |= F_VNIC_ID;
6517
6518         if (mode & T4_FILTER_PORT)
6519                 fconf |= F_PORT;
6520
6521         if (mode & T4_FILTER_FCoE)
6522                 fconf |= F_FCOE;
6523
6524         return (fconf);
6525 }
6526
6527 static uint32_t
6528 fspec_to_fconf(struct t4_filter_specification *fs)
6529 {
6530         uint32_t fconf = 0;
6531
6532         if (fs->val.frag || fs->mask.frag)
6533                 fconf |= F_FRAGMENTATION;
6534
6535         if (fs->val.matchtype || fs->mask.matchtype)
6536                 fconf |= F_MPSHITTYPE;
6537
6538         if (fs->val.macidx || fs->mask.macidx)
6539                 fconf |= F_MACMATCH;
6540
6541         if (fs->val.ethtype || fs->mask.ethtype)
6542                 fconf |= F_ETHERTYPE;
6543
6544         if (fs->val.proto || fs->mask.proto)
6545                 fconf |= F_PROTOCOL;
6546
6547         if (fs->val.tos || fs->mask.tos)
6548                 fconf |= F_TOS;
6549
6550         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6551                 fconf |= F_VLAN;
6552
6553         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6554                 fconf |= F_VNIC_ID;
6555
6556         if (fs->val.iport || fs->mask.iport)
6557                 fconf |= F_PORT;
6558
6559         if (fs->val.fcoe || fs->mask.fcoe)
6560                 fconf |= F_FCOE;
6561
6562         return (fconf);
6563 }
6564
/*
 * Retrieve the global filter mode for the CHELSIO_T4_GET_FILTER_MODE
 * ioctl.  The mode is derived from the TP_VLAN_PRI_MAP register; the
 * cached copy in the softc is resynced (with a warning) if it has
 * drifted from what the hardware reports.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	/* Read the filter configuration straight from the hardware. */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* The cached value should always match; warn and resync if not. */
	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6591
/*
 * Change the global filter mode for the CHELSIO_T4_SET_FILTER_MODE
 * ioctl.  Refused while any filters are in use or while TOE is active
 * on any port, since a mode change would invalidate them.  The actual
 * hardware rewrite is under "#ifdef notyet", so this currently always
 * fails with ENOTSUP after the validation steps.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;	/* existing filters depend on the mode */
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;	/* TOE enabled on at least one port */
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6629
/*
 * Read the hit count for filter "fid" directly from its TCB in card
 * memory, via PCIe memory window 0.  T4 keeps a 64-bit big-endian
 * count at byte offset 16 of the TCB, T5 a 32-bit big-endian count at
 * offset 24 (hence the be64toh/be32toh conversions below).
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	/* Point window 0 at this filter's TCB. */
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
6649
/*
 * Find the first valid filter at index >= t->idx and copy its details
 * into *t.  If there is no such filter, t->idx is set to 0xffffffff.
 * Always returns 0 once the synchronized op is acquired; "not found"
 * is communicated through t->idx, not the return value.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	/* No filters at all, or the starting index is out of range. */
	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;	/* counting disabled */
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;	/* no valid filter at or after t->idx */
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6688
/*
 * Program a new filter at index t->idx.  Validates the request against
 * the adapter's capabilities and the global filter mode, allocates the
 * filter table (and its lock) on first use, hands the work request to
 * the firmware, and then sleeps until the firmware's reply arrives via
 * t4_filter_rpl.  If the sleep is interrupted the operation continues
 * asynchronously and EINPROGRESS is returned.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;	/* no filter region configured */
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;	/* queues not set up yet */
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;	/* spec uses fields the mode doesn't carry */
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* First filter ever: allocate the table and its lock. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter (type 1) occupies 4 consecutive slots; check
	 * all of them.  An IPv4 filter needs only its own slot (the
	 * break below exits after the first iteration).
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * The WR was sent; wait for t4_filter_rpl to mark the
		 * entry valid (success) or clear its pending flag with
		 * valid still 0 (failure).
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;	/* interrupted */
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
6803
/*
 * Delete the filter at index t->idx.  Sends a delete work request to
 * the firmware and sleeps until t4_filter_rpl clears the entry, or
 * returns EINPROGRESS if the sleep is interrupted (the delete then
 * completes asynchronously).  On success t->fs is filled in with the
 * deleted filter's specification for the caller's benefit.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;	/* no filter region configured */
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;	/* queues not set up yet */
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;	/* another operation is in flight */
		goto done;
	}
	if (f->locked) {
		rc = EPERM;	/* entry is administratively locked */
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for t4_filter_rpl; success leaves the entry
		 * cleared (valid == 0), so the sense of the test is
		 * inverted relative to set_filter.
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;	/* interrupted */
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
6871
6872 static void
6873 clear_filter(struct filter_entry *f)
6874 {
6875         if (f->l2t)
6876                 t4_l2t_release(f->l2t);
6877
6878         bzero(f, sizeof (*f));
6879 }
6880
/*
 * Construct and send the FW_FILTER_WR work request that programs the
 * filter at index fidx into the hardware.  If the filter rewrites the
 * destination MAC or the VLAN, an L2T switching entry is allocated and
 * configured first.  The request is fire-and-forget here; completion
 * is reported asynchronously via t4_filter_rpl.  Multi-byte fields in
 * the WR are big-endian, hence the htobe16/htobe32 conversions.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Hardware tid for this filter slot. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	/* Action and rewrite controls for this filter. */
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	/* Match values and masks from the filter specification. */
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies for this WR go to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* pending is cleared (and valid possibly set) by t4_filter_rpl. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
6983
/*
 * Send a firmware work request to delete the filter at index fidx.
 * Fire-and-forget: the reply is handled by t4_filter_rpl, which clears
 * the entry and wakes up the sleeping deleter.
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* Hardware tid for this filter slot. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Replies go to the firmware event queue. */
	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;	/* cleared by t4_filter_rpl */
	t4_wrq_tx(sc, wr);
	return (0);
}
7006
/*
 * Handler for the firmware's reply to a filter work request (delivered
 * as a CPL_SET_TCB_RPL).  Updates the driver's filter table entry to
 * reflect the outcome and wakes any thread sleeping in set_filter() or
 * del_filter().
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/*
	 * Only act on tids within the filter region.  Note that the
	 * second test also rebases idx to a filter-table index.
	 */
	if (idx >= sc->tids.ftid_base &&
	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
		unsigned int rc = G_COOKIE(rpl->cookie);
		struct filter_entry *f = &sc->tids.ftid_tab[idx];

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted, or the add failed: release the slot. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7046
/*
 * Read an SGE context (one of CTXT_EGRESS, CTXT_INGRESS, CTXT_FLM,
 * CTXT_CNM) for the CHELSIO_T4_GET_SGE_CONTEXT ioctl.  Prefers the
 * firmware mailbox; falls back to a direct register ("backdoor") read
 * if the firmware is unavailable or the mailbox read fails.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7079
7080 static int
7081 load_fw(struct adapter *sc, struct t4_data *fw)
7082 {
7083         int rc;
7084         uint8_t *fw_data;
7085
7086         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7087         if (rc)
7088                 return (rc);
7089
7090         if (sc->flags & FULL_INIT_DONE) {
7091                 rc = EBUSY;
7092                 goto done;
7093         }
7094
7095         fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7096         if (fw_data == NULL) {
7097                 rc = ENOMEM;
7098                 goto done;
7099         }
7100
7101         rc = copyin(fw->data, fw_data, fw->len);
7102         if (rc == 0)
7103                 rc = -t4_load_fw(sc, fw_data, fw->len);
7104
7105         free(fw_data, M_CXGBE);
7106 done:
7107         end_synchronized_op(sc, 0);
7108         return (rc);
7109 }
7110
/*
 * Copy a range of adapter memory out to userspace through PCIe memory
 * window "win", for the CHELSIO_T4_GET_MEM ioctl.  The window is
 * repositioned as needed and data is staged through a kernel bounce
 * buffer one window-full at a time.
 *
 * NOTE(review): reads are done 4 bytes at a time, so mr->addr and
 * mr->len are assumed to be 4-byte aligned/sized -- presumably
 * guaranteed by validate_mem_range; confirm.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	/* Bounce buffer no larger than one window aperture. */
	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7151
7152 static int
7153 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7154 {
7155         int rc;
7156
7157         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7158                 return (EINVAL);
7159
7160         if (i2cd->len > 1) {
7161                 /* XXX: need fw support for longer reads in one go */
7162                 return (ENOTSUP);
7163         }
7164
7165         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7166         if (rc)
7167                 return (rc);
7168         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7169             i2cd->offset, &i2cd->data[0]);
7170         end_synchronized_op(sc, 0);
7171
7172         return (rc);
7173 }
7174
7175 int
7176 t4_os_find_pci_capability(struct adapter *sc, int cap)
7177 {
7178         int i;
7179
7180         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7181 }
7182
7183 int
7184 t4_os_pci_save_state(struct adapter *sc)
7185 {
7186         device_t dev;
7187         struct pci_devinfo *dinfo;
7188
7189         dev = sc->dev;
7190         dinfo = device_get_ivars(dev);
7191
7192         pci_cfg_save(dev, dinfo, 0);
7193         return (0);
7194 }
7195
7196 int
7197 t4_os_pci_restore_state(struct adapter *sc)
7198 {
7199         device_t dev;
7200         struct pci_devinfo *dinfo;
7201
7202         dev = sc->dev;
7203         dinfo = device_get_ivars(dev);
7204
7205         pci_cfg_restore(dev, dinfo);
7206         return (0);
7207 }
7208
/*
 * Called when the firmware reports a transceiver module change on port
 * "idx".  Logs a human-readable description of the new module (or its
 * absence) on the port's ifnet.
 */
void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	/* Indexed by mod_type; slot 0 is unused (covered by the ifs). */
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(pi->ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(pi->ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(pi->ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		/* Module present but its type has no name in mod_str. */
		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}
7231
7232 void
7233 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7234 {
7235         struct port_info *pi = sc->port[idx];
7236         struct ifnet *ifp = pi->ifp;
7237
7238         if (link_stat) {
7239                 pi->linkdnrc = -1;
7240                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7241                 if_link_state_change(ifp, LINK_STATE_UP);
7242         } else {
7243                 if (reason >= 0)
7244                         pi->linkdnrc = reason;
7245                 if_link_state_change(ifp, LINK_STATE_DOWN);
7246         }
7247 }
7248
/*
 * Invoke "func" on every adapter known to this driver, with the global
 * adapter-list lock held across the whole iteration.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t4_list_lock);
}
7264
/* No per-open state is kept for the control device; always succeeds. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7270
/* Nothing to tear down on close; always succeeds. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7276
/*
 * ioctl handler for the adapter's control device.  All commands are
 * privileged (PRIV_DRIVER).  Most of the work is delegated to the
 * helpers above; the register and statistics commands are handled
 * inline.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Must be 4-byte aligned and within the BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Must be 4-byte aligned and within the BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 32-bit write must not carry high-order bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		/* Reset MAC and software statistics for one port. */
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);

		/* MAC stats */
		t4_clr_port_stats(sc, port_id);

		pi = sc->port[port_id];
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* This port's control queue, too. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	default:
		rc = EINVAL;
	}

	return (rc);
}
7424
7425 #ifdef TCP_OFFLOAD
7426 static int
7427 toe_capability(struct port_info *pi, int enable)
7428 {
7429         int rc;
7430         struct adapter *sc = pi->adapter;
7431
7432         ASSERT_SYNCHRONIZED_OP(sc);
7433
7434         if (!is_offload(sc))
7435                 return (ENODEV);
7436
7437         if (enable) {
7438                 if (!(sc->flags & FULL_INIT_DONE)) {
7439                         rc = cxgbe_init_synchronized(pi);
7440                         if (rc)
7441                                 return (rc);
7442                 }
7443
7444                 if (isset(&sc->offload_map, pi->port_id))
7445                         return (0);
7446
7447                 if (!(sc->flags & TOM_INIT_DONE)) {
7448                         rc = t4_activate_uld(sc, ULD_TOM);
7449                         if (rc == EAGAIN) {
7450                                 log(LOG_WARNING,
7451                                     "You must kldload t4_tom.ko before trying "
7452                                     "to enable TOE on a cxgbe interface.\n");
7453                         }
7454                         if (rc != 0)
7455                                 return (rc);
7456                         KASSERT(sc->tom_softc != NULL,
7457                             ("%s: TOM activated but softc NULL", __func__));
7458                         KASSERT(sc->flags & TOM_INIT_DONE,
7459                             ("%s: TOM activated but flag not set", __func__));
7460                 }
7461
7462                 setbit(&sc->offload_map, pi->port_id);
7463         } else {
7464                 if (!isset(&sc->offload_map, pi->port_id))
7465                         return (0);
7466
7467                 KASSERT(sc->flags & TOM_INIT_DONE,
7468                     ("%s: TOM never initialized?", __func__));
7469                 clrbit(&sc->offload_map, pi->port_id);
7470         }
7471
7472         return (0);
7473 }
7474
7475 /*
7476  * Add an upper layer driver to the global list.
7477  */
7478 int
7479 t4_register_uld(struct uld_info *ui)
7480 {
7481         int rc = 0;
7482         struct uld_info *u;
7483
7484         mtx_lock(&t4_uld_list_lock);
7485         SLIST_FOREACH(u, &t4_uld_list, link) {
7486             if (u->uld_id == ui->uld_id) {
7487                     rc = EEXIST;
7488                     goto done;
7489             }
7490         }
7491
7492         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7493         ui->refcount = 0;
7494 done:
7495         mtx_unlock(&t4_uld_list_lock);
7496         return (rc);
7497 }
7498
7499 int
7500 t4_unregister_uld(struct uld_info *ui)
7501 {
7502         int rc = EINVAL;
7503         struct uld_info *u;
7504
7505         mtx_lock(&t4_uld_list_lock);
7506
7507         SLIST_FOREACH(u, &t4_uld_list, link) {
7508             if (u == ui) {
7509                     if (ui->refcount > 0) {
7510                             rc = EBUSY;
7511                             goto done;
7512                     }
7513
7514                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7515                     rc = 0;
7516                     goto done;
7517             }
7518         }
7519 done:
7520         mtx_unlock(&t4_uld_list_lock);
7521         return (rc);
7522 }
7523
7524 int
7525 t4_activate_uld(struct adapter *sc, int id)
7526 {
7527         int rc = EAGAIN;
7528         struct uld_info *ui;
7529
7530         ASSERT_SYNCHRONIZED_OP(sc);
7531
7532         mtx_lock(&t4_uld_list_lock);
7533
7534         SLIST_FOREACH(ui, &t4_uld_list, link) {
7535                 if (ui->uld_id == id) {
7536                         rc = ui->activate(sc);
7537                         if (rc == 0)
7538                                 ui->refcount++;
7539                         goto done;
7540                 }
7541         }
7542 done:
7543         mtx_unlock(&t4_uld_list_lock);
7544
7545         return (rc);
7546 }
7547
7548 int
7549 t4_deactivate_uld(struct adapter *sc, int id)
7550 {
7551         int rc = EINVAL;
7552         struct uld_info *ui;
7553
7554         ASSERT_SYNCHRONIZED_OP(sc);
7555
7556         mtx_lock(&t4_uld_list_lock);
7557
7558         SLIST_FOREACH(ui, &t4_uld_list, link) {
7559                 if (ui->uld_id == id) {
7560                         rc = ui->deactivate(sc);
7561                         if (rc == 0)
7562                                 ui->refcount--;
7563                         goto done;
7564                 }
7565         }
7566 done:
7567         mtx_unlock(&t4_uld_list_lock);
7568
7569         return (rc);
7570 }
7571 #endif
7572
7573 /*
7574  * Come up with reasonable defaults for some of the tunables, provided they're
7575  * not set by the user (in which case we'll use the values as is).
7576  */
7577 static void
7578 tweak_tunables(void)
7579 {
7580         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
7581
7582         if (t4_ntxq10g < 1)
7583                 t4_ntxq10g = min(nc, NTXQ_10G);
7584
7585         if (t4_ntxq1g < 1)
7586                 t4_ntxq1g = min(nc, NTXQ_1G);
7587
7588         if (t4_nrxq10g < 1)
7589                 t4_nrxq10g = min(nc, NRXQ_10G);
7590
7591         if (t4_nrxq1g < 1)
7592                 t4_nrxq1g = min(nc, NRXQ_1G);
7593
7594 #ifdef TCP_OFFLOAD
7595         if (t4_nofldtxq10g < 1)
7596                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7597
7598         if (t4_nofldtxq1g < 1)
7599                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7600
7601         if (t4_nofldrxq10g < 1)
7602                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7603
7604         if (t4_nofldrxq1g < 1)
7605                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7606
7607         if (t4_toecaps_allowed == -1)
7608                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7609 #else
7610         if (t4_toecaps_allowed == -1)
7611                 t4_toecaps_allowed = 0;
7612 #endif
7613
7614         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7615                 t4_tmr_idx_10g = TMR_IDX_10G;
7616
7617         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7618                 t4_pktc_idx_10g = PKTC_IDX_10G;
7619
7620         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7621                 t4_tmr_idx_1g = TMR_IDX_1G;
7622
7623         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7624                 t4_pktc_idx_1g = PKTC_IDX_1G;
7625
7626         if (t4_qsize_txq < 128)
7627                 t4_qsize_txq = 128;
7628
7629         if (t4_qsize_rxq < 128)
7630                 t4_qsize_rxq = 128;
7631         while (t4_qsize_rxq & 7)
7632                 t4_qsize_rxq++;
7633
7634         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7635 }
7636
7637 static int
7638 mod_event(module_t mod, int cmd, void *arg)
7639 {
7640         int rc = 0;
7641         static int loaded = 0;
7642
7643         switch (cmd) {
7644         case MOD_LOAD:
7645                 if (atomic_fetchadd_int(&loaded, 1))
7646                         break;
7647                 t4_sge_modload();
7648                 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
7649                 SLIST_INIT(&t4_list);
7650 #ifdef TCP_OFFLOAD
7651                 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
7652                 SLIST_INIT(&t4_uld_list);
7653 #endif
7654                 tweak_tunables();
7655                 break;
7656
7657         case MOD_UNLOAD:
7658                 if (atomic_fetchadd_int(&loaded, -1) > 1)
7659                         break;
7660 #ifdef TCP_OFFLOAD
7661                 mtx_lock(&t4_uld_list_lock);
7662                 if (!SLIST_EMPTY(&t4_uld_list)) {
7663                         rc = EBUSY;
7664                         mtx_unlock(&t4_uld_list_lock);
7665                         break;
7666                 }
7667                 mtx_unlock(&t4_uld_list_lock);
7668                 mtx_destroy(&t4_uld_list_lock);
7669 #endif
7670                 mtx_lock(&t4_list_lock);
7671                 if (!SLIST_EMPTY(&t4_list)) {
7672                         rc = EBUSY;
7673                         mtx_unlock(&t4_list_lock);
7674                         break;
7675                 }
7676                 mtx_unlock(&t4_list_lock);
7677                 mtx_destroy(&t4_list_lock);
7678                 break;
7679         }
7680
7681         return (rc);
7682 }
7683
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus (PCI) driver; mod_event does the one-time module init/teardown. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus (PCI) driver; shares mod_event with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers, children of the nexus devices. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);