]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/cxgbe/t4_main.c
Add atf m4 files from the vendor branch.
[FreeBSD/FreeBSD.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",		/* device name */
	t4_methods,
	sizeof(struct adapter)	/* softc size */
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
/*
 * Character device for T4 adapters (created in t4_attach so that the
 * userland tool works even in recovery mode -- see the cdev comments there).
 */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),	/* attach/detach shared with T4 */
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",		/* device name */
	t5_methods,
	sizeof(struct adapter)	/* softc size */
};
131
132
/* T5 port (cxl) interface; reuses the cxgbe device methods. */
static driver_t cxl_driver = {
	"cxl",			/* device name */
	cxgbe_methods,
	sizeof(struct port_info)	/* softc size */
};

/* Character device for T5 adapters; same open/close/ioctl as t4_cdevsw. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;		/* all adapters; t4_list_lock */
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;	/* registered ULDs; t4_uld_list_lock */
#endif
169
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16		/* NIC txq's per 10G port */
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8		/* NIC rxq's per 10G port */
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4		/* NIC txq's per 1G port */
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2		/* NIC rxq's per 1G port */
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

/* Whether to reserve a port's txq 0 (see intrs_and_queues.rsrv_noflowq). */
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8		/* TOE txq's per 10G port */
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2		/* TOE rxq's per 10G port */
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2		/* TOE txq's per 1G port */
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1		/* TOE rxq's per 1G port */
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif
220
/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;	/* holdoff timer index, 10G */
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;	/* holdoff pkt-count index, 10G */
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;		/* holdoff timer index, 1G */
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;	/* holdoff pkt-count index, 1G */
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
254
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

/* -1: let tweak_tunables() decide (see the tunables comment above). */
static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-specific, hence the hw.cxl prefix. */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
296
/*
 * Interrupt and queue configuration for the whole adapter, computed by
 * cfg_itype_and_nqueues() and consumed during attach.
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};
313
/* Host-side state for one hardware filter. */
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;	/* the filter itself */
};
323
/*
 * Bit flags selecting which MAC settings to apply; presumably the "flags"
 * argument of update_mac_settings() -- confirm at its definition.
 */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff	/* apply everything */
};
334
/*
 * Forward declarations for the file-local helpers; definitions follow later
 * in the file.
 */

/* BAR mapping and memory-window access. */
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);

/* Attach-time setup: interrupts, queues, firmware, parameters, ports. */
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);

/*
 * Default handlers installed at attach time for messages that have no
 * registered handler (see the cpl_handler/fw_msg_handler loops in t4_attach).
 */
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);

/* sysctl tree setup and handlers. */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
/* Diagnostic sysctls that stream output through an sbuf drain. */
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);

/* Filter management (used by the t4_ioctl filter commands). */
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);

/* Helpers backing other t4_ioctl commands. */
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);
/*
 * PCI device id tables consulted by t4_probe() and t5_probe().  The driver
 * attaches only to ids listed here (ids under "notyet" are excluded).
 */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};
483
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

/* cluster_metadata must not exceed the space reserved for it. */
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
498
499 static int
500 t4_probe(device_t dev)
501 {
502         int i;
503         uint16_t v = pci_get_vendor(dev);
504         uint16_t d = pci_get_device(dev);
505         uint8_t f = pci_get_function(dev);
506
507         if (v != PCI_VENDOR_ID_CHELSIO)
508                 return (ENXIO);
509
510         /* Attach only to PF0 of the FPGA */
511         if (d == 0xa000 && f != 0)
512                 return (ENXIO);
513
514         for (i = 0; i < nitems(t4_pciids); i++) {
515                 if (d == t4_pciids[i].device) {
516                         device_set_desc(dev, t4_pciids[i].desc);
517                         return (BUS_PROBE_DEFAULT);
518                 }
519         }
520
521         return (ENXIO);
522 }
523
524 static int
525 t5_probe(device_t dev)
526 {
527         int i;
528         uint16_t v = pci_get_vendor(dev);
529         uint16_t d = pci_get_device(dev);
530         uint8_t f = pci_get_function(dev);
531
532         if (v != PCI_VENDOR_ID_CHELSIO)
533                 return (ENXIO);
534
535         /* Attach only to PF0 of the FPGA */
536         if (d == 0xb000 && f != 0)
537                 return (ENXIO);
538
539         for (i = 0; i < nitems(t5_pciids); i++) {
540                 if (d == t5_pciids[i].device) {
541                         device_set_desc(dev, t5_pciids[i].desc);
542                         return (BUS_PROBE_DEFAULT);
543                 }
544         }
545
546         return (ENXIO);
547 }
548
549 static int
550 t4_attach(device_t dev)
551 {
552         struct adapter *sc;
553         int rc = 0, i, n10g, n1g, rqidx, tqidx;
554         struct intrs_and_queues iaq;
555         struct sge *s;
556 #ifdef TCP_OFFLOAD
557         int ofld_rqidx, ofld_tqidx;
558 #endif
559         const char *pcie_ts;
560
561         sc = device_get_softc(dev);
562         sc->dev = dev;
563
564         pci_enable_busmaster(dev);
565         if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
566                 uint32_t v;
567
568                 pci_set_max_read_req(dev, 4096);
569                 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
570                 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
571                 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
572         }
573
574         sc->traceq = -1;
575         mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
576         snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
577             device_get_nameunit(dev));
578
579         snprintf(sc->lockname, sizeof(sc->lockname), "%s",
580             device_get_nameunit(dev));
581         mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
582         sx_xlock(&t4_list_lock);
583         SLIST_INSERT_HEAD(&t4_list, sc, link);
584         sx_xunlock(&t4_list_lock);
585
586         mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
587         TAILQ_INIT(&sc->sfl);
588         callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
589
590         rc = map_bars_0_and_4(sc);
591         if (rc != 0)
592                 goto done; /* error message displayed already */
593
594         /*
595          * This is the real PF# to which we're attaching.  Works from within PCI
596          * passthrough environments too, where pci_get_function() could return a
597          * different PF# depending on the passthrough configuration.  We need to
598          * use the real PF# in all our communication with the firmware.
599          */
600         sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
601         sc->mbox = sc->pf;
602
603         memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
604         sc->an_handler = an_not_handled;
605         for (i = 0; i < nitems(sc->cpl_handler); i++)
606                 sc->cpl_handler[i] = cpl_not_handled;
607         for (i = 0; i < nitems(sc->fw_msg_handler); i++)
608                 sc->fw_msg_handler[i] = fw_msg_not_handled;
609         t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
610         t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
611         t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
612         t4_init_sge_cpl_handlers(sc);
613
614         /* Prepare the adapter for operation */
615         rc = -t4_prep_adapter(sc);
616         if (rc != 0) {
617                 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
618                 goto done;
619         }
620
621         /*
622          * Do this really early, with the memory windows set up even before the
623          * character device.  The userland tool's register i/o and mem read
624          * will work even in "recovery mode".
625          */
626         setup_memwin(sc);
627         sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
628             device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
629             device_get_nameunit(dev));
630         if (sc->cdev == NULL)
631                 device_printf(dev, "failed to create nexus char device.\n");
632         else
633                 sc->cdev->si_drv1 = sc;
634
635         /* Go no further if recovery mode has been requested. */
636         if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
637                 device_printf(dev, "recovery mode.\n");
638                 goto done;
639         }
640
641         /* Prepare the firmware for operation */
642         rc = prep_firmware(sc);
643         if (rc != 0)
644                 goto done; /* error message displayed already */
645
646         rc = get_params__post_init(sc);
647         if (rc != 0)
648                 goto done; /* error message displayed already */
649
650         rc = set_params__post_init(sc);
651         if (rc != 0)
652                 goto done; /* error message displayed already */
653
654         rc = map_bar_2(sc);
655         if (rc != 0)
656                 goto done; /* error message displayed already */
657
658         rc = t4_create_dma_tag(sc);
659         if (rc != 0)
660                 goto done; /* error message displayed already */
661
662         /*
663          * First pass over all the ports - allocate VIs and initialize some
664          * basic parameters like mac address, port type, etc.  We also figure
665          * out whether a port is 10G or 1G and use that information when
666          * calculating how many interrupts to attempt to allocate.
667          */
668         n10g = n1g = 0;
669         for_each_port(sc, i) {
670                 struct port_info *pi;
671
672                 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
673                 sc->port[i] = pi;
674
675                 /* These must be set before t4_port_init */
676                 pi->adapter = sc;
677                 pi->port_id = i;
678
679                 /* Allocate the vi and initialize parameters like mac addr */
680                 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
681                 if (rc != 0) {
682                         device_printf(dev, "unable to initialize port %d: %d\n",
683                             i, rc);
684                         free(pi, M_CXGBE);
685                         sc->port[i] = NULL;
686                         goto done;
687                 }
688
689                 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
690                     device_get_nameunit(dev), i);
691                 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
692                 sc->chan_map[pi->tx_chan] = i;
693
694                 if (is_10G_port(pi) || is_40G_port(pi)) {
695                         n10g++;
696                         pi->tmr_idx = t4_tmr_idx_10g;
697                         pi->pktc_idx = t4_pktc_idx_10g;
698                 } else {
699                         n1g++;
700                         pi->tmr_idx = t4_tmr_idx_1g;
701                         pi->pktc_idx = t4_pktc_idx_1g;
702                 }
703
704                 pi->xact_addr_filt = -1;
705                 pi->linkdnrc = -1;
706
707                 pi->qsize_rxq = t4_qsize_rxq;
708                 pi->qsize_txq = t4_qsize_txq;
709
710                 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
711                 if (pi->dev == NULL) {
712                         device_printf(dev,
713                             "failed to add device for port %d.\n", i);
714                         rc = ENXIO;
715                         goto done;
716                 }
717                 device_set_softc(pi->dev, pi);
718         }
719
720         /*
721          * Interrupt type, # of interrupts, # of rx/tx queues, etc.
722          */
723         rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
724         if (rc != 0)
725                 goto done; /* error message displayed already */
726
727         sc->intr_type = iaq.intr_type;
728         sc->intr_count = iaq.nirq;
729         sc->flags |= iaq.intr_flags;
730
731         s = &sc->sge;
732         s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
733         s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
734         s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
735         s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
736         s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
737
738 #ifdef TCP_OFFLOAD
739         if (is_offload(sc)) {
740
741                 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
742                 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
743                 s->neq += s->nofldtxq + s->nofldrxq;
744                 s->niq += s->nofldrxq;
745
746                 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
747                     M_CXGBE, M_ZERO | M_WAITOK);
748                 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
749                     M_CXGBE, M_ZERO | M_WAITOK);
750         }
751 #endif
752
753         s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
754             M_ZERO | M_WAITOK);
755         s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
756             M_ZERO | M_WAITOK);
757         s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
758             M_ZERO | M_WAITOK);
759         s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
760             M_ZERO | M_WAITOK);
761         s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
762             M_ZERO | M_WAITOK);
763
764         sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
765             M_ZERO | M_WAITOK);
766
767         t4_init_l2t(sc, M_WAITOK);
768
769         /*
770          * Second pass over the ports.  This time we know the number of rx and
771          * tx queues that each port should get.
772          */
773         rqidx = tqidx = 0;
774 #ifdef TCP_OFFLOAD
775         ofld_rqidx = ofld_tqidx = 0;
776 #endif
777         for_each_port(sc, i) {
778                 struct port_info *pi = sc->port[i];
779
780                 if (pi == NULL)
781                         continue;
782
783                 pi->first_rxq = rqidx;
784                 pi->first_txq = tqidx;
785                 if (is_10G_port(pi) || is_40G_port(pi)) {
786                         pi->nrxq = iaq.nrxq10g;
787                         pi->ntxq = iaq.ntxq10g;
788                 } else {
789                         pi->nrxq = iaq.nrxq1g;
790                         pi->ntxq = iaq.ntxq1g;
791                 }
792
793                 if (pi->ntxq > 1)
794                         pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
795                 else
796                         pi->rsrv_noflowq = 0;
797
798                 rqidx += pi->nrxq;
799                 tqidx += pi->ntxq;
800
801 #ifdef TCP_OFFLOAD
802                 if (is_offload(sc)) {
803                         pi->first_ofld_rxq = ofld_rqidx;
804                         pi->first_ofld_txq = ofld_tqidx;
805                         if (is_10G_port(pi) || is_40G_port(pi)) {
806                                 pi->nofldrxq = iaq.nofldrxq10g;
807                                 pi->nofldtxq = iaq.nofldtxq10g;
808                         } else {
809                                 pi->nofldrxq = iaq.nofldrxq1g;
810                                 pi->nofldtxq = iaq.nofldtxq1g;
811                         }
812                         ofld_rqidx += pi->nofldrxq;
813                         ofld_tqidx += pi->nofldtxq;
814                 }
815 #endif
816         }
817
818         rc = setup_intr_handlers(sc);
819         if (rc != 0) {
820                 device_printf(dev,
821                     "failed to setup interrupt handlers: %d\n", rc);
822                 goto done;
823         }
824
825         rc = bus_generic_attach(dev);
826         if (rc != 0) {
827                 device_printf(dev,
828                     "failed to attach all child ports: %d\n", rc);
829                 goto done;
830         }
831
832         switch (sc->params.pci.speed) {
833                 case 0x1:
834                         pcie_ts = "2.5";
835                         break;
836                 case 0x2:
837                         pcie_ts = "5.0";
838                         break;
839                 case 0x3:
840                         pcie_ts = "8.0";
841                         break;
842                 default:
843                         pcie_ts = "??";
844                         break;
845         }
846         device_printf(dev,
847             "PCIe x%d (%s GTS/s) (%d), %d ports, %d %s interrupt%s, %d eq, %d iq\n",
848             sc->params.pci.width, pcie_ts, sc->params.pci.speed,
849             sc->params.nports, sc->intr_count,
850             sc->intr_type == INTR_MSIX ? "MSI-X" :
851             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
852             sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
853
854         t4_set_desc(sc);
855
856 done:
857         if (rc != 0 && sc->cdev) {
858                 /* cdev was created and so cxgbetool works; recover that way. */
859                 device_printf(dev,
860                     "error during attach, adapter is now in recovery mode.\n");
861                 rc = 0;
862         }
863
864         if (rc != 0)
865                 t4_detach(dev);
866         else
867                 t4_sysctls(sc);
868
869         return (rc);
870 }
871
872 /*
873  * Idempotent
874  */
875 static int
876 t4_detach(device_t dev)
877 {
878         struct adapter *sc;
879         struct port_info *pi;
880         int i, rc;
881
882         sc = device_get_softc(dev);
883
884         if (sc->flags & FULL_INIT_DONE)
885                 t4_intr_disable(sc);
886
887         if (sc->cdev) {
888                 destroy_dev(sc->cdev);
889                 sc->cdev = NULL;
890         }
891
892         rc = bus_generic_detach(dev);
893         if (rc) {
894                 device_printf(dev,
895                     "failed to detach child devices: %d\n", rc);
896                 return (rc);
897         }
898
899         for (i = 0; i < sc->intr_count; i++)
900                 t4_free_irq(sc, &sc->irq[i]);
901
902         for (i = 0; i < MAX_NPORTS; i++) {
903                 pi = sc->port[i];
904                 if (pi) {
905                         t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
906                         if (pi->dev)
907                                 device_delete_child(dev, pi->dev);
908
909                         mtx_destroy(&pi->pi_lock);
910                         free(pi, M_CXGBE);
911                 }
912         }
913
914         if (sc->flags & FULL_INIT_DONE)
915                 adapter_full_uninit(sc);
916
917         if (sc->flags & FW_OK)
918                 t4_fw_bye(sc, sc->mbox);
919
920         if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
921                 pci_release_msi(dev);
922
923         if (sc->regs_res)
924                 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
925                     sc->regs_res);
926
927         if (sc->udbs_res)
928                 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
929                     sc->udbs_res);
930
931         if (sc->msix_res)
932                 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
933                     sc->msix_res);
934
935         if (sc->l2t)
936                 t4_free_l2t(sc->l2t);
937
938 #ifdef TCP_OFFLOAD
939         free(sc->sge.ofld_rxq, M_CXGBE);
940         free(sc->sge.ofld_txq, M_CXGBE);
941 #endif
942         free(sc->irq, M_CXGBE);
943         free(sc->sge.rxq, M_CXGBE);
944         free(sc->sge.txq, M_CXGBE);
945         free(sc->sge.ctrlq, M_CXGBE);
946         free(sc->sge.iqmap, M_CXGBE);
947         free(sc->sge.eqmap, M_CXGBE);
948         free(sc->tids.ftid_tab, M_CXGBE);
949         t4_destroy_dma_tag(sc);
950         if (mtx_initialized(&sc->sc_lock)) {
951                 sx_xlock(&t4_list_lock);
952                 SLIST_REMOVE(&t4_list, sc, adapter, link);
953                 sx_xunlock(&t4_list_lock);
954                 mtx_destroy(&sc->sc_lock);
955         }
956
957         if (mtx_initialized(&sc->tids.ftid_lock))
958                 mtx_destroy(&sc->tids.ftid_lock);
959         if (mtx_initialized(&sc->sfl_lock))
960                 mtx_destroy(&sc->sfl_lock);
961         if (mtx_initialized(&sc->ifp_lock))
962                 mtx_destroy(&sc->ifp_lock);
963
964         bzero(sc, sizeof(*sc));
965
966         return (0);
967 }
968
969
970 static int
971 cxgbe_probe(device_t dev)
972 {
973         char buf[128];
974         struct port_info *pi = device_get_softc(dev);
975
976         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
977         device_set_desc_copy(dev, buf);
978
979         return (BUS_PROBE_DEFAULT);
980 }
981
/*
 * Interface capabilities supported by the driver (T4_CAP) and the subset
 * that is enabled by default when a port attaches (T4_CAP_ENABLE).
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
986
/*
 * Attach routine for a cxgbe port: allocates and configures the ifnet,
 * installs the driver entry points, registers media and VLAN handlers,
 * and attaches the interface to the network stack.
 *
 * Returns 0 on success or ENOMEM if the ifnet cannot be allocated.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        /* Driver entry points for the network stack. */
        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        /* Advertise TOE only if the adapter supports offload. */
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi);

        pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                device_printf(dev,
                    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
                    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
        } else
#endif
                device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

        cxgbe_sysctls(pi);

        return (0);
}
1044
/*
 * Detach routine for a cxgbe port.  Marks the port doomed, waits for any
 * in-flight synchronized operation to complete, then tears down the tracer
 * queue, VLAN handler, callout, queues, media and ifnet.  Always returns 0.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait for any synchronized operation in progress to finish. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        if (pi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

        /* Stop the tick callout and wait for a running instance to drain. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release the busy marker taken above and wake any waiters. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
1094
1095 static void
1096 cxgbe_init(void *arg)
1097 {
1098         struct port_info *pi = arg;
1099         struct adapter *sc = pi->adapter;
1100
1101         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1102                 return;
1103         cxgbe_init_synchronized(pi);
1104         end_synchronized_op(sc, 0);
1105 }
1106
/*
 * ioctl handler for a cxgbe interface.  MTU, flag, multicast, and capability
 * changes are serialized through the adapter's synchronized-operation
 * protocol; everything else is delegated to ifmedia_ioctl/ether_ioctl.
 *
 * Returns 0 on success or an errno.
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags;
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
                        return (EINVAL);

                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (pi->flags & PORT_INIT_DONE) {
                        /* Resize free-list buffers to suit the new MTU. */
                        t4_update_fl_bufsize(ifp);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(pi, XGMAC_MTU);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
                if (rc)
                        return (rc);

                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                /* Push PROMISC/ALLMULTI changes to the MAC. */
                                flags = pi->if_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        rc = update_mac_settings(pi,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else
                                rc = cxgbe_init_synchronized(pi);
                        pi->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = cxgbe_uninit_synchronized(pi);
                end_synchronized_op(sc, 0);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI: /* these two are called with a mutex held :-( */
                rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(pi, XGMAC_MCADDRS);
                end_synchronized_op(sc, LOCK_HELD);
                break;

        case SIOCSIFCAP:
                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
                if (rc)
                        return (rc);

                /* mask has a bit set for each capability being toggled. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        /* TSO4 requires tx checksumming; drop it if needed. */
                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        /* TSO6 requires IPv6 tx checksumming likewise. */
                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
                 * sending a TSO request our way, so it's sufficient to toggle
                 * IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
                        int i;
                        struct sge_rxq *rxq;

                        /* Propagate the LRO setting to every rx queue. */
                        ifp->if_capenable ^= IFCAP_LRO;
                        for_each_rxq(pi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_LRO)
                                        rxq->iq.flags |= IQ_LRO_ENABLED;
                                else
                                        rxq->iq.flags &= ~IQ_LRO_ENABLED;
                        }
#endif
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

                        rc = toe_capability(pi, enable);
                        if (rc != 0)
                                goto fail;

                        ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(pi, XGMAC_VLANEX);
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;

                        /* Need to find out how to disable auto-mtu-inflation */
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
fail:
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
                break;

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}
1280
/*
 * if_transmit handler.  Picks a tx queue (honoring the mbuf's flowid when
 * M_FLOWID is set, while reserving the first rsrv_noflowq queues for
 * non-flowid traffic), then either transmits directly if the queue lock can
 * be taken, or enqueues the mbuf on the queue's buf_ring for later
 * transmission.
 *
 * Returns 0 if the mbuf was sent or queued; otherwise an errno (the mbuf is
 * freed on ENETDOWN).
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        if (m->m_flags & M_FLOWID)
                txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
                    + pi->rsrv_noflowq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
1348
1349 static void
1350 cxgbe_qflush(struct ifnet *ifp)
1351 {
1352         struct port_info *pi = ifp->if_softc;
1353         struct sge_txq *txq;
1354         int i;
1355         struct mbuf *m;
1356
1357         /* queues do not exist if !PORT_INIT_DONE. */
1358         if (pi->flags & PORT_INIT_DONE) {
1359                 for_each_txq(pi, i, txq) {
1360                         TXQ_LOCK(txq);
1361                         m_freem(txq->m);
1362                         txq->m = NULL;
1363                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1364                                 m_freem(m);
1365                         TXQ_UNLOCK(txq);
1366                 }
1367         }
1368         if_qflush(ifp);
1369 }
1370
1371 static int
1372 cxgbe_media_change(struct ifnet *ifp)
1373 {
1374         struct port_info *pi = ifp->if_softc;
1375
1376         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1377
1378         return (EOPNOTSUPP);
1379 }
1380
1381 static void
1382 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1383 {
1384         struct port_info *pi = ifp->if_softc;
1385         struct ifmedia_entry *cur = pi->media.ifm_cur;
1386         int speed = pi->link_cfg.speed;
1387         int data = (pi->port_type << 8) | pi->mod_type;
1388
1389         if (cur->ifm_data != data) {
1390                 build_medialist(pi);
1391                 cur = pi->media.ifm_cur;
1392         }
1393
1394         ifmr->ifm_status = IFM_AVALID;
1395         if (!pi->link_cfg.link_ok)
1396                 return;
1397
1398         ifmr->ifm_status |= IFM_ACTIVE;
1399
1400         /* active and current will differ iff current media is autoselect. */
1401         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1402                 return;
1403
1404         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1405         if (speed == SPEED_10000)
1406                 ifmr->ifm_active |= IFM_10G_T;
1407         else if (speed == SPEED_1000)
1408                 ifmr->ifm_active |= IFM_1000_T;
1409         else if (speed == SPEED_100)
1410                 ifmr->ifm_active |= IFM_100_TX;
1411         else if (speed == SPEED_10)
1412                 ifmr->ifm_active |= IFM_10_T;
1413         else
1414                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1415                             speed));
1416 }
1417
/*
 * Stop the adapter after a fatal error: clear the SGE global-enable bit,
 * disable all interrupts, and log an emergency message.  The adapter is
 * left stopped; no recovery is attempted here.
 */
void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
1426
/*
 * Map BAR0 (device registers) and BAR4 (MSI-X) and record the bus tag/handle
 * for register access.  Returns 0 on success or ENXIO if either BAR cannot
 * be mapped.  The resources are released in t4_detach.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        /* The kernel doorbell (in BAR0) is always available. */
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
1452
/*
 * Map BAR2 (userspace doorbells) and, on T5 with write combining enabled,
 * switch the doorbell strategy to write-combined doorbell regions.
 *
 * Returns 0 on success (including the T4 no-RDMA case where mapping is
 * skipped entirely) or ENXIO if the BAR cannot be mapped.
 */
static int
map_bar_2(struct adapter *sc)
{

        /*
         * T4: only iWARP driver uses the userspace doorbells.  There is no need
         * to map it if RDMA is disabled.
         */
        if (is_t4(sc) && sc->rdmacaps == 0)
                return (0);

        sc->udbs_rid = PCIR_BAR(2);
        sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE);
        if (sc->udbs_res == NULL) {
                device_printf(sc->dev, "cannot map doorbell BAR.\n");
                return (ENXIO);
        }
        sc->udbs_base = rman_get_virtual(sc->udbs_res);

        if (is_t5(sc)) {
                setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
                if (t5_write_combine) {
                        int rc;

                        /*
                         * Enable write combining on BAR2.  This is the
                         * userspace doorbell BAR and is split into 128B
                         * (UDBS_SEG_SIZE) doorbell regions, each associated
                         * with an egress queue.  The first 64B has the doorbell
                         * and the second 64B can be used to submit a tx work
                         * request with an implicit doorbell.
                         */

                        rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
                            rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
                        if (rc == 0) {
                                /* Prefer WC doorbells over the plain UDB. */
                                clrbit(&sc->doorbells, DOORBELL_UDB);
                                setbit(&sc->doorbells, DOORBELL_WCWR);
                                setbit(&sc->doorbells, DOORBELL_UDBWC);
                        } else {
                                device_printf(sc->dev,
                                    "couldn't enable write combining: %d\n",
                                    rc);
                        }

                        t4_write_reg(sc, A_SGE_STAT_CFG,
                            V_STATSOURCE_T5(7) | V_STATMODE(0));
                }
#endif
        }

        return (0);
}
1508
/*
 * Per-chip memory window base/aperture tables; programmed into the PCIe
 * memory-access decoders by setup_memwin().
 */
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1520
/*
 * Program the chip's PCIe memory-access window decoders with the base and
 * (log2) aperture of each memory window.  On T4 the windows are programmed
 * with the absolute BAR0 address; on T5 they are BAR-relative.
 */
static void
setup_memwin(struct adapter *sc)
{
        const struct memwin *mw;
        int i, n;
        uint32_t bar0;

        if (is_t4(sc)) {
                /*
                 * Read low 32b of bar0 indirectly via the hardware backdoor
                 * mechanism.  Works from within PCI passthrough environments
                 * too, where rman_get_start() can return a different value.  We
                 * need to program the T4 memory window decoders with the actual
                 * addresses that will be coming across the PCIe link.
                 */
                bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
                bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

                mw = &t4_memwin[0];
                n = nitems(t4_memwin);
        } else {
                /* T5 uses the relative offset inside the PCIe BAR */
                bar0 = 0;

                mw = &t5_memwin[0];
                n = nitems(t5_memwin);
        }

        for (i = 0; i < n; i++, mw++) {
                t4_write_reg(sc,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
                    (mw->base + bar0) | V_BIR(0) |
                    V_WINDOW(ilog2(mw->aperture) - 10));
        }

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1559
1560 /*
1561  * Verify that the memory range specified by the addr/len pair is valid and lies
1562  * entirely within a single region (EDCx or MCx).
1563  */
1564 static int
1565 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1566 {
1567         uint32_t em, addr_len, maddr, mlen;
1568
1569         /* Memory can only be accessed in naturally aligned 4 byte units */
1570         if (addr & 3 || len & 3 || len == 0)
1571                 return (EINVAL);
1572
1573         /* Enabled memories */
1574         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1575         if (em & F_EDRAM0_ENABLE) {
1576                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1577                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1578                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1579                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1580                     addr + len <= maddr + mlen)
1581                         return (0);
1582         }
1583         if (em & F_EDRAM1_ENABLE) {
1584                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1585                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1586                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1587                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1588                     addr + len <= maddr + mlen)
1589                         return (0);
1590         }
1591         if (em & F_EXT_MEM_ENABLE) {
1592                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1593                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1594                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1595                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1596                     addr + len <= maddr + mlen)
1597                         return (0);
1598         }
1599         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1600                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1601                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1602                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1603                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1604                     addr + len <= maddr + mlen)
1605                         return (0);
1606         }
1607
1608         return (EFAULT);
1609 }
1610
1611 static int
1612 fwmtype_to_hwmtype(int mtype)
1613 {
1614
1615         switch (mtype) {
1616         case FW_MEMTYPE_EDC0:
1617                 return (MEM_EDC0);
1618         case FW_MEMTYPE_EDC1:
1619                 return (MEM_EDC1);
1620         case FW_MEMTYPE_EXTMEM:
1621                 return (MEM_MC0);
1622         case FW_MEMTYPE_EXTMEM1:
1623                 return (MEM_MC1);
1624         default:
1625                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1626         }
1627 }
1628
1629 /*
1630  * Verify that the memory range specified by the memtype/offset/len pair is
1631  * valid and lies entirely within the memtype specified.  The global address of
1632  * the start of the range is returned in addr.
1633  */
1634 static int
1635 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1636     uint32_t *addr)
1637 {
1638         uint32_t em, addr_len, maddr, mlen;
1639
1640         /* Memory can only be accessed in naturally aligned 4 byte units */
1641         if (off & 3 || len & 3 || len == 0)
1642                 return (EINVAL);
1643
1644         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1645         switch (fwmtype_to_hwmtype(mtype)) {
1646         case MEM_EDC0:
1647                 if (!(em & F_EDRAM0_ENABLE))
1648                         return (EINVAL);
1649                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1650                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1651                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1652                 break;
1653         case MEM_EDC1:
1654                 if (!(em & F_EDRAM1_ENABLE))
1655                         return (EINVAL);
1656                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1657                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1658                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1659                 break;
1660         case MEM_MC:
1661                 if (!(em & F_EXT_MEM_ENABLE))
1662                         return (EINVAL);
1663                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1664                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1665                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1666                 break;
1667         case MEM_MC1:
1668                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1669                         return (EINVAL);
1670                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1671                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1672                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1673                 break;
1674         default:
1675                 return (EINVAL);
1676         }
1677
1678         if (mlen > 0 && off < mlen && off + len <= mlen) {
1679                 *addr = maddr + off;    /* global address */
1680                 return (0);
1681         }
1682
1683         return (EFAULT);
1684 }
1685
/*
 * Report the base address and aperture (size) of PCIe memory window 'win',
 * taken from the T4 or T5 window table as appropriate for this chip.  Either
 * output pointer may be NULL if the caller does not need that value.
 */
static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}
1706
1707 /*
1708  * Positions the memory window such that it can be used to access the specified
1709  * address in the chip's address space.  The return value is the offset of addr
1710  * from the start of the window.
1711  */
1712 static uint32_t
1713 position_memwin(struct adapter *sc, int n, uint32_t addr)
1714 {
1715         uint32_t start, pf;
1716         uint32_t reg;
1717
1718         KASSERT(n >= 0 && n <= 3,
1719             ("%s: invalid window %d.", __func__, n));
1720         KASSERT((addr & 3) == 0,
1721             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1722
1723         if (is_t4(sc)) {
1724                 pf = 0;
1725                 start = addr & ~0xf;    /* start must be 16B aligned */
1726         } else {
1727                 pf = V_PFNUM(sc->pf);
1728                 start = addr & ~0x7f;   /* start must be 128B aligned */
1729         }
1730         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1731
1732         t4_write_reg(sc, reg, start | pf);
1733         t4_read_reg(sc, reg);
1734
1735         return (addr - start);
1736 }
1737
1738 static int
1739 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1740     struct intrs_and_queues *iaq)
1741 {
1742         int rc, itype, navail, nrxq10g, nrxq1g, n;
1743         int nofldrxq10g = 0, nofldrxq1g = 0;
1744
1745         bzero(iaq, sizeof(*iaq));
1746
1747         iaq->ntxq10g = t4_ntxq10g;
1748         iaq->ntxq1g = t4_ntxq1g;
1749         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1750         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1751         iaq->rsrv_noflowq = t4_rsrv_noflowq;
1752 #ifdef TCP_OFFLOAD
1753         if (is_offload(sc)) {
1754                 iaq->nofldtxq10g = t4_nofldtxq10g;
1755                 iaq->nofldtxq1g = t4_nofldtxq1g;
1756                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1757                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1758         }
1759 #endif
1760
1761         for (itype = INTR_MSIX; itype; itype >>= 1) {
1762
1763                 if ((itype & t4_intr_types) == 0)
1764                         continue;       /* not allowed */
1765
1766                 if (itype == INTR_MSIX)
1767                         navail = pci_msix_count(sc->dev);
1768                 else if (itype == INTR_MSI)
1769                         navail = pci_msi_count(sc->dev);
1770                 else
1771                         navail = 1;
1772 restart:
1773                 if (navail == 0)
1774                         continue;
1775
1776                 iaq->intr_type = itype;
1777                 iaq->intr_flags = 0;
1778
1779                 /*
1780                  * Best option: an interrupt vector for errors, one for the
1781                  * firmware event queue, and one each for each rxq (NIC as well
1782                  * as offload).
1783                  */
1784                 iaq->nirq = T4_EXTRA_INTR;
1785                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1786                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1787                 if (iaq->nirq <= navail &&
1788                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1789                         iaq->intr_flags |= INTR_DIRECT;
1790                         goto allocate;
1791                 }
1792
1793                 /*
1794                  * Second best option: an interrupt vector for errors, one for
1795                  * the firmware event queue, and one each for either NIC or
1796                  * offload rxq's.
1797                  */
1798                 iaq->nirq = T4_EXTRA_INTR;
1799                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1800                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1801                 if (iaq->nirq <= navail &&
1802                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1803                         goto allocate;
1804
1805                 /*
1806                  * Next best option: an interrupt vector for errors, one for the
1807                  * firmware event queue, and at least one per port.  At this
1808                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1809                  * what's available to us.
1810                  */
1811                 iaq->nirq = T4_EXTRA_INTR;
1812                 iaq->nirq += n10g + n1g;
1813                 if (iaq->nirq <= navail) {
1814                         int leftover = navail - iaq->nirq;
1815
1816                         if (n10g > 0) {
1817                                 int target = max(nrxq10g, nofldrxq10g);
1818
1819                                 n = 1;
1820                                 while (n < target && leftover >= n10g) {
1821                                         leftover -= n10g;
1822                                         iaq->nirq += n10g;
1823                                         n++;
1824                                 }
1825                                 iaq->nrxq10g = min(n, nrxq10g);
1826 #ifdef TCP_OFFLOAD
1827                                 if (is_offload(sc))
1828                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1829 #endif
1830                         }
1831
1832                         if (n1g > 0) {
1833                                 int target = max(nrxq1g, nofldrxq1g);
1834
1835                                 n = 1;
1836                                 while (n < target && leftover >= n1g) {
1837                                         leftover -= n1g;
1838                                         iaq->nirq += n1g;
1839                                         n++;
1840                                 }
1841                                 iaq->nrxq1g = min(n, nrxq1g);
1842 #ifdef TCP_OFFLOAD
1843                                 if (is_offload(sc))
1844                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1845 #endif
1846                         }
1847
1848                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1849                                 goto allocate;
1850                 }
1851
1852                 /*
1853                  * Least desirable option: one interrupt vector for everything.
1854                  */
1855                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1856 #ifdef TCP_OFFLOAD
1857                 if (is_offload(sc))
1858                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1859 #endif
1860
1861 allocate:
1862                 navail = iaq->nirq;
1863                 rc = 0;
1864                 if (itype == INTR_MSIX)
1865                         rc = pci_alloc_msix(sc->dev, &navail);
1866                 else if (itype == INTR_MSI)
1867                         rc = pci_alloc_msi(sc->dev, &navail);
1868
1869                 if (rc == 0) {
1870                         if (navail == iaq->nirq)
1871                                 return (0);
1872
1873                         /*
1874                          * Didn't get the number requested.  Use whatever number
1875                          * the kernel is willing to allocate (it's in navail).
1876                          */
1877                         device_printf(sc->dev, "fewer vectors than requested, "
1878                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1879                             itype, iaq->nirq, navail);
1880                         pci_release_msi(sc->dev);
1881                         goto restart;
1882                 }
1883
1884                 device_printf(sc->dev,
1885                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1886                     itype, rc, iaq->nirq, navail);
1887         }
1888
1889         device_printf(sc->dev,
1890             "failed to find a usable interrupt type.  "
1891             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1892             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1893
1894         return (ENXIO);
1895 }
1896
/*
 * Pack a chip's bundled firmware version components (major/minor/micro/build)
 * into the single 32-bit encoding used in struct fw_hdr.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface version constant for one firmware interface, per chip. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)

/*
 * Per-chip firmware information: the KLD that carries the default config file
 * (kld_name), the firmware module's name within it (fw_mod_name), and the
 * firmware header the driver was compiled against (used by fw_compatible()
 * and prep_firmware() for version/interface compatibility checks).
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1944
1945 static struct fw_info *
1946 find_fw_info(int chip)
1947 {
1948         int i;
1949
1950         for (i = 0; i < nitems(fw_info); i++) {
1951                 if (fw_info[i].chip == chip)
1952                         return (&fw_info[i]);
1953         }
1954         return (NULL);
1955 }
1956
1957 /*
1958  * Is the given firmware API compatible with the one the driver was compiled
1959  * with?
1960  */
1961 static int
1962 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1963 {
1964
1965         /* short circuit if it's the exact same firmware version */
1966         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1967                 return (1);
1968
1969         /*
1970          * XXX: Is this too conservative?  Perhaps I should limit this to the
1971          * features that are supported in the driver.
1972          */
1973 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1974         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1975             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1976             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1977                 return (1);
1978 #undef SAME_INTF
1979
1980         return (0);
1981 }
1982
1983 /*
1984  * The firmware in the KLD is usable, but should it be installed?  This routine
1985  * explains itself in detail if it indicates the KLD firmware should be
1986  * installed.
1987  */
1988 static int
1989 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1990 {
1991         const char *reason;
1992
1993         if (!card_fw_usable) {
1994                 reason = "incompatible or unusable";
1995                 goto install;
1996         }
1997
1998         if (k > c) {
1999                 reason = "older than the version bundled with this driver";
2000                 goto install;
2001         }
2002
2003         if (t4_fw_install == 2 && k != c) {
2004                 reason = "different than the version bundled with this driver";
2005                 goto install;
2006         }
2007
2008         return (0);
2009
2010 install:
2011         if (t4_fw_install == 0) {
2012                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2013                     "but the driver is prohibited from installing a different "
2014                     "firmware on the card.\n",
2015                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2016                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2017
2018                 return (0);
2019         }
2020
2021         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2022             "installing firmware %u.%u.%u.%u on card.\n",
2023             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2024             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2025             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2026             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2027
2028         return (1);
2029 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	/* NOTE(review): non-negative rc appears to be the master PF/mbox —
	 * it is compared against sc->mbox below to detect mastership. */
	pf = rc;
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {
		/* Flash the KLD's firmware onto the card. */
		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		/* Nothing usable anywhere; report all three versions. */
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		/* Only say goodbye to fw if the reset didn't hang/fail hard. */
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Chip already initialized by the master PF. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2210
/*
 * Build the mnem/param selector for a firmware parameter query: device-wide
 * (DEV) or per physical/virtual function (PFVF) parameters respectively.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2217
/*
 * Partition chip resources for use between various PFs, VFs, etc.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name is "<kld name>_<profile>", e.g. t4fw_cfg_uwire */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		/* Upload the chosen config file into adapter memory. */
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/*
		 * Copy the file through memory window 2, repositioning the
		 * window as needed until the whole file has been written.
		 */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		/* Fall back to the config file in the card's flash. */
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/* Tell the firmware to parse the config file at mtype/moff. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

/* Mask a capability field down to what the corresponding tunable allows. */
#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	/* Write the (possibly reduced) capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2403
2404 /*
2405  * Retrieve parameters that are needed (or nice to have) very early.
2406  */
2407 static int
2408 get_params__pre_init(struct adapter *sc)
2409 {
2410         int rc;
2411         uint32_t param[2], val[2];
2412         struct fw_devlog_cmd cmd;
2413         struct devlog_params *dlog = &sc->params.devlog;
2414
2415         param[0] = FW_PARAM_DEV(PORTVEC);
2416         param[1] = FW_PARAM_DEV(CCLK);
2417         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2418         if (rc != 0) {
2419                 device_printf(sc->dev,
2420                     "failed to query parameters (pre_init): %d.\n", rc);
2421                 return (rc);
2422         }
2423
2424         sc->params.portvec = val[0];
2425         sc->params.nports = bitcount32(val[0]);
2426         sc->params.vpd.cclk = val[1];
2427
2428         /* Read device log parameters. */
2429         bzero(&cmd, sizeof(cmd));
2430         cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2431             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2432         cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2433         rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2434         if (rc != 0) {
2435                 device_printf(sc->dev,
2436                     "failed to get devlog parameters: %d.\n", rc);
2437                 bzero(dlog, sizeof (*dlog));
2438                 rc = 0; /* devlog isn't critical for device operation */
2439         } else {
2440                 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2441                 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2442                 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2443                 dlog->size = be32toh(cmd.memsize_devlog);
2444         }
2445
2446         return (rc);
2447 }
2448
2449 /*
2450  * Retrieve various parameters that are of interest to the driver.  The device
2451  * has been initialized by the firmware at this point.
2452  */
2453 static int
2454 get_params__post_init(struct adapter *sc)
2455 {
2456         int rc;
2457         uint32_t param[7], val[7];
2458         struct fw_caps_config_cmd caps;
2459
2460         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2461         param[1] = FW_PARAM_PFVF(EQ_START);
2462         param[2] = FW_PARAM_PFVF(FILTER_START);
2463         param[3] = FW_PARAM_PFVF(FILTER_END);
2464         param[4] = FW_PARAM_PFVF(L2T_START);
2465         param[5] = FW_PARAM_PFVF(L2T_END);
2466         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2467         if (rc != 0) {
2468                 device_printf(sc->dev,
2469                     "failed to query parameters (post_init): %d.\n", rc);
2470                 return (rc);
2471         }
2472
2473         sc->sge.iq_start = val[0];
2474         sc->sge.eq_start = val[1];
2475         sc->tids.ftid_base = val[2];
2476         sc->tids.nftids = val[3] - val[2] + 1;
2477         sc->params.ftid_min = val[2];
2478         sc->params.ftid_max = val[3];
2479         sc->vres.l2t.start = val[4];
2480         sc->vres.l2t.size = val[5] - val[4] + 1;
2481         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2482             ("%s: L2 table size (%u) larger than expected (%u)",
2483             __func__, sc->vres.l2t.size, L2T_SIZE));
2484
2485         /* get capabilites */
2486         bzero(&caps, sizeof(caps));
2487         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2488             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2489         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2490         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2491         if (rc != 0) {
2492                 device_printf(sc->dev,
2493                     "failed to get card capabilities: %d.\n", rc);
2494                 return (rc);
2495         }
2496
2497 #define READ_CAPS(x) do { \
2498         sc->x = htobe16(caps.x); \
2499 } while (0)
2500         READ_CAPS(linkcaps);
2501         READ_CAPS(niccaps);
2502         READ_CAPS(toecaps);
2503         READ_CAPS(rdmacaps);
2504         READ_CAPS(iscsicaps);
2505         READ_CAPS(fcoecaps);
2506
2507         if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2508                 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2509                 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2510                 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2511                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2512                 if (rc != 0) {
2513                         device_printf(sc->dev,
2514                             "failed to query NIC parameters: %d.\n", rc);
2515                         return (rc);
2516                 }
2517                 sc->tids.etid_base = val[0];
2518                 sc->params.etid_min = val[0];
2519                 sc->tids.netids = val[1] - val[0] + 1;
2520                 sc->params.netids = sc->tids.netids;
2521                 sc->params.eo_wr_cred = val[2];
2522                 sc->params.ethoffload = 1;
2523         }
2524
2525         if (sc->toecaps) {
2526                 /* query offload-related parameters */
2527                 param[0] = FW_PARAM_DEV(NTID);
2528                 param[1] = FW_PARAM_PFVF(SERVER_START);
2529                 param[2] = FW_PARAM_PFVF(SERVER_END);
2530                 param[3] = FW_PARAM_PFVF(TDDP_START);
2531                 param[4] = FW_PARAM_PFVF(TDDP_END);
2532                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2533                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2534                 if (rc != 0) {
2535                         device_printf(sc->dev,
2536                             "failed to query TOE parameters: %d.\n", rc);
2537                         return (rc);
2538                 }
2539                 sc->tids.ntids = val[0];
2540                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2541                 sc->tids.stid_base = val[1];
2542                 sc->tids.nstids = val[2] - val[1] + 1;
2543                 sc->vres.ddp.start = val[3];
2544                 sc->vres.ddp.size = val[4] - val[3] + 1;
2545                 sc->params.ofldq_wr_cred = val[5];
2546                 sc->params.offload = 1;
2547         }
2548         if (sc->rdmacaps) {
2549                 param[0] = FW_PARAM_PFVF(STAG_START);
2550                 param[1] = FW_PARAM_PFVF(STAG_END);
2551                 param[2] = FW_PARAM_PFVF(RQ_START);
2552                 param[3] = FW_PARAM_PFVF(RQ_END);
2553                 param[4] = FW_PARAM_PFVF(PBL_START);
2554                 param[5] = FW_PARAM_PFVF(PBL_END);
2555                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2556                 if (rc != 0) {
2557                         device_printf(sc->dev,
2558                             "failed to query RDMA parameters(1): %d.\n", rc);
2559                         return (rc);
2560                 }
2561                 sc->vres.stag.start = val[0];
2562                 sc->vres.stag.size = val[1] - val[0] + 1;
2563                 sc->vres.rq.start = val[2];
2564                 sc->vres.rq.size = val[3] - val[2] + 1;
2565                 sc->vres.pbl.start = val[4];
2566                 sc->vres.pbl.size = val[5] - val[4] + 1;
2567
2568                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2569                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2570                 param[2] = FW_PARAM_PFVF(CQ_START);
2571                 param[3] = FW_PARAM_PFVF(CQ_END);
2572                 param[4] = FW_PARAM_PFVF(OCQ_START);
2573                 param[5] = FW_PARAM_PFVF(OCQ_END);
2574                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2575                 if (rc != 0) {
2576                         device_printf(sc->dev,
2577                             "failed to query RDMA parameters(2): %d.\n", rc);
2578                         return (rc);
2579                 }
2580                 sc->vres.qp.start = val[0];
2581                 sc->vres.qp.size = val[1] - val[0] + 1;
2582                 sc->vres.cq.start = val[2];
2583                 sc->vres.cq.size = val[3] - val[2] + 1;
2584                 sc->vres.ocq.start = val[4];
2585                 sc->vres.ocq.size = val[5] - val[4] + 1;
2586         }
2587         if (sc->iscsicaps) {
2588                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2589                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2590                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2591                 if (rc != 0) {
2592                         device_printf(sc->dev,
2593                             "failed to query iSCSI parameters: %d.\n", rc);
2594                         return (rc);
2595                 }
2596                 sc->vres.iscsi.start = val[0];
2597                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2598         }
2599
2600         /*
2601          * We've got the params we wanted to query via the firmware.  Now grab
2602          * some others directly from the chip.
2603          */
2604         rc = t4_read_chip_settings(sc);
2605
2606         return (rc);
2607 }
2608
2609 static int
2610 set_params__post_init(struct adapter *sc)
2611 {
2612         uint32_t param, val;
2613
2614         /* ask for encapsulated CPLs */
2615         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2616         val = 1;
2617         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2618
2619         return (0);
2620 }
2621
2622 #undef FW_PARAM_PFVF
2623 #undef FW_PARAM_DEV
2624
2625 static void
2626 t4_set_desc(struct adapter *sc)
2627 {
2628         char buf[128];
2629         struct adapter_params *p = &sc->params;
2630
2631         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2632             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2633             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2634
2635         device_set_desc_copy(sc->dev, buf);
2636 }
2637
/*
 * Rebuild the port's ifmedia list from scratch based on the port type and
 * the currently plugged-in module type.  Called with the medialist in any
 * state; takes the port lock for the duration.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	/* Start clean: drop whatever media entries were installed before. */
	ifmedia_removeall(media);

	/* All entries are Ethernet full-duplex unless cleared below. */
	m = IFM_ETHER | IFM_FDX;
	/* Encode port and module type into the entry's opaque data word. */
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		/* NOTE(review): no ifmedia_set() here — current media is
		 * left unset for this port type; confirm intentional. */
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		/* BASE-T ports: add the lower speeds and default to auto. */
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* 10G pluggable ports: media depends on the module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Empty cage: no module, so no duplex either. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_QSFP:
		/* 40G QSFP port: media depends on the module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Empty cage: no module, so no duplex either. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		/* Unrecognized port type: still publish an UNKNOWN entry. */
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2763
2764 #define FW_MAC_EXACT_CHUNK      7
2765
2766 /*
2767  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2768  * indicates which parameters should be programmed (the rest are left alone).
2769  */
2770 static int
2771 update_mac_settings(struct port_info *pi, int flags)
2772 {
2773         int rc;
2774         struct ifnet *ifp = pi->ifp;
2775         struct adapter *sc = pi->adapter;
2776         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2777
2778         ASSERT_SYNCHRONIZED_OP(sc);
2779         KASSERT(flags, ("%s: not told what to update.", __func__));
2780
2781         if (flags & XGMAC_MTU)
2782                 mtu = ifp->if_mtu;
2783
2784         if (flags & XGMAC_PROMISC)
2785                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2786
2787         if (flags & XGMAC_ALLMULTI)
2788                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2789
2790         if (flags & XGMAC_VLANEX)
2791                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2792
2793         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2794             vlanex, false);
2795         if (rc) {
2796                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2797                 return (rc);
2798         }
2799
2800         if (flags & XGMAC_UCADDR) {
2801                 uint8_t ucaddr[ETHER_ADDR_LEN];
2802
2803                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2804                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2805                     ucaddr, true, true);
2806                 if (rc < 0) {
2807                         rc = -rc;
2808                         if_printf(ifp, "change_mac failed: %d\n", rc);
2809                         return (rc);
2810                 } else {
2811                         pi->xact_addr_filt = rc;
2812                         rc = 0;
2813                 }
2814         }
2815
2816         if (flags & XGMAC_MCADDRS) {
2817                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2818                 int del = 1;
2819                 uint64_t hash = 0;
2820                 struct ifmultiaddr *ifma;
2821                 int i = 0, j;
2822
2823                 if_maddr_rlock(ifp);
2824                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2825                         if (ifma->ifma_addr->sa_family != AF_LINK)
2826                                 continue;
2827                         mcaddr[i++] =
2828                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2829
2830                         if (i == FW_MAC_EXACT_CHUNK) {
2831                                 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2832                                     del, i, mcaddr, NULL, &hash, 0);
2833                                 if (rc < 0) {
2834                                         rc = -rc;
2835                                         for (j = 0; j < i; j++) {
2836                                                 if_printf(ifp,
2837                                                     "failed to add mc address"
2838                                                     " %02x:%02x:%02x:"
2839                                                     "%02x:%02x:%02x rc=%d\n",
2840                                                     mcaddr[j][0], mcaddr[j][1],
2841                                                     mcaddr[j][2], mcaddr[j][3],
2842                                                     mcaddr[j][4], mcaddr[j][5],
2843                                                     rc);
2844                                         }
2845                                         goto mcfail;
2846                                 }
2847                                 del = 0;
2848                                 i = 0;
2849                         }
2850                 }
2851                 if (i > 0) {
2852                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2853                             del, i, mcaddr, NULL, &hash, 0);
2854                         if (rc < 0) {
2855                                 rc = -rc;
2856                                 for (j = 0; j < i; j++) {
2857                                         if_printf(ifp,
2858                                             "failed to add mc address"
2859                                             " %02x:%02x:%02x:"
2860                                             "%02x:%02x:%02x rc=%d\n",
2861                                             mcaddr[j][0], mcaddr[j][1],
2862                                             mcaddr[j][2], mcaddr[j][3],
2863                                             mcaddr[j][4], mcaddr[j][5],
2864                                             rc);
2865                                 }
2866                                 goto mcfail;
2867                         }
2868                 }
2869
2870                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2871                 if (rc != 0)
2872                         if_printf(ifp, "failed to set mc address hash: %d", rc);
2873 mcfail:
2874                 if_maddr_runlock(ifp);
2875         }
2876
2877         return (rc);
2878 }
2879
2880 int
2881 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2882     char *wmesg)
2883 {
2884         int rc, pri;
2885
2886 #ifdef WITNESS
2887         /* the caller thinks it's ok to sleep, but is it really? */
2888         if (flags & SLEEP_OK)
2889                 pause("t4slptst", 1);
2890 #endif
2891
2892         if (INTR_OK)
2893                 pri = PCATCH;
2894         else
2895                 pri = 0;
2896
2897         ADAPTER_LOCK(sc);
2898         for (;;) {
2899
2900                 if (pi && IS_DOOMED(pi)) {
2901                         rc = ENXIO;
2902                         goto done;
2903                 }
2904
2905                 if (!IS_BUSY(sc)) {
2906                         rc = 0;
2907                         break;
2908                 }
2909
2910                 if (!(flags & SLEEP_OK)) {
2911                         rc = EBUSY;
2912                         goto done;
2913                 }
2914
2915                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2916                         rc = EINTR;
2917                         goto done;
2918                 }
2919         }
2920
2921         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2922         SET_BUSY(sc);
2923 #ifdef INVARIANTS
2924         sc->last_op = wmesg;
2925         sc->last_op_thr = curthread;
2926 #endif
2927
2928 done:
2929         if (!(flags & HOLD_LOCK) || rc)
2930                 ADAPTER_UNLOCK(sc);
2931
2932         return (rc);
2933 }
2934
2935 void
2936 end_synchronized_op(struct adapter *sc, int flags)
2937 {
2938
2939         if (flags & LOCK_HELD)
2940                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2941         else
2942                 ADAPTER_LOCK(sc);
2943
2944         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2945         CLR_BUSY(sc);
2946         wakeup(&sc->flags);
2947         ADAPTER_UNLOCK(sc);
2948 }
2949
/*
 * Bring the port up: finish any pending adapter/port initialization,
 * program the MAC, start the link, and enable the virtual interface.
 * Must be called from within a synchronized operation.  Idempotent in the
 * sense that an already-running port returns 0 immediately.  On failure
 * the port is torn back down via cxgbe_uninit_synchronized().
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* Lazy one-time init of adapter-wide and per-port resources. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		/* T4 and T5 keep this control register at different addresses. */
		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the 1Hz housekeeping tick for this port. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}
3013
3014 /*
3015  * Idempotent.
3016  */
3017 static int
3018 cxgbe_uninit_synchronized(struct port_info *pi)
3019 {
3020         struct adapter *sc = pi->adapter;
3021         struct ifnet *ifp = pi->ifp;
3022         int rc;
3023
3024         ASSERT_SYNCHRONIZED_OP(sc);
3025
3026         /*
3027          * Disable the VI so that all its data in either direction is discarded
3028          * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
3029          * tick) intact as the TP can deliver negative advice or data that it's
3030          * holding in its RAM (for an offloaded connection) even after the VI is
3031          * disabled.
3032          */
3033         rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
3034         if (rc) {
3035                 if_printf(ifp, "disable_vi failed: %d\n", rc);
3036                 return (rc);
3037         }
3038
3039         clrbit(&sc->open_device_map, pi->port_id);
3040         PORT_LOCK(pi);
3041         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3042         PORT_UNLOCK(pi);
3043
3044         pi->link_cfg.link_ok = 0;
3045         pi->link_cfg.speed = 0;
3046         pi->linkdnrc = -1;
3047         t4_os_link_changed(sc, pi->port_id, 0, -1);
3048
3049         return (0);
3050 }
3051
3052 /*
3053  * It is ok for this function to fail midway and return right away.  t4_detach
3054  * will walk the entire sc->irq list and clean up whatever is valid.
3055  */
3056 static int
3057 setup_intr_handlers(struct adapter *sc)
3058 {
3059         int rc, rid, p, q;
3060         char s[8];
3061         struct irq *irq;
3062         struct port_info *pi;
3063         struct sge_rxq *rxq;
3064 #ifdef TCP_OFFLOAD
3065         struct sge_ofld_rxq *ofld_rxq;
3066 #endif
3067
3068         /*
3069          * Setup interrupts.
3070          */
3071         irq = &sc->irq[0];
3072         rid = sc->intr_type == INTR_INTX ? 0 : 1;
3073         if (sc->intr_count == 1) {
3074                 KASSERT(!(sc->flags & INTR_DIRECT),
3075                     ("%s: single interrupt && INTR_DIRECT?", __func__));
3076
3077                 rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
3078                 if (rc != 0)
3079                         return (rc);
3080         } else {
3081                 /* Multiple interrupts. */
3082                 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3083                     ("%s: too few intr.", __func__));
3084
3085                 /* The first one is always error intr */
3086                 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3087                 if (rc != 0)
3088                         return (rc);
3089                 irq++;
3090                 rid++;
3091
3092                 /* The second one is always the firmware event queue */
3093                 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3094                     "evt");
3095                 if (rc != 0)
3096                         return (rc);
3097                 irq++;
3098                 rid++;
3099
3100                 /*
3101                  * Note that if INTR_DIRECT is not set then either the NIC rx
3102                  * queues or (exclusive or) the TOE rx queueus will be taking
3103                  * direct interrupts.
3104                  *
3105                  * There is no need to check for is_offload(sc) as nofldrxq
3106                  * will be 0 if offload is disabled.
3107                  */
3108                 for_each_port(sc, p) {
3109                         pi = sc->port[p];
3110
3111 #ifdef TCP_OFFLOAD
3112                         /*
3113                          * Skip over the NIC queues if they aren't taking direct
3114                          * interrupts.
3115                          */
3116                         if (!(sc->flags & INTR_DIRECT) &&
3117                             pi->nofldrxq > pi->nrxq)
3118                                 goto ofld_queues;
3119 #endif
3120                         rxq = &sc->sge.rxq[pi->first_rxq];
3121                         for (q = 0; q < pi->nrxq; q++, rxq++) {
3122                                 snprintf(s, sizeof(s), "%d.%d", p, q);
3123                                 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3124                                     s);
3125                                 if (rc != 0)
3126                                         return (rc);
3127                                 irq++;
3128                                 rid++;
3129                         }
3130
3131 #ifdef TCP_OFFLOAD
3132                         /*
3133                          * Skip over the offload queues if they aren't taking
3134                          * direct interrupts.
3135                          */
3136                         if (!(sc->flags & INTR_DIRECT))
3137                                 continue;
3138 ofld_queues:
3139                         ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3140                         for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3141                                 snprintf(s, sizeof(s), "%d,%d", p, q);
3142                                 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3143                                     ofld_rxq, s);
3144                                 if (rc != 0)
3145                                         return (rc);
3146                                 irq++;
3147                                 rid++;
3148                         }
3149 #endif
3150                 }
3151         }
3152
3153         return (0);
3154 }
3155
/*
 * One-time initialization of adapter-wide resources: the queues that
 * belong to the adapter itself (not to any port) and the driver's
 * taskqueues.  Sets FULL_INIT_DONE on success; on failure everything
 * allocated so far is torn down via adapter_full_uninit().
 */
static int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	for (i = 0; i < nitems(sc->tq); i++) {
		/*
		 * &sc->tq[i] (a pointer to the slot, not its contents) is
		 * the enqueue context: taskqueue_thread_enqueue reads the
		 * slot at enqueue time, after it has been filled in below.
		 */
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);

	return (rc);
}
3193
3194 static int
3195 adapter_full_uninit(struct adapter *sc)
3196 {
3197         int i;
3198
3199         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3200
3201         t4_teardown_adapter_queues(sc);
3202
3203         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3204                 taskqueue_free(sc->tq[i]);
3205                 sc->tq[i] = NULL;
3206         }
3207
3208         sc->flags &= ~FULL_INIT_DONE;
3209
3210         return (0);
3211 }
3212
3213 static int
3214 port_full_init(struct port_info *pi)
3215 {
3216         struct adapter *sc = pi->adapter;
3217         struct ifnet *ifp = pi->ifp;
3218         uint16_t *rss;
3219         struct sge_rxq *rxq;
3220         int rc, i, j;
3221
3222         ASSERT_SYNCHRONIZED_OP(sc);
3223         KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3224             ("%s: PORT_INIT_DONE already", __func__));
3225
3226         sysctl_ctx_init(&pi->ctx);
3227         pi->flags |= PORT_SYSCTL_CTX;
3228
3229         /*
3230          * Allocate tx/rx/fl queues for this port.
3231          */
3232         rc = t4_setup_port_queues(pi);
3233         if (rc != 0)
3234                 goto done;      /* error message displayed already */
3235
3236         /*
3237          * Setup RSS for this port.  Save a copy of the RSS table for later use.
3238          */
3239         rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3240         for (i = 0; i < pi->rss_size;) {
3241                 for_each_rxq(pi, j, rxq) {
3242                         rss[i++] = rxq->iq.abs_id;
3243                         if (i == pi->rss_size)
3244                                 break;
3245                 }
3246         }
3247
3248         rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3249             pi->rss_size);
3250         if (rc != 0) {
3251                 if_printf(ifp, "rss_config failed: %d\n", rc);
3252                 goto done;
3253         }
3254
3255         pi->rss = rss;
3256         pi->flags |= PORT_INIT_DONE;
3257 done:
3258         if (rc != 0)
3259                 port_full_uninit(pi);
3260
3261         return (rc);
3262 }
3263
3264 /*
3265  * Idempotent.
3266  */
3267 static int
3268 port_full_uninit(struct port_info *pi)
3269 {
3270         struct adapter *sc = pi->adapter;
3271         int i;
3272         struct sge_rxq *rxq;
3273         struct sge_txq *txq;
3274 #ifdef TCP_OFFLOAD
3275         struct sge_ofld_rxq *ofld_rxq;
3276         struct sge_wrq *ofld_txq;
3277 #endif
3278
3279         if (pi->flags & PORT_INIT_DONE) {
3280
3281                 /* Need to quiesce queues.  XXX: ctrl queues? */
3282
3283                 for_each_txq(pi, i, txq) {
3284                         quiesce_eq(sc, &txq->eq);
3285                 }
3286
3287 #ifdef TCP_OFFLOAD
3288                 for_each_ofld_txq(pi, i, ofld_txq) {
3289                         quiesce_eq(sc, &ofld_txq->eq);
3290                 }
3291 #endif
3292
3293                 for_each_rxq(pi, i, rxq) {
3294                         quiesce_iq(sc, &rxq->iq);
3295                         quiesce_fl(sc, &rxq->fl);
3296                 }
3297
3298 #ifdef TCP_OFFLOAD
3299                 for_each_ofld_rxq(pi, i, ofld_rxq) {
3300                         quiesce_iq(sc, &ofld_rxq->iq);
3301                         quiesce_fl(sc, &ofld_rxq->fl);
3302                 }
3303 #endif
3304                 free(pi->rss, M_CXGBE);
3305         }
3306
3307         t4_teardown_port_queues(pi);
3308         pi->flags &= ~PORT_INIT_DONE;
3309
3310         return (0);
3311 }
3312
/*
 * Quiesce an egress queue: mark it doomed so it won't be used any further,
 * wait out any in-flight credit flush, then drain the deferred-tx callout
 * and task so nothing can still be referencing the eq when it's destroyed.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;	/* no new work may be queued to this eq */

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	/* Make sure any already-queued tx task has run to completion. */
	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3332
3333 static void
3334 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3335 {
3336         (void) sc;      /* unused */
3337
3338         /* Synchronize with the interrupt handler */
3339         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3340                 pause("iqfree", 1);
3341 }
3342
/*
 * Quiesce a freelist: mark it doomed so the starvation handler stops
 * refilling it, then drain the starved-freelist callout.  The sfl_lock is
 * taken around the flag update so the doomed bit can't race with the
 * callout examining the starved-freelist state.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	/* Wait for any in-progress starvation callout to finish. */
	callout_drain(&sc->sfl_callout);
	/* A doomed fl must not still be on the starved list. */
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3356
3357 static int
3358 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3359     driver_intr_t *handler, void *arg, char *name)
3360 {
3361         int rc;
3362
3363         irq->rid = rid;
3364         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3365             RF_SHAREABLE | RF_ACTIVE);
3366         if (irq->res == NULL) {
3367                 device_printf(sc->dev,
3368                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3369                 return (ENOMEM);
3370         }
3371
3372         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3373             NULL, handler, arg, &irq->tag);
3374         if (rc != 0) {
3375                 device_printf(sc->dev,
3376                     "failed to setup interrupt for rid %d, name %s: %d\n",
3377                     rid, name, rc);
3378         } else if (name)
3379                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3380
3381         return (rc);
3382 }
3383
3384 static int
3385 t4_free_irq(struct adapter *sc, struct irq *irq)
3386 {
3387         if (irq->tag)
3388                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3389         if (irq->res)
3390                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3391
3392         bzero(irq, sizeof(*irq));
3393
3394         return (0);
3395 }
3396
/*
 * Copy the inclusive register range [start, end] into buf, placing each
 * 32-bit register value at the offset in buf that matches its address.
 */
static void
reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	unsigned int reg;
	uint32_t *dst = (uint32_t *)(buf + start);

	for (reg = start; reg <= end; reg += sizeof(uint32_t))
		*dst++ = t4_read_reg(sc, reg);
}
3406
/*
 * Snapshot the chip's registers into the caller-supplied buffer.  Only the
 * ranges listed in the chip-specific table are read; each consecutive pair
 * of table entries is an inclusive [start, end] register address range, and
 * each value lands in buf at the offset matching its register address.
 * regs->version encodes the chip id and revision so consumers can match the
 * dump against the right register definitions.
 *
 * NOTE(review): buf is assumed to be large enough to hold the highest range
 * end in the selected table — presumably sized by the ioctl caller from
 * regs->len; confirm against the caller.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* Register ranges that are valid/safe to read on T4 chips. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11110,
		0x11118, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* Register ranges that are valid/safe to read on T5 chips. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x11110,
		0x11118, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d00, 0x30d00,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e00, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32200, 0x32200,
		0x32208, 0x32240,
		0x32248, 0x32280,
		0x32288, 0x322c0,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d00, 0x34d00,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e00, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36200, 0x36200,
		0x36208, 0x36240,
		0x36248, 0x36280,
		0x36288, 0x362c0,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d00, 0x38d00,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e00, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a200, 0x3a200,
		0x3a208, 0x3a240,
		0x3a248, 0x3a280,
		0x3a288, 0x3a2c0,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd00, 0x3cd00,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de00, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e200, 0x3e200,
		0x3e208, 0x3e240,
		0x3e248, 0x3e280,
		0x3e288, 0x3e2c0,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Pick the range table that matches this chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	/* Dump each [start, end] pair; entries come two at a time. */
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4084
/*
 * Per-port periodic callout.  Pulls the port's MAC statistics from the
 * hardware and publishes them via the ifnet counters, then reschedules
 * itself for one tick (hz) later.  Stops rescheduling once the interface
 * is no longer marked running.
 *
 * Runs with the port lock held for the duration of the update.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	/* Refresh the cached hardware counters for this port's channel. */
	t4_get_port_stats(sc, pi->tx_chan, s);

	/*
	 * Pause frames are counted by the MAC but are not payload traffic,
	 * so they are subtracted out.  The "* 64" assumes each pause frame
	 * is 64 octets on the wire (minimum frame size) — NOTE(review):
	 * confirm against the MPS counter semantics.
	 */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	/* Input drops: buffer-group overflows and truncations ... */
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;
	/* ... plus TP tunnel congestion drops on each rx channel in use. */
	for (i = 0; i < 4; i++) {
		if (pi->rx_chan_map & (1 << i)) {
			uint32_t v;

			/*
			 * XXX: indirect reads from the same ADDR/DATA pair can
			 * race with each other.
			 */
			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
			ifp->if_iqdrops += v;
		}
	}

	/* Output drops: MAC-level drops plus software buf_ring drops. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_oqdrops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	/* Re-arm for the next 1s update before dropping the lock. */
	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
4138
4139 static void
4140 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4141 {
4142         struct ifnet *vlan;
4143
4144         if (arg != ifp || ifp->if_type != IFT_ETHER)
4145                 return;
4146
4147         vlan = VLAN_DEVAT(ifp, vid);
4148         VLAN_SETCOOKIE(vlan, ifp);
4149 }
4150
/*
 * Default CPL message handler, installed for every opcode that has no real
 * handler registered.  Receiving a CPL here means firmware sent a message
 * nobody asked for: panic under INVARIANTS, otherwise log and drop the
 * payload mbuf (which may be NULL).
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);	/* m_freem(NULL) is a no-op */
#endif
	return (EDOOFUS);	/* deliberately conspicuous errno */
}
4165
4166 int
4167 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4168 {
4169         uintptr_t *loc, new;
4170
4171         if (opcode >= nitems(sc->cpl_handler))
4172                 return (EINVAL);
4173
4174         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4175         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4176         atomic_store_rel_ptr(loc, new);
4177
4178         return (0);
4179 }
4180
/*
 * Default async-notification handler, in place whenever no real handler is
 * registered.  An async notification with no handler indicates a driver
 * bug or unexpected firmware behavior: panic under INVARIANTS, otherwise
 * just log it.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);	/* deliberately conspicuous errno */
}
4193
4194 int
4195 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4196 {
4197         uintptr_t *loc, new;
4198
4199         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4200         loc = (uintptr_t *) &sc->an_handler;
4201         atomic_store_rel_ptr(loc, new);
4202
4203         return (0);
4204 }
4205
/*
 * Default firmware-message handler for message types with no registered
 * handler.  Recovers the enclosing cpl_fw6_msg from the payload pointer
 * so the unhandled type can be reported: panic under INVARIANTS,
 * otherwise log it.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	/* rpl points at cpl->data[0]; walk back to the containing CPL. */
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);	/* deliberately conspicuous errno */
}
4219
4220 int
4221 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4222 {
4223         uintptr_t *loc, new;
4224
4225         if (type >= nitems(sc->fw_msg_handler))
4226                 return (EINVAL);
4227
4228         /*
4229          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4230          * handler dispatch table.  Reject any attempt to install a handler for
4231          * this subtype.
4232          */
4233         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4234                 return (EINVAL);
4235
4236         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4237         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4238         atomic_store_rel_ptr(loc, new);
4239
4240         return (0);
4241 }
4242
4243 static int
4244 t4_sysctls(struct adapter *sc)
4245 {
4246         struct sysctl_ctx_list *ctx;
4247         struct sysctl_oid *oid;
4248         struct sysctl_oid_list *children, *c0;
4249         static char *caps[] = {
4250                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4251                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4252                     "\6HASHFILTER\7ETHOFLD",
4253                 "\20\1TOE",                             /* caps[2] toecaps */
4254                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4255                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4256                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4257                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4258                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4259                     "\4PO_INITIAOR\5PO_TARGET"
4260         };
4261         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4262
4263         ctx = device_get_sysctl_ctx(sc->dev);
4264
4265         /*
4266          * dev.t4nex.X.
4267          */
4268         oid = device_get_sysctl_tree(sc->dev);
4269         c0 = children = SYSCTL_CHILDREN(oid);
4270
4271         sc->sc_do_rxcopy = 1;
4272         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4273             &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4274
4275         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4276             sc->params.nports, "# of ports");
4277
4278         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4279             NULL, chip_rev(sc), "chip hardware revision");
4280
4281         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4282             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4283
4284         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4285             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4286
4287         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4288             sc->cfcsum, "config file checksum");
4289
4290         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4291             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4292             sysctl_bitfield, "A", "available doorbells");
4293
4294         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4295             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4296             sysctl_bitfield, "A", "available link capabilities");
4297
4298         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4299             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4300             sysctl_bitfield, "A", "available NIC capabilities");
4301
4302         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4303             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4304             sysctl_bitfield, "A", "available TCP offload capabilities");
4305
4306         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4307             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4308             sysctl_bitfield, "A", "available RDMA capabilities");
4309
4310         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4311             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4312             sysctl_bitfield, "A", "available iSCSI capabilities");
4313
4314         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4315             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4316             sysctl_bitfield, "A", "available FCoE capabilities");
4317
4318         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4319             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4320
4321         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4322             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4323             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4324             "interrupt holdoff timer values (us)");
4325
4326         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4327             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4328             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4329             "interrupt holdoff packet counter values");
4330
4331         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4332             NULL, sc->tids.nftids, "number of filters");
4333
4334         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4335             CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4336             "chip temperature (in Celsius)");
4337
4338         t4_sge_sysctls(sc, ctx, children);
4339
4340         sc->lro_timeout = 100;
4341         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4342             &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4343
4344 #ifdef SBUF_DRAIN
4345         /*
4346          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4347          */
4348         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4349             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4350             "logs and miscellaneous information");
4351         children = SYSCTL_CHILDREN(oid);
4352
4353         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4354             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4355             sysctl_cctrl, "A", "congestion control");
4356
4357         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4358             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4359             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4360
4361         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4362             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4363             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4364
4365         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4366             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4367             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4368
4369         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4370             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4371             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4372
4373         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4374             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4375             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4376
4377         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4378             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4379             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4380
4381         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4382             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4383             sysctl_cim_la, "A", "CIM logic analyzer");
4384
4385         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4386             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4387             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4388
4389         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4390             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4391             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4392
4393         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4394             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4395             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4396
4397         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4398             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4399             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4400
4401         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4402             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4403             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4404
4405         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4406             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4407             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4408
4409         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4410             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4411             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4412
4413         if (is_t5(sc)) {
4414                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4415                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4416                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4417
4418                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4419                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4420                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4421         }
4422
4423         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4424             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4425             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4426
4427         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4428             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4429             sysctl_cim_qcfg, "A", "CIM queue configuration");
4430
4431         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4432             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4433             sysctl_cpl_stats, "A", "CPL statistics");
4434
4435         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4436             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4437             sysctl_ddp_stats, "A", "DDP statistics");
4438
4439         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4440             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4441             sysctl_devlog, "A", "firmware's device log");
4442
4443         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4444             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4445             sysctl_fcoe_stats, "A", "FCoE statistics");
4446
4447         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4448             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4449             sysctl_hw_sched, "A", "hardware scheduler ");
4450
4451         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4452             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4453             sysctl_l2t, "A", "hardware L2 table");
4454
4455         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4456             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4457             sysctl_lb_stats, "A", "loopback statistics");
4458
4459         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4460             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4461             sysctl_meminfo, "A", "memory regions");
4462
4463         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4464             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4465             sysctl_mps_tcam, "A", "MPS TCAM entries");
4466
4467         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4468             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4469             sysctl_path_mtus, "A", "path MTUs");
4470
4471         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4472             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4473             sysctl_pm_stats, "A", "PM statistics");
4474
4475         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4476             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4477             sysctl_rdma_stats, "A", "RDMA statistics");
4478
4479         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4480             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4481             sysctl_tcp_stats, "A", "TCP statistics");
4482
4483         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4484             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4485             sysctl_tids, "A", "TID information");
4486
4487         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4488             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4489             sysctl_tp_err_stats, "A", "TP error statistics");
4490
4491         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4492             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4493             sysctl_tp_la, "A", "TP logic analyzer");
4494
4495         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4496             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4497             sysctl_tx_rate, "A", "Tx rate");
4498
4499         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4500             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4501             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4502
4503         if (is_t5(sc)) {
4504                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4505                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4506                     sysctl_wcwr_stats, "A", "write combined work requests");
4507         }
4508 #endif
4509
4510 #ifdef TCP_OFFLOAD
4511         if (is_offload(sc)) {
4512                 /*
4513                  * dev.t4nex.X.toe.
4514                  */
4515                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4516                     NULL, "TOE parameters");
4517                 children = SYSCTL_CHILDREN(oid);
4518
4519                 sc->tt.sndbuf = 256 * 1024;
4520                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4521                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4522
4523                 sc->tt.ddp = 0;
4524                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4525                     &sc->tt.ddp, 0, "DDP allowed");
4526
4527                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4528                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4529                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4530
4531                 sc->tt.ddp_thres =
4532                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4533                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4534                     &sc->tt.ddp_thres, 0, "DDP threshold");
4535
4536                 sc->tt.rx_coalesce = 1;
4537                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4538                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4539         }
4540 #endif
4541
4542
4543         return (0);
4544 }
4545
4546 static int
4547 cxgbe_sysctls(struct port_info *pi)
4548 {
4549         struct sysctl_ctx_list *ctx;
4550         struct sysctl_oid *oid;
4551         struct sysctl_oid_list *children;
4552         struct adapter *sc = pi->adapter;
4553
4554         ctx = device_get_sysctl_ctx(pi->dev);
4555
4556         /*
4557          * dev.cxgbe.X.
4558          */
4559         oid = device_get_sysctl_tree(pi->dev);
4560         children = SYSCTL_CHILDREN(oid);
4561
4562         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4563            CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4564         if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4565                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4566                     CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4567                     "PHY temperature (in Celsius)");
4568                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4569                     CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4570                     "PHY firmware version");
4571         }
4572         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4573             &pi->nrxq, 0, "# of rx queues");
4574         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4575             &pi->ntxq, 0, "# of tx queues");
4576         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4577             &pi->first_rxq, 0, "index of first rx queue");
4578         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4579             &pi->first_txq, 0, "index of first tx queue");
4580         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
4581             CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
4582             "Reserve queue 0 for non-flowid packets");
4583
4584 #ifdef TCP_OFFLOAD
4585         if (is_offload(sc)) {
4586                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4587                     &pi->nofldrxq, 0,
4588                     "# of rx queues for offloaded TCP connections");
4589                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4590                     &pi->nofldtxq, 0,
4591                     "# of tx queues for offloaded TCP connections");
4592                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4593                     CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4594                     "index of first TOE rx queue");
4595                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4596                     CTLFLAG_RD, &pi->first_ofld_txq, 0,
4597                     "index of first TOE tx queue");
4598         }
4599 #endif
4600
4601         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4602             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4603             "holdoff timer index");
4604         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4605             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4606             "holdoff packet counter index");
4607
4608         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4609             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4610             "rx queue size");
4611         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4612             CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4613             "tx queue size");
4614
4615         /*
4616          * dev.cxgbe.X.stats.
4617          */
4618         oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4619             NULL, "port statistics");
4620         children = SYSCTL_CHILDREN(oid);
4621
4622 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4623         SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4624             CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
4625             sysctl_handle_t4_reg64, "QU", desc)
4626
4627         SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4628             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4629         SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4630             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4631         SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4632             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4633         SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4634             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4635         SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4636             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4637         SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4638             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4639         SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4640             "# of tx frames in this range",
4641             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4642         SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4643             "# of tx frames in this range",
4644             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4645         SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4646             "# of tx frames in this range",
4647             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4648         SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4649             "# of tx frames in this range",
4650             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4651         SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4652             "# of tx frames in this range",
4653             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4654         SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4655             "# of tx frames in this range",
4656             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4657         SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4658             "# of tx frames in this range",
4659             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4660         SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4661             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4662         SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4663             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4664         SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4665             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4666         SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4667             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4668         SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4669             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4670         SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4671             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4672         SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4673             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4674         SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4675             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4676         SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4677             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4678         SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4679             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4680
4681         SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4682             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4683         SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4684             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4685         SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4686             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4687         SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4688             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4689         SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4690             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4691         SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4692             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4693         SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4694             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4695         SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4696             "# of frames received with bad FCS",
4697             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4698         SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4699             "# of frames received with length error",
4700             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4701         SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4702             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4703         SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4704             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4705         SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4706             "# of rx frames in this range",
4707             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4708         SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4709             "# of rx frames in this range",
4710             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4711         SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4712             "# of rx frames in this range",
4713             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4714         SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4715             "# of rx frames in this range",
4716             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4717         SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4718             "# of rx frames in this range",
4719             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4720         SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4721             "# of rx frames in this range",
4722             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4723         SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4724             "# of rx frames in this range",
4725             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4726         SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4727             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4728         SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4729             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4730         SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4731             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4732         SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4733             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4734         SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4735             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4736         SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4737             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4738         SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4739             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4740         SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4741             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4742         SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4743             PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4744
4745 #undef SYSCTL_ADD_T4_REG64
4746
4747 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4748         SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4749             &pi->stats.name, desc)
4750
4751         /* We get these from port_stats and they may be stale by upto 1s */
4752         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4753             "# drops due to buffer-group 0 overflows");
4754         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4755             "# drops due to buffer-group 1 overflows");
4756         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4757             "# drops due to buffer-group 2 overflows");
4758         SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4759             "# drops due to buffer-group 3 overflows");
4760         SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4761             "# of buffer-group 0 truncated packets");
4762         SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4763             "# of buffer-group 1 truncated packets");
4764         SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4765             "# of buffer-group 2 truncated packets");
4766         SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4767             "# of buffer-group 3 truncated packets");
4768
4769 #undef SYSCTL_ADD_T4_PORTSTAT
4770
4771         return (0);
4772 }
4773
4774 static int
4775 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4776 {
4777         int rc, *i;
4778         struct sbuf sb;
4779
4780         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4781         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4782                 sbuf_printf(&sb, "%d ", *i);
4783         sbuf_trim(&sb);
4784         sbuf_finish(&sb);
4785         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4786         sbuf_delete(&sb);
4787         return (rc);
4788 }
4789
4790 static int
4791 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4792 {
4793         int rc;
4794         struct sbuf *sb;
4795
4796         rc = sysctl_wire_old_buffer(req, 0);
4797         if (rc != 0)
4798                 return(rc);
4799
4800         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4801         if (sb == NULL)
4802                 return (ENOMEM);
4803
4804         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4805         rc = sbuf_finish(sb);
4806         sbuf_delete(sb);
4807
4808         return (rc);
4809 }
4810
4811 static int
4812 sysctl_btphy(SYSCTL_HANDLER_ARGS)
4813 {
4814         struct port_info *pi = arg1;
4815         int op = arg2;
4816         struct adapter *sc = pi->adapter;
4817         u_int v;
4818         int rc;
4819
4820         rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4821         if (rc)
4822                 return (rc);
4823         /* XXX: magic numbers */
4824         rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4825             &v);
4826         end_synchronized_op(sc, 0);
4827         if (rc)
4828                 return (rc);
4829         if (op == 0)
4830                 v /= 256;
4831
4832         rc = sysctl_handle_int(oidp, &v, 0, req);
4833         return (rc);
4834 }
4835
4836 static int
4837 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4838 {
4839         struct port_info *pi = arg1;
4840         int rc, val;
4841
4842         val = pi->rsrv_noflowq;
4843         rc = sysctl_handle_int(oidp, &val, 0, req);
4844         if (rc != 0 || req->newptr == NULL)
4845                 return (rc);
4846
4847         if ((val >= 1) && (pi->ntxq > 1))
4848                 pi->rsrv_noflowq = 1;
4849         else
4850                 pi->rsrv_noflowq = 0;
4851
4852         return (rc);
4853 }
4854
/*
 * Sysctl handler for the port's interrupt holdoff timer index.  Unlike the
 * packet-count index this can be changed at runtime: the new value is
 * written into the intr_params of every live rx queue (and offload rx
 * queue when TCP_OFFLOAD is compiled in) as well as recorded in the port.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* The index selects one of the SGE's SGE_NTIMERS holdoff timers. */
	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Keep the packet-count enable bit in sync with pi->pktc_idx. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		/* No 8-bit release store on this platform; plain store. */
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	pi->tmr_idx = idx;

	/* begin_synchronized_op was called with HOLD_LOCK. */
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4903
4904 static int
4905 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4906 {
4907         struct port_info *pi = arg1;
4908         struct adapter *sc = pi->adapter;
4909         int idx, rc;
4910
4911         idx = pi->pktc_idx;
4912
4913         rc = sysctl_handle_int(oidp, &idx, 0, req);
4914         if (rc != 0 || req->newptr == NULL)
4915                 return (rc);
4916
4917         if (idx < -1 || idx >= SGE_NCOUNTERS)
4918                 return (EINVAL);
4919
4920         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4921             "t4pktc");
4922         if (rc)
4923                 return (rc);
4924
4925         if (pi->flags & PORT_INIT_DONE)
4926                 rc = EBUSY; /* cannot be changed once the queues are created */
4927         else
4928                 pi->pktc_idx = idx;
4929
4930         end_synchronized_op(sc, LOCK_HELD);
4931         return (rc);
4932 }
4933
4934 static int
4935 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4936 {
4937         struct port_info *pi = arg1;
4938         struct adapter *sc = pi->adapter;
4939         int qsize, rc;
4940
4941         qsize = pi->qsize_rxq;
4942
4943         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4944         if (rc != 0 || req->newptr == NULL)
4945                 return (rc);
4946
4947         if (qsize < 128 || (qsize & 7))
4948                 return (EINVAL);
4949
4950         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4951             "t4rxqs");
4952         if (rc)
4953                 return (rc);
4954
4955         if (pi->flags & PORT_INIT_DONE)
4956                 rc = EBUSY; /* cannot be changed once the queues are created */
4957         else
4958                 pi->qsize_rxq = qsize;
4959
4960         end_synchronized_op(sc, LOCK_HELD);
4961         return (rc);
4962 }
4963
4964 static int
4965 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4966 {
4967         struct port_info *pi = arg1;
4968         struct adapter *sc = pi->adapter;
4969         int qsize, rc;
4970
4971         qsize = pi->qsize_txq;
4972
4973         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4974         if (rc != 0 || req->newptr == NULL)
4975                 return (rc);
4976
4977         /* bufring size must be powerof2 */
4978         if (qsize < 128 || !powerof2(qsize))
4979                 return (EINVAL);
4980
4981         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4982             "t4txqs");
4983         if (rc)
4984                 return (rc);
4985
4986         if (pi->flags & PORT_INIT_DONE)
4987                 rc = EBUSY; /* cannot be changed once the queues are created */
4988         else
4989                 pi->qsize_txq = qsize;
4990
4991         end_synchronized_op(sc, LOCK_HELD);
4992         return (rc);
4993 }
4994
4995 static int
4996 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4997 {
4998         struct adapter *sc = arg1;
4999         int reg = arg2;
5000         uint64_t val;
5001
5002         val = t4_read_reg64(sc, reg);
5003
5004         return (sysctl_handle_64(oidp, &val, 0, req));
5005 }
5006
5007 static int
5008 sysctl_temperature(SYSCTL_HANDLER_ARGS)
5009 {
5010         struct adapter *sc = arg1;
5011         int rc, t;
5012         uint32_t param, val;
5013
5014         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
5015         if (rc)
5016                 return (rc);
5017         param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5018             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5019             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5020         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5021         end_synchronized_op(sc, 0);
5022         if (rc)
5023                 return (rc);
5024
5025         /* unknown is returned as 0 but we display -1 in that case */
5026         t = val == 0 ? -1 : val;
5027
5028         rc = sysctl_handle_int(oidp, &t, 0, req);
5029         return (rc);
5030 }
5031
5032 #ifdef SBUF_DRAIN
/*
 * Sysctl handler that dumps the TP congestion control table.  For each of
 * the NCCTRL_WIN windows it prints the 16 per-MTU increment values
 * followed by the window's a_wnd parameter and its b_wnd parameter
 * (the latter rendered via the dec_fac lookup table).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* b_wnd values index this table of decrementing factors. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		/* First 8 MTUs on one line, remaining 8 plus the window
		 * parameters on the next. */
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5070
/*
 * Names of the CIM queues, indexed the same way as sysctl_cim_ibq_obq's
 * arg2: the inbound queues first, then the outbound queues, then the two
 * extra outbound queues that exist from T5 onwards.
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
5076
5077 static int
5078 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5079 {
5080         struct adapter *sc = arg1;
5081         struct sbuf *sb;
5082         int rc, i, n, qid = arg2;
5083         uint32_t *buf, *p;
5084         char *qtype;
5085         u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5086
5087         KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5088             ("%s: bad qid %d\n", __func__, qid));
5089
5090         if (qid < CIM_NUM_IBQ) {
5091                 /* inbound queue */
5092                 qtype = "IBQ";
5093                 n = 4 * CIM_IBQ_SIZE;
5094                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5095                 rc = t4_read_cim_ibq(sc, qid, buf, n);
5096         } else {
5097                 /* outbound queue */
5098                 qtype = "OBQ";
5099                 qid -= CIM_NUM_IBQ;
5100                 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5101                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5102                 rc = t4_read_cim_obq(sc, qid, buf, n);
5103         }
5104
5105         if (rc < 0) {
5106                 rc = -rc;
5107                 goto done;
5108         }
5109         n = rc * sizeof(uint32_t);      /* rc has # of words actually read */
5110
5111         rc = sysctl_wire_old_buffer(req, 0);
5112         if (rc != 0)
5113                 goto done;
5114
5115         sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5116         if (sb == NULL) {
5117                 rc = ENOMEM;
5118                 goto done;
5119         }
5120
5121         sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
5122         for (i = 0, p = buf; i < n; i += 16, p += 4)
5123                 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5124                     p[2], p[3]);
5125
5126         rc = sbuf_finish(sb);
5127         sbuf_delete(sb);
5128 done:
5129         free(buf, M_CXGBE);
5130         return (rc);
5131 }
5132
5133 static int
5134 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5135 {
5136         struct adapter *sc = arg1;
5137         u_int cfg;
5138         struct sbuf *sb;
5139         uint32_t *buf, *p;
5140         int rc;
5141
5142         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5143         if (rc != 0)
5144                 return (rc);
5145
5146         rc = sysctl_wire_old_buffer(req, 0);
5147         if (rc != 0)
5148                 return (rc);
5149
5150         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5151         if (sb == NULL)
5152                 return (ENOMEM);
5153
5154         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5155             M_ZERO | M_WAITOK);
5156
5157         rc = -t4_cim_read_la(sc, buf, NULL);
5158         if (rc != 0)
5159                 goto done;
5160
5161         sbuf_printf(sb, "Status   Data      PC%s",
5162             cfg & F_UPDBGLACAPTPCONLY ? "" :
5163             "     LS0Stat  LS0Addr             LS0Data");
5164
5165         KASSERT((sc->params.cim_la_size & 7) == 0,
5166             ("%s: p will walk off the end of buf", __func__));
5167
5168         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5169                 if (cfg & F_UPDBGLACAPTPCONLY) {
5170                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5171                             p[6], p[7]);
5172                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5173                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5174                             p[4] & 0xff, p[5] >> 8);
5175                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5176                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5177                             p[1] & 0xf, p[2] >> 4);
5178                 } else {
5179                         sbuf_printf(sb,
5180                             "\n  %02x   %x%07x %x%07x %08x %08x "
5181                             "%08x%08x%08x%08x",
5182                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5183                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5184                             p[6], p[7]);
5185                 }
5186         }
5187
5188         rc = sbuf_finish(sb);
5189         sbuf_delete(sb);
5190 done:
5191         free(buf, M_CXGBE);
5192         return (rc);
5193 }
5194
/*
 * Sysctl handler that dumps the CIM MA logic analyzer.  The buffer holds
 * two captures of CIM_MALA_SIZE entries each, 5 words per entry; the
 * first group is printed raw and the second is decoded field by field.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Two groups of CIM_MALA_SIZE entries, 5 words per entry. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Second group, decoded.  p continues into the second half of buf. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5238
5239 static int
5240 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5241 {
5242         struct adapter *sc = arg1;
5243         u_int i;
5244         struct sbuf *sb;
5245         uint32_t *buf, *p;
5246         int rc;
5247
5248         rc = sysctl_wire_old_buffer(req, 0);
5249         if (rc != 0)
5250                 return (rc);
5251
5252         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5253         if (sb == NULL)
5254                 return (ENOMEM);
5255
5256         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5257             M_ZERO | M_WAITOK);
5258
5259         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5260         p = buf;
5261
5262         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5263         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5264                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5265                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5266                     p[4], p[3], p[2], p[1], p[0]);
5267         }
5268
5269         sbuf_printf(sb, "\n\nCntl ID               Data");
5270         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5271                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5272                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5273         }
5274
5275         rc = sbuf_finish(sb);
5276         sbuf_delete(sb);
5277         free(buf, M_CXGBE);
5278         return (rc);
5279 }
5280
/*
 * Sysctl handler that shows the configuration of all CIM queues: base,
 * size, threshold (inbound only), read/write pointers and SOP/EOP counts.
 * The register addresses and the number of outbound queues differ between
 * T4 and later chips.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	/* Pick the chip-specific register addresses and OBQ count. */
	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* 4 status words per queue, then 2 write pointers per OBQ. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* Inbound queues first, then the outbound queues (i continues). */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5339
5340 static int
5341 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5342 {
5343         struct adapter *sc = arg1;
5344         struct sbuf *sb;
5345         int rc;
5346         struct tp_cpl_stats stats;
5347
5348         rc = sysctl_wire_old_buffer(req, 0);
5349         if (rc != 0)
5350                 return (rc);
5351
5352         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5353         if (sb == NULL)
5354                 return (ENOMEM);
5355
5356         t4_tp_get_cpl_stats(sc, &stats);
5357
5358         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5359             "channel 3\n");
5360         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5361                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5362         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5363                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5364
5365         rc = sbuf_finish(sb);
5366         sbuf_delete(sb);
5367
5368         return (rc);
5369 }
5370
5371 static int
5372 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5373 {
5374         struct adapter *sc = arg1;
5375         struct sbuf *sb;
5376         int rc;
5377         struct tp_usm_stats stats;
5378
5379         rc = sysctl_wire_old_buffer(req, 0);
5380         if (rc != 0)
5381                 return(rc);
5382
5383         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5384         if (sb == NULL)
5385                 return (ENOMEM);
5386
5387         t4_get_usm_stats(sc, &stats);
5388
5389         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5390         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5391         sbuf_printf(sb, "Drops:  %u", stats.drops);
5392
5393         rc = sbuf_finish(sb);
5394         sbuf_delete(sb);
5395
5396         return (rc);
5397 }
5398
/*
 * Human readable names for the firmware device log severity levels,
 * indexed by the FW_DEVLOG_LEVEL_* value found in each log entry.
 */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5407
/*
 * Human readable names for the firmware device log facilities, indexed by
 * the FW_DEVLOG_FACILITY_* value found in each log entry.
 */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5433
/*
 * Sysctl handler that renders the firmware's device log.  The log is a
 * circular array of fw_devlog_e entries in adapter memory (location/size
 * in sc->params.devlog); the entry with the smallest timestamp is found
 * first and the log is printed from there, wrapping around to 'first'.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0, m;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	/*
	 * No devlog parameters from the firmware; fall back to hard-coded
	 * defaults.  NOTE(review): assumes the log is at this fixed EDC0
	 * location for such firmwares -- confirm against the fw in use.
	 */
	if (dparams->start == 0) {
		dparams->memtype = FW_MEMTYPE_EDC0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	m = fwmtype_to_hwmtype(dparams->memtype);
	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * Byte-swap every used entry (the log is big-endian) and remember
	 * the index of the entry with the earliest timestamp -- that is
	 * the logical start of the circular log.
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the circular log starting at the oldest entry. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* The format string itself comes from the firmware entry. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5519
5520 static int
5521 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5522 {
5523         struct adapter *sc = arg1;
5524         struct sbuf *sb;
5525         int rc;
5526         struct tp_fcoe_stats stats[4];
5527
5528         rc = sysctl_wire_old_buffer(req, 0);
5529         if (rc != 0)
5530                 return (rc);
5531
5532         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5533         if (sb == NULL)
5534                 return (ENOMEM);
5535
5536         t4_get_fcoe_stats(sc, 0, &stats[0]);
5537         t4_get_fcoe_stats(sc, 1, &stats[1]);
5538         t4_get_fcoe_stats(sc, 2, &stats[2]);
5539         t4_get_fcoe_stats(sc, 3, &stats[3]);
5540
5541         sbuf_printf(sb, "                   channel 0        channel 1        "
5542             "channel 2        channel 3\n");
5543         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5544             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5545             stats[3].octetsDDP);
5546         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5547             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5548         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5549             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5550             stats[3].framesDrop);
5551
5552         rc = sbuf_finish(sb);
5553         sbuf_delete(sb);
5554
5555         return (rc);
5556 }
5557
/*
 * Sysctl handler that shows the state of the NTX_SCHED hardware tx
 * schedulers: mode (flow/class), bound channel, rate, and the class and
 * flow inter-packet gaps.  Unconfigured fields print as "disabled".
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	/* Two map bits per scheduler; shift them out as we go. */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		/* Flow IPG comes from the pace table, not t4_get_tx_sched. */
		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5607
/*
 * Sysctl handler that reports the loopback port statistics, two loopback
 * ports per column pair.  The counters are walked as a flat array of
 * uint64_t starting at lb_port_stats.octets; this assumes the members of
 * struct lb_port_stats are contiguous uint64_t's laid out in stat_name
 * order (NOTE: keep stat_name in sync with that struct).
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	/* Row labels, one per uint64_t counter in struct lb_port_stats. */
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Four loopback ports, processed two at a time. */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5655
5656 static int
5657 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5658 {
5659         int rc = 0;
5660         struct port_info *pi = arg1;
5661         struct sbuf *sb;
5662         static const char *linkdnreasons[] = {
5663                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5664                 "PHY overheated", "unknown", "rx los", "reserved7"
5665         };
5666
5667         rc = sysctl_wire_old_buffer(req, 0);
5668         if (rc != 0)
5669                 return(rc);
5670         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5671         if (sb == NULL)
5672                 return (ENOMEM);
5673
5674         if (pi->linkdnrc < 0)
5675                 sbuf_printf(sb, "n/a");
5676         else if (pi->linkdnrc < nitems(linkdnreasons))
5677                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5678         else
5679                 sbuf_printf(sb, "%d", pi->linkdnrc);
5680
5681         rc = sbuf_finish(sb);
5682         sbuf_delete(sb);
5683
5684         return (rc);
5685 }
5686
/*
 * Describes one memory region: the inclusive [base, limit] address range
 * plus an index into a name table (callers use an out-of-range idx to
 * mark entries that should be hidden).
 */
struct mem_desc {
        unsigned int base;
        unsigned int limit;
        unsigned int idx;
};

/*
 * qsort(3) comparator: orders mem_descs by ascending base address.
 *
 * Bug fix: the previous implementation returned a->base - b->base.  With
 * unsigned bases the difference is computed modulo 2^32 and then converted
 * to int, so two bases more than INT_MAX apart could compare with the
 * wrong sign.  Compare explicitly instead.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
        unsigned int abase = ((const struct mem_desc *)a)->base;
        unsigned int bbase = ((const struct mem_desc *)b)->base;

        if (abase < bbase)
                return (-1);
        if (abase > bbase)
                return (1);
        return (0);
}
5699
/*
 * Append one named memory region, shown as an inclusive hex range with its
 * size in bytes, to the sbuf.  Empty regions (to == from - 1) are skipped.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        unsigned int len = to - from + 1;

        if (len == 0)
                return;

        /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
        sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, len);
}
5713
5714 static int
5715 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5716 {
5717         struct adapter *sc = arg1;
5718         struct sbuf *sb;
5719         int rc, i, n;
5720         uint32_t lo, hi, used, alloc;
5721         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5722         static const char *region[] = {
5723                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5724                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5725                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5726                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5727                 "RQUDP region:", "PBL region:", "TXPBL region:",
5728                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5729                 "On-chip queues:"
5730         };
5731         struct mem_desc avail[4];
5732         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5733         struct mem_desc *md = mem;
5734
5735         rc = sysctl_wire_old_buffer(req, 0);
5736         if (rc != 0)
5737                 return (rc);
5738
5739         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5740         if (sb == NULL)
5741                 return (ENOMEM);
5742
5743         for (i = 0; i < nitems(mem); i++) {
5744                 mem[i].limit = 0;
5745                 mem[i].idx = i;
5746         }
5747
5748         /* Find and sort the populated memory ranges */
5749         i = 0;
5750         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5751         if (lo & F_EDRAM0_ENABLE) {
5752                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5753                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5754                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5755                 avail[i].idx = 0;
5756                 i++;
5757         }
5758         if (lo & F_EDRAM1_ENABLE) {
5759                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5760                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5761                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5762                 avail[i].idx = 1;
5763                 i++;
5764         }
5765         if (lo & F_EXT_MEM_ENABLE) {
5766                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5767                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5768                 avail[i].limit = avail[i].base +
5769                     (G_EXT_MEM_SIZE(hi) << 20);
5770                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5771                 i++;
5772         }
5773         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5774                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5775                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5776                 avail[i].limit = avail[i].base +
5777                     (G_EXT_MEM1_SIZE(hi) << 20);
5778                 avail[i].idx = 4;
5779                 i++;
5780         }
5781         if (!i)                                    /* no memory available */
5782                 return 0;
5783         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5784
5785         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5786         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5787         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5788         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5789         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5790         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5791         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5792         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5793         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5794
5795         /* the next few have explicit upper bounds */
5796         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5797         md->limit = md->base - 1 +
5798                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5799                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5800         md++;
5801
5802         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5803         md->limit = md->base - 1 +
5804                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5805                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5806         md++;
5807
5808         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5809                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5810                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5811                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5812         } else {
5813                 md->base = 0;
5814                 md->idx = nitems(region);  /* hide it */
5815         }
5816         md++;
5817
5818 #define ulp_region(reg) \
5819         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5820         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5821
5822         ulp_region(RX_ISCSI);
5823         ulp_region(RX_TDDP);
5824         ulp_region(TX_TPT);
5825         ulp_region(RX_STAG);
5826         ulp_region(RX_RQ);
5827         ulp_region(RX_RQUDP);
5828         ulp_region(RX_PBL);
5829         ulp_region(TX_PBL);
5830 #undef ulp_region
5831
5832         md->base = 0;
5833         md->idx = nitems(region);
5834         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5835                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5836                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5837                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5838         }
5839         md++;
5840
5841         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5842         md->limit = md->base + sc->tids.ntids - 1;
5843         md++;
5844         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5845         md->limit = md->base + sc->tids.ntids - 1;
5846         md++;
5847
5848         md->base = sc->vres.ocq.start;
5849         if (sc->vres.ocq.size)
5850                 md->limit = md->base + sc->vres.ocq.size - 1;
5851         else
5852                 md->idx = nitems(region);  /* hide it */
5853         md++;
5854
5855         /* add any address-space holes, there can be up to 3 */
5856         for (n = 0; n < i - 1; n++)
5857                 if (avail[n].limit < avail[n + 1].base)
5858                         (md++)->base = avail[n].limit;
5859         if (avail[n].limit)
5860                 (md++)->base = avail[n].limit;
5861
5862         n = md - mem;
5863         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5864
5865         for (lo = 0; lo < i; lo++)
5866                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5867                                 avail[lo].limit - 1);
5868
5869         sbuf_printf(sb, "\n");
5870         for (i = 0; i < n; i++) {
5871                 if (mem[i].idx >= nitems(region))
5872                         continue;                        /* skip holes */
5873                 if (!mem[i].limit)
5874                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5875                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5876                                 mem[i].limit);
5877         }
5878
5879         sbuf_printf(sb, "\n");
5880         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5881         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5882         mem_region_show(sb, "uP RAM:", lo, hi);
5883
5884         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5885         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5886         mem_region_show(sb, "uP Extmem2:", lo, hi);
5887
5888         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5889         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5890                    G_PMRXMAXPAGE(lo),
5891                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5892                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5893
5894         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5895         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5896         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5897                    G_PMTXMAXPAGE(lo),
5898                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5899                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5900         sbuf_printf(sb, "%u p-structs\n",
5901                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5902
5903         for (i = 0; i < 4; i++) {
5904                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5905                 if (is_t4(sc)) {
5906                         used = G_USED(lo);
5907                         alloc = G_ALLOC(lo);
5908                 } else {
5909                         used = G_T5_USED(lo);
5910                         alloc = G_T5_ALLOC(lo);
5911                 }
5912                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5913                            i, used, alloc);
5914         }
5915         for (i = 0; i < 4; i++) {
5916                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5917                 if (is_t4(sc)) {
5918                         used = G_USED(lo);
5919                         alloc = G_ALLOC(lo);
5920                 } else {
5921                         used = G_T5_USED(lo);
5922                         alloc = G_T5_ALLOC(lo);
5923                 }
5924                 sbuf_printf(sb,
5925                            "\nLoopback %d using %u pages out of %u allocated",
5926                            i, used, alloc);
5927         }
5928
5929         rc = sbuf_finish(sb);
5930         sbuf_delete(sb);
5931
5932         return (rc);
5933 }
5934
5935 static inline void
5936 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5937 {
5938         *mask = x | y;
5939         y = htobe64(y);
5940         memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5941 }
5942
/*
 * Sysctl handler: dump the MPS classification TCAM -- one line per valid
 * entry with its Ethernet address/mask, SRAM fields, and (for replicated
 * entries) the replication map fetched from the firmware.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* The TCAM is larger on T5 than on T4. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* Overlapping x/y bits mark an entry that is not in use. */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /*
                         * The replication map lives in MPS and is read via
                         * a firmware LDST command issued over the mailbox.
                         */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        /* Mailbox access requires a synchronized op. */
                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        /*
                         * A mailbox failure is reported in the replication
                         * column and then cleared so the dump continues.
                         */
                        if (rc != 0) {
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /*
         * rc != 0 here only if begin_synchronized_op failed; still finish
         * the sbuf (discarding its status) so partial output is delivered.
         */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6032
6033 static int
6034 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6035 {
6036         struct adapter *sc = arg1;
6037         struct sbuf *sb;
6038         int rc;
6039         uint16_t mtus[NMTUS];
6040
6041         rc = sysctl_wire_old_buffer(req, 0);
6042         if (rc != 0)
6043                 return (rc);
6044
6045         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6046         if (sb == NULL)
6047                 return (ENOMEM);
6048
6049         t4_read_mtu_tbl(sc, mtus, NULL);
6050
6051         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6052             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6053             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6054             mtus[14], mtus[15]);
6055
6056         rc = sbuf_finish(sb);
6057         sbuf_delete(sb);
6058
6059         return (rc);
6060 }
6061
6062 static int
6063 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6064 {
6065         struct adapter *sc = arg1;
6066         struct sbuf *sb;
6067         int rc, i;
6068         uint32_t cnt[PM_NSTATS];
6069         uint64_t cyc[PM_NSTATS];
6070         static const char *rx_stats[] = {
6071                 "Read:", "Write bypass:", "Write mem:", "Flush:"
6072         };
6073         static const char *tx_stats[] = {
6074                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6075         };
6076
6077         rc = sysctl_wire_old_buffer(req, 0);
6078         if (rc != 0)
6079                 return (rc);
6080
6081         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6082         if (sb == NULL)
6083                 return (ENOMEM);
6084
6085         t4_pmtx_get_stats(sc, cnt, cyc);
6086         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6087         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6088                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6089                     cyc[i]);
6090
6091         t4_pmrx_get_stats(sc, cnt, cyc);
6092         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6093         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6094                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6095                     cyc[i]);
6096
6097         rc = sbuf_finish(sb);
6098         sbuf_delete(sb);
6099
6100         return (rc);
6101 }
6102
6103 static int
6104 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6105 {
6106         struct adapter *sc = arg1;
6107         struct sbuf *sb;
6108         int rc;
6109         struct tp_rdma_stats stats;
6110
6111         rc = sysctl_wire_old_buffer(req, 0);
6112         if (rc != 0)
6113                 return (rc);
6114
6115         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6116         if (sb == NULL)
6117                 return (ENOMEM);
6118
6119         t4_tp_get_rdma_stats(sc, &stats);
6120         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6121         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6122
6123         rc = sbuf_finish(sb);
6124         sbuf_delete(sb);
6125
6126         return (rc);
6127 }
6128
6129 static int
6130 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6131 {
6132         struct adapter *sc = arg1;
6133         struct sbuf *sb;
6134         int rc;
6135         struct tp_tcp_stats v4, v6;
6136
6137         rc = sysctl_wire_old_buffer(req, 0);
6138         if (rc != 0)
6139                 return (rc);
6140
6141         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6142         if (sb == NULL)
6143                 return (ENOMEM);
6144
6145         t4_tp_get_tcp_stats(sc, &v4, &v6);
6146         sbuf_printf(sb,
6147             "                                IP                 IPv6\n");
6148         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6149             v4.tcpOutRsts, v6.tcpOutRsts);
6150         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6151             v4.tcpInSegs, v6.tcpInSegs);
6152         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6153             v4.tcpOutSegs, v6.tcpOutSegs);
6154         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6155             v4.tcpRetransSegs, v6.tcpRetransSegs);
6156
6157         rc = sbuf_finish(sb);
6158         sbuf_delete(sb);
6159
6160         return (rc);
6161 }
6162
/*
 * Sysctl handler: summarize the TID (connection identifier) ranges and
 * usage -- ATIDs, TIDs (with the hash split when the LE hash filter is
 * enabled), STIDs, FTIDs, ETIDs, and the hardware's active IPv4/IPv6
 * TID counts.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        if (t->ntids) {
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        /*
                         * Hash filtering is on: the TID space is split
                         * around the hash region.  b is the first TID of
                         * the hash region (register value is in bytes,
                         * 4 bytes per index).
                         */
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                /* Non-hash TIDs below b, hash TIDs above. */
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                /* Entire usable range is in the hash region. */
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                /* tids_in_use is updated lock-free; use an atomic read. */
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        if (t->netids) {
                sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
                    t->etid_base + t->netids - 1);
        }

        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6227
6228 static int
6229 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6230 {
6231         struct adapter *sc = arg1;
6232         struct sbuf *sb;
6233         int rc;
6234         struct tp_err_stats stats;
6235
6236         rc = sysctl_wire_old_buffer(req, 0);
6237         if (rc != 0)
6238                 return (rc);
6239
6240         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6241         if (sb == NULL)
6242                 return (ENOMEM);
6243
6244         t4_tp_get_err_stats(sc, &stats);
6245
6246         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6247                       "channel 3\n");
6248         sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6249             stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6250             stats.macInErrs[3]);
6251         sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6252             stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6253             stats.hdrInErrs[3]);
6254         sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6255             stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6256             stats.tcpInErrs[3]);
6257         sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6258             stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6259             stats.tcp6InErrs[3]);
6260         sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6261             stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6262             stats.tnlCongDrops[3]);
6263         sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6264             stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6265             stats.tnlTxDrops[3]);
6266         sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6267             stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6268             stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6269         sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6270             stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6271             stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6272         sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6273             stats.ofldNoNeigh, stats.ofldCongDefer);
6274
6275         rc = sbuf_finish(sb);
6276         sbuf_delete(sb);
6277
6278         return (rc);
6279 }
6280
/*
 * Describes one named bit-field within a 64-bit logic-analyzer word:
 * 'width' bits starting at bit position 'start'.  Tables of these are
 * terminated by an entry with a NULL name (see field_desc_show()).
 */
struct field_desc {
        const char *name;
        u_int start;
        u_int width;
};
6286
6287 static void
6288 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6289 {
6290         char buf[32];
6291         int line_size = 0;
6292
6293         while (f->name) {
6294                 uint64_t mask = (1ULL << f->width) - 1;
6295                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6296                     ((uintmax_t)v >> f->start) & mask);
6297
6298                 if (line_size + len >= 79) {
6299                         line_size = 8;
6300                         sbuf_printf(sb, "\n        ");
6301                 }
6302                 sbuf_printf(sb, "%s ", buf);
6303                 line_size += len + 1;
6304                 f++;
6305         }
6306         sbuf_printf(sb, "\n");
6307 }
6308
6309 static struct field_desc tp_la0[] = {
6310         { "RcfOpCodeOut", 60, 4 },
6311         { "State", 56, 4 },
6312         { "WcfState", 52, 4 },
6313         { "RcfOpcSrcOut", 50, 2 },
6314         { "CRxError", 49, 1 },
6315         { "ERxError", 48, 1 },
6316         { "SanityFailed", 47, 1 },
6317         { "SpuriousMsg", 46, 1 },
6318         { "FlushInputMsg", 45, 1 },
6319         { "FlushInputCpl", 44, 1 },
6320         { "RssUpBit", 43, 1 },
6321         { "RssFilterHit", 42, 1 },
6322         { "Tid", 32, 10 },
6323         { "InitTcb", 31, 1 },
6324         { "LineNumber", 24, 7 },
6325         { "Emsg", 23, 1 },
6326         { "EdataOut", 22, 1 },
6327         { "Cmsg", 21, 1 },
6328         { "CdataOut", 20, 1 },
6329         { "EreadPdu", 19, 1 },
6330         { "CreadPdu", 18, 1 },
6331         { "TunnelPkt", 17, 1 },
6332         { "RcfPeerFin", 16, 1 },
6333         { "RcfReasonOut", 12, 4 },
6334         { "TxCchannel", 10, 2 },
6335         { "RcfTxChannel", 8, 2 },
6336         { "RxEchannel", 6, 2 },
6337         { "RcfRxChannel", 5, 1 },
6338         { "RcfDataOutSrdy", 4, 1 },
6339         { "RxDvld", 3, 1 },
6340         { "RxOoDvld", 2, 1 },
6341         { "RxCongestion", 1, 1 },
6342         { "TxCongestion", 0, 1 },
6343         { NULL }
6344 };
6345
6346 static struct field_desc tp_la1[] = {
6347         { "CplCmdIn", 56, 8 },
6348         { "CplCmdOut", 48, 8 },
6349         { "ESynOut", 47, 1 },
6350         { "EAckOut", 46, 1 },
6351         { "EFinOut", 45, 1 },
6352         { "ERstOut", 44, 1 },
6353         { "SynIn", 43, 1 },
6354         { "AckIn", 42, 1 },
6355         { "FinIn", 41, 1 },
6356         { "RstIn", 40, 1 },
6357         { "DataIn", 39, 1 },
6358         { "DataInVld", 38, 1 },
6359         { "PadIn", 37, 1 },
6360         { "RxBufEmpty", 36, 1 },
6361         { "RxDdp", 35, 1 },
6362         { "RxFbCongestion", 34, 1 },
6363         { "TxFbCongestion", 33, 1 },
6364         { "TxPktSumSrdy", 32, 1 },
6365         { "RcfUlpType", 28, 4 },
6366         { "Eread", 27, 1 },
6367         { "Ebypass", 26, 1 },
6368         { "Esave", 25, 1 },
6369         { "Static0", 24, 1 },
6370         { "Cread", 23, 1 },
6371         { "Cbypass", 22, 1 },
6372         { "Csave", 21, 1 },
6373         { "CPktOut", 20, 1 },
6374         { "RxPagePoolFull", 18, 2 },
6375         { "RxLpbkPkt", 17, 1 },
6376         { "TxLpbkPkt", 16, 1 },
6377         { "RxVfValid", 15, 1 },
6378         { "SynLearned", 14, 1 },
6379         { "SetDelEntry", 13, 1 },
6380         { "SetInvEntry", 12, 1 },
6381         { "CpcmdDvld", 11, 1 },
6382         { "CpcmdSave", 10, 1 },
6383         { "RxPstructsFull", 8, 2 },
6384         { "EpcmdDvld", 7, 1 },
6385         { "EpcmdFlush", 6, 1 },
6386         { "EpcmdTrimPrefix", 5, 1 },
6387         { "EpcmdTrimPostfix", 4, 1 },
6388         { "ERssIp4Pkt", 3, 1 },
6389         { "ERssIp6Pkt", 2, 1 },
6390         { "ERssTcpUdpPkt", 1, 1 },
6391         { "ERssFceFipPkt", 0, 1 },
6392         { NULL }
6393 };
6394
/*
 * TP logic analyzer capture fields, alternate layout for the second
 * 64-bit word of a two-word entry.  Selected by tp_la_show3 when bit 17
 * of the entry's first word is set (tp_la1 is used otherwise).  Each
 * element is { name, low bit offset, width }; the list ends with a NULL
 * name.
 */
static struct field_desc tp_la2[] = {
        { "CplCmdIn", 56, 8 },
        { "MpsVfVld", 55, 1 },
        { "MpsPf", 52, 3 },
        { "MpsVf", 44, 8 },
        { "SynIn", 43, 1 },
        { "AckIn", 42, 1 },
        { "FinIn", 41, 1 },
        { "RstIn", 40, 1 },
        { "DataIn", 39, 1 },
        { "DataInVld", 38, 1 },
        { "PadIn", 37, 1 },
        { "RxBufEmpty", 36, 1 },
        { "RxDdp", 35, 1 },
        { "RxFbCongestion", 34, 1 },
        { "TxFbCongestion", 33, 1 },
        { "TxPktSumSrdy", 32, 1 },
        { "RcfUlpType", 28, 4 },
        { "Eread", 27, 1 },
        { "Ebypass", 26, 1 },
        { "Esave", 25, 1 },
        { "Static0", 24, 1 },
        { "Cread", 23, 1 },
        { "Cbypass", 22, 1 },
        { "Csave", 21, 1 },
        { "CPktOut", 20, 1 },
        { "RxPagePoolFull", 18, 2 },
        { "RxLpbkPkt", 17, 1 },
        { "TxLpbkPkt", 16, 1 },
        { "RxVfValid", 15, 1 },
        { "SynLearned", 14, 1 },
        { "SetDelEntry", 13, 1 },
        { "SetInvEntry", 12, 1 },
        { "CpcmdDvld", 11, 1 },
        { "CpcmdSave", 10, 1 },
        { "RxPstructsFull", 8, 2 },
        { "EpcmdDvld", 7, 1 },
        { "EpcmdFlush", 6, 1 },
        { "EpcmdTrimPrefix", 5, 1 },
        { "EpcmdTrimPostfix", 4, 1 },
        { "ERssIp4Pkt", 3, 1 },
        { "ERssIp6Pkt", 2, 1 },
        { "ERssTcpUdpPkt", 1, 1 },
        { "ERssFceFipPkt", 0, 1 },
        { NULL }
};
6441
6442 static void
6443 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6444 {
6445
6446         field_desc_show(sb, *p, tp_la0);
6447 }
6448
6449 static void
6450 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6451 {
6452
6453         if (idx)
6454                 sbuf_printf(sb, "\n");
6455         field_desc_show(sb, p[0], tp_la0);
6456         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6457                 field_desc_show(sb, p[1], tp_la0);
6458 }
6459
6460 static void
6461 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6462 {
6463
6464         if (idx)
6465                 sbuf_printf(sb, "\n");
6466         field_desc_show(sb, p[0], tp_la0);
6467         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6468                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6469 }
6470
/*
 * Sysctl handler that dumps the TP logic analyzer capture.  The LA mode
 * (read from TP_DBG_LA_CONFIG) determines how many 64-bit words make up
 * one entry and which routine decodes it.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint64_t *buf, *p;
	int rc;
	u_int i, inc;
	void (*show_func)(struct sbuf *, uint64_t *, int);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* M_WAITOK: this allocation cannot fail. */
	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

	t4_tp_read_la(sc, buf, NULL);
	p = buf;

	/* Modes 2 and 3 capture two words per entry; all others one. */
	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
	case 2:
		inc = 2;
		show_func = tp_la_show2;
		break;
	case 3:
		inc = 2;
		show_func = tp_la_show3;
		break;
	default:
		inc = 1;
		show_func = tp_la_show;
	}

	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
		(*show_func)(sb, p, i);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6516
6517 static int
6518 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6519 {
6520         struct adapter *sc = arg1;
6521         struct sbuf *sb;
6522         int rc;
6523         u64 nrate[NCHAN], orate[NCHAN];
6524
6525         rc = sysctl_wire_old_buffer(req, 0);
6526         if (rc != 0)
6527                 return (rc);
6528
6529         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6530         if (sb == NULL)
6531                 return (ENOMEM);
6532
6533         t4_get_chan_txrate(sc, nrate, orate);
6534         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6535                  "channel 3\n");
6536         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6537             nrate[0], nrate[1], nrate[2], nrate[3]);
6538         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6539             orate[0], orate[1], orate[2], orate[3]);
6540
6541         rc = sbuf_finish(sb);
6542         sbuf_delete(sb);
6543
6544         return (rc);
6545 }
6546
/*
 * Sysctl handler that dumps the ULP RX logic analyzer.  Each capture
 * entry is eight 32-bit words: a 64-bit pcmd, a type, a message word,
 * and 128 bits of data, printed most-significant word first.
 */
static int
sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc, i;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* M_WAITOK: this allocation cannot fail. */
	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_ulprx_read_la(sc, buf);
	p = buf;

	sbuf_printf(sb, "      Pcmd        Type   Message"
	    "                Data");
	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6581
6582 static int
6583 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6584 {
6585         struct adapter *sc = arg1;
6586         struct sbuf *sb;
6587         int rc, v;
6588
6589         rc = sysctl_wire_old_buffer(req, 0);
6590         if (rc != 0)
6591                 return (rc);
6592
6593         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6594         if (sb == NULL)
6595                 return (ENOMEM);
6596
6597         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6598         if (G_STATSOURCE_T5(v) == 7) {
6599                 if (G_STATMODE(v) == 0) {
6600                         sbuf_printf(sb, "total %d, incomplete %d",
6601                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6602                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6603                 } else if (G_STATMODE(v) == 1) {
6604                         sbuf_printf(sb, "total %d, data overflow %d",
6605                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6606                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6607                 }
6608         }
6609         rc = sbuf_finish(sb);
6610         sbuf_delete(sb);
6611
6612         return (rc);
6613 }
6614 #endif
6615
6616 static inline void
6617 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6618 {
6619         struct buf_ring *br;
6620         struct mbuf *m;
6621
6622         TXQ_LOCK_ASSERT_OWNED(txq);
6623
6624         br = txq->br;
6625         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6626         if (m)
6627                 t4_eth_tx(ifp, txq, m);
6628 }
6629
6630 void
6631 t4_tx_callout(void *arg)
6632 {
6633         struct sge_eq *eq = arg;
6634         struct adapter *sc;
6635
6636         if (EQ_TRYLOCK(eq) == 0)
6637                 goto reschedule;
6638
6639         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6640                 EQ_UNLOCK(eq);
6641 reschedule:
6642                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6643                         callout_schedule(&eq->tx_callout, 1);
6644                 return;
6645         }
6646
6647         EQ_LOCK_ASSERT_OWNED(eq);
6648
6649         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6650
6651                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6652                         struct sge_txq *txq = arg;
6653                         struct port_info *pi = txq->ifp->if_softc;
6654
6655                         sc = pi->adapter;
6656                 } else {
6657                         struct sge_wrq *wrq = arg;
6658
6659                         sc = wrq->adapter;
6660                 }
6661
6662                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6663         }
6664
6665         EQ_UNLOCK(eq);
6666 }
6667
6668 void
6669 t4_tx_task(void *arg, int count)
6670 {
6671         struct sge_eq *eq = arg;
6672
6673         EQ_LOCK(eq);
6674         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6675                 struct sge_txq *txq = arg;
6676                 txq_start(txq->ifp, txq);
6677         } else {
6678                 struct sge_wrq *wrq = arg;
6679                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6680         }
6681         EQ_UNLOCK(eq);
6682 }
6683
6684 static uint32_t
6685 fconf_to_mode(uint32_t fconf)
6686 {
6687         uint32_t mode;
6688
6689         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6690             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6691
6692         if (fconf & F_FRAGMENTATION)
6693                 mode |= T4_FILTER_IP_FRAGMENT;
6694
6695         if (fconf & F_MPSHITTYPE)
6696                 mode |= T4_FILTER_MPS_HIT_TYPE;
6697
6698         if (fconf & F_MACMATCH)
6699                 mode |= T4_FILTER_MAC_IDX;
6700
6701         if (fconf & F_ETHERTYPE)
6702                 mode |= T4_FILTER_ETH_TYPE;
6703
6704         if (fconf & F_PROTOCOL)
6705                 mode |= T4_FILTER_IP_PROTO;
6706
6707         if (fconf & F_TOS)
6708                 mode |= T4_FILTER_IP_TOS;
6709
6710         if (fconf & F_VLAN)
6711                 mode |= T4_FILTER_VLAN;
6712
6713         if (fconf & F_VNIC_ID)
6714                 mode |= T4_FILTER_VNIC;
6715
6716         if (fconf & F_PORT)
6717                 mode |= T4_FILTER_PORT;
6718
6719         if (fconf & F_FCOE)
6720                 mode |= T4_FILTER_FCoE;
6721
6722         return (mode);
6723 }
6724
6725 static uint32_t
6726 mode_to_fconf(uint32_t mode)
6727 {
6728         uint32_t fconf = 0;
6729
6730         if (mode & T4_FILTER_IP_FRAGMENT)
6731                 fconf |= F_FRAGMENTATION;
6732
6733         if (mode & T4_FILTER_MPS_HIT_TYPE)
6734                 fconf |= F_MPSHITTYPE;
6735
6736         if (mode & T4_FILTER_MAC_IDX)
6737                 fconf |= F_MACMATCH;
6738
6739         if (mode & T4_FILTER_ETH_TYPE)
6740                 fconf |= F_ETHERTYPE;
6741
6742         if (mode & T4_FILTER_IP_PROTO)
6743                 fconf |= F_PROTOCOL;
6744
6745         if (mode & T4_FILTER_IP_TOS)
6746                 fconf |= F_TOS;
6747
6748         if (mode & T4_FILTER_VLAN)
6749                 fconf |= F_VLAN;
6750
6751         if (mode & T4_FILTER_VNIC)
6752                 fconf |= F_VNIC_ID;
6753
6754         if (mode & T4_FILTER_PORT)
6755                 fconf |= F_PORT;
6756
6757         if (mode & T4_FILTER_FCoE)
6758                 fconf |= F_FCOE;
6759
6760         return (fconf);
6761 }
6762
6763 static uint32_t
6764 fspec_to_fconf(struct t4_filter_specification *fs)
6765 {
6766         uint32_t fconf = 0;
6767
6768         if (fs->val.frag || fs->mask.frag)
6769                 fconf |= F_FRAGMENTATION;
6770
6771         if (fs->val.matchtype || fs->mask.matchtype)
6772                 fconf |= F_MPSHITTYPE;
6773
6774         if (fs->val.macidx || fs->mask.macidx)
6775                 fconf |= F_MACMATCH;
6776
6777         if (fs->val.ethtype || fs->mask.ethtype)
6778                 fconf |= F_ETHERTYPE;
6779
6780         if (fs->val.proto || fs->mask.proto)
6781                 fconf |= F_PROTOCOL;
6782
6783         if (fs->val.tos || fs->mask.tos)
6784                 fconf |= F_TOS;
6785
6786         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6787                 fconf |= F_VLAN;
6788
6789         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6790                 fconf |= F_VNIC_ID;
6791
6792         if (fs->val.iport || fs->mask.iport)
6793                 fconf |= F_PORT;
6794
6795         if (fs->val.fcoe || fs->mask.fcoe)
6796                 fconf |= F_FCOE;
6797
6798         return (fconf);
6799 }
6800
/*
 * Fetch the global filter mode.  Reads TP_VLAN_PRI_MAP from the chip,
 * resyncs the cached copy if it has drifted, and returns the mode in
 * T4_FILTER_* representation via *mode.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* The cached copy is expected to match the hardware; warn if not. */
	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6827
/*
 * Set the global filter mode.  Refused (EBUSY) while any filters are in
 * use or any offload connections exist, since changing the mode would
 * invalidate them.  The actual hardware update is compiled out (notyet),
 * so this currently always ends in ENOTSUP after the checks pass.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6865
/*
 * Read the hit count of filter @fid straight out of its TCB through
 * memory window 0.  T4 keeps a big-endian 64-bit count at TCB offset 16;
 * later chips keep a big-endian 32-bit count at offset 24.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	/* Aim the window at this filter's TCB. */
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
6885
/*
 * Find the first valid filter at or after t->idx and return its details
 * in *t.  t->idx is set to 0xffffffff when no such filter exists.
 * Returns 0 once the synchronized op has been entered successfully.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			/* UINT64_MAX means hit counting wasn't enabled. */
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6924
/*
 * Program a new filter at t->idx from the specification in t->fs.  The
 * request is validated, sent to the hardware as a filter work request,
 * and then this thread sleeps until the firmware reply (t4_filter_rpl)
 * marks the entry valid or failed.
 *
 * Returns 0 on success, EINPROGRESS if interrupted while waiting for the
 * firmware reply, or another errno on validation/setup failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Allocate the filter table (and its lock) on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/* An IPv6 filter occupies 4 consecutive slots; check all of them. */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		/* IPv4 filters use just one slot. */
		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/* Wait for the firmware reply that completes the add. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
7039
/*
 * Delete the filter at t->idx.  Sends a delete work request for a valid
 * entry and then sleeps until the firmware reply (t4_filter_rpl) clears
 * it.  Returns EINPROGRESS if interrupted while waiting.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;  /* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/* Wait for the firmware reply that completes the delete. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				/* Still valid after completion: delete failed. */
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
7107
7108 static void
7109 clear_filter(struct filter_entry *f)
7110 {
7111         if (f->l2t)
7112                 t4_l2t_release(f->l2t);
7113
7114         bzero(f, sizeof (*f));
7115 }
7116
/*
 * Build and transmit the FW_FILTER_WR that programs filter @fidx into
 * the hardware.  If the filter rewrites the destination MAC or VLAN, an
 * L2T switching entry is allocated and programmed first.  The table
 * entry is marked pending; t4_filter_rpl completes it when the firmware
 * replies on the fwq.
 *
 * Returns 0 on success, EAGAIN/ENOMEM if resources couldn't be obtained.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Fill in the work request: header, flags, then match fields. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies come back on the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Completion is asynchronous; see t4_filter_rpl. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7219
/*
 * Send the work request that deletes filter @fidx.  The entry is marked
 * pending; t4_filter_rpl clears it and wakes the waiting thread when the
 * firmware replies.
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Replies come back on the firmware event queue. */
	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	t4_wrq_tx(sc, wr);
	return (0);
}
7242
/*
 * Handler for the firmware's reply to a filter work request.  Completes
 * a pending add (marks the entry valid and records the SMT index taken
 * from the old TCB value) or delete (clears the entry), then wakes up
 * the thread sleeping in set_filter/del_filter.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		/* The cookie carries the firmware's result code. */
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7285
/*
 * Read an SGE context (egress, ingress, freelist manager, or congestion
 * manager) for the ioctl interface.  Tries the firmware mailbox first
 * and falls back to a direct backdoor read if that fails or the firmware
 * isn't usable.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7318
/*
 * Load a firmware image supplied by userspace into the adapter's flash.
 * Only allowed before the adapter is fully initialized.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	/* NOTE(review): M_WAITOK malloc never returns NULL; this check is dead. */
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7349
/*
 * Copy a range of adapter memory out to userspace through PCIe memory
 * window @win, one aperture's worth at a time via a bounce buffer.
 * mr->addr/mr->len must describe a valid range (validate_mem_range).
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	/* Bounce buffer: at most one window aperture per iteration. */
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7390
7391 static int
7392 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7393 {
7394         int rc;
7395
7396         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7397                 return (EINVAL);
7398
7399         if (i2cd->len > 1) {
7400                 /* XXX: need fw support for longer reads in one go */
7401                 return (ENOTSUP);
7402         }
7403
7404         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7405         if (rc)
7406                 return (rc);
7407         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7408             i2cd->offset, &i2cd->data[0]);
7409         end_synchronized_op(sc, 0);
7410
7411         return (rc);
7412 }
7413
/*
 * Range check for ioctl parameters.  A negative value means "parameter not
 * supplied" and is always accepted; otherwise val must lie in [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{

	if (val < 0)
		return (1);
	return (lo <= val && val <= hi);
}
7420
/*
 * CHELSIO_T4_SCHED_CLASS handler: translate a cxgbetool scheduling-class
 * request into firmware nomenclature and issue it via the mailbox.
 * Negative fields in *p mean "parameter not supplied" (see in_range()).
 */
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	/* Firmware queues must be up before scheduling can be configured. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
	 * sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	/* Only packet scheduling is supported. */
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ..*/
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ...*/
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		/* Scheduling level: class rate-limit, class WRR, channel RL. */
		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		/*
		 * NOTE(review): the 'cl' upper bound (15 on T4, 16 on T5)
		 * looks asymmetric -- verify it against the chip's actual
		 * number of scheduling classes.
		 */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		/* maxrate is mandatory for the rate-limiting levels. */
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		/* weight is mandatory for weighted round-robin. */
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		/* pktsize is mandatory for the rate-limiting levels. */
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7569
/*
 * CHELSIO_T4_SCHED_QUEUE handler: bind one TX queue (or, if p->queue < 0,
 * every TX queue of the port) to scheduling class p->cl.  A negative p->cl
 * unbinds (firmware value 0xffffffff).
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	/* Queues only exist after full initialization. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	pi = sc->port[p->port];
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If op.queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7635
7636 int
7637 t4_os_find_pci_capability(struct adapter *sc, int cap)
7638 {
7639         int i;
7640
7641         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7642 }
7643
7644 int
7645 t4_os_pci_save_state(struct adapter *sc)
7646 {
7647         device_t dev;
7648         struct pci_devinfo *dinfo;
7649
7650         dev = sc->dev;
7651         dinfo = device_get_ivars(dev);
7652
7653         pci_cfg_save(dev, dinfo, 0);
7654         return (0);
7655 }
7656
7657 int
7658 t4_os_pci_restore_state(struct adapter *sc)
7659 {
7660         device_t dev;
7661         struct pci_devinfo *dinfo;
7662
7663         dev = sc->dev;
7664         dinfo = device_get_ivars(dev);
7665
7666         pci_cfg_restore(dev, dinfo);
7667         return (0);
7668 }
7669
7670 void
7671 t4_os_portmod_changed(const struct adapter *sc, int idx)
7672 {
7673         struct port_info *pi = sc->port[idx];
7674         static const char *mod_str[] = {
7675                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7676         };
7677
7678         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7679                 if_printf(pi->ifp, "transceiver unplugged.\n");
7680         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7681                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7682         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7683                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7684         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7685                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7686                     mod_str[pi->mod_type]);
7687         } else {
7688                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7689                     pi->mod_type);
7690         }
7691 }
7692
7693 void
7694 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7695 {
7696         struct port_info *pi = sc->port[idx];
7697         struct ifnet *ifp = pi->ifp;
7698
7699         if (link_stat) {
7700                 pi->linkdnrc = -1;
7701                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7702                 if_link_state_change(ifp, LINK_STATE_UP);
7703         } else {
7704                 if (reason >= 0)
7705                         pi->linkdnrc = reason;
7706                 if_link_state_change(ifp, LINK_STATE_DOWN);
7707         }
7708 }
7709
7710 void
7711 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7712 {
7713         struct adapter *sc;
7714
7715         sx_slock(&t4_list_lock);
7716         SLIST_FOREACH(sc, &t4_list, link) {
7717                 /*
7718                  * func should not make any assumptions about what state sc is
7719                  * in - the only guarantee is that sc->sc_lock is a valid lock.
7720                  */
7721                 func(sc, arg);
7722         }
7723         sx_sunlock(&t4_list_lock);
7724 }
7725
/* cdev open: nothing to set up, always succeeds. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
7731
/* cdev close: nothing to tear down, always succeeds. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
7737
/*
 * ioctl entry point for the nexus character device.  Dispatches the
 * CHELSIO_T4_* driver-private commands.  All commands require PRIV_DRIVER.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register offset must be 32b aligned and inside BAR0. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry high-order bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		/* Memory window 2 is reserved for this ioctl's use. */
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);

		/* Software (per-queue) stats, only if queues exist. */
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
7897
7898 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TCP offload (TOE) on a port.  Called with the adapter's
 * synchronized-op held.  Enabling brings the adapter fully up if needed and
 * activates the TOM upper-layer driver the first time around; the port's bit
 * in sc->offload_map tracks whether TOE is on for that port.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* TOE needs the adapter's queues etc. to be up. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port? */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* First port to enable TOE activates the TOM ULD. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Disabling a port that never had TOE on is a no-op. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
7947
7948 /*
7949  * Add an upper layer driver to the global list.
7950  */
7951 int
7952 t4_register_uld(struct uld_info *ui)
7953 {
7954         int rc = 0;
7955         struct uld_info *u;
7956
7957         sx_xlock(&t4_uld_list_lock);
7958         SLIST_FOREACH(u, &t4_uld_list, link) {
7959             if (u->uld_id == ui->uld_id) {
7960                     rc = EEXIST;
7961                     goto done;
7962             }
7963         }
7964
7965         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7966         ui->refcount = 0;
7967 done:
7968         sx_xunlock(&t4_uld_list_lock);
7969         return (rc);
7970 }
7971
7972 int
7973 t4_unregister_uld(struct uld_info *ui)
7974 {
7975         int rc = EINVAL;
7976         struct uld_info *u;
7977
7978         sx_xlock(&t4_uld_list_lock);
7979
7980         SLIST_FOREACH(u, &t4_uld_list, link) {
7981             if (u == ui) {
7982                     if (ui->refcount > 0) {
7983                             rc = EBUSY;
7984                             goto done;
7985                     }
7986
7987                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7988                     rc = 0;
7989                     goto done;
7990             }
7991         }
7992 done:
7993         sx_xunlock(&t4_uld_list_lock);
7994         return (rc);
7995 }
7996
7997 int
7998 t4_activate_uld(struct adapter *sc, int id)
7999 {
8000         int rc = EAGAIN;
8001         struct uld_info *ui;
8002
8003         ASSERT_SYNCHRONIZED_OP(sc);
8004
8005         sx_slock(&t4_uld_list_lock);
8006
8007         SLIST_FOREACH(ui, &t4_uld_list, link) {
8008                 if (ui->uld_id == id) {
8009                         rc = ui->activate(sc);
8010                         if (rc == 0)
8011                                 ui->refcount++;
8012                         goto done;
8013                 }
8014         }
8015 done:
8016         sx_sunlock(&t4_uld_list_lock);
8017
8018         return (rc);
8019 }
8020
8021 int
8022 t4_deactivate_uld(struct adapter *sc, int id)
8023 {
8024         int rc = EINVAL;
8025         struct uld_info *ui;
8026
8027         ASSERT_SYNCHRONIZED_OP(sc);
8028
8029         sx_slock(&t4_uld_list_lock);
8030
8031         SLIST_FOREACH(ui, &t4_uld_list, link) {
8032                 if (ui->uld_id == id) {
8033                         rc = ui->deactivate(sc);
8034                         if (rc == 0)
8035                                 ui->refcount--;
8036                         goto done;
8037                 }
8038         }
8039 done:
8040         sx_sunlock(&t4_uld_list_lock);
8041
8042         return (rc);
8043 }
8044 #endif
8045
8046 /*
8047  * Come up with reasonable defaults for some of the tunables, provided they're
8048  * not set by the user (in which case we'll use the values as is).
8049  */
8050 static void
8051 tweak_tunables(void)
8052 {
8053         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
8054
8055         if (t4_ntxq10g < 1)
8056                 t4_ntxq10g = min(nc, NTXQ_10G);
8057
8058         if (t4_ntxq1g < 1)
8059                 t4_ntxq1g = min(nc, NTXQ_1G);
8060
8061         if (t4_nrxq10g < 1)
8062                 t4_nrxq10g = min(nc, NRXQ_10G);
8063
8064         if (t4_nrxq1g < 1)
8065                 t4_nrxq1g = min(nc, NRXQ_1G);
8066
8067 #ifdef TCP_OFFLOAD
8068         if (t4_nofldtxq10g < 1)
8069                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
8070
8071         if (t4_nofldtxq1g < 1)
8072                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
8073
8074         if (t4_nofldrxq10g < 1)
8075                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
8076
8077         if (t4_nofldrxq1g < 1)
8078                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
8079
8080         if (t4_toecaps_allowed == -1)
8081                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
8082 #else
8083         if (t4_toecaps_allowed == -1)
8084                 t4_toecaps_allowed = 0;
8085 #endif
8086
8087         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
8088                 t4_tmr_idx_10g = TMR_IDX_10G;
8089
8090         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
8091                 t4_pktc_idx_10g = PKTC_IDX_10G;
8092
8093         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
8094                 t4_tmr_idx_1g = TMR_IDX_1G;
8095
8096         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
8097                 t4_pktc_idx_1g = PKTC_IDX_1G;
8098
8099         if (t4_qsize_txq < 128)
8100                 t4_qsize_txq = 128;
8101
8102         if (t4_qsize_rxq < 128)
8103                 t4_qsize_rxq = 128;
8104         while (t4_qsize_rxq & 7)
8105                 t4_qsize_rxq++;
8106
8107         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
8108 }
8109
/*
 * Module load/unload handler shared by t4nex and t5nex.  'loaded' refcounts
 * the two DRIVER_MODULE registrations so the global state is set up exactly
 * once (first load) and torn down exactly once (last unload).
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first load initializes the globals. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		sx_init(&t4_list_lock, "T4/T5 adapters");
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload tears the globals down. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		sx_slock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_uld_list_lock);
			break;
		}
		sx_sunlock(&t4_uld_list_lock);
		sx_destroy(&t4_uld_list_lock);
#endif
		/* Refuse to unload while any adapter is still attached. */
		sx_slock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_list_lock);
			break;
		}
		sx_sunlock(&t4_list_lock);
		sx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
8158
/* devclasses for the nexus drivers (t4nex/t5nex) and port drivers (cxgbe/cxl). */
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus attaches to the PCI bus; mod_event handles module load/unload. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus shares mod_event with T4; its internal refcount makes setup run once. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Port drivers attach to their nexus device, not directly to PCI. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);