]> CyberLeo.Net >> Repos - FreeBSD/stable/9.git/blob - sys/dev/cxgbe/t4_main.c
MFC r363988:
[FreeBSD/stable/9.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
70 /* T4 bus driver interface */
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus method table for the t4nex (T4 adapter nexus) driver. */
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
/* Softc for the nexus is the full adapter structure, one per T4 card. */
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
/* Character-device entry points shared by the T4 and T5 nexus devices. */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/* /dev/t4nexN: control device used by the userland management tools. */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
117 /* T5 bus driver interface */
/*
 * T5 bus driver interface.  Only probe differs from T4; attach/detach are
 * shared with the T4 nexus driver.
 */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface: same port methods as cxgbe, different name. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* /dev/t5nexN: same entry points as the T4 control device. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;		/* protects t4_list */
static SLIST_HEAD(, adapter) t4_list;	/* all attached adapters */
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;	/* protects t4_uld_list */
static SLIST_HEAD(, uld_info) t4_uld_list;	/* registered upper-layer drivers */
#endif
169
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200 static int t4_rsrv_noflowq = 0;
201 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
202
203 #ifdef TCP_OFFLOAD
204 #define NOFLDTXQ_10G 8
205 static int t4_nofldtxq10g = -1;
206 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
207
208 #define NOFLDRXQ_10G 2
209 static int t4_nofldrxq10g = -1;
210 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
211
212 #define NOFLDTXQ_1G 2
213 static int t4_nofldtxq1g = -1;
214 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
215
216 #define NOFLDRXQ_1G 1
217 static int t4_nofldrxq1g = -1;
218 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
219 #endif
220
221 /*
222  * Holdoff parameters for 10G and 1G ports.
223  */
224 #define TMR_IDX_10G 1
225 static int t4_tmr_idx_10g = TMR_IDX_10G;
226 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
227
228 #define PKTC_IDX_10G (-1)
229 static int t4_pktc_idx_10g = PKTC_IDX_10G;
230 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
231
232 #define TMR_IDX_1G 1
233 static int t4_tmr_idx_1g = TMR_IDX_1G;
234 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
235
236 #define PKTC_IDX_1G (-1)
237 static int t4_pktc_idx_1g = PKTC_IDX_1G;
238 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
239
240 /*
241  * Size (# of entries) of each tx and rx queue.
242  */
243 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
245
246 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
247 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
248
249 /*
250  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
251  */
252 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
253 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
254
255 /*
256  * Configuration file.
257  */
258 #define DEFAULT_CF      "default"
259 #define FLASH_CF        "flash"
260 #define UWIRE_CF        "uwire"
261 #define FPGA_CF         "fpga"
262 static char t4_cfg_file[32] = DEFAULT_CF;
263 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
264
265 /*
266  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
267  * encouraged respectively).
268  */
269 static unsigned int t4_fw_install = 1;
270 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
271
272 /*
273  * ASIC features that will be used.  Disable the ones you don't want so that the
274  * chip resources aren't wasted on features that will not be used.
275  */
276 static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
277 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
278
279 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
280 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
281
282 static int t4_toecaps_allowed = -1;
283 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
284
285 static int t4_rdmacaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
287
288 static int t4_iscsicaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
290
291 static int t4_fcoecaps_allowed = 0;
292 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
293
294 static int t5_write_combine = 0;
295 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
296
/*
 * Result of interrupt/queue sizing (see cfg_itype_and_nqueues): the interrupt
 * type that was settled on and the per-port queue counts it can support.
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};
313
/* Software state for one hardware filter (see get/set/del_filter). */
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;	/* user-visible specification */
};

/* Bitmask of MAC/VLAN settings for update_mac_settings() to apply. */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff	/* apply everything */
};
334
335 static int map_bars_0_and_4(struct adapter *);
336 static int map_bar_2(struct adapter *);
337 static void setup_memwin(struct adapter *);
338 static int validate_mem_range(struct adapter *, uint32_t, int);
339 static int fwmtype_to_hwmtype(int);
340 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
341     uint32_t *);
342 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
343 static uint32_t position_memwin(struct adapter *, int, uint32_t);
344 static int cfg_itype_and_nqueues(struct adapter *, int, int,
345     struct intrs_and_queues *);
346 static int prep_firmware(struct adapter *);
347 static int partition_resources(struct adapter *, const struct firmware *,
348     const char *);
349 static int get_params__pre_init(struct adapter *);
350 static int get_params__post_init(struct adapter *);
351 static int set_params__post_init(struct adapter *);
352 static void t4_set_desc(struct adapter *);
353 static void build_medialist(struct port_info *);
354 static int update_mac_settings(struct port_info *, int);
355 static int cxgbe_init_synchronized(struct port_info *);
356 static int cxgbe_uninit_synchronized(struct port_info *);
357 static int setup_intr_handlers(struct adapter *);
358 static int adapter_full_init(struct adapter *);
359 static int adapter_full_uninit(struct adapter *);
360 static int port_full_init(struct port_info *);
361 static int port_full_uninit(struct port_info *);
362 static void quiesce_eq(struct adapter *, struct sge_eq *);
363 static void quiesce_iq(struct adapter *, struct sge_iq *);
364 static void quiesce_fl(struct adapter *, struct sge_fl *);
365 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
366     driver_intr_t *, void *, char *);
367 static int t4_free_irq(struct adapter *, struct irq *);
368 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
369     unsigned int);
370 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
371 static void cxgbe_tick(void *);
372 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
373 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
374     struct mbuf *);
375 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
376 static int fw_msg_not_handled(struct adapter *, const __be64 *);
377 static int t4_sysctls(struct adapter *);
378 static int cxgbe_sysctls(struct port_info *);
379 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
380 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
381 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
382 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
383 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
384 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
385 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
386 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
387 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
388 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
389 #ifdef SBUF_DRAIN
390 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
391 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
392 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
393 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
394 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
395 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
396 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
398 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
399 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
400 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
401 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
403 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
404 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
405 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
406 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
407 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
408 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
409 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
410 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
411 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
412 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
413 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
414 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
415 #endif
416 static inline void txq_start(struct ifnet *, struct sge_txq *);
417 static uint32_t fconf_to_mode(uint32_t);
418 static uint32_t mode_to_fconf(uint32_t);
419 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
420 static int get_filter_mode(struct adapter *, uint32_t *);
421 static int set_filter_mode(struct adapter *, uint32_t);
422 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
423 static int get_filter(struct adapter *, struct t4_filter *);
424 static int set_filter(struct adapter *, struct t4_filter *);
425 static int del_filter(struct adapter *, struct t4_filter *);
426 static void clear_filter(struct filter_entry *);
427 static int set_filter_wr(struct adapter *, int);
428 static int del_filter_wr(struct adapter *, int);
429 static int get_sge_context(struct adapter *, struct t4_sge_context *);
430 static int load_fw(struct adapter *, struct t4_data *);
431 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
432 static int read_i2c(struct adapter *, struct t4_i2c_data *);
433 static int set_sched_class(struct adapter *, struct t4_sched_params *);
434 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
435 #ifdef TCP_OFFLOAD
436 static int toe_capability(struct port_info *, int);
437 #endif
438 static int mod_event(module_t, int, void *);
439
/*
 * PCI device IDs recognized by t4_probe()/t5_probe().  desc becomes the
 * device description the kernel reports for the matched adapter.
 */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};
484
485 #ifdef TCP_OFFLOAD
486 /*
487  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
488  * exactly the same for both rxq and ofld_rxq.
489  */
490 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
491 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
492 #endif
493
494 /* No easy way to include t4_msg.h before adapter.h so we check this way */
495 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
496 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
497
498 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
499
500 static int
501 t4_probe(device_t dev)
502 {
503         int i;
504         uint16_t v = pci_get_vendor(dev);
505         uint16_t d = pci_get_device(dev);
506         uint8_t f = pci_get_function(dev);
507
508         if (v != PCI_VENDOR_ID_CHELSIO)
509                 return (ENXIO);
510
511         /* Attach only to PF0 of the FPGA */
512         if (d == 0xa000 && f != 0)
513                 return (ENXIO);
514
515         for (i = 0; i < nitems(t4_pciids); i++) {
516                 if (d == t4_pciids[i].device) {
517                         device_set_desc(dev, t4_pciids[i].desc);
518                         return (BUS_PROBE_DEFAULT);
519                 }
520         }
521
522         return (ENXIO);
523 }
524
525 static int
526 t5_probe(device_t dev)
527 {
528         int i;
529         uint16_t v = pci_get_vendor(dev);
530         uint16_t d = pci_get_device(dev);
531         uint8_t f = pci_get_function(dev);
532
533         if (v != PCI_VENDOR_ID_CHELSIO)
534                 return (ENXIO);
535
536         /* Attach only to PF0 of the FPGA */
537         if (d == 0xb000 && f != 0)
538                 return (ENXIO);
539
540         for (i = 0; i < nitems(t5_pciids); i++) {
541                 if (d == t5_pciids[i].device) {
542                         device_set_desc(dev, t5_pciids[i].desc);
543                         return (BUS_PROBE_DEFAULT);
544                 }
545         }
546
547         return (ENXIO);
548 }
549
/*
 * Disable No Snoop and Relaxed Ordering on the Root Port above a T5 device.
 * Best-effort: logs and returns if the Root Port cannot be found.
 */
static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	/*
	 * Clear both enable bits in the PCIe Device Control register.
	 * NOTE(review): assumes pcie_adjust_config() returns the register
	 * value from before the update, so a set bit in v means the
	 * workaround actually changed something -- verify against pci(9).
	 */
	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}
576
577 static int
578 t4_attach(device_t dev)
579 {
580         struct adapter *sc;
581         int rc = 0, i, n10g, n1g, rqidx, tqidx;
582         struct intrs_and_queues iaq;
583         struct sge *s;
584 #ifdef TCP_OFFLOAD
585         int ofld_rqidx, ofld_tqidx;
586 #endif
587
588         sc = device_get_softc(dev);
589         sc->dev = dev;
590
591         if ((pci_get_device(dev) & 0xff00) == 0x5400)
592                 t5_attribute_workaround(dev);
593         pci_enable_busmaster(dev);
594         if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
595                 uint32_t v;
596
597                 pci_set_max_read_req(dev, 4096);
598                 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
599                 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
600                 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
601
602                 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
603         }
604
605         snprintf(sc->lockname, sizeof(sc->lockname), "%s",
606             device_get_nameunit(dev));
607         mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
608         mtx_lock(&t4_list_lock);
609         SLIST_INSERT_HEAD(&t4_list, sc, link);
610         mtx_unlock(&t4_list_lock);
611
612         mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
613         TAILQ_INIT(&sc->sfl);
614         callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
615
616         rc = map_bars_0_and_4(sc);
617         if (rc != 0)
618                 goto done; /* error message displayed already */
619
620         /*
621          * This is the real PF# to which we're attaching.  Works from within PCI
622          * passthrough environments too, where pci_get_function() could return a
623          * different PF# depending on the passthrough configuration.  We need to
624          * use the real PF# in all our communication with the firmware.
625          */
626         sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
627         sc->mbox = sc->pf;
628
629         memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
630         sc->an_handler = an_not_handled;
631         for (i = 0; i < nitems(sc->cpl_handler); i++)
632                 sc->cpl_handler[i] = cpl_not_handled;
633         for (i = 0; i < nitems(sc->fw_msg_handler); i++)
634                 sc->fw_msg_handler[i] = fw_msg_not_handled;
635         t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
636         t4_init_sge_cpl_handlers(sc);
637
638         /* Prepare the adapter for operation */
639         rc = -t4_prep_adapter(sc);
640         if (rc != 0) {
641                 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
642                 goto done;
643         }
644
645         /*
646          * Do this really early, with the memory windows set up even before the
647          * character device.  The userland tool's register i/o and mem read
648          * will work even in "recovery mode".
649          */
650         setup_memwin(sc);
651         sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
652             device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
653             device_get_nameunit(dev));
654         if (sc->cdev == NULL)
655                 device_printf(dev, "failed to create nexus char device.\n");
656         else
657                 sc->cdev->si_drv1 = sc;
658
659         /* Go no further if recovery mode has been requested. */
660         if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
661                 device_printf(dev, "recovery mode.\n");
662                 goto done;
663         }
664
665         /* Prepare the firmware for operation */
666         rc = prep_firmware(sc);
667         if (rc != 0)
668                 goto done; /* error message displayed already */
669
670         rc = get_params__post_init(sc);
671         if (rc != 0)
672                 goto done; /* error message displayed already */
673
674         rc = set_params__post_init(sc);
675         if (rc != 0)
676                 goto done; /* error message displayed already */
677
678         rc = map_bar_2(sc);
679         if (rc != 0)
680                 goto done; /* error message displayed already */
681
682         rc = t4_create_dma_tag(sc);
683         if (rc != 0)
684                 goto done; /* error message displayed already */
685
686         /*
687          * First pass over all the ports - allocate VIs and initialize some
688          * basic parameters like mac address, port type, etc.  We also figure
689          * out whether a port is 10G or 1G and use that information when
690          * calculating how many interrupts to attempt to allocate.
691          */
692         n10g = n1g = 0;
693         for_each_port(sc, i) {
694                 struct port_info *pi;
695
696                 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
697                 sc->port[i] = pi;
698
699                 /* These must be set before t4_port_init */
700                 pi->adapter = sc;
701                 pi->port_id = i;
702
703                 /* Allocate the vi and initialize parameters like mac addr */
704                 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
705                 if (rc != 0) {
706                         device_printf(dev, "unable to initialize port %d: %d\n",
707                             i, rc);
708                         free(pi, M_CXGBE);
709                         sc->port[i] = NULL;
710                         goto done;
711                 }
712
713                 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
714                     device_get_nameunit(dev), i);
715                 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
716
717                 if (is_10G_port(pi) || is_40G_port(pi)) {
718                         n10g++;
719                         pi->tmr_idx = t4_tmr_idx_10g;
720                         pi->pktc_idx = t4_pktc_idx_10g;
721                 } else {
722                         n1g++;
723                         pi->tmr_idx = t4_tmr_idx_1g;
724                         pi->pktc_idx = t4_pktc_idx_1g;
725                 }
726
727                 pi->xact_addr_filt = -1;
728                 pi->linkdnrc = -1;
729
730                 pi->qsize_rxq = t4_qsize_rxq;
731                 pi->qsize_txq = t4_qsize_txq;
732
733                 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
734                 if (pi->dev == NULL) {
735                         device_printf(dev,
736                             "failed to add device for port %d.\n", i);
737                         rc = ENXIO;
738                         goto done;
739                 }
740                 device_set_softc(pi->dev, pi);
741         }
742
743         /*
744          * Interrupt type, # of interrupts, # of rx/tx queues, etc.
745          */
746         rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
747         if (rc != 0)
748                 goto done; /* error message displayed already */
749
750         sc->intr_type = iaq.intr_type;
751         sc->intr_count = iaq.nirq;
752         sc->flags |= iaq.intr_flags;
753
754         s = &sc->sge;
755         s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
756         s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
757         s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
758         s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
759         s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
760
761 #ifdef TCP_OFFLOAD
762         if (is_offload(sc)) {
763
764                 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
765                 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
766                 s->neq += s->nofldtxq + s->nofldrxq;
767                 s->niq += s->nofldrxq;
768
769                 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
770                     M_CXGBE, M_ZERO | M_WAITOK);
771                 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
772                     M_CXGBE, M_ZERO | M_WAITOK);
773         }
774 #endif
775
776         s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
777             M_ZERO | M_WAITOK);
778         s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
779             M_ZERO | M_WAITOK);
780         s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
781             M_ZERO | M_WAITOK);
782         s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
783             M_ZERO | M_WAITOK);
784         s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
785             M_ZERO | M_WAITOK);
786
787         sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
788             M_ZERO | M_WAITOK);
789
790         t4_init_l2t(sc, M_WAITOK);
791
792         /*
793          * Second pass over the ports.  This time we know the number of rx and
794          * tx queues that each port should get.
795          */
796         rqidx = tqidx = 0;
797 #ifdef TCP_OFFLOAD
798         ofld_rqidx = ofld_tqidx = 0;
799 #endif
800         for_each_port(sc, i) {
801                 struct port_info *pi = sc->port[i];
802
803                 if (pi == NULL)
804                         continue;
805
806                 pi->first_rxq = rqidx;
807                 pi->first_txq = tqidx;
808                 if (is_10G_port(pi) || is_40G_port(pi)) {
809                         pi->nrxq = iaq.nrxq10g;
810                         pi->ntxq = iaq.ntxq10g;
811                 } else {
812                         pi->nrxq = iaq.nrxq1g;
813                         pi->ntxq = iaq.ntxq1g;
814                 }
815
816                 if (pi->ntxq > 1)
817                         pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
818                 else
819                         pi->rsrv_noflowq = 0;
820
821                 rqidx += pi->nrxq;
822                 tqidx += pi->ntxq;
823
824 #ifdef TCP_OFFLOAD
825                 if (is_offload(sc)) {
826                         pi->first_ofld_rxq = ofld_rqidx;
827                         pi->first_ofld_txq = ofld_tqidx;
828                         if (is_10G_port(pi) || is_40G_port(pi)) {
829                                 pi->nofldrxq = iaq.nofldrxq10g;
830                                 pi->nofldtxq = iaq.nofldtxq10g;
831                         } else {
832                                 pi->nofldrxq = iaq.nofldrxq1g;
833                                 pi->nofldtxq = iaq.nofldtxq1g;
834                         }
835                         ofld_rqidx += pi->nofldrxq;
836                         ofld_tqidx += pi->nofldtxq;
837                 }
838 #endif
839         }
840
841         rc = setup_intr_handlers(sc);
842         if (rc != 0) {
843                 device_printf(dev,
844                     "failed to setup interrupt handlers: %d\n", rc);
845                 goto done;
846         }
847
848         rc = bus_generic_attach(dev);
849         if (rc != 0) {
850                 device_printf(dev,
851                     "failed to attach all child ports: %d\n", rc);
852                 goto done;
853         }
854
855         device_printf(dev,
856             "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
857             sc->params.pci.width, sc->params.nports, sc->intr_count,
858             sc->intr_type == INTR_MSIX ? "MSI-X" :
859             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
860             sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
861
862         t4_set_desc(sc);
863
864 done:
865         if (rc != 0 && sc->cdev) {
866                 /* cdev was created and so cxgbetool works; recover that way. */
867                 device_printf(dev,
868                     "error during attach, adapter is now in recovery mode.\n");
869                 rc = 0;
870         }
871
872         if (rc != 0)
873                 t4_detach(dev);
874         else
875                 t4_sysctls(sc);
876
877         return (rc);
878 }
879
/*
 * t4_detach: detach method for the t4 nexus device.
 *
 * Idempotent: it releases only the resources that actually exist, so it is
 * safe to call on a partially attached adapter (t4_attach invokes it on
 * failure).
 *
 * Returns 0 on success or the error from bus_generic_detach.
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Quiesce the hardware first if interrupts were ever enabled. */
        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        /* Remove the nexus cdev so no new cxgbetool ioctls can arrive. */
        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /* Detach the cxgbe ports (children) before freeing shared state. */
        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        /* Release the virtual interface held by this port. */
                        t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Say goodbye to the firmware if we ever said hello. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        /* BAR0 (registers), BAR2 (user doorbells), BAR4 (MSI-X tables). */
        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                /* Take this adapter off the global list of all adapters. */
                mtx_lock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                mtx_unlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);

        /* Wipe the softc so a repeat detach sees all-zero state. */
        bzero(sc, sizeof(*sc));

        return (0);
}
974
975
976 static int
977 cxgbe_probe(device_t dev)
978 {
979         char buf[128];
980         struct port_info *pi = device_get_softc(dev);
981
982         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
983         device_set_desc_copy(dev, buf);
984
985         return (BUS_PROBE_DEFAULT);
986 }
987
988 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
989     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
990     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
991 #define T4_CAP_ENABLE (T4_CAP)
992
/*
 * cxgbe_attach: attach method for a port (child of the t4 nexus device).
 * Allocates and configures the ifnet, sets capabilities, registers media
 * handlers and a vlan config event handler, and attaches to the ethernet
 * layer.  Returns 0 on success or ENOMEM.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        /* Everything in T4_CAP_ENABLE starts out enabled. */
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* TSO limits: total payload, #sg entries, per-segment size. */
        ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
        ifp->if_hw_tsomaxsegsize = 65536;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi);

        pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                device_printf(dev,
                    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
                    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
        } else
#endif
                device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

        cxgbe_sysctls(pi);

        return (0);
}
1054
/*
 * cxgbe_detach: detach method for a port.  Marks the port doomed, waits for
 * exclusive use of the adapter, then tears down the ifnet and any queues the
 * port owns.  Always returns 0.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait for any in-progress synchronized operation to finish. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        if (pi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

        /* Stop the tick callout and wait until it is no longer running. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release exclusive use of the adapter. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
1099
1100 static void
1101 cxgbe_init(void *arg)
1102 {
1103         struct port_info *pi = arg;
1104         struct adapter *sc = pi->adapter;
1105
1106         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1107                 return;
1108         cxgbe_init_synchronized(pi);
1109         end_synchronized_op(sc, 0);
1110 }
1111
1112 static int
1113 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1114 {
1115         int rc = 0, mtu, flags;
1116         struct port_info *pi = ifp->if_softc;
1117         struct adapter *sc = pi->adapter;
1118         struct ifreq *ifr = (struct ifreq *)data;
1119         uint32_t mask;
1120
1121         switch (cmd) {
1122         case SIOCSIFMTU:
1123                 mtu = ifr->ifr_mtu;
1124                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1125                         return (EINVAL);
1126
1127                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1128                 if (rc)
1129                         return (rc);
1130                 ifp->if_mtu = mtu;
1131                 if (pi->flags & PORT_INIT_DONE) {
1132                         t4_update_fl_bufsize(ifp);
1133                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1134                                 rc = update_mac_settings(pi, XGMAC_MTU);
1135                 }
1136                 end_synchronized_op(sc, 0);
1137                 break;
1138
1139         case SIOCSIFFLAGS:
1140                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1141                 if (rc)
1142                         return (rc);
1143
1144                 if (ifp->if_flags & IFF_UP) {
1145                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1146                                 flags = pi->if_flags;
1147                                 if ((ifp->if_flags ^ flags) &
1148                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1149                                         rc = update_mac_settings(pi,
1150                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1151                                 }
1152                         } else
1153                                 rc = cxgbe_init_synchronized(pi);
1154                         pi->if_flags = ifp->if_flags;
1155                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1156                         rc = cxgbe_uninit_synchronized(pi);
1157                 end_synchronized_op(sc, 0);
1158                 break;
1159
1160         case SIOCADDMULTI:      
1161         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1162                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1163                 if (rc)
1164                         return (rc);
1165                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1166                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
1167                 end_synchronized_op(sc, LOCK_HELD);
1168                 break;
1169
1170         case SIOCSIFCAP:
1171                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1172                 if (rc)
1173                         return (rc);
1174
1175                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1176                 if (mask & IFCAP_TXCSUM) {
1177                         ifp->if_capenable ^= IFCAP_TXCSUM;
1178                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1179
1180                         if (IFCAP_TSO4 & ifp->if_capenable &&
1181                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1182                                 ifp->if_capenable &= ~IFCAP_TSO4;
1183                                 if_printf(ifp,
1184                                     "tso4 disabled due to -txcsum.\n");
1185                         }
1186                 }
1187                 if (mask & IFCAP_TXCSUM_IPV6) {
1188                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1189                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1190
1191                         if (IFCAP_TSO6 & ifp->if_capenable &&
1192                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1193                                 ifp->if_capenable &= ~IFCAP_TSO6;
1194                                 if_printf(ifp,
1195                                     "tso6 disabled due to -txcsum6.\n");
1196                         }
1197                 }
1198                 if (mask & IFCAP_RXCSUM)
1199                         ifp->if_capenable ^= IFCAP_RXCSUM;
1200                 if (mask & IFCAP_RXCSUM_IPV6)
1201                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1202
1203                 /*
1204                  * Note that we leave CSUM_TSO alone (it is always set).  The
1205                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1206                  * sending a TSO request our way, so it's sufficient to toggle
1207                  * IFCAP_TSOx only.
1208                  */
1209                 if (mask & IFCAP_TSO4) {
1210                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1211                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1212                                 if_printf(ifp, "enable txcsum first.\n");
1213                                 rc = EAGAIN;
1214                                 goto fail;
1215                         }
1216                         ifp->if_capenable ^= IFCAP_TSO4;
1217                 }
1218                 if (mask & IFCAP_TSO6) {
1219                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1220                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1221                                 if_printf(ifp, "enable txcsum6 first.\n");
1222                                 rc = EAGAIN;
1223                                 goto fail;
1224                         }
1225                         ifp->if_capenable ^= IFCAP_TSO6;
1226                 }
1227                 if (mask & IFCAP_LRO) {
1228 #if defined(INET) || defined(INET6)
1229                         int i;
1230                         struct sge_rxq *rxq;
1231
1232                         ifp->if_capenable ^= IFCAP_LRO;
1233                         for_each_rxq(pi, i, rxq) {
1234                                 if (ifp->if_capenable & IFCAP_LRO)
1235                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1236                                 else
1237                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1238                         }
1239 #endif
1240                 }
1241 #ifdef TCP_OFFLOAD
1242                 if (mask & IFCAP_TOE) {
1243                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1244
1245                         rc = toe_capability(pi, enable);
1246                         if (rc != 0)
1247                                 goto fail;
1248
1249                         ifp->if_capenable ^= mask;
1250                 }
1251 #endif
1252                 if (mask & IFCAP_VLAN_HWTAGGING) {
1253                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1254                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1255                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
1256                 }
1257                 if (mask & IFCAP_VLAN_MTU) {
1258                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1259
1260                         /* Need to find out how to disable auto-mtu-inflation */
1261                 }
1262                 if (mask & IFCAP_VLAN_HWTSO)
1263                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1264                 if (mask & IFCAP_VLAN_HWCSUM)
1265                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1266
1267 #ifdef VLAN_CAPABILITIES
1268                 VLAN_CAPABILITIES(ifp);
1269 #endif
1270 fail:
1271                 end_synchronized_op(sc, 0);
1272                 break;
1273
1274         case SIOCSIFMEDIA:
1275         case SIOCGIFMEDIA:
1276                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1277                 break;
1278
1279         default:
1280                 rc = ether_ioctl(ifp, cmd, data);
1281         }
1282
1283         return (rc);
1284 }
1285
/*
 * if_transmit method.  Selects a tx queue from the mbuf's flowid (queues
 * below rsrv_noflowq are reserved for traffic without a flowid), then either
 * transmits directly if the queue lock is free, or enqueues the mbuf in the
 * queue's buf_ring for the current lock holder (or the tx callout) to send.
 *
 * Returns 0 if the mbuf was sent or queued, an error otherwise (the mbuf is
 * freed on error).
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Spread flows across the tx queues that are not reserved. */
        if (m->m_flags & M_FLOWID)
                txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
                    + pi->rsrv_noflowq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                /* Arm the tx callout so the enqueued mbuf gets serviced. */
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
1353
1354 static void
1355 cxgbe_qflush(struct ifnet *ifp)
1356 {
1357         struct port_info *pi = ifp->if_softc;
1358         struct sge_txq *txq;
1359         int i;
1360         struct mbuf *m;
1361
1362         /* queues do not exist if !PORT_INIT_DONE. */
1363         if (pi->flags & PORT_INIT_DONE) {
1364                 for_each_txq(pi, i, txq) {
1365                         TXQ_LOCK(txq);
1366                         m_freem(txq->m);
1367                         txq->m = NULL;
1368                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1369                                 m_freem(m);
1370                         TXQ_UNLOCK(txq);
1371                 }
1372         }
1373         if_qflush(ifp);
1374 }
1375
1376 static int
1377 cxgbe_media_change(struct ifnet *ifp)
1378 {
1379         struct port_info *pi = ifp->if_softc;
1380
1381         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1382
1383         return (EOPNOTSUPP);
1384 }
1385
1386 static void
1387 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1388 {
1389         struct port_info *pi = ifp->if_softc;
1390         struct ifmedia_entry *cur = pi->media.ifm_cur;
1391         int speed = pi->link_cfg.speed;
1392         int data = (pi->port_type << 8) | pi->mod_type;
1393
1394         if (cur->ifm_data != data) {
1395                 build_medialist(pi);
1396                 cur = pi->media.ifm_cur;
1397         }
1398
1399         ifmr->ifm_status = IFM_AVALID;
1400         if (!pi->link_cfg.link_ok)
1401                 return;
1402
1403         ifmr->ifm_status |= IFM_ACTIVE;
1404
1405         /* active and current will differ iff current media is autoselect. */
1406         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1407                 return;
1408
1409         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1410         if (speed == SPEED_10000)
1411                 ifmr->ifm_active |= IFM_10G_T;
1412         else if (speed == SPEED_1000)
1413                 ifmr->ifm_active |= IFM_1000_T;
1414         else if (speed == SPEED_100)
1415                 ifmr->ifm_active |= IFM_100_TX;
1416         else if (speed == SPEED_10)
1417                 ifmr->ifm_active |= IFM_10_T;
1418         else
1419                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1420                             speed));
1421 }
1422
/*
 * Stop the adapter after a fatal hardware error: turn off the SGE, mask all
 * interrupts, and log the event.  No attempt is made to recover.
 */
void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
1431
/*
 * Map BAR0 (adapter registers) and BAR4 (MSI-X tables).  Called during
 * attach; the resources are released in t4_detach, including on failure
 * here.  Returns 0 on success or ENXIO.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        /* The kernel doorbell (in BAR0) is always usable. */
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
1457
/*
 * Map BAR2, the userspace doorbell BAR, and enable write combining on it
 * where possible (T5 on x86).  Returns 0 on success (including when the BAR
 * is intentionally not mapped) or ENXIO.
 */
static int
map_bar_2(struct adapter *sc)
{

        /*
         * T4: only iWARP driver uses the userspace doorbells.  There is no need
         * to map it if RDMA is disabled.
         */
        if (is_t4(sc) && sc->rdmacaps == 0)
                return (0);

        sc->udbs_rid = PCIR_BAR(2);
        sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE);
        if (sc->udbs_res == NULL) {
                device_printf(sc->dev, "cannot map doorbell BAR.\n");
                return (ENXIO);
        }
        sc->udbs_base = rman_get_virtual(sc->udbs_res);

        if (is_t5(sc)) {
                setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
                if (t5_write_combine) {
                        int rc;

                        /*
                         * Enable write combining on BAR2.  This is the
                         * userspace doorbell BAR and is split into 128B
                         * (UDBS_SEG_SIZE) doorbell regions, each associated
                         * with an egress queue.  The first 64B has the doorbell
                         * and the second 64B can be used to submit a tx work
                         * request with an implicit doorbell.
                         */

                        rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
                            rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
                        if (rc == 0) {
                                /* Prefer the WC doorbell variants. */
                                clrbit(&sc->doorbells, DOORBELL_UDB);
                                setbit(&sc->doorbells, DOORBELL_WCWR);
                                setbit(&sc->doorbells, DOORBELL_UDBWC);
                        } else {
                                device_printf(sc->dev,
                                    "couldn't enable write combining: %d\n",
                                    rc);
                        }

                        /* Stats config suitable for WC doorbell operation. */
                        t4_write_reg(sc, A_SGE_STAT_CFG,
                            V_STATSOURCE_T5(7) | V_STATMODE(0));
                }
#endif
        }

        return (0);
}
1513
/* Memory window base/aperture pairs programmed into a T4 by setup_memwin. */
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};
1519
/* Memory window base/aperture pairs programmed into a T5 by setup_memwin. */
static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1525
/*
 * Program the chip's PCIe memory window decoders from the per-chip memwin
 * table.  T4 windows need the absolute bus address of BAR0; T5 windows take
 * offsets relative to the BAR.
 */
static void
setup_memwin(struct adapter *sc)
{
        const struct memwin *mw;
        int i, n;
        uint32_t bar0;

        if (is_t4(sc)) {
                /*
                 * Read low 32b of bar0 indirectly via the hardware backdoor
                 * mechanism.  Works from within PCI passthrough environments
                 * too, where rman_get_start() can return a different value.  We
                 * need to program the T4 memory window decoders with the actual
                 * addresses that will be coming across the PCIe link.
                 */
                bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
                bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

                mw = &t4_memwin[0];
                n = nitems(t4_memwin);
        } else {
                /* T5 uses the relative offset inside the PCIe BAR */
                bar0 = 0;

                mw = &t5_memwin[0];
                n = nitems(t5_memwin);
        }

        for (i = 0; i < n; i++, mw++) {
                /* Window size is encoded as log2(aperture) - 10. */
                t4_write_reg(sc,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
                    (mw->base + bar0) | V_BIR(0) |
                    V_WINDOW(ilog2(mw->aperture) - 10));
        }

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1564
1565 /*
1566  * Verify that the memory range specified by the addr/len pair is valid and lies
1567  * entirely within a single region (EDCx or MCx).
1568  */
1569 static int
1570 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1571 {
1572         uint32_t em, addr_len, maddr, mlen;
1573
1574         /* Memory can only be accessed in naturally aligned 4 byte units */
1575         if (addr & 3 || len & 3 || len == 0)
1576                 return (EINVAL);
1577
1578         /* Enabled memories */
1579         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1580         if (em & F_EDRAM0_ENABLE) {
1581                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1582                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1583                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1584                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1585                     addr + len <= maddr + mlen)
1586                         return (0);
1587         }
1588         if (em & F_EDRAM1_ENABLE) {
1589                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1590                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1591                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1592                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1593                     addr + len <= maddr + mlen)
1594                         return (0);
1595         }
1596         if (em & F_EXT_MEM_ENABLE) {
1597                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1598                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1599                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1600                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1601                     addr + len <= maddr + mlen)
1602                         return (0);
1603         }
1604         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1605                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1606                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1607                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1608                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1609                     addr + len <= maddr + mlen)
1610                         return (0);
1611         }
1612
1613         return (EFAULT);
1614 }
1615
1616 static int
1617 fwmtype_to_hwmtype(int mtype)
1618 {
1619
1620         switch (mtype) {
1621         case FW_MEMTYPE_EDC0:
1622                 return (MEM_EDC0);
1623         case FW_MEMTYPE_EDC1:
1624                 return (MEM_EDC1);
1625         case FW_MEMTYPE_EXTMEM:
1626                 return (MEM_MC0);
1627         case FW_MEMTYPE_EXTMEM1:
1628                 return (MEM_MC1);
1629         default:
1630                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1631         }
1632 }
1633
1634 /*
1635  * Verify that the memory range specified by the memtype/offset/len pair is
1636  * valid and lies entirely within the memtype specified.  The global address of
1637  * the start of the range is returned in addr.
1638  */
1639 static int
1640 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1641     uint32_t *addr)
1642 {
1643         uint32_t em, addr_len, maddr, mlen;
1644
1645         /* Memory can only be accessed in naturally aligned 4 byte units */
1646         if (off & 3 || len & 3 || len == 0)
1647                 return (EINVAL);
1648
1649         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1650         switch (fwmtype_to_hwmtype(mtype)) {
1651         case MEM_EDC0:
1652                 if (!(em & F_EDRAM0_ENABLE))
1653                         return (EINVAL);
1654                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1655                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1656                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1657                 break;
1658         case MEM_EDC1:
1659                 if (!(em & F_EDRAM1_ENABLE))
1660                         return (EINVAL);
1661                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1662                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1663                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1664                 break;
1665         case MEM_MC:
1666                 if (!(em & F_EXT_MEM_ENABLE))
1667                         return (EINVAL);
1668                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1669                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1670                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1671                 break;
1672         case MEM_MC1:
1673                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1674                         return (EINVAL);
1675                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1676                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1677                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1678                 break;
1679         default:
1680                 return (EINVAL);
1681         }
1682
1683         if (mlen > 0 && off < mlen && off + len <= mlen) {
1684                 *addr = maddr + off;    /* global address */
1685                 return (0);
1686         }
1687
1688         return (EFAULT);
1689 }
1690
1691 static void
1692 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1693 {
1694         const struct memwin *mw;
1695
1696         if (is_t4(sc)) {
1697                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1698                     ("%s: incorrect memwin# (%d)", __func__, win));
1699                 mw = &t4_memwin[win];
1700         } else {
1701                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1702                     ("%s: incorrect memwin# (%d)", __func__, win));
1703                 mw = &t5_memwin[win];
1704         }
1705
1706         if (base != NULL)
1707                 *base = mw->base;
1708         if (aperture != NULL)
1709                 *aperture = mw->aperture;
1710 }
1711
1712 /*
1713  * Positions the memory window such that it can be used to access the specified
1714  * address in the chip's address space.  The return value is the offset of addr
1715  * from the start of the window.
1716  */
1717 static uint32_t
1718 position_memwin(struct adapter *sc, int n, uint32_t addr)
1719 {
1720         uint32_t start, pf;
1721         uint32_t reg;
1722
1723         KASSERT(n >= 0 && n <= 3,
1724             ("%s: invalid window %d.", __func__, n));
1725         KASSERT((addr & 3) == 0,
1726             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1727
1728         if (is_t4(sc)) {
1729                 pf = 0;
1730                 start = addr & ~0xf;    /* start must be 16B aligned */
1731         } else {
1732                 pf = V_PFNUM(sc->pf);
1733                 start = addr & ~0x7f;   /* start must be 128B aligned */
1734         }
1735         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1736
1737         t4_write_reg(sc, reg, start | pf);
1738         t4_read_reg(sc, reg);
1739
1740         return (addr - start);
1741 }
1742
1743 static int
1744 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1745     struct intrs_and_queues *iaq)
1746 {
1747         int rc, itype, navail, nrxq10g, nrxq1g, n;
1748         int nofldrxq10g = 0, nofldrxq1g = 0;
1749
1750         bzero(iaq, sizeof(*iaq));
1751
1752         iaq->ntxq10g = t4_ntxq10g;
1753         iaq->ntxq1g = t4_ntxq1g;
1754         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1755         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1756         iaq->rsrv_noflowq = t4_rsrv_noflowq;
1757 #ifdef TCP_OFFLOAD
1758         if (is_offload(sc)) {
1759                 iaq->nofldtxq10g = t4_nofldtxq10g;
1760                 iaq->nofldtxq1g = t4_nofldtxq1g;
1761                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1762                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1763         }
1764 #endif
1765
1766         for (itype = INTR_MSIX; itype; itype >>= 1) {
1767
1768                 if ((itype & t4_intr_types) == 0)
1769                         continue;       /* not allowed */
1770
1771                 if (itype == INTR_MSIX)
1772                         navail = pci_msix_count(sc->dev);
1773                 else if (itype == INTR_MSI)
1774                         navail = pci_msi_count(sc->dev);
1775                 else
1776                         navail = 1;
1777 restart:
1778                 if (navail == 0)
1779                         continue;
1780
1781                 iaq->intr_type = itype;
1782                 iaq->intr_flags = 0;
1783
1784                 /*
1785                  * Best option: an interrupt vector for errors, one for the
1786                  * firmware event queue, and one each for each rxq (NIC as well
1787                  * as offload).
1788                  */
1789                 iaq->nirq = T4_EXTRA_INTR;
1790                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1791                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1792                 if (iaq->nirq <= navail &&
1793                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1794                         iaq->intr_flags |= INTR_DIRECT;
1795                         goto allocate;
1796                 }
1797
1798                 /*
1799                  * Second best option: an interrupt vector for errors, one for
1800                  * the firmware event queue, and one each for either NIC or
1801                  * offload rxq's.
1802                  */
1803                 iaq->nirq = T4_EXTRA_INTR;
1804                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1805                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1806                 if (iaq->nirq <= navail &&
1807                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1808                         goto allocate;
1809
1810                 /*
1811                  * Next best option: an interrupt vector for errors, one for the
1812                  * firmware event queue, and at least one per port.  At this
1813                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1814                  * what's available to us.
1815                  */
1816                 iaq->nirq = T4_EXTRA_INTR;
1817                 iaq->nirq += n10g + n1g;
1818                 if (iaq->nirq <= navail) {
1819                         int leftover = navail - iaq->nirq;
1820
1821                         if (n10g > 0) {
1822                                 int target = max(nrxq10g, nofldrxq10g);
1823
1824                                 n = 1;
1825                                 while (n < target && leftover >= n10g) {
1826                                         leftover -= n10g;
1827                                         iaq->nirq += n10g;
1828                                         n++;
1829                                 }
1830                                 iaq->nrxq10g = min(n, nrxq10g);
1831 #ifdef TCP_OFFLOAD
1832                                 if (is_offload(sc))
1833                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1834 #endif
1835                         }
1836
1837                         if (n1g > 0) {
1838                                 int target = max(nrxq1g, nofldrxq1g);
1839
1840                                 n = 1;
1841                                 while (n < target && leftover >= n1g) {
1842                                         leftover -= n1g;
1843                                         iaq->nirq += n1g;
1844                                         n++;
1845                                 }
1846                                 iaq->nrxq1g = min(n, nrxq1g);
1847 #ifdef TCP_OFFLOAD
1848                                 if (is_offload(sc))
1849                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1850 #endif
1851                         }
1852
1853                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1854                                 goto allocate;
1855                 }
1856
1857                 /*
1858                  * Least desirable option: one interrupt vector for everything.
1859                  */
1860                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1861 #ifdef TCP_OFFLOAD
1862                 if (is_offload(sc))
1863                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1864 #endif
1865
1866 allocate:
1867                 navail = iaq->nirq;
1868                 rc = 0;
1869                 if (itype == INTR_MSIX)
1870                         rc = pci_alloc_msix(sc->dev, &navail);
1871                 else if (itype == INTR_MSI)
1872                         rc = pci_alloc_msi(sc->dev, &navail);
1873
1874                 if (rc == 0) {
1875                         if (navail == iaq->nirq)
1876                                 return (0);
1877
1878                         /*
1879                          * Didn't get the number requested.  Use whatever number
1880                          * the kernel is willing to allocate (it's in navail).
1881                          */
1882                         device_printf(sc->dev, "fewer vectors than requested, "
1883                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1884                             itype, iaq->nirq, navail);
1885                         pci_release_msi(sc->dev);
1886                         goto restart;
1887                 }
1888
1889                 device_printf(sc->dev,
1890                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1891                     itype, rc, iaq->nirq, navail);
1892         }
1893
1894         device_printf(sc->dev,
1895             "failed to find a usable interrupt type.  "
1896             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1897             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1898
1899         return (ENXIO);
1900 }
1901
/*
 * Pack a firmware version (major.minor.micro.build) into the single 32-bit
 * word layout used by struct fw_hdr, for the given chip prefix (T4 or T5).
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface version constant for the given chip prefix and interface name. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)

/*
 * Per-chip firmware module information: the name of the KLD that carries the
 * config files, the name of the firmware module itself, and the firmware
 * header (version + interface versions) the driver was compiled against.
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1949
1950 static struct fw_info *
1951 find_fw_info(int chip)
1952 {
1953         int i;
1954
1955         for (i = 0; i < nitems(fw_info); i++) {
1956                 if (fw_info[i].chip == chip)
1957                         return (&fw_info[i]);
1958         }
1959         return (NULL);
1960 }
1961
1962 /*
1963  * Is the given firmware API compatible with the one the driver was compiled
1964  * with?
1965  */
1966 static int
1967 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1968 {
1969
1970         /* short circuit if it's the exact same firmware version */
1971         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1972                 return (1);
1973
1974         /*
1975          * XXX: Is this too conservative?  Perhaps I should limit this to the
1976          * features that are supported in the driver.
1977          */
1978 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1979         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1980             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1981             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1982                 return (1);
1983 #undef SAME_INTF
1984
1985         return (0);
1986 }
1987
1988 /*
1989  * The firmware in the KLD is usable, but should it be installed?  This routine
1990  * explains itself in detail if it indicates the KLD firmware should be
1991  * installed.
1992  */
1993 static int
1994 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1995 {
1996         const char *reason;
1997
1998         if (!card_fw_usable) {
1999                 reason = "incompatible or unusable";
2000                 goto install;
2001         }
2002
2003         if (k > c) {
2004                 reason = "older than the version bundled with this driver";
2005                 goto install;
2006         }
2007
2008         if (t4_fw_install == 2 && k != c) {
2009                 reason = "different than the version bundled with this driver";
2010                 goto install;
2011         }
2012
2013         return (0);
2014
2015 install:
2016         if (t4_fw_install == 0) {
2017                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2018                     "but the driver is prohibited from installing a different "
2019                     "firmware on the card.\n",
2020                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2021                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2022
2023                 return (0);
2024         }
2025
2026         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2027             "installing firmware %u.%u.%u.%u on card.\n",
2028             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2029             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2030             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2031             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2032
2033         return (1);
2034 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	/* A non-negative return from t4_fw_hello is the master's PF#. */
	pf = rc;
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		/* Flash the KLD's firmware onto the card. */
		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		/* Nothing usable anywhere; report all three versions. */
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		/* On timeout/IO error the mailbox may be unusable; skip bye. */
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Someone else configured the chip; record that fact. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	/* Release the firmware images; the card keeps its own copy. */
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2215
/*
 * Build the mnemonic+parameter word used to query/set device-wide
 * (FW_PARAM_DEV) and per-PF/VF (FW_PARAM_PFVF) firmware parameters.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2222
/*
 * Partition chip resources for use between various PFs, VFs, etc.  Uploads the
 * chosen configuration file to the card (unless using the one already on its
 * flash) and asks the firmware to apply it via FW_CAPS_CONFIG_CMD.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name is e.g. "t4fw_cfg_<profile>". */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			/* Fall back to the default config, then to flash. */
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		/* Memory type and offset (in 64KB units) come back packed. */
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/* Copy the file through memory window 2, one window at a time. */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/* Tell the firmware where to find the config and pre-process it. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	/* Compare the firmware's checksum against the config file's own. */
	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	/* Write the (possibly reduced) capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2408
2409 /*
2410  * Retrieve parameters that are needed (or nice to have) very early.
2411  */
2412 static int
2413 get_params__pre_init(struct adapter *sc)
2414 {
2415         int rc;
2416         uint32_t param[2], val[2];
2417         struct fw_devlog_cmd cmd;
2418         struct devlog_params *dlog = &sc->params.devlog;
2419
2420         param[0] = FW_PARAM_DEV(PORTVEC);
2421         param[1] = FW_PARAM_DEV(CCLK);
2422         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2423         if (rc != 0) {
2424                 device_printf(sc->dev,
2425                     "failed to query parameters (pre_init): %d.\n", rc);
2426                 return (rc);
2427         }
2428
2429         sc->params.portvec = val[0];
2430         sc->params.nports = bitcount32(val[0]);
2431         sc->params.vpd.cclk = val[1];
2432
2433         /* Read device log parameters. */
2434         bzero(&cmd, sizeof(cmd));
2435         cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2436             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2437         cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2438         rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2439         if (rc != 0) {
2440                 device_printf(sc->dev,
2441                     "failed to get devlog parameters: %d.\n", rc);
2442                 bzero(dlog, sizeof (*dlog));
2443                 rc = 0; /* devlog isn't critical for device operation */
2444         } else {
2445                 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2446                 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2447                 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2448                 dlog->size = be32toh(cmd.memsize_devlog);
2449         }
2450
2451         return (rc);
2452 }
2453
2454 /*
2455  * Retrieve various parameters that are of interest to the driver.  The device
2456  * has been initialized by the firmware at this point.
2457  */
2458 static int
2459 get_params__post_init(struct adapter *sc)
2460 {
2461         int rc;
2462         uint32_t param[7], val[7];
2463         struct fw_caps_config_cmd caps;
2464
2465         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2466         param[1] = FW_PARAM_PFVF(EQ_START);
2467         param[2] = FW_PARAM_PFVF(FILTER_START);
2468         param[3] = FW_PARAM_PFVF(FILTER_END);
2469         param[4] = FW_PARAM_PFVF(L2T_START);
2470         param[5] = FW_PARAM_PFVF(L2T_END);
2471         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2472         if (rc != 0) {
2473                 device_printf(sc->dev,
2474                     "failed to query parameters (post_init): %d.\n", rc);
2475                 return (rc);
2476         }
2477
2478         sc->sge.iq_start = val[0];
2479         sc->sge.eq_start = val[1];
2480         sc->tids.ftid_base = val[2];
2481         sc->tids.nftids = val[3] - val[2] + 1;
2482         sc->params.ftid_min = val[2];
2483         sc->params.ftid_max = val[3];
2484         sc->vres.l2t.start = val[4];
2485         sc->vres.l2t.size = val[5] - val[4] + 1;
2486         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2487             ("%s: L2 table size (%u) larger than expected (%u)",
2488             __func__, sc->vres.l2t.size, L2T_SIZE));
2489
2490         /* get capabilites */
2491         bzero(&caps, sizeof(caps));
2492         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2493             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2494         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2495         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2496         if (rc != 0) {
2497                 device_printf(sc->dev,
2498                     "failed to get card capabilities: %d.\n", rc);
2499                 return (rc);
2500         }
2501
2502 #define READ_CAPS(x) do { \
2503         sc->x = htobe16(caps.x); \
2504 } while (0)
2505         READ_CAPS(linkcaps);
2506         READ_CAPS(niccaps);
2507         READ_CAPS(toecaps);
2508         READ_CAPS(rdmacaps);
2509         READ_CAPS(iscsicaps);
2510         READ_CAPS(fcoecaps);
2511
2512         if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2513                 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2514                 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2515                 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2516                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2517                 if (rc != 0) {
2518                         device_printf(sc->dev,
2519                             "failed to query NIC parameters: %d.\n", rc);
2520                         return (rc);
2521                 }
2522                 sc->tids.etid_base = val[0];
2523                 sc->params.etid_min = val[0];
2524                 sc->tids.netids = val[1] - val[0] + 1;
2525                 sc->params.netids = sc->tids.netids;
2526                 sc->params.eo_wr_cred = val[2];
2527                 sc->params.ethoffload = 1;
2528         }
2529
2530         if (sc->toecaps) {
2531                 /* query offload-related parameters */
2532                 param[0] = FW_PARAM_DEV(NTID);
2533                 param[1] = FW_PARAM_PFVF(SERVER_START);
2534                 param[2] = FW_PARAM_PFVF(SERVER_END);
2535                 param[3] = FW_PARAM_PFVF(TDDP_START);
2536                 param[4] = FW_PARAM_PFVF(TDDP_END);
2537                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2538                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2539                 if (rc != 0) {
2540                         device_printf(sc->dev,
2541                             "failed to query TOE parameters: %d.\n", rc);
2542                         return (rc);
2543                 }
2544                 sc->tids.ntids = val[0];
2545                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2546                 sc->tids.stid_base = val[1];
2547                 sc->tids.nstids = val[2] - val[1] + 1;
2548                 sc->vres.ddp.start = val[3];
2549                 sc->vres.ddp.size = val[4] - val[3] + 1;
2550                 sc->params.ofldq_wr_cred = val[5];
2551                 sc->params.offload = 1;
2552         }
2553         if (sc->rdmacaps) {
2554                 param[0] = FW_PARAM_PFVF(STAG_START);
2555                 param[1] = FW_PARAM_PFVF(STAG_END);
2556                 param[2] = FW_PARAM_PFVF(RQ_START);
2557                 param[3] = FW_PARAM_PFVF(RQ_END);
2558                 param[4] = FW_PARAM_PFVF(PBL_START);
2559                 param[5] = FW_PARAM_PFVF(PBL_END);
2560                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2561                 if (rc != 0) {
2562                         device_printf(sc->dev,
2563                             "failed to query RDMA parameters(1): %d.\n", rc);
2564                         return (rc);
2565                 }
2566                 sc->vres.stag.start = val[0];
2567                 sc->vres.stag.size = val[1] - val[0] + 1;
2568                 sc->vres.rq.start = val[2];
2569                 sc->vres.rq.size = val[3] - val[2] + 1;
2570                 sc->vres.pbl.start = val[4];
2571                 sc->vres.pbl.size = val[5] - val[4] + 1;
2572
2573                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2574                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2575                 param[2] = FW_PARAM_PFVF(CQ_START);
2576                 param[3] = FW_PARAM_PFVF(CQ_END);
2577                 param[4] = FW_PARAM_PFVF(OCQ_START);
2578                 param[5] = FW_PARAM_PFVF(OCQ_END);
2579                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2580                 if (rc != 0) {
2581                         device_printf(sc->dev,
2582                             "failed to query RDMA parameters(2): %d.\n", rc);
2583                         return (rc);
2584                 }
2585                 sc->vres.qp.start = val[0];
2586                 sc->vres.qp.size = val[1] - val[0] + 1;
2587                 sc->vres.cq.start = val[2];
2588                 sc->vres.cq.size = val[3] - val[2] + 1;
2589                 sc->vres.ocq.start = val[4];
2590                 sc->vres.ocq.size = val[5] - val[4] + 1;
2591         }
2592         if (sc->iscsicaps) {
2593                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2594                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2595                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2596                 if (rc != 0) {
2597                         device_printf(sc->dev,
2598                             "failed to query iSCSI parameters: %d.\n", rc);
2599                         return (rc);
2600                 }
2601                 sc->vres.iscsi.start = val[0];
2602                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2603         }
2604
2605         /*
2606          * We've got the params we wanted to query via the firmware.  Now grab
2607          * some others directly from the chip.
2608          */
2609         rc = t4_read_chip_settings(sc);
2610
2611         return (rc);
2612 }
2613
2614 static int
2615 set_params__post_init(struct adapter *sc)
2616 {
2617         uint32_t param, val;
2618
2619         /* ask for encapsulated CPLs */
2620         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2621         val = 1;
2622         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2623
2624         return (0);
2625 }
2626
2627 #undef FW_PARAM_PFVF
2628 #undef FW_PARAM_DEV
2629
2630 static void
2631 t4_set_desc(struct adapter *sc)
2632 {
2633         char buf[128];
2634         struct adapter_params *p = &sc->params;
2635
2636         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2637             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2638             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2639
2640         device_set_desc_copy(sc->dev, buf);
2641 }
2642
/*
 * Rebuild the port's ifmedia list from its current port_type and mod_type.
 * The (port_type << 8 | mod_type) pair is stored in each entry's data field.
 * Called with the port unlocked; takes PORT_LOCK for the duration.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	/* Start from a clean slate; previous entries may be stale. */
	ifmedia_removeall(media);

	/* All supported media are ethernet, full duplex. */
	m = IFM_ETHER | IFM_FDX;
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		/* BaseT ports also support 1G/100M and autonegotiation. */
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* 10G pluggable port: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* No module inserted: report no media, no duplex. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_QSFP:
		/* 40G QSFP port: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* No module inserted: report no media, no duplex. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2765
2766 #define FW_MAC_EXACT_CHUNK      7
2767
2768 /*
2769  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2770  * indicates which parameters should be programmed (the rest are left alone).
2771  */
2772 static int
2773 update_mac_settings(struct port_info *pi, int flags)
2774 {
2775         int rc;
2776         struct ifnet *ifp = pi->ifp;
2777         struct adapter *sc = pi->adapter;
2778         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2779
2780         ASSERT_SYNCHRONIZED_OP(sc);
2781         KASSERT(flags, ("%s: not told what to update.", __func__));
2782
2783         if (flags & XGMAC_MTU)
2784                 mtu = ifp->if_mtu;
2785
2786         if (flags & XGMAC_PROMISC)
2787                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2788
2789         if (flags & XGMAC_ALLMULTI)
2790                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2791
2792         if (flags & XGMAC_VLANEX)
2793                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2794
2795         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2796             vlanex, false);
2797         if (rc) {
2798                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2799                 return (rc);
2800         }
2801
2802         if (flags & XGMAC_UCADDR) {
2803                 uint8_t ucaddr[ETHER_ADDR_LEN];
2804
2805                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2806                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2807                     ucaddr, true, true);
2808                 if (rc < 0) {
2809                         rc = -rc;
2810                         if_printf(ifp, "change_mac failed: %d\n", rc);
2811                         return (rc);
2812                 } else {
2813                         pi->xact_addr_filt = rc;
2814                         rc = 0;
2815                 }
2816         }
2817
2818         if (flags & XGMAC_MCADDRS) {
2819                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2820                 int del = 1;
2821                 uint64_t hash = 0;
2822                 struct ifmultiaddr *ifma;
2823                 int i = 0, j;
2824
2825                 if_maddr_rlock(ifp);
2826                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2827                         if (ifma->ifma_addr->sa_family != AF_LINK)
2828                                 continue;
2829                         mcaddr[i++] =
2830                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2831
2832                         if (i == FW_MAC_EXACT_CHUNK) {
2833                                 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2834                                     del, i, mcaddr, NULL, &hash, 0);
2835                                 if (rc < 0) {
2836                                         rc = -rc;
2837                                         for (j = 0; j < i; j++) {
2838                                                 if_printf(ifp,
2839                                                     "failed to add mc address"
2840                                                     " %02x:%02x:%02x:"
2841                                                     "%02x:%02x:%02x rc=%d\n",
2842                                                     mcaddr[j][0], mcaddr[j][1],
2843                                                     mcaddr[j][2], mcaddr[j][3],
2844                                                     mcaddr[j][4], mcaddr[j][5],
2845                                                     rc);
2846                                         }
2847                                         goto mcfail;
2848                                 }
2849                                 del = 0;
2850                                 i = 0;
2851                         }
2852                 }
2853                 if (i > 0) {
2854                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2855                             del, i, mcaddr, NULL, &hash, 0);
2856                         if (rc < 0) {
2857                                 rc = -rc;
2858                                 for (j = 0; j < i; j++) {
2859                                         if_printf(ifp,
2860                                             "failed to add mc address"
2861                                             " %02x:%02x:%02x:"
2862                                             "%02x:%02x:%02x rc=%d\n",
2863                                             mcaddr[j][0], mcaddr[j][1],
2864                                             mcaddr[j][2], mcaddr[j][3],
2865                                             mcaddr[j][4], mcaddr[j][5],
2866                                             rc);
2867                                 }
2868                                 goto mcfail;
2869                         }
2870                 }
2871
2872                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2873                 if (rc != 0)
2874                         if_printf(ifp, "failed to set mc address hash: %d", rc);
2875 mcfail:
2876                 if_maddr_runlock(ifp);
2877         }
2878
2879         return (rc);
2880 }
2881
2882 int
2883 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2884     char *wmesg)
2885 {
2886         int rc, pri;
2887
2888 #ifdef WITNESS
2889         /* the caller thinks it's ok to sleep, but is it really? */
2890         if (flags & SLEEP_OK)
2891                 pause("t4slptst", 1);
2892 #endif
2893
2894         if (INTR_OK)
2895                 pri = PCATCH;
2896         else
2897                 pri = 0;
2898
2899         ADAPTER_LOCK(sc);
2900         for (;;) {
2901
2902                 if (pi && IS_DOOMED(pi)) {
2903                         rc = ENXIO;
2904                         goto done;
2905                 }
2906
2907                 if (!IS_BUSY(sc)) {
2908                         rc = 0;
2909                         break;
2910                 }
2911
2912                 if (!(flags & SLEEP_OK)) {
2913                         rc = EBUSY;
2914                         goto done;
2915                 }
2916
2917                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2918                         rc = EINTR;
2919                         goto done;
2920                 }
2921         }
2922
2923         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2924         SET_BUSY(sc);
2925 #ifdef INVARIANTS
2926         sc->last_op = wmesg;
2927         sc->last_op_thr = curthread;
2928 #endif
2929
2930 done:
2931         if (!(flags & HOLD_LOCK) || rc)
2932                 ADAPTER_UNLOCK(sc);
2933
2934         return (rc);
2935 }
2936
2937 void
2938 end_synchronized_op(struct adapter *sc, int flags)
2939 {
2940
2941         if (flags & LOCK_HELD)
2942                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2943         else
2944                 ADAPTER_LOCK(sc);
2945
2946         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2947         CLR_BUSY(sc);
2948         wakeup(&sc->flags);
2949         ADAPTER_UNLOCK(sc);
2950 }
2951
/*
 * Bring a port up: complete any pending adapter/port initialization, apply
 * MAC settings, start the link, and enable the virtual interface.  Must be
 * called from within a synchronized operation.  On any failure the port is
 * torn back down via cxgbe_uninit_synchronized.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* One-time adapter-wide initialization, if not done yet. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	/* One-time per-port initialization, if not done yet. */
	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Kick off the periodic (1 Hz) port tick. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}
3004
3005 /*
3006  * Idempotent.
3007  */
/*
 * Bring a port down.  Must be called from within a synchronized operation.
 * Safe to call on a port that is already down.
 */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Record the link as down and notify the OS. */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->linkdnrc = -1;
	t4_os_link_changed(sc, pi->port_id, 0, -1);

	return (0);
}
3042
3043 /*
3044  * It is ok for this function to fail midway and return right away.  t4_detach
3045  * will walk the entire sc->irq list and clean up whatever is valid.
3046  */
/*
 * Allocate and wire up all interrupt handlers for the adapter: either a
 * single "all" handler, or (with multiple vectors) one error handler, one
 * firmware-event handler, and one handler per rx queue taking direct
 * interrupts.
 */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* Resource IDs start at 0 for INTx, 1 for MSI/MSI-X. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (sc->intr_count == 1) {
		KASSERT(!(sc->flags & INTR_DIRECT),
		    ("%s: single interrupt && INTR_DIRECT?", __func__));

		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
		if (rc != 0)
			return (rc);
	} else {
		/* Multiple interrupts. */
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

		/* The first one is always error intr */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/* The second one is always the firmware event queue */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
		    "evt");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/*
		 * Note that if INTR_DIRECT is not set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queueus will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, p) {
			pi = sc->port[p];

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			/* One vector per NIC rx queue, named "<port>.<q>". */
			rxq = &sc->sge.rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT))
				continue;
ofld_queues:
			/* One vector per TOE rx queue, named "<port>,<q>". */
			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
#endif
		}
	}

	return (0);
}
3146
3147 static int
3148 adapter_full_init(struct adapter *sc)
3149 {
3150         int rc, i;
3151
3152         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3153         KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3154             ("%s: FULL_INIT_DONE already", __func__));
3155
3156         /*
3157          * queues that belong to the adapter (not any particular port).
3158          */
3159         rc = t4_setup_adapter_queues(sc);
3160         if (rc != 0)
3161                 goto done;
3162
3163         for (i = 0; i < nitems(sc->tq); i++) {
3164                 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3165                     taskqueue_thread_enqueue, &sc->tq[i]);
3166                 if (sc->tq[i] == NULL) {
3167                         device_printf(sc->dev,
3168                             "failed to allocate task queue %d\n", i);
3169                         rc = ENOMEM;
3170                         goto done;
3171                 }
3172                 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3173                     device_get_nameunit(sc->dev), i);
3174         }
3175
3176         t4_intr_enable(sc);
3177         sc->flags |= FULL_INIT_DONE;
3178 done:
3179         if (rc != 0)
3180                 adapter_full_uninit(sc);
3181
3182         return (rc);
3183 }
3184
3185 static int
3186 adapter_full_uninit(struct adapter *sc)
3187 {
3188         int i;
3189
3190         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3191
3192         t4_teardown_adapter_queues(sc);
3193
3194         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3195                 taskqueue_free(sc->tq[i]);
3196                 sc->tq[i] = NULL;
3197         }
3198
3199         sc->flags &= ~FULL_INIT_DONE;
3200
3201         return (0);
3202 }
3203
3204 static int
3205 port_full_init(struct port_info *pi)
3206 {
3207         struct adapter *sc = pi->adapter;
3208         struct ifnet *ifp = pi->ifp;
3209         uint16_t *rss;
3210         struct sge_rxq *rxq;
3211         int rc, i, j;
3212
3213         ASSERT_SYNCHRONIZED_OP(sc);
3214         KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3215             ("%s: PORT_INIT_DONE already", __func__));
3216
3217         sysctl_ctx_init(&pi->ctx);
3218         pi->flags |= PORT_SYSCTL_CTX;
3219
3220         /*
3221          * Allocate tx/rx/fl queues for this port.
3222          */
3223         rc = t4_setup_port_queues(pi);
3224         if (rc != 0)
3225                 goto done;      /* error message displayed already */
3226
3227         /*
3228          * Setup RSS for this port.  Save a copy of the RSS table for later use.
3229          */
3230         rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3231         for (i = 0; i < pi->rss_size;) {
3232                 for_each_rxq(pi, j, rxq) {
3233                         rss[i++] = rxq->iq.abs_id;
3234                         if (i == pi->rss_size)
3235                                 break;
3236                 }
3237         }
3238
3239         rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3240             pi->rss_size);
3241         if (rc != 0) {
3242                 if_printf(ifp, "rss_config failed: %d\n", rc);
3243                 goto done;
3244         }
3245
3246         pi->rss = rss;
3247         pi->flags |= PORT_INIT_DONE;
3248 done:
3249         if (rc != 0)
3250                 port_full_uninit(pi);
3251
3252         return (rc);
3253 }
3254
3255 /*
3256  * Idempotent.
3257  */
3258 static int
3259 port_full_uninit(struct port_info *pi)
3260 {
3261         struct adapter *sc = pi->adapter;
3262         int i;
3263         struct sge_rxq *rxq;
3264         struct sge_txq *txq;
3265 #ifdef TCP_OFFLOAD
3266         struct sge_ofld_rxq *ofld_rxq;
3267         struct sge_wrq *ofld_txq;
3268 #endif
3269
3270         if (pi->flags & PORT_INIT_DONE) {
3271
3272                 /* Need to quiesce queues.  XXX: ctrl queues? */
3273
3274                 for_each_txq(pi, i, txq) {
3275                         quiesce_eq(sc, &txq->eq);
3276                 }
3277
3278 #ifdef TCP_OFFLOAD
3279                 for_each_ofld_txq(pi, i, ofld_txq) {
3280                         quiesce_eq(sc, &ofld_txq->eq);
3281                 }
3282 #endif
3283
3284                 for_each_rxq(pi, i, rxq) {
3285                         quiesce_iq(sc, &rxq->iq);
3286                         quiesce_fl(sc, &rxq->fl);
3287                 }
3288
3289 #ifdef TCP_OFFLOAD
3290                 for_each_ofld_rxq(pi, i, ofld_rxq) {
3291                         quiesce_iq(sc, &ofld_rxq->iq);
3292                         quiesce_fl(sc, &ofld_rxq->fl);
3293                 }
3294 #endif
3295                 free(pi->rss, M_CXGBE);
3296         }
3297
3298         t4_teardown_port_queues(pi);
3299         pi->flags &= ~PORT_INIT_DONE;
3300
3301         return (0);
3302 }
3303
/*
 * Quiesce an egress queue: mark it doomed, wait out any pending credit
 * flush, then drain its tx callout and tx task so nothing references it
 * afterwards.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
        EQ_LOCK(eq);
        eq->flags |= EQ_DOOMED;

        /*
         * Wait for the response to a credit flush if one's
         * pending.
         */
        while (eq->flags & EQ_CRFLUSHED)
                mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
        EQ_UNLOCK(eq);

        /*
         * The callout/task may already be running and about to touch the eq;
         * the extra pause papers over that window (acknowledged as iffy).
         */
        callout_drain(&eq->tx_callout); /* XXX: iffy */
        pause("callout", 10);           /* Still iffy */

        taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3323
3324 static void
3325 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3326 {
3327         (void) sc;      /* unused */
3328
3329         /* Synchronize with the interrupt handler */
3330         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3331                 pause("iqfree", 1);
3332 }
3333
/*
 * Quiesce a freelist: mark it doomed (sfl_lock first, which appears to
 * serialize against the starving-freelist callout) and drain that callout.
 * By then the fl must no longer be flagged as starving.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        mtx_unlock(&sc->sfl_lock);

        callout_drain(&sc->sfl_callout);
        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
3347
3348 static int
3349 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3350     driver_intr_t *handler, void *arg, char *name)
3351 {
3352         int rc;
3353
3354         irq->rid = rid;
3355         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3356             RF_SHAREABLE | RF_ACTIVE);
3357         if (irq->res == NULL) {
3358                 device_printf(sc->dev,
3359                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3360                 return (ENOMEM);
3361         }
3362
3363         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3364             NULL, handler, arg, &irq->tag);
3365         if (rc != 0) {
3366                 device_printf(sc->dev,
3367                     "failed to setup interrupt for rid %d, name %s: %d\n",
3368                     rid, name, rc);
3369         } else if (name)
3370                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3371
3372         return (rc);
3373 }
3374
3375 static int
3376 t4_free_irq(struct adapter *sc, struct irq *irq)
3377 {
3378         if (irq->tag)
3379                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3380         if (irq->res)
3381                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3382
3383         bzero(irq, sizeof(*irq));
3384
3385         return (0);
3386 }
3387
3388 static void
3389 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3390     unsigned int end)
3391 {
3392         uint32_t *p = (uint32_t *)(buf + start);
3393
3394         for ( ; start <= end; start += sizeof(uint32_t))
3395                 *p++ = t4_read_reg(sc, start);
3396 }
3397
/*
 * Dump the chip's registers into 'buf'.  Only addresses within the
 * chip-appropriate (start, end) ranges below are read; bytes of 'buf'
 * outside those ranges are not written by this function.  regs->version
 * encodes the chip id and revision.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i, n;
        const unsigned int *reg_ranges;
        /* Readable register ranges for T4 chips, as inclusive (start, end) pairs. */
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11110,
                0x11118, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };
        /* Readable register ranges for T5 chips, same (start, end) layout. */
        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b94, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x11110,
                0x11118, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e408, 0x1e40c,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e690,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e808, 0x1e80c,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea90,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec08, 0x1ec0c,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee90,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f008, 0x1f00c,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f290,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f408, 0x1f40c,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f690,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f808, 0x1f80c,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa90,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc08, 0x1fc0c,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe90,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30030,
                0x30100, 0x30144,
                0x30190, 0x301d0,
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
                0x30a00, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
                0x30d00, 0x30d00,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
                0x31600, 0x3160c,
                0x31a00, 0x31a1c,
                0x31e00, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
                0x32200, 0x32200,
                0x32208, 0x32240,
                0x32248, 0x32280,
                0x32288, 0x322c0,
                0x322c8, 0x322fc,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
                0x33000, 0x33048,
                0x33060, 0x3309c,
                0x330f0, 0x33148,
                0x33160, 0x3319c,
                0x331f0, 0x332e4,
                0x332f8, 0x333e4,
                0x333f8, 0x33448,
                0x33460, 0x3349c,
                0x334f0, 0x33548,
                0x33560, 0x3359c,
                0x335f0, 0x336e4,
                0x336f8, 0x337e4,
                0x337f8, 0x337fc,
                0x33814, 0x33814,
                0x3382c, 0x3382c,
                0x33880, 0x3388c,
                0x338e8, 0x338ec,
                0x33900, 0x33948,
                0x33960, 0x3399c,
                0x339f0, 0x33ae4,
                0x33af8, 0x33b10,
                0x33b28, 0x33b28,
                0x33b3c, 0x33b50,
                0x33bf0, 0x33c10,
                0x33c28, 0x33c28,
                0x33c3c, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34030,
                0x34100, 0x34144,
                0x34190, 0x341d0,
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
                0x34a00, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
                0x34d00, 0x34d00,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
                0x35600, 0x3560c,
                0x35a00, 0x35a1c,
                0x35e00, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
                0x36200, 0x36200,
                0x36208, 0x36240,
                0x36248, 0x36280,
                0x36288, 0x362c0,
                0x362c8, 0x362fc,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
                0x36b00, 0x36b70,
                0x37000, 0x37048,
                0x37060, 0x3709c,
                0x370f0, 0x37148,
                0x37160, 0x3719c,
                0x371f0, 0x372e4,
                0x372f8, 0x373e4,
                0x373f8, 0x37448,
                0x37460, 0x3749c,
                0x374f0, 0x37548,
                0x37560, 0x3759c,
                0x375f0, 0x376e4,
                0x376f8, 0x377e4,
                0x377f8, 0x377fc,
                0x37814, 0x37814,
                0x3782c, 0x3782c,
                0x37880, 0x3788c,
                0x378e8, 0x378ec,
                0x37900, 0x37948,
                0x37960, 0x3799c,
                0x379f0, 0x37ae4,
                0x37af8, 0x37b10,
                0x37b28, 0x37b28,
                0x37b3c, 0x37b50,
                0x37bf0, 0x37c10,
                0x37c28, 0x37c28,
                0x37c3c, 0x37c50,
                0x37cf0, 0x37cfc,
                0x38000, 0x38030,
                0x38100, 0x38144,
                0x38190, 0x381d0,
                0x38200, 0x38318,
                0x38400, 0x3852c,
                0x38540, 0x3861c,
                0x38800, 0x38834,
                0x388c0, 0x38908,
                0x38910, 0x389ac,
                0x38a00, 0x38a2c,
                0x38a44, 0x38a50,
                0x38a74, 0x38c24,
                0x38d00, 0x38d00,
                0x38d08, 0x38d14,
                0x38d1c, 0x38d20,
                0x38d3c, 0x38d50,
                0x39200, 0x3920c,
                0x39220, 0x39220,
                0x39240, 0x39240,
                0x39600, 0x3960c,
                0x39a00, 0x39a1c,
                0x39e00, 0x39e20,
                0x39e38, 0x39e3c,
                0x39e80, 0x39e80,
                0x39e88, 0x39ea8,
                0x39eb0, 0x39eb4,
                0x39ec8, 0x39ed4,
                0x39fb8, 0x3a004,
                0x3a200, 0x3a200,
                0x3a208, 0x3a240,
                0x3a248, 0x3a280,
                0x3a288, 0x3a2c0,
                0x3a2c8, 0x3a2fc,
                0x3a600, 0x3a630,
                0x3aa00, 0x3aabc,
                0x3ab00, 0x3ab70,
                0x3b000, 0x3b048,
                0x3b060, 0x3b09c,
                0x3b0f0, 0x3b148,
                0x3b160, 0x3b19c,
                0x3b1f0, 0x3b2e4,
                0x3b2f8, 0x3b3e4,
                0x3b3f8, 0x3b448,
                0x3b460, 0x3b49c,
                0x3b4f0, 0x3b548,
                0x3b560, 0x3b59c,
                0x3b5f0, 0x3b6e4,
                0x3b6f8, 0x3b7e4,
                0x3b7f8, 0x3b7fc,
                0x3b814, 0x3b814,
                0x3b82c, 0x3b82c,
                0x3b880, 0x3b88c,
                0x3b8e8, 0x3b8ec,
                0x3b900, 0x3b948,
                0x3b960, 0x3b99c,
                0x3b9f0, 0x3bae4,
                0x3baf8, 0x3bb10,
                0x3bb28, 0x3bb28,
                0x3bb3c, 0x3bb50,
                0x3bbf0, 0x3bc10,
                0x3bc28, 0x3bc28,
                0x3bc3c, 0x3bc50,
                0x3bcf0, 0x3bcfc,
                0x3c000, 0x3c030,
                0x3c100, 0x3c144,
                0x3c190, 0x3c1d0,
                0x3c200, 0x3c318,
                0x3c400, 0x3c52c,
                0x3c540, 0x3c61c,
                0x3c800, 0x3c834,
                0x3c8c0, 0x3c908,
                0x3c910, 0x3c9ac,
                0x3ca00, 0x3ca2c,
                0x3ca44, 0x3ca50,
                0x3ca74, 0x3cc24,
                0x3cd00, 0x3cd00,
                0x3cd08, 0x3cd14,
                0x3cd1c, 0x3cd20,
                0x3cd3c, 0x3cd50,
                0x3d200, 0x3d20c,
                0x3d220, 0x3d220,
                0x3d240, 0x3d240,
                0x3d600, 0x3d60c,
                0x3da00, 0x3da1c,
                0x3de00, 0x3de20,
                0x3de38, 0x3de3c,
                0x3de80, 0x3de80,
                0x3de88, 0x3dea8,
                0x3deb0, 0x3deb4,
                0x3dec8, 0x3ded4,
                0x3dfb8, 0x3e004,
                0x3e200, 0x3e200,
                0x3e208, 0x3e240,
                0x3e248, 0x3e280,
                0x3e288, 0x3e2c0,
                0x3e2c8, 0x3e2fc,
                0x3e600, 0x3e630,
                0x3ea00, 0x3eabc,
                0x3eb00, 0x3eb70,
                0x3f000, 0x3f048,
                0x3f060, 0x3f09c,
                0x3f0f0, 0x3f148,
                0x3f160, 0x3f19c,
                0x3f1f0, 0x3f2e4,
                0x3f2f8, 0x3f3e4,
                0x3f3f8, 0x3f448,
                0x3f460, 0x3f49c,
                0x3f4f0, 0x3f548,
                0x3f560, 0x3f59c,
                0x3f5f0, 0x3f6e4,
                0x3f6f8, 0x3f7e4,
                0x3f7f8, 0x3f7fc,
                0x3f814, 0x3f814,
                0x3f82c, 0x3f82c,
                0x3f880, 0x3f88c,
                0x3f8e8, 0x3f8ec,
                0x3f900, 0x3f948,
                0x3f960, 0x3f99c,
                0x3f9f0, 0x3fae4,
                0x3faf8, 0x3fb10,
                0x3fb28, 0x3fb28,
                0x3fb3c, 0x3fb50,
                0x3fbf0, 0x3fc10,
                0x3fc28, 0x3fc28,
                0x3fc3c, 0x3fc50,
                0x3fcf0, 0x3fcfc,
                0x40000, 0x4000c,
                0x40040, 0x40068,
                0x4007c, 0x40144,
                0x40180, 0x4018c,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
                0x403f8, 0x403fc,
                0x41304, 0x413c4,
                0x41400, 0x4141c,
                0x41480, 0x414d0,
                0x44000, 0x44078,
                0x440c0, 0x44278,
                0x442c0, 0x44478,
                0x444c0, 0x44678,
                0x446c0, 0x44878,
                0x448c0, 0x449fc,
                0x45000, 0x45068,
                0x45080, 0x45084,
                0x450a0, 0x450b0,
                0x45200, 0x45268,
                0x45280, 0x45284,
                0x452a0, 0x452b0,
                0x460c0, 0x460e4,
                0x47000, 0x4708c,
                0x47200, 0x47250,
                0x47400, 0x47420,
                0x47600, 0x47618,
                0x47800, 0x47814,
                0x48000, 0x4800c,
                0x48040, 0x48068,
                0x4807c, 0x48144,
                0x48180, 0x4818c,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
                0x483f8, 0x483fc,
                0x49304, 0x493c4,
                0x49400, 0x4941c,
                0x49480, 0x494d0,
                0x4c000, 0x4c078,
                0x4c0c0, 0x4c278,
                0x4c2c0, 0x4c478,
                0x4c4c0, 0x4c678,
                0x4c6c0, 0x4c878,
                0x4c8c0, 0x4c9fc,
                0x4d000, 0x4d068,
                0x4d080, 0x4d084,
                0x4d0a0, 0x4d0b0,
                0x4d200, 0x4d268,
                0x4d280, 0x4d284,
                0x4d2a0, 0x4d2b0,
                0x4e0c0, 0x4e0e4,
                0x4f000, 0x4f08c,
                0x4f200, 0x4f250,
                0x4f400, 0x4f420,
                0x4f600, 0x4f618,
                0x4f800, 0x4f814,
                0x50000, 0x500cc,
                0x50400, 0x50400,
                0x50800, 0x508cc,
                0x50c00, 0x50c00,
                0x51000, 0x5101c,
                0x51300, 0x51308,
        };

        /* Pick the table that matches the chip generation. */
        if (is_t4(sc)) {
                reg_ranges = &t4_reg_ranges[0];
                n = nitems(t4_reg_ranges);
        } else {
                reg_ranges = &t5_reg_ranges[0];
                n = nitems(t5_reg_ranges);
        }

        /* Chip id in the low bits, revision shifted above it. */
        regs->version = chip_id(sc) | chip_rev(sc) << 10;
        /* Entries are pairs, hence the stride of 2. */
        for (i = 0; i < n; i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4075
/*
 * Periodic callout that refreshes the ifnet statistics for a port from the
 * hardware MAC counters and reschedules itself (every hz ticks).  Stops
 * rescheduling once the interface is no longer marked running.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	/* Pull the latest hardware counters for this port into pi->stats. */
	t4_get_port_stats(sc, pi->tx_chan, s);

	/*
	 * Pause frames are counted by the MAC but are not payload traffic, so
	 * they are subtracted from the frame/octet/mcast totals reported to
	 * the stack (a pause frame is assumed to be 64 octets on the wire).
	 */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	/* Input drops: per-buffer-group overflows and truncations ... */
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;
	/* ... plus TP tunnel congestion drops on each rx channel in use. */
	for (i = 0; i < 4; i++) {
		if (pi->rx_chan_map & (1 << i)) {
			uint32_t v;

			/*
			 * XXX: indirect reads from the same ADDR/DATA pair can
			 * race with each other.
			 */
			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
			ifp->if_iqdrops += v;
		}
	}

	/* Output drops: MAC drops plus software drops at each tx buf_ring. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	/* Run again in ~1s; the callout was armed by whoever started us. */
	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
4129
4130 static void
4131 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4132 {
4133         struct ifnet *vlan;
4134
4135         if (arg != ifp || ifp->if_type != IFT_ETHER)
4136                 return;
4137
4138         vlan = VLAN_DEVAT(ifp, vid);
4139         VLAN_SETCOOKIE(vlan, ifp);
4140 }
4141
/*
 * Default handler for CPL messages whose opcode has no registered handler.
 * Panics under INVARIANTS; otherwise logs the unexpected opcode, frees the
 * payload mbuf, and returns EDOOFUS.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);
#endif
	return (EDOOFUS);
}
4156
4157 int
4158 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4159 {
4160         uintptr_t *loc, new;
4161
4162         if (opcode >= nitems(sc->cpl_handler))
4163                 return (EINVAL);
4164
4165         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4166         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4167         atomic_store_rel_ptr(loc, new);
4168
4169         return (0);
4170 }
4171
/*
 * Default handler for async notifications when no handler is registered.
 * Panics under INVARIANTS; otherwise logs the event and returns EDOOFUS.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4184
4185 int
4186 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4187 {
4188         uintptr_t *loc, new;
4189
4190         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4191         loc = (uintptr_t *) &sc->an_handler;
4192         atomic_store_rel_ptr(loc, new);
4193
4194         return (0);
4195 }
4196
/*
 * Default handler for firmware messages of a type with no registered
 * handler.  Recovers the enclosing CPL from the payload pointer only to
 * report the message type.  Panics under INVARIANTS; otherwise logs the
 * type and returns EDOOFUS.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4210
4211 int
4212 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4213 {
4214         uintptr_t *loc, new;
4215
4216         if (type >= nitems(sc->fw_msg_handler))
4217                 return (EINVAL);
4218
4219         /*
4220          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4221          * handler dispatch table.  Reject any attempt to install a handler for
4222          * this subtype.
4223          */
4224         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4225                 return (EINVAL);
4226
4227         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4228         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4229         atomic_store_rel_ptr(loc, new);
4230
4231         return (0);
4232 }
4233
4234 static int
4235 t4_sysctls(struct adapter *sc)
4236 {
4237         struct sysctl_ctx_list *ctx;
4238         struct sysctl_oid *oid;
4239         struct sysctl_oid_list *children, *c0;
4240         static char *caps[] = {
4241                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4242                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4243                     "\6HASHFILTER\7ETHOFLD",
4244                 "\20\1TOE",                             /* caps[2] toecaps */
4245                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4246                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4247                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4248                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4249                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4250                     "\4PO_INITIAOR\5PO_TARGET"
4251         };
4252         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4253
4254         ctx = device_get_sysctl_ctx(sc->dev);
4255
4256         /*
4257          * dev.t4nex.X.
4258          */
4259         oid = device_get_sysctl_tree(sc->dev);
4260         c0 = children = SYSCTL_CHILDREN(oid);
4261
4262         sc->sc_do_rxcopy = 1;
4263         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4264             &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4265
4266         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4267             sc->params.nports, "# of ports");
4268
4269         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4270             NULL, chip_rev(sc), "chip hardware revision");
4271
4272         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4273             CTLFLAG_RD, sc->fw_version, 0, "firmware version");
4274
4275         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4276             CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
4277
4278         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4279             sc->cfcsum, "config file checksum");
4280
4281         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4282             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4283             sysctl_bitfield, "A", "available doorbells");
4284
4285         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4286             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4287             sysctl_bitfield, "A", "available link capabilities");
4288
4289         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4290             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4291             sysctl_bitfield, "A", "available NIC capabilities");
4292
4293         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4294             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4295             sysctl_bitfield, "A", "available TCP offload capabilities");
4296
4297         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4298             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4299             sysctl_bitfield, "A", "available RDMA capabilities");
4300
4301         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4302             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4303             sysctl_bitfield, "A", "available iSCSI capabilities");
4304
4305         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4306             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4307             sysctl_bitfield, "A", "available FCoE capabilities");
4308
4309         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4310             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4311
4312         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4313             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4314             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4315             "interrupt holdoff timer values (us)");
4316
4317         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4318             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4319             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4320             "interrupt holdoff packet counter values");
4321
4322         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4323             NULL, sc->tids.nftids, "number of filters");
4324
4325         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4326             CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4327             "chip temperature (in Celsius)");
4328
4329         t4_sge_sysctls(sc, ctx, children);
4330
4331 #ifdef SBUF_DRAIN
4332         /*
4333          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4334          */
4335         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4336             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4337             "logs and miscellaneous information");
4338         children = SYSCTL_CHILDREN(oid);
4339
4340         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4341             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4342             sysctl_cctrl, "A", "congestion control");
4343
4344         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4345             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4346             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4347
4348         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4349             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4350             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4351
4352         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4353             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4354             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4355
4356         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4357             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4358             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4359
4360         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4361             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4362             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4363
4364         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4365             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4366             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4367
4368         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4369             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4370             sysctl_cim_la, "A", "CIM logic analyzer");
4371
4372         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4373             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4374             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4375
4376         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4377             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4378             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4379
4380         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4381             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4382             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4383
4384         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4385             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4386             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4387
4388         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4389             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4390             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4391
4392         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4393             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4394             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4395
4396         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4397             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4398             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4399
4400         if (is_t5(sc)) {
4401                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4402                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4403                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4404
4405                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4406                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4407                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4408         }
4409
4410         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4411             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4412             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4413
4414         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4415             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4416             sysctl_cim_qcfg, "A", "CIM queue configuration");
4417
4418         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4419             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4420             sysctl_cpl_stats, "A", "CPL statistics");
4421
4422         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4423             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4424             sysctl_ddp_stats, "A", "DDP statistics");
4425
4426         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4427             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4428             sysctl_devlog, "A", "firmware's device log");
4429
4430         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4431             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4432             sysctl_fcoe_stats, "A", "FCoE statistics");
4433
4434         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4435             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4436             sysctl_hw_sched, "A", "hardware scheduler ");
4437
4438         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4439             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4440             sysctl_l2t, "A", "hardware L2 table");
4441
4442         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4443             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4444             sysctl_lb_stats, "A", "loopback statistics");
4445
4446         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4447             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4448             sysctl_meminfo, "A", "memory regions");
4449
4450         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4451             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4452             sysctl_mps_tcam, "A", "MPS TCAM entries");
4453
4454         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4455             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4456             sysctl_path_mtus, "A", "path MTUs");
4457
4458         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4459             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4460             sysctl_pm_stats, "A", "PM statistics");
4461
4462         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4463             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4464             sysctl_rdma_stats, "A", "RDMA statistics");
4465
4466         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4467             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4468             sysctl_tcp_stats, "A", "TCP statistics");
4469
4470         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4471             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4472             sysctl_tids, "A", "TID information");
4473
4474         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4475             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4476             sysctl_tp_err_stats, "A", "TP error statistics");
4477
4478         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4479             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4480             sysctl_tp_la, "A", "TP logic analyzer");
4481
4482         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4483             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4484             sysctl_tx_rate, "A", "Tx rate");
4485
4486         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4487             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4488             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4489
4490         if (is_t5(sc)) {
4491                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4492                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4493                     sysctl_wcwr_stats, "A", "write combined work requests");
4494         }
4495 #endif
4496
4497 #ifdef TCP_OFFLOAD
4498         if (is_offload(sc)) {
4499                 /*
4500                  * dev.t4nex.X.toe.
4501                  */
4502                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4503                     NULL, "TOE parameters");
4504                 children = SYSCTL_CHILDREN(oid);
4505
4506                 sc->tt.sndbuf = 256 * 1024;
4507                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4508                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4509
4510                 sc->tt.ddp = 0;
4511                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4512                     &sc->tt.ddp, 0, "DDP allowed");
4513
4514                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4515                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4516                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4517
4518                 sc->tt.ddp_thres =
4519                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4520                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4521                     &sc->tt.ddp_thres, 0, "DDP threshold");
4522
4523                 sc->tt.rx_coalesce = 1;
4524                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4525                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4526         }
4527 #endif
4528
4529
4530         return (0);
4531 }
4532
/*
 * Set up the per-port sysctl tree (dev.cxgbe.X.*): queue configuration,
 * holdoff tunables, and hardware MAC statistics under dev.cxgbe.X.stats.
 * Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct adapter *sc = pi->adapter;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
		/* BT_XAUI ports expose extra PHY information. */
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
		    "PHY temperature (in Celsius)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
		    "PHY firmware version");
	}
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
	    "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Each of these OIDs reads a 64-bit MPS counter register directly via
 * sysctl_handle_t4_reg64; arg2 is the register address for this port.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/*
	 * These come from the pi->stats snapshot (refreshed by cxgbe_tick)
	 * and may be stale by up to 1s.
	 */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
4760
4761 static int
4762 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4763 {
4764         int rc, *i;
4765         struct sbuf sb;
4766
4767         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4768         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4769                 sbuf_printf(&sb, "%d ", *i);
4770         sbuf_trim(&sb);
4771         sbuf_finish(&sb);
4772         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4773         sbuf_delete(&sb);
4774         return (rc);
4775 }
4776
/*
 * Read-only sysctl that formats arg2 as a bitfield using the kernel printf
 * %b conversion; arg1 is the %b description string (e.g. "\20\1FLAG0...").
 */
static int
sysctl_bitfield(SYSCTL_HANDLER_ARGS)
{
	int rc;
	struct sbuf *sb;

	/* Wire the user buffer so the sbuf drain can't fault. */
	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return(rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
4797
/*
 * Read-only sysctl that reads a value from the port's BASE-T PHY over MDIO.
 * arg2 (op) selects which register to read; for op 0 only the high byte of
 * the register is reported.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	/*
	 * NOTE(review): 0x1e is the MDIO device address and 0x20/0xc820 the
	 * register addresses; presumably PHY-vendor-specific -- confirm
	 * against the PHY datasheet.
	 */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;	/* discard the low byte for op 0 */

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
4822
4823 static int
4824 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4825 {
4826         struct port_info *pi = arg1;
4827         int rc, val;
4828
4829         val = pi->rsrv_noflowq;
4830         rc = sysctl_handle_int(oidp, &val, 0, req);
4831         if (rc != 0 || req->newptr == NULL)
4832                 return (rc);
4833
4834         if ((val >= 1) && (pi->ntxq > 1))
4835                 pi->rsrv_noflowq = 1;
4836         else
4837                 pi->rsrv_noflowq = 0;
4838
4839         return (rc);
4840 }
4841
/*
 * Sysctl handler for the port's interrupt holdoff timer index.  Writing a
 * new index (0 .. SGE_NTIMERS - 1) updates intr_params on every rx queue
 * (and offload rx queue) of the port and then records the new index.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);	/* read-only access, or error */

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Preserve the packet-count enable bit while changing the timer. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		/* No 8-bit atomic store on this arch; plain store. */
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4890
4891 static int
4892 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4893 {
4894         struct port_info *pi = arg1;
4895         struct adapter *sc = pi->adapter;
4896         int idx, rc;
4897
4898         idx = pi->pktc_idx;
4899
4900         rc = sysctl_handle_int(oidp, &idx, 0, req);
4901         if (rc != 0 || req->newptr == NULL)
4902                 return (rc);
4903
4904         if (idx < -1 || idx >= SGE_NCOUNTERS)
4905                 return (EINVAL);
4906
4907         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4908             "t4pktc");
4909         if (rc)
4910                 return (rc);
4911
4912         if (pi->flags & PORT_INIT_DONE)
4913                 rc = EBUSY; /* cannot be changed once the queues are created */
4914         else
4915                 pi->pktc_idx = idx;
4916
4917         end_synchronized_op(sc, LOCK_HELD);
4918         return (rc);
4919 }
4920
4921 static int
4922 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4923 {
4924         struct port_info *pi = arg1;
4925         struct adapter *sc = pi->adapter;
4926         int qsize, rc;
4927
4928         qsize = pi->qsize_rxq;
4929
4930         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4931         if (rc != 0 || req->newptr == NULL)
4932                 return (rc);
4933
4934         if (qsize < 128 || (qsize & 7))
4935                 return (EINVAL);
4936
4937         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4938             "t4rxqs");
4939         if (rc)
4940                 return (rc);
4941
4942         if (pi->flags & PORT_INIT_DONE)
4943                 rc = EBUSY; /* cannot be changed once the queues are created */
4944         else
4945                 pi->qsize_rxq = qsize;
4946
4947         end_synchronized_op(sc, LOCK_HELD);
4948         return (rc);
4949 }
4950
4951 static int
4952 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4953 {
4954         struct port_info *pi = arg1;
4955         struct adapter *sc = pi->adapter;
4956         int qsize, rc;
4957
4958         qsize = pi->qsize_txq;
4959
4960         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4961         if (rc != 0 || req->newptr == NULL)
4962                 return (rc);
4963
4964         /* bufring size must be powerof2 */
4965         if (qsize < 128 || !powerof2(qsize))
4966                 return (EINVAL);
4967
4968         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4969             "t4txqs");
4970         if (rc)
4971                 return (rc);
4972
4973         if (pi->flags & PORT_INIT_DONE)
4974                 rc = EBUSY; /* cannot be changed once the queues are created */
4975         else
4976                 pi->qsize_txq = qsize;
4977
4978         end_synchronized_op(sc, LOCK_HELD);
4979         return (rc);
4980 }
4981
4982 static int
4983 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4984 {
4985         struct adapter *sc = arg1;
4986         int reg = arg2;
4987         uint64_t val;
4988
4989         val = t4_read_reg64(sc, reg);
4990
4991         return (sysctl_handle_64(oidp, &val, 0, req));
4992 }
4993
/*
 * Read-only sysctl that queries the chip temperature from the firmware via
 * a DEV_DIAG parameter.  Firmware reports 0 for "unknown"; that is shown to
 * userland as -1.
 */
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}
5018
5019 #ifdef SBUF_DRAIN
/*
 * Read-only sysctl that dumps the TP congestion control table.  For each of
 * the NCCTRL_WIN windows it prints the additive increments for all NMTUS
 * MTUs (split across two lines), then the window size and decrement factor.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* Decrement factor names, indexed by params.b_wnd[]. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	/* incr[mtu][window]: 16 MTUs printed as two lines per window. */
	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5057
/* CIM queue names; indexed the same way as sysctl_cim_ibq_obq's arg2. */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
5063
/*
 * Read-only sysctl that dumps one CIM inbound or outbound queue.  arg2 is
 * an index into qname[]: values below CIM_NUM_IBQ select an IBQ, the rest
 * select an OBQ (T5 has two more OBQs than T4).
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	/* The read routines return a negative errno or # of words read. */
	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* qname[] is indexed by the original arg2, before the IBQ offset. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5119
5120 static int
5121 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5122 {
5123         struct adapter *sc = arg1;
5124         u_int cfg;
5125         struct sbuf *sb;
5126         uint32_t *buf, *p;
5127         int rc;
5128
5129         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5130         if (rc != 0)
5131                 return (rc);
5132
5133         rc = sysctl_wire_old_buffer(req, 0);
5134         if (rc != 0)
5135                 return (rc);
5136
5137         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5138         if (sb == NULL)
5139                 return (ENOMEM);
5140
5141         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5142             M_ZERO | M_WAITOK);
5143
5144         rc = -t4_cim_read_la(sc, buf, NULL);
5145         if (rc != 0)
5146                 goto done;
5147
5148         sbuf_printf(sb, "Status   Data      PC%s",
5149             cfg & F_UPDBGLACAPTPCONLY ? "" :
5150             "     LS0Stat  LS0Addr             LS0Data");
5151
5152         KASSERT((sc->params.cim_la_size & 7) == 0,
5153             ("%s: p will walk off the end of buf", __func__));
5154
5155         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5156                 if (cfg & F_UPDBGLACAPTPCONLY) {
5157                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5158                             p[6], p[7]);
5159                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5160                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5161                             p[4] & 0xff, p[5] >> 8);
5162                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5163                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5164                             p[1] & 0xf, p[2] >> 4);
5165                 } else {
5166                         sbuf_printf(sb,
5167                             "\n  %02x   %x%07x %x%07x %08x %08x "
5168                             "%08x%08x%08x%08x",
5169                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5170                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5171                             p[6], p[7]);
5172                 }
5173         }
5174
5175         rc = sbuf_finish(sb);
5176         sbuf_delete(sb);
5177 done:
5178         free(buf, M_CXGBE);
5179         return (rc);
5180 }
5181
/*
 * Read-only sysctl that dumps the CIM MA logic analyzer.  The buffer holds
 * two captures of CIM_MALA_SIZE entries (5 words each): a raw view first,
 * then the Cnt/ID/Tag/UE/Data/RDY/VLD view decoded below.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	/* First capture: raw entries, most significant word first. */
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Second capture: decoded fields. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5225
5226 static int
5227 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5228 {
5229         struct adapter *sc = arg1;
5230         u_int i;
5231         struct sbuf *sb;
5232         uint32_t *buf, *p;
5233         int rc;
5234
5235         rc = sysctl_wire_old_buffer(req, 0);
5236         if (rc != 0)
5237                 return (rc);
5238
5239         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5240         if (sb == NULL)
5241                 return (ENOMEM);
5242
5243         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5244             M_ZERO | M_WAITOK);
5245
5246         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5247         p = buf;
5248
5249         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5250         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5251                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5252                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5253                     p[4], p[3], p[2], p[1], p[0]);
5254         }
5255
5256         sbuf_printf(sb, "\n\nCntl ID               Data");
5257         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5258                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5259                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5260         }
5261
5262         rc = sbuf_finish(sb);
5263         sbuf_delete(sb);
5264         free(buf, M_CXGBE);
5265         return (rc);
5266 }
5267
/*
 * Read-only sysctl that summarizes every CIM queue: base address, size,
 * threshold (IBQs only), read/write pointers and SOP/EOP counts.  T4 and
 * T5 differ in the number of OBQs and in the shadow register addresses.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* Per-queue state: 4 words per queue, IBQs followed by OBQs. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* IBQs first... */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	/* ...then OBQs (no threshold; write pointer comes from obq_wr). */
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5326
/*
 * Read-only sysctl that reports the per-channel TP CPL request/response
 * counters, read fresh from the hardware on each access.
 */
static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_cpl_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_cpl_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
	    "channel 3\n");
	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5357
/*
 * Read-only sysctl that reports the TP USM counters (frames, octets,
 * drops).  NOTE(review): the sysctl is presented as DDP stats but reads the
 * USM counters -- presumably these back the DDP statistics; confirm against
 * the TP documentation.
 */
static int
sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_usm_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return(rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_usm_stats(sc, &stats);

	sbuf_printf(sb, "Frames: %u\n", stats.frames);
	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
	sbuf_printf(sb, "Drops:  %u", stats.drops);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5385
5386 const char *devlog_level_strings[] = {
5387         [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
5388         [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
5389         [FW_DEVLOG_LEVEL_ERR]           = "ERR",
5390         [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
5391         [FW_DEVLOG_LEVEL_INFO]          = "INFO",
5392         [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
5393 };
5394
5395 const char *devlog_facility_strings[] = {
5396         [FW_DEVLOG_FACILITY_CORE]       = "CORE",
5397         [FW_DEVLOG_FACILITY_CF]         = "CF",
5398         [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
5399         [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
5400         [FW_DEVLOG_FACILITY_RES]        = "RES",
5401         [FW_DEVLOG_FACILITY_HW]         = "HW",
5402         [FW_DEVLOG_FACILITY_FLR]        = "FLR",
5403         [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
5404         [FW_DEVLOG_FACILITY_PHY]        = "PHY",
5405         [FW_DEVLOG_FACILITY_MAC]        = "MAC",
5406         [FW_DEVLOG_FACILITY_PORT]       = "PORT",
5407         [FW_DEVLOG_FACILITY_VI]         = "VI",
5408         [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
5409         [FW_DEVLOG_FACILITY_ACL]        = "ACL",
5410         [FW_DEVLOG_FACILITY_TM]         = "TM",
5411         [FW_DEVLOG_FACILITY_QFC]        = "QFC",
5412         [FW_DEVLOG_FACILITY_DCB]        = "DCB",
5413         [FW_DEVLOG_FACILITY_ETH]        = "ETH",
5414         [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
5415         [FW_DEVLOG_FACILITY_RI]         = "RI",
5416         [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
5417         [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
5418         [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
5419         [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
5420 };
5421
/*
 * Read-only sysctl that dumps the firmware device log, oldest entry first.
 * The log lives in adapter memory and is a circular buffer of fw_devlog_e
 * entries; unused entries have a zero timestamp.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0, m;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		/*
		 * Firmware didn't report devlog parameters; fall back to
		 * hardcoded defaults.  NOTE(review): presumably these match
		 * the layout used by older firmwares -- verify for newer
		 * firmware images.
		 */
		dparams->memtype = FW_MEMTYPE_EDC0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	m = fwmtype_to_hwmtype(dparams->memtype);
	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * First pass: byte-swap every used entry (the log is big-endian) and
	 * find the oldest one; that's where the circular dump starts.
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Second pass: walk the circular buffer from the oldest entry. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* e->fmt is a format string supplied by the firmware. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5507
/*
 * Read-only sysctl that reports per-channel TP FCoE statistics (octets and
 * frames DDP'ed, frames dropped) for all four channels.
 */
static int
sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_fcoe_stats stats[4];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_fcoe_stats(sc, 0, &stats[0]);
	t4_get_fcoe_stats(sc, 1, &stats[1]);
	t4_get_fcoe_stats(sc, 2, &stats[2]);
	t4_get_fcoe_stats(sc, 3, &stats[3]);

	sbuf_printf(sb, "                   channel 0        channel 1        "
	    "channel 2        channel 3\n");
	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
	    stats[3].octetsDDP);
	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
	    stats[3].framesDrop);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5545
/*
 * Read-only sysctl that dumps the tx scheduler configuration: per-scheduler
 * mode, channel, rate, and the inter-packet gaps (class IPG from the
 * hardware, flow IPG from the pace table).
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	/* Two bits of the channel map per scheduler, hence map >>= 2. */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5595
/*
 * Read-only sysctl that dumps the MPS loopback port statistics for all four
 * loopback channels, two channels per column pair.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	/* Row labels; must stay in the same order as lb_port_stats fields. */
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		/*
		 * Walk the counters as a flat array of uint64_t, starting at
		 * the first (octets).  This relies on struct lb_port_stats
		 * laying out its counters in stat_name[] order.
		 */
		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5643
5644 static int
5645 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5646 {
5647         int rc = 0;
5648         struct port_info *pi = arg1;
5649         struct sbuf *sb;
5650         static const char *linkdnreasons[] = {
5651                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5652                 "PHY overheated", "unknown", "rx los", "reserved7"
5653         };
5654
5655         rc = sysctl_wire_old_buffer(req, 0);
5656         if (rc != 0)
5657                 return(rc);
5658         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5659         if (sb == NULL)
5660                 return (ENOMEM);
5661
5662         if (pi->linkdnrc < 0)
5663                 sbuf_printf(sb, "n/a");
5664         else if (pi->linkdnrc < nitems(linkdnreasons))
5665                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5666         else
5667                 sbuf_printf(sb, "%d", pi->linkdnrc);
5668
5669         rc = sbuf_finish(sb);
5670         sbuf_delete(sb);
5671
5672         return (rc);
5673 }
5674
/*
 * A memory range in the adapter's address map, used to sort and print
 * the meminfo sysctl output.
 */
struct mem_desc {
        unsigned int base;      /* start address */
        unsigned int limit;     /* last address (inclusive); 0 = derive later */
        unsigned int idx;       /* index into the name table; out-of-range
                                 * values mark entries that are hidden */
};

/*
 * qsort(3) comparator: order mem_desc entries by base address.
 *
 * Compare explicitly rather than returning the subtraction: the bases are
 * unsigned and their difference, converted to int, wraps and yields the
 * wrong sign when the addresses are more than 2 GB apart.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
        const struct mem_desc *ma = a;
        const struct mem_desc *mb = b;

        if (ma->base < mb->base)
                return (-1);
        if (ma->base > mb->base)
                return (1);
        return (0);
}
5687
/*
 * Print one named memory region as "name  start-end [size]".
 * Regions whose computed size is 0 are suppressed.
 * XXX: need humanize_number(3) in libkern for a more readable 'size'.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        unsigned int size = to - from + 1;

        if (size != 0)
                sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
5701
5702 static int
5703 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5704 {
5705         struct adapter *sc = arg1;
5706         struct sbuf *sb;
5707         int rc, i, n;
5708         uint32_t lo, hi, used, alloc;
5709         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5710         static const char *region[] = {
5711                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5712                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5713                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5714                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5715                 "RQUDP region:", "PBL region:", "TXPBL region:",
5716                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5717                 "On-chip queues:"
5718         };
5719         struct mem_desc avail[4];
5720         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5721         struct mem_desc *md = mem;
5722
5723         rc = sysctl_wire_old_buffer(req, 0);
5724         if (rc != 0)
5725                 return (rc);
5726
5727         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5728         if (sb == NULL)
5729                 return (ENOMEM);
5730
5731         for (i = 0; i < nitems(mem); i++) {
5732                 mem[i].limit = 0;
5733                 mem[i].idx = i;
5734         }
5735
5736         /* Find and sort the populated memory ranges */
5737         i = 0;
5738         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5739         if (lo & F_EDRAM0_ENABLE) {
5740                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5741                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5742                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5743                 avail[i].idx = 0;
5744                 i++;
5745         }
5746         if (lo & F_EDRAM1_ENABLE) {
5747                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5748                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5749                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5750                 avail[i].idx = 1;
5751                 i++;
5752         }
5753         if (lo & F_EXT_MEM_ENABLE) {
5754                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5755                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5756                 avail[i].limit = avail[i].base +
5757                     (G_EXT_MEM_SIZE(hi) << 20);
5758                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5759                 i++;
5760         }
5761         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5762                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5763                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5764                 avail[i].limit = avail[i].base +
5765                     (G_EXT_MEM1_SIZE(hi) << 20);
5766                 avail[i].idx = 4;
5767                 i++;
5768         }
5769         if (!i)                                    /* no memory available */
5770                 return 0;
5771         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5772
5773         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5774         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5775         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5776         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5777         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5778         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5779         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5780         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5781         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5782
5783         /* the next few have explicit upper bounds */
5784         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5785         md->limit = md->base - 1 +
5786                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5787                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5788         md++;
5789
5790         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5791         md->limit = md->base - 1 +
5792                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5793                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5794         md++;
5795
5796         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5797                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5798                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5799                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5800         } else {
5801                 md->base = 0;
5802                 md->idx = nitems(region);  /* hide it */
5803         }
5804         md++;
5805
5806 #define ulp_region(reg) \
5807         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5808         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5809
5810         ulp_region(RX_ISCSI);
5811         ulp_region(RX_TDDP);
5812         ulp_region(TX_TPT);
5813         ulp_region(RX_STAG);
5814         ulp_region(RX_RQ);
5815         ulp_region(RX_RQUDP);
5816         ulp_region(RX_PBL);
5817         ulp_region(TX_PBL);
5818 #undef ulp_region
5819
5820         md->base = 0;
5821         md->idx = nitems(region);
5822         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5823                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5824                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5825                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5826         }
5827         md++;
5828
5829         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5830         md->limit = md->base + sc->tids.ntids - 1;
5831         md++;
5832         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5833         md->limit = md->base + sc->tids.ntids - 1;
5834         md++;
5835
5836         md->base = sc->vres.ocq.start;
5837         if (sc->vres.ocq.size)
5838                 md->limit = md->base + sc->vres.ocq.size - 1;
5839         else
5840                 md->idx = nitems(region);  /* hide it */
5841         md++;
5842
5843         /* add any address-space holes, there can be up to 3 */
5844         for (n = 0; n < i - 1; n++)
5845                 if (avail[n].limit < avail[n + 1].base)
5846                         (md++)->base = avail[n].limit;
5847         if (avail[n].limit)
5848                 (md++)->base = avail[n].limit;
5849
5850         n = md - mem;
5851         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5852
5853         for (lo = 0; lo < i; lo++)
5854                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5855                                 avail[lo].limit - 1);
5856
5857         sbuf_printf(sb, "\n");
5858         for (i = 0; i < n; i++) {
5859                 if (mem[i].idx >= nitems(region))
5860                         continue;                        /* skip holes */
5861                 if (!mem[i].limit)
5862                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5863                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5864                                 mem[i].limit);
5865         }
5866
5867         sbuf_printf(sb, "\n");
5868         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5869         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5870         mem_region_show(sb, "uP RAM:", lo, hi);
5871
5872         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5873         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5874         mem_region_show(sb, "uP Extmem2:", lo, hi);
5875
5876         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5877         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5878                    G_PMRXMAXPAGE(lo),
5879                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5880                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5881
5882         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5883         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5884         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5885                    G_PMTXMAXPAGE(lo),
5886                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5887                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5888         sbuf_printf(sb, "%u p-structs\n",
5889                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5890
5891         for (i = 0; i < 4; i++) {
5892                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5893                 if (is_t4(sc)) {
5894                         used = G_USED(lo);
5895                         alloc = G_ALLOC(lo);
5896                 } else {
5897                         used = G_T5_USED(lo);
5898                         alloc = G_T5_ALLOC(lo);
5899                 }
5900                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5901                            i, used, alloc);
5902         }
5903         for (i = 0; i < 4; i++) {
5904                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5905                 if (is_t4(sc)) {
5906                         used = G_USED(lo);
5907                         alloc = G_ALLOC(lo);
5908                 } else {
5909                         used = G_T5_USED(lo);
5910                         alloc = G_T5_ALLOC(lo);
5911                 }
5912                 sbuf_printf(sb,
5913                            "\nLoopback %d using %u pages out of %u allocated",
5914                            i, used, alloc);
5915         }
5916
5917         rc = sbuf_finish(sb);
5918         sbuf_delete(sb);
5919
5920         return (rc);
5921 }
5922
5923 static inline void
5924 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5925 {
5926         *mask = x | y;
5927         y = htobe64(y);
5928         memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5929 }
5930
/*
 * Sysctl handler: dump the MPS TCAM — one line per valid entry with its
 * MAC address/mask, port map, PF/VF owner, replication bitmap (read from
 * the firmware), and per-port priorities.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* TCAM depth differs between T4 and T5. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* Skip entries with a bit set in both X and Y (treated as
                 * not a valid TCAM entry). */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /* Fetch the 128-bit replication bitmap for this
                         * entry from the firmware with an LDST command. */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        /* Mailbox access requires the synchronized-op lock;
                         * failure to get it aborts the whole dump. */
                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        if (rc != 0) {
                                /* Report the firmware error inline and keep
                                 * dumping the remaining entries. */
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /* If the loop bailed out with an error, still flush the partial
         * output but report the error to the caller. */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6020
/*
 * Sysctl handler: display the adapter's 16-entry path MTU table as a
 * space separated list.
 */
static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        uint16_t mtus[NMTUS];   /* NMTUS is 16; the format below matches */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_read_mtu_tbl(sc, mtus, NULL);

        sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
            mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
            mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
            mtus[14], mtus[15]);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6049
6050 static int
6051 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6052 {
6053         struct adapter *sc = arg1;
6054         struct sbuf *sb;
6055         int rc, i;
6056         uint32_t cnt[PM_NSTATS];
6057         uint64_t cyc[PM_NSTATS];
6058         static const char *rx_stats[] = {
6059                 "Read:", "Write bypass:", "Write mem:", "Flush:"
6060         };
6061         static const char *tx_stats[] = {
6062                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6063         };
6064
6065         rc = sysctl_wire_old_buffer(req, 0);
6066         if (rc != 0)
6067                 return (rc);
6068
6069         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6070         if (sb == NULL)
6071                 return (ENOMEM);
6072
6073         t4_pmtx_get_stats(sc, cnt, cyc);
6074         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6075         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6076                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6077                     cyc[i]);
6078
6079         t4_pmrx_get_stats(sc, cnt, cyc);
6080         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6081         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6082                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6083                     cyc[i]);
6084
6085         rc = sbuf_finish(sb);
6086         sbuf_delete(sb);
6087
6088         return (rc);
6089 }
6090
/*
 * Sysctl handler: display TP RDMA statistics (RQE deferral counters).
 * Note: the "Defferals" spelling in the output is historical and is part
 * of the emitted sysctl text, so it is left as is.
 */
static int
sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_rdma_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_rdma_stats(sc, &stats);
        sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
        sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6116
/*
 * Sysctl handler: display TP TCP statistics, IPv4 and IPv6 side by side.
 */
static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_tcp_stats v4, v6;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_tcp_stats(sc, &v4, &v6);
        sbuf_printf(sb,
            "                                IP                 IPv6\n");
        sbuf_printf(sb, "OutRsts:      %20u %20u\n",
            v4.tcpOutRsts, v6.tcpOutRsts);
        sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
            v4.tcpInSegs, v6.tcpInSegs);
        sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
            v4.tcpOutSegs, v6.tcpOutSegs);
        sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
            v4.tcpRetransSegs, v6.tcpRetransSegs);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6150
/*
 * Sysctl handler: show the TID (tag identifier) ranges configured in the
 * lookup engine and their current usage.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        /* Active-open TIDs. */
        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        /* Connection TIDs.  With the LE hash enabled the TID space is
         * split around the hash region, so two ranges may be printed. */
        if (t->ntids) {
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        /* Server TIDs. */
        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        /* Filter TIDs. */
        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        /* Ethernet-offload TIDs. */
        if (t->netids) {
                sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
                    t->etid_base + t->netids - 1);
        }

        /* Hardware's own count of TIDs in use, per address family. */
        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6215
/*
 * Sysctl handler: display TP error statistics, one column per channel
 * for the per-channel counters, plus two adapter-wide counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_err_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_err_stats(sc, &stats);

        sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
                      "channel 3\n");
        sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
            stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
            stats.macInErrs[3]);
        sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
            stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
            stats.hdrInErrs[3]);
        sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
            stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
            stats.tcpInErrs[3]);
        sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
            stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
            stats.tcp6InErrs[3]);
        sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
            stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
            stats.tnlCongDrops[3]);
        sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
            stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
            stats.tnlTxDrops[3]);
        sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
            stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
            stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
        sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
            stats.ofldChanDrops[0], stats.ofldChanDrops[1],
            stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
        /* These two are not per-channel. */
        sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
            stats.ofldNoNeigh, stats.ofldCongDefer);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6268
/*
 * Describes one bit-field within a 64-bit logic-analyzer word, for
 * decoding by field_desc_show().  Tables of these are terminated by an
 * entry with a NULL name.
 */
struct field_desc {
        const char *name;       /* field name as printed */
        u_int start;            /* lowest bit position of the field */
        u_int width;            /* field width in bits */
};
6274
6275 static void
6276 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6277 {
6278         char buf[32];
6279         int line_size = 0;
6280
6281         while (f->name) {
6282                 uint64_t mask = (1ULL << f->width) - 1;
6283                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6284                     ((uintmax_t)v >> f->start) & mask);
6285
6286                 if (line_size + len >= 79) {
6287                         line_size = 8;
6288                         sbuf_printf(sb, "\n        ");
6289                 }
6290                 sbuf_printf(sb, "%s ", buf);
6291                 line_size += len + 1;
6292                 f++;
6293         }
6294         sbuf_printf(sb, "\n");
6295 }
6296
6297 static struct field_desc tp_la0[] = {
6298         { "RcfOpCodeOut", 60, 4 },
6299         { "State", 56, 4 },
6300         { "WcfState", 52, 4 },
6301         { "RcfOpcSrcOut", 50, 2 },
6302         { "CRxError", 49, 1 },
6303         { "ERxError", 48, 1 },
6304         { "SanityFailed", 47, 1 },
6305         { "SpuriousMsg", 46, 1 },
6306         { "FlushInputMsg", 45, 1 },
6307         { "FlushInputCpl", 44, 1 },
6308         { "RssUpBit", 43, 1 },
6309         { "RssFilterHit", 42, 1 },
6310         { "Tid", 32, 10 },
6311         { "InitTcb", 31, 1 },
6312         { "LineNumber", 24, 7 },
6313         { "Emsg", 23, 1 },
6314         { "EdataOut", 22, 1 },
6315         { "Cmsg", 21, 1 },
6316         { "CdataOut", 20, 1 },
6317         { "EreadPdu", 19, 1 },
6318         { "CreadPdu", 18, 1 },
6319         { "TunnelPkt", 17, 1 },
6320         { "RcfPeerFin", 16, 1 },
6321         { "RcfReasonOut", 12, 4 },
6322         { "TxCchannel", 10, 2 },
6323         { "RcfTxChannel", 8, 2 },
6324         { "RxEchannel", 6, 2 },
6325         { "RcfRxChannel", 5, 1 },
6326         { "RcfDataOutSrdy", 4, 1 },
6327         { "RxDvld", 3, 1 },
6328         { "RxOoDvld", 2, 1 },
6329         { "RxCongestion", 1, 1 },
6330         { "TxCongestion", 0, 1 },
6331         { NULL }
6332 };
6333
6334 static struct field_desc tp_la1[] = {
6335         { "CplCmdIn", 56, 8 },
6336         { "CplCmdOut", 48, 8 },
6337         { "ESynOut", 47, 1 },
6338         { "EAckOut", 46, 1 },
6339         { "EFinOut", 45, 1 },
6340         { "ERstOut", 44, 1 },
6341         { "SynIn", 43, 1 },
6342         { "AckIn", 42, 1 },
6343         { "FinIn", 41, 1 },
6344         { "RstIn", 40, 1 },
6345         { "DataIn", 39, 1 },
6346         { "DataInVld", 38, 1 },
6347         { "PadIn", 37, 1 },
6348         { "RxBufEmpty", 36, 1 },
6349         { "RxDdp", 35, 1 },
6350         { "RxFbCongestion", 34, 1 },
6351         { "TxFbCongestion", 33, 1 },
6352         { "TxPktSumSrdy", 32, 1 },
6353         { "RcfUlpType", 28, 4 },
6354         { "Eread", 27, 1 },
6355         { "Ebypass", 26, 1 },
6356         { "Esave", 25, 1 },
6357         { "Static0", 24, 1 },
6358         { "Cread", 23, 1 },
6359         { "Cbypass", 22, 1 },
6360         { "Csave", 21, 1 },
6361         { "CPktOut", 20, 1 },
6362         { "RxPagePoolFull", 18, 2 },
6363         { "RxLpbkPkt", 17, 1 },
6364         { "TxLpbkPkt", 16, 1 },
6365         { "RxVfValid", 15, 1 },
6366         { "SynLearned", 14, 1 },
6367         { "SetDelEntry", 13, 1 },
6368         { "SetInvEntry", 12, 1 },
6369         { "CpcmdDvld", 11, 1 },
6370         { "CpcmdSave", 10, 1 },
6371         { "RxPstructsFull", 8, 2 },
6372         { "EpcmdDvld", 7, 1 },
6373         { "EpcmdFlush", 6, 1 },
6374         { "EpcmdTrimPrefix", 5, 1 },
6375         { "EpcmdTrimPostfix", 4, 1 },
6376         { "ERssIp4Pkt", 3, 1 },
6377         { "ERssIp6Pkt", 2, 1 },
6378         { "ERssTcpUdpPkt", 1, 1 },
6379         { "ERssFceFipPkt", 0, 1 },
6380         { NULL }
6381 };
6382
6383 static struct field_desc tp_la2[] = {
6384         { "CplCmdIn", 56, 8 },
6385         { "MpsVfVld", 55, 1 },
6386         { "MpsPf", 52, 3 },
6387         { "MpsVf", 44, 8 },
6388         { "SynIn", 43, 1 },
6389         { "AckIn", 42, 1 },
6390         { "FinIn", 41, 1 },
6391         { "RstIn", 40, 1 },
6392         { "DataIn", 39, 1 },
6393         { "DataInVld", 38, 1 },
6394         { "PadIn", 37, 1 },
6395         { "RxBufEmpty", 36, 1 },
6396         { "RxDdp", 35, 1 },
6397         { "RxFbCongestion", 34, 1 },
6398         { "TxFbCongestion", 33, 1 },
6399         { "TxPktSumSrdy", 32, 1 },
6400         { "RcfUlpType", 28, 4 },
6401         { "Eread", 27, 1 },
6402         { "Ebypass", 26, 1 },
6403         { "Esave", 25, 1 },
6404         { "Static0", 24, 1 },
6405         { "Cread", 23, 1 },
6406         { "Cbypass", 22, 1 },
6407         { "Csave", 21, 1 },
6408         { "CPktOut", 20, 1 },
6409         { "RxPagePoolFull", 18, 2 },
6410         { "RxLpbkPkt", 17, 1 },
6411         { "TxLpbkPkt", 16, 1 },
6412         { "RxVfValid", 15, 1 },
6413         { "SynLearned", 14, 1 },
6414         { "SetDelEntry", 13, 1 },
6415         { "SetInvEntry", 12, 1 },
6416         { "CpcmdDvld", 11, 1 },
6417         { "CpcmdSave", 10, 1 },
6418         { "RxPstructsFull", 8, 2 },
6419         { "EpcmdDvld", 7, 1 },
6420         { "EpcmdFlush", 6, 1 },
6421         { "EpcmdTrimPrefix", 5, 1 },
6422         { "EpcmdTrimPostfix", 4, 1 },
6423         { "ERssIp4Pkt", 3, 1 },
6424         { "ERssIp6Pkt", 2, 1 },
6425         { "ERssTcpUdpPkt", 1, 1 },
6426         { "ERssFceFipPkt", 0, 1 },
6427         { NULL }
6428 };
6429
/*
 * Show one single-word TP LA entry decoded with tp_la0.  'idx' is unused
 * here but keeps the signature uniform with the other show functions
 * dispatched from sysctl_tp_la().
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

        field_desc_show(sb, *p, tp_la0);
}
6436
/*
 * Show one two-word TP LA entry (DBGLAMODE 2); both words are decoded
 * with tp_la0.  The second word of the final entry is skipped when it is
 * all-ones (unused capture slot).
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

        /* Blank line between entries (but not before the first). */
        if (idx)
                sbuf_printf(sb, "\n");
        field_desc_show(sb, p[0], tp_la0);
        if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
                field_desc_show(sb, p[1], tp_la0);
}
6447
6448 static void
6449 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6450 {
6451
6452         if (idx)
6453                 sbuf_printf(sb, "\n");
6454         field_desc_show(sb, p[0], tp_la0);
6455         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6456                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6457 }
6458
6459 static int
6460 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6461 {
6462         struct adapter *sc = arg1;
6463         struct sbuf *sb;
6464         uint64_t *buf, *p;
6465         int rc;
6466         u_int i, inc;
6467         void (*show_func)(struct sbuf *, uint64_t *, int);
6468
6469         rc = sysctl_wire_old_buffer(req, 0);
6470         if (rc != 0)
6471                 return (rc);
6472
6473         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6474         if (sb == NULL)
6475                 return (ENOMEM);
6476
6477         buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6478
6479         t4_tp_read_la(sc, buf, NULL);
6480         p = buf;
6481
6482         switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6483         case 2:
6484                 inc = 2;
6485                 show_func = tp_la_show2;
6486                 break;
6487         case 3:
6488                 inc = 2;
6489                 show_func = tp_la_show3;
6490                 break;
6491         default:
6492                 inc = 1;
6493                 show_func = tp_la_show;
6494         }
6495
6496         for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6497                 (*show_func)(sb, p, i);
6498
6499         rc = sbuf_finish(sb);
6500         sbuf_delete(sb);
6501         free(buf, M_CXGBE);
6502         return (rc);
6503 }
6504
6505 static int
6506 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6507 {
6508         struct adapter *sc = arg1;
6509         struct sbuf *sb;
6510         int rc;
6511         u64 nrate[NCHAN], orate[NCHAN];
6512
6513         rc = sysctl_wire_old_buffer(req, 0);
6514         if (rc != 0)
6515                 return (rc);
6516
6517         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6518         if (sb == NULL)
6519                 return (ENOMEM);
6520
6521         t4_get_chan_txrate(sc, nrate, orate);
6522         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6523                  "channel 3\n");
6524         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6525             nrate[0], nrate[1], nrate[2], nrate[3]);
6526         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6527             orate[0], orate[1], orate[2], orate[3]);
6528
6529         rc = sbuf_finish(sb);
6530         sbuf_delete(sb);
6531
6532         return (rc);
6533 }
6534
6535 static int
6536 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6537 {
6538         struct adapter *sc = arg1;
6539         struct sbuf *sb;
6540         uint32_t *buf, *p;
6541         int rc, i;
6542
6543         rc = sysctl_wire_old_buffer(req, 0);
6544         if (rc != 0)
6545                 return (rc);
6546
6547         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6548         if (sb == NULL)
6549                 return (ENOMEM);
6550
6551         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6552             M_ZERO | M_WAITOK);
6553
6554         t4_ulprx_read_la(sc, buf);
6555         p = buf;
6556
6557         sbuf_printf(sb, "      Pcmd        Type   Message"
6558             "                Data");
6559         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6560                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6561                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6562         }
6563
6564         rc = sbuf_finish(sb);
6565         sbuf_delete(sb);
6566         free(buf, M_CXGBE);
6567         return (rc);
6568 }
6569
6570 static int
6571 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6572 {
6573         struct adapter *sc = arg1;
6574         struct sbuf *sb;
6575         int rc, v;
6576
6577         rc = sysctl_wire_old_buffer(req, 0);
6578         if (rc != 0)
6579                 return (rc);
6580
6581         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6582         if (sb == NULL)
6583                 return (ENOMEM);
6584
6585         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6586         if (G_STATSOURCE_T5(v) == 7) {
6587                 if (G_STATMODE(v) == 0) {
6588                         sbuf_printf(sb, "total %d, incomplete %d",
6589                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6590                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6591                 } else if (G_STATMODE(v) == 1) {
6592                         sbuf_printf(sb, "total %d, data overflow %d",
6593                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6594                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6595                 }
6596         }
6597         rc = sbuf_finish(sb);
6598         sbuf_delete(sb);
6599
6600         return (rc);
6601 }
6602 #endif
6603
6604 static inline void
6605 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6606 {
6607         struct buf_ring *br;
6608         struct mbuf *m;
6609
6610         TXQ_LOCK_ASSERT_OWNED(txq);
6611
6612         br = txq->br;
6613         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6614         if (m)
6615                 t4_eth_tx(ifp, txq, m);
6616 }
6617
6618 void
6619 t4_tx_callout(void *arg)
6620 {
6621         struct sge_eq *eq = arg;
6622         struct adapter *sc;
6623
6624         if (EQ_TRYLOCK(eq) == 0)
6625                 goto reschedule;
6626
6627         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6628                 EQ_UNLOCK(eq);
6629 reschedule:
6630                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6631                         callout_schedule(&eq->tx_callout, 1);
6632                 return;
6633         }
6634
6635         EQ_LOCK_ASSERT_OWNED(eq);
6636
6637         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6638
6639                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6640                         struct sge_txq *txq = arg;
6641                         struct port_info *pi = txq->ifp->if_softc;
6642
6643                         sc = pi->adapter;
6644                 } else {
6645                         struct sge_wrq *wrq = arg;
6646
6647                         sc = wrq->adapter;
6648                 }
6649
6650                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6651         }
6652
6653         EQ_UNLOCK(eq);
6654 }
6655
6656 void
6657 t4_tx_task(void *arg, int count)
6658 {
6659         struct sge_eq *eq = arg;
6660
6661         EQ_LOCK(eq);
6662         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6663                 struct sge_txq *txq = arg;
6664                 txq_start(txq->ifp, txq);
6665         } else {
6666                 struct sge_wrq *wrq = arg;
6667                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6668         }
6669         EQ_UNLOCK(eq);
6670 }
6671
6672 static uint32_t
6673 fconf_to_mode(uint32_t fconf)
6674 {
6675         uint32_t mode;
6676
6677         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6678             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6679
6680         if (fconf & F_FRAGMENTATION)
6681                 mode |= T4_FILTER_IP_FRAGMENT;
6682
6683         if (fconf & F_MPSHITTYPE)
6684                 mode |= T4_FILTER_MPS_HIT_TYPE;
6685
6686         if (fconf & F_MACMATCH)
6687                 mode |= T4_FILTER_MAC_IDX;
6688
6689         if (fconf & F_ETHERTYPE)
6690                 mode |= T4_FILTER_ETH_TYPE;
6691
6692         if (fconf & F_PROTOCOL)
6693                 mode |= T4_FILTER_IP_PROTO;
6694
6695         if (fconf & F_TOS)
6696                 mode |= T4_FILTER_IP_TOS;
6697
6698         if (fconf & F_VLAN)
6699                 mode |= T4_FILTER_VLAN;
6700
6701         if (fconf & F_VNIC_ID)
6702                 mode |= T4_FILTER_VNIC;
6703
6704         if (fconf & F_PORT)
6705                 mode |= T4_FILTER_PORT;
6706
6707         if (fconf & F_FCOE)
6708                 mode |= T4_FILTER_FCoE;
6709
6710         return (mode);
6711 }
6712
6713 static uint32_t
6714 mode_to_fconf(uint32_t mode)
6715 {
6716         uint32_t fconf = 0;
6717
6718         if (mode & T4_FILTER_IP_FRAGMENT)
6719                 fconf |= F_FRAGMENTATION;
6720
6721         if (mode & T4_FILTER_MPS_HIT_TYPE)
6722                 fconf |= F_MPSHITTYPE;
6723
6724         if (mode & T4_FILTER_MAC_IDX)
6725                 fconf |= F_MACMATCH;
6726
6727         if (mode & T4_FILTER_ETH_TYPE)
6728                 fconf |= F_ETHERTYPE;
6729
6730         if (mode & T4_FILTER_IP_PROTO)
6731                 fconf |= F_PROTOCOL;
6732
6733         if (mode & T4_FILTER_IP_TOS)
6734                 fconf |= F_TOS;
6735
6736         if (mode & T4_FILTER_VLAN)
6737                 fconf |= F_VLAN;
6738
6739         if (mode & T4_FILTER_VNIC)
6740                 fconf |= F_VNIC_ID;
6741
6742         if (mode & T4_FILTER_PORT)
6743                 fconf |= F_PORT;
6744
6745         if (mode & T4_FILTER_FCoE)
6746                 fconf |= F_FCOE;
6747
6748         return (fconf);
6749 }
6750
6751 static uint32_t
6752 fspec_to_fconf(struct t4_filter_specification *fs)
6753 {
6754         uint32_t fconf = 0;
6755
6756         if (fs->val.frag || fs->mask.frag)
6757                 fconf |= F_FRAGMENTATION;
6758
6759         if (fs->val.matchtype || fs->mask.matchtype)
6760                 fconf |= F_MPSHITTYPE;
6761
6762         if (fs->val.macidx || fs->mask.macidx)
6763                 fconf |= F_MACMATCH;
6764
6765         if (fs->val.ethtype || fs->mask.ethtype)
6766                 fconf |= F_ETHERTYPE;
6767
6768         if (fs->val.proto || fs->mask.proto)
6769                 fconf |= F_PROTOCOL;
6770
6771         if (fs->val.tos || fs->mask.tos)
6772                 fconf |= F_TOS;
6773
6774         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6775                 fconf |= F_VLAN;
6776
6777         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6778                 fconf |= F_VNIC_ID;
6779
6780         if (fs->val.iport || fs->mask.iport)
6781                 fconf |= F_PORT;
6782
6783         if (fs->val.fcoe || fs->mask.fcoe)
6784                 fconf |= F_FCOE;
6785
6786         return (fconf);
6787 }
6788
/*
 * Read the current filter mode from hardware (TP_VLAN_PRI_MAP, via the
 * indirect TP PIO registers), resync the driver's cached copy if it has
 * drifted, and return the mode in T4_FILTER_* form.  Returns 0 or an
 * error from begin_synchronized_op.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	/* Serialize with other configuration operations; keep lock held. */
	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	if (sc->params.tp.vlan_pri_map != fconf) {
		/* Cached value is stale; warn and adopt what hardware says. */
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6815
/*
 * Set the global filter mode.  Refused (EBUSY) while any filters are in
 * use or (with TCP_OFFLOAD) while any port has offload active, since a
 * mode change would invalidate existing filters.  The actual hardware
 * update is compiled out ("notyet"), so this currently returns ENOTSUP
 * once the preconditions pass.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6853
/*
 * Read the hit count for filter @fid straight out of its TCB using PCIe
 * memory window 0.  The counter sits at a different offset and width in
 * the TCB on T4 (64-bit at +16) vs T5 (32-bit at +24).
 * NOTE(review): assumes the caller serializes access to memory window 0
 * (callers here run under begin_synchronized_op) -- confirm.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	/* Point window 0 at this filter's TCB. */
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
6873
/*
 * Return information about the first valid filter at or after t->idx.
 * On success t->idx is updated to the filter actually found and the
 * rest of *t is filled in; if there is none, t->idx is set to
 * 0xffffffff.  Always returns 0 once the synchronized op is entered.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	/* Nothing to report if no filters exist or idx is out of range. */
	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	/* Linear scan forward from t->idx for the next valid entry. */
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			/* UINT64_MAX signals "hit counting disabled". */
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6912
/*
 * Install a hardware filter as described by *t.  Validates the request
 * against the adapter state and the global filter mode, lazily
 * allocates the filter table on first use, sends the filter work
 * request to the firmware, and then sleeps until the firmware reply
 * (t4_filter_rpl) marks the entry valid or failed.
 *
 * Returns 0 on success; EINVAL/E2BIG/EBUSY/EPERM/EAGAIN/ENOTSUP/ENOMEM
 * on validation or allocation failure; EIO if the firmware rejected the
 * filter; EINPROGRESS if the sleep was interrupted (the filter may
 * still complete asynchronously).
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* First filter ever: allocate the table and its lock. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/* An IPv6 filter occupies four consecutive slots; check them all. */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		/* IPv4 filters use a single slot only. */
		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/*
	 * Work request sent; wait (interruptibly) for t4_filter_rpl to
	 * clear f->pending and report the outcome via f->valid.
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
7027
/*
 * Delete the hardware filter at t->idx.  Sends a delete work request to
 * the firmware and then sleeps until t4_filter_rpl confirms removal.
 * On success t->fs is filled with the deleted filter's specification.
 *
 * Returns 0 on success; ENOTSUP/EINVAL/EAGAIN/EBUSY/EPERM on validation
 * failure; EIO if the firmware reply left the entry valid; EINPROGRESS
 * if the wait was interrupted.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* An add or delete is already in flight for this slot. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	/* Locked entries are managed elsewhere and may not be deleted here. */
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/* Wait for t4_filter_rpl to process the firmware's reply. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
7095
7096 static void
7097 clear_filter(struct filter_entry *f)
7098 {
7099         if (f->l2t)
7100                 t4_l2t_release(f->l2t);
7101
7102         bzero(f, sizeof (*f));
7103 }
7104
/*
 * Build and send the FW_FILTER_WR work request that programs filter
 * @fidx into the hardware.  Allocates an L2T switching entry first if
 * the filter rewrites the destination MAC or VLAN.  Marks the entry
 * pending and bumps ftids_in_use; t4_filter_rpl completes the
 * transition when the firmware replies.
 *
 * Returns 0 if the work request was queued, EAGAIN if no L2T entry was
 * available, ENOMEM on other allocation failure.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Hardware filter tid = base + index within our region. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Multi-byte fields in the WR are big-endian. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	/* Single-byte field; no byte swap needed. */
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Ask for the firmware reply on the default firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* t4_filter_rpl clears pending and sets valid on the fw reply. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7207
7208 static int
7209 del_filter_wr(struct adapter *sc, int fidx)
7210 {
7211         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7212         struct wrqe *wr;
7213         struct fw_filter_wr *fwr;
7214         unsigned int ftid;
7215
7216         ftid = sc->tids.ftid_base + fidx;
7217
7218         wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7219         if (wr == NULL)
7220                 return (ENOMEM);
7221         fwr = wrtod(wr);
7222         bzero(fwr, sizeof (*fwr));
7223
7224         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7225
7226         f->pending = 1;
7227         t4_wrq_tx(sc, wr);
7228         return (0);
7229 }
7230
/*
 * CPL handler for the firmware's reply to a filter add/delete work
 * request.  Completes the pending->valid (or pending->cleared)
 * transition for the filter entry and wakes any thread sleeping in
 * set_filter/del_filter.  Replies for tids outside the filter region
 * are ignored.  Always returns 0.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		/* The cookie carries the firmware's status code. */
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted (or failed): release resources, free slot. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Wake sleepers in set_filter/del_filter. */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7273
/*
 * Read an SGE context (egress/ingress/FLM/CNM) for the ioctl interface.
 * Tries the firmware mailbox first when the firmware is usable, and
 * falls back to a direct ("backdoor") register read otherwise.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7306
/*
 * Load a new firmware image supplied from userspace onto the card.
 * Refused (EBUSY) once the adapter is fully initialized -- a firmware
 * flash is only allowed before that point.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	/*
	 * NOTE(review): M_WAITOK malloc does not return NULL, so this
	 * check looks redundant (harmless belt-and-braces) -- confirm.
	 */
	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* Copy the image in from userspace, then hand it to common code. */
	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7337
/*
 * Copy a range of card memory out to a userspace buffer, reading
 * through PCIe memory window @win one aperture's worth at a time.
 * The requested range is validated up front; reads are 32 bits wide.
 * NOTE(review): assumes mr->addr and mr->len are 4-byte aligned (the
 * inner loop steps by 4) -- presumably validate_mem_range enforces
 * this; confirm.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	/* Bounce buffer: at most one window aperture per iteration. */
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		/* Point the window at addr; off is addr's offset within it. */
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7378
7379 static int
7380 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7381 {
7382         int rc;
7383
7384         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7385                 return (EINVAL);
7386
7387         if (i2cd->len > sizeof(i2cd->data))
7388                 return (EFBIG);
7389
7390         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7391         if (rc)
7392                 return (rc);
7393         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7394             i2cd->offset, i2cd->len, &i2cd->data[0]);
7395         end_synchronized_op(sc, 0);
7396
7397         return (rc);
7398 }
7399
/*
 * Range check used by the scheduler-class ioctl: a negative value
 * means "parameter not specified" and always passes; otherwise the
 * value must lie in [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{

	if (val < 0)
		return (1);
	return (lo <= val && val <= hi);
}
7406
/*
 * Translate a cxgbetool scheduling-class request into the firmware's
 * nomenclature and submit it.  Negative fields in 'p' mean "unset"; which
 * ones are mandatory depends on the sub-command and level.  Returns 0 or a
 * (positive) errno.
 */
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	/* Firmware won't accept scheduler commands until the adapter is up. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
	 * sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	/* Packet scheduling is the only type this driver supports. */
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ..*/
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ...*/
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		/* Map the scheduling level (class/channel rate limit, WRR). */
		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Rate limit applies per class or per flow. */
		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Rates are expressed in bits/s or packets/s. */
		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Relative (% of port) or absolute rate. */
		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/*
		 * Vet our parameters ...  (in_range treats negative values as
		 * "unset" and lets them through; they are dealt with below.)
		 */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		/* maxrate is mandatory for the rate-limiting levels. */
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		/* weight is mandatory for weighted round-robin. */
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		/* pktsize is mandatory for the rate-limiting levels. */
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7555
/*
 * Bind TX queue(s) of a port to a scheduling class.  p->queue >= 0 selects a
 * single queue; a negative p->queue means every TX queue on the port.  A
 * negative p->cl unbinds (firmware value 0xffffffff).  Returns 0 or errno.
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	/* Queues don't exist until the adapter is fully initialized. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	pi = sc->port[p->port];
	/* Negative queue/cl mean "all queues" / "unbind" (see in_range). */
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If op.queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7621
7622 int
7623 t4_os_find_pci_capability(struct adapter *sc, int cap)
7624 {
7625         int i;
7626
7627         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7628 }
7629
7630 int
7631 t4_os_pci_save_state(struct adapter *sc)
7632 {
7633         device_t dev;
7634         struct pci_devinfo *dinfo;
7635
7636         dev = sc->dev;
7637         dinfo = device_get_ivars(dev);
7638
7639         pci_cfg_save(dev, dinfo, 0);
7640         return (0);
7641 }
7642
7643 int
7644 t4_os_pci_restore_state(struct adapter *sc)
7645 {
7646         device_t dev;
7647         struct pci_devinfo *dinfo;
7648
7649         dev = sc->dev;
7650         dinfo = device_get_ivars(dev);
7651
7652         pci_cfg_restore(dev, dinfo);
7653         return (0);
7654 }
7655
/*
 * Log a transceiver-module change on the given port.  mod_str is indexed by
 * the FW_PORT_MOD_TYPE_* value reported by firmware; types beyond the table
 * (or the special NONE/UNKNOWN/NOTSUPPORTED values, handled first) get a
 * generic message.
 */
void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(pi->ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(pi->ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(pi->ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		/* A type the table doesn't know about; log the raw value. */
		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}
7678
7679 void
7680 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7681 {
7682         struct port_info *pi = sc->port[idx];
7683         struct ifnet *ifp = pi->ifp;
7684
7685         if (link_stat) {
7686                 pi->linkdnrc = -1;
7687                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7688                 if_link_state_change(ifp, LINK_STATE_UP);
7689         } else {
7690                 if (reason >= 0)
7691                         pi->linkdnrc = reason;
7692                 if_link_state_change(ifp, LINK_STATE_DOWN);
7693         }
7694 }
7695
/*
 * Invoke 'func(sc, arg)' for every adapter on the global list.  The list
 * lock is held for the duration of the walk, so 'func' must not sleep for
 * long or try to take t4_list_lock itself.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t4_list_lock);
}
7711
/*
 * cdev open handler.  No per-open state; everything happens in t4_ioctl.
 */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
7717
/*
 * cdev close handler.  Nothing to tear down; open is stateless.
 */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
7723
/*
 * ioctl handler for the adapter's control node (used by cxgbetool).  Caller
 * must have PRIV_DRIVER.  Some cases return directly on validation failure
 * (before any work is done); the rest fall through to the common return.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register reads must be 4-byte aligned and inside BAR0. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry bits above 32. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		/* Window 2 is reserved for these on-demand memory reads. */
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);

		/* Queue soft-state counters exist only after port init. */
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* The port's control queue counters too. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
7877
7878 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE (TCP offload) on a port.  Activating TOE may first
 * bring the adapter up and activate the TOM upper-layer driver.  Must be
 * called inside a synchronized operation.  Returns 0 or errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* TOE needs the adapter's full queue set up and running. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port; nothing more to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* Activate the TOM ULD once per adapter, on first use. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
7927
7928 /*
7929  * Add an upper layer driver to the global list.
7930  */
7931 int
7932 t4_register_uld(struct uld_info *ui)
7933 {
7934         int rc = 0;
7935         struct uld_info *u;
7936
7937         mtx_lock(&t4_uld_list_lock);
7938         SLIST_FOREACH(u, &t4_uld_list, link) {
7939             if (u->uld_id == ui->uld_id) {
7940                     rc = EEXIST;
7941                     goto done;
7942             }
7943         }
7944
7945         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7946         ui->refcount = 0;
7947 done:
7948         mtx_unlock(&t4_uld_list_lock);
7949         return (rc);
7950 }
7951
7952 int
7953 t4_unregister_uld(struct uld_info *ui)
7954 {
7955         int rc = EINVAL;
7956         struct uld_info *u;
7957
7958         mtx_lock(&t4_uld_list_lock);
7959
7960         SLIST_FOREACH(u, &t4_uld_list, link) {
7961             if (u == ui) {
7962                     if (ui->refcount > 0) {
7963                             rc = EBUSY;
7964                             goto done;
7965                     }
7966
7967                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7968                     rc = 0;
7969                     goto done;
7970             }
7971         }
7972 done:
7973         mtx_unlock(&t4_uld_list_lock);
7974         return (rc);
7975 }
7976
7977 int
7978 t4_activate_uld(struct adapter *sc, int id)
7979 {
7980         int rc = EAGAIN;
7981         struct uld_info *ui;
7982
7983         ASSERT_SYNCHRONIZED_OP(sc);
7984
7985         mtx_lock(&t4_uld_list_lock);
7986
7987         SLIST_FOREACH(ui, &t4_uld_list, link) {
7988                 if (ui->uld_id == id) {
7989                         rc = ui->activate(sc);
7990                         if (rc == 0)
7991                                 ui->refcount++;
7992                         goto done;
7993                 }
7994         }
7995 done:
7996         mtx_unlock(&t4_uld_list_lock);
7997
7998         return (rc);
7999 }
8000
8001 int
8002 t4_deactivate_uld(struct adapter *sc, int id)
8003 {
8004         int rc = EINVAL;
8005         struct uld_info *ui;
8006
8007         ASSERT_SYNCHRONIZED_OP(sc);
8008
8009         mtx_lock(&t4_uld_list_lock);
8010
8011         SLIST_FOREACH(ui, &t4_uld_list, link) {
8012                 if (ui->uld_id == id) {
8013                         rc = ui->deactivate(sc);
8014                         if (rc == 0)
8015                                 ui->refcount--;
8016                         goto done;
8017                 }
8018         }
8019 done:
8020         mtx_unlock(&t4_uld_list_lock);
8021
8022         return (rc);
8023 }
8024 #endif
8025
8026 /*
8027  * Come up with reasonable defaults for some of the tunables, provided they're
8028  * not set by the user (in which case we'll use the values as is).
8029  */
/*
 * Fill in defaults for the tunables the administrator left unset (negative
 * or out-of-range values mean "unset"); values explicitly set via loader
 * tunables are clamped only when they are clearly invalid.
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	/* Queue counts default to min(ncpus, per-speed ceiling). */
	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	/* TOE capability defaults on only when the kernel supports it. */
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	/* Interrupt holdoff timer/packet-count indices must be in range. */
	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	/* RX queue size must be at least 128 and a multiple of 8. */
	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	/* Mask off anything that isn't a recognized interrupt type. */
	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
8089
/*
 * Module event handler shared by t4nex and t5nex.  Global state (locks,
 * adapter/ULD lists, tunables) is set up on the first load and torn down on
 * the last unload; the 'loaded' counter tracks how many of the two modules
 * are attached.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first of t4nex/t5nex initializes global state. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last module to unload tears global state down. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		mtx_destroy(&t4_uld_list_lock);
#endif
		/* Likewise while any adapter is still attached. */
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
8136
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus (PCI) driver; mod_event handles shared global setup/teardown. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus (PCI) driver; shares mod_event with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers, children of the nexus devices. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);