]> CyberLeo.Net >> Repos - FreeBSD/stable/10.git/blob - sys/dev/cxgbe/t4_main.c
MFC r259382:
[FreeBSD/stable/10.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus method table for the T4 nexus (PCI-level) driver. */
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
/* Softc for the nexus is the whole adapter structure. */
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
        DEVMETHOD(device_probe,         cxgbe_probe),
        DEVMETHOD(device_attach,        cxgbe_attach),
        DEVMETHOD(device_detach,        cxgbe_detach),
        { 0, 0 }
};
/* One cxgbe child device (and port_info softc) per network port. */
static driver_t cxgbe_driver = {
        "cxgbe",
        cxgbe_methods,
        sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/* Character device (/dev/t4nexN) used by cxgbetool and friends. */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
/* T5 bus driver interface */
static int t5_probe(device_t);
/* T5 shares attach/detach with T4; only probe differs (device id table). */
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};


/* T5 port (cxl) interface; reuses the cxgbe method table. */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* Same cdev entry points as T4; only the device name differs. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif
169
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

/* Reserve txq 0 of each port for non-flowid traffic (see rsrv_noflowq use). */
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF      "default"
#define FLASH_CF        "flash"
#define UWIRE_CF        "uwire"
#define FPGA_CF         "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

/* -1: let tweak_tunables() decide based on TOM availability. */
static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-only: use write-combined mappings of BAR2 doorbells when nonzero. */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

/* Result of interrupt/queue sizing done by cfg_itype_and_nqueues(). */
struct intrs_and_queues {
        int intr_type;          /* INTx, MSI, or MSI-X */
        int nirq;               /* Number of vectors */
        int intr_flags;
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
        int rsrv_noflowq;       /* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};

/* Software state for one hardware filter (see get/set/del_filter). */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;
};

/* Bits describing which MAC settings update_mac_settings() should apply. */
enum {
        XGMAC_MTU       = (1 << 0),
        XGMAC_PROMISC   = (1 << 1),
        XGMAC_ALLMULTI  = (1 << 2),
        XGMAC_VLANEX    = (1 << 3),
        XGMAC_UCADDR    = (1 << 4),
        XGMAC_MCADDRS   = (1 << 5),

        XGMAC_ALL       = 0xffff
};
334
/* Forward declarations for the file-local helpers, grouped by area. */

/* BAR mapping, memory windows, and card memory access. */
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
/* Interrupt/queue sizing, firmware bring-up, and parameter exchange. */
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
/* Per-port init/teardown and media. */
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
/* Default (unregistered) CPL / async-notification / fw message handlers. */
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
/* sysctl plumbing. */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
/* Tx kick and filter manipulation (ioctl back-ends). */
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

/* PCI device id -> description tables used by the probe routines. */
struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401,  "Chelsio T520-CR"},           /* 2 x 10G */
        {0x5402,  "Chelsio T522-CR"},           /* 2 x 10G, 2 X 1G */
        {0x5403,  "Chelsio T540-CR"},           /* 4 x 10G */
        {0x5407,  "Chelsio T520-SO"},           /* 2 x 10G, nomem */
        {0x5409,  "Chelsio T520-BT"},           /* 2 x 10GBaseT */
        {0x540a,  "Chelsio T504-BT"},           /* 4 x 1G */
        {0x540d,  "Chelsio T580-CR"},           /* 2 x 40G */
        {0x540e,  "Chelsio T540-LP-CR"},        /* 4 x 10G */
        {0x5410,  "Chelsio T580-LP-CR"},        /* 2 x 40G */
        {0x5411,  "Chelsio T520-LL-CR"},        /* 2 x 10G */
        {0x5412,  "Chelsio T560-CR"},           /* 1 x 40G, 2 x 10G */
        {0x5414,  "Chelsio T580-LP-SO-CR"},     /* 2 x 40G, nomem */
#ifdef notyet
        {0x5404,  "Chelsio T520-BCH"},
        {0x5405,  "Chelsio T540-BCH"},
        {0x5406,  "Chelsio T540-CH"},
        {0x5408,  "Chelsio T520-CX"},
        {0x540b,  "Chelsio B520-SR"},
        {0x540c,  "Chelsio B504-BT"},
        {0x540f,  "Chelsio Amsterdam"},
        {0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
498
499 static int
500 t4_probe(device_t dev)
501 {
502         int i;
503         uint16_t v = pci_get_vendor(dev);
504         uint16_t d = pci_get_device(dev);
505         uint8_t f = pci_get_function(dev);
506
507         if (v != PCI_VENDOR_ID_CHELSIO)
508                 return (ENXIO);
509
510         /* Attach only to PF0 of the FPGA */
511         if (d == 0xa000 && f != 0)
512                 return (ENXIO);
513
514         for (i = 0; i < nitems(t4_pciids); i++) {
515                 if (d == t4_pciids[i].device) {
516                         device_set_desc(dev, t4_pciids[i].desc);
517                         return (BUS_PROBE_DEFAULT);
518                 }
519         }
520
521         return (ENXIO);
522 }
523
524 static int
525 t5_probe(device_t dev)
526 {
527         int i;
528         uint16_t v = pci_get_vendor(dev);
529         uint16_t d = pci_get_device(dev);
530         uint8_t f = pci_get_function(dev);
531
532         if (v != PCI_VENDOR_ID_CHELSIO)
533                 return (ENXIO);
534
535         /* Attach only to PF0 of the FPGA */
536         if (d == 0xb000 && f != 0)
537                 return (ENXIO);
538
539         for (i = 0; i < nitems(t5_pciids); i++) {
540                 if (d == t5_pciids[i].device) {
541                         device_set_desc(dev, t5_pciids[i].desc);
542                         return (BUS_PROBE_DEFAULT);
543                 }
544         }
545
546         return (ENXIO);
547 }
548
549 static int
550 t4_attach(device_t dev)
551 {
552         struct adapter *sc;
553         int rc = 0, i, n10g, n1g, rqidx, tqidx;
554         struct intrs_and_queues iaq;
555         struct sge *s;
556 #ifdef TCP_OFFLOAD
557         int ofld_rqidx, ofld_tqidx;
558 #endif
559
560         sc = device_get_softc(dev);
561         sc->dev = dev;
562
563         pci_enable_busmaster(dev);
564         if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
565                 uint32_t v;
566
567                 pci_set_max_read_req(dev, 4096);
568                 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
569                 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
570                 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
571         }
572
573         sc->traceq = -1;
574         mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
575         snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
576             device_get_nameunit(dev));
577
578         snprintf(sc->lockname, sizeof(sc->lockname), "%s",
579             device_get_nameunit(dev));
580         mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
581         sx_xlock(&t4_list_lock);
582         SLIST_INSERT_HEAD(&t4_list, sc, link);
583         sx_xunlock(&t4_list_lock);
584
585         mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
586         TAILQ_INIT(&sc->sfl);
587         callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
588
589         rc = map_bars_0_and_4(sc);
590         if (rc != 0)
591                 goto done; /* error message displayed already */
592
593         /*
594          * This is the real PF# to which we're attaching.  Works from within PCI
595          * passthrough environments too, where pci_get_function() could return a
596          * different PF# depending on the passthrough configuration.  We need to
597          * use the real PF# in all our communication with the firmware.
598          */
599         sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
600         sc->mbox = sc->pf;
601
602         memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
603         sc->an_handler = an_not_handled;
604         for (i = 0; i < nitems(sc->cpl_handler); i++)
605                 sc->cpl_handler[i] = cpl_not_handled;
606         for (i = 0; i < nitems(sc->fw_msg_handler); i++)
607                 sc->fw_msg_handler[i] = fw_msg_not_handled;
608         t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
609         t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
610         t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
611         t4_init_sge_cpl_handlers(sc);
612
613         /* Prepare the adapter for operation */
614         rc = -t4_prep_adapter(sc);
615         if (rc != 0) {
616                 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
617                 goto done;
618         }
619
620         /*
621          * Do this really early, with the memory windows set up even before the
622          * character device.  The userland tool's register i/o and mem read
623          * will work even in "recovery mode".
624          */
625         setup_memwin(sc);
626         sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
627             device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
628             device_get_nameunit(dev));
629         if (sc->cdev == NULL)
630                 device_printf(dev, "failed to create nexus char device.\n");
631         else
632                 sc->cdev->si_drv1 = sc;
633
634         /* Go no further if recovery mode has been requested. */
635         if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
636                 device_printf(dev, "recovery mode.\n");
637                 goto done;
638         }
639
640         /* Prepare the firmware for operation */
641         rc = prep_firmware(sc);
642         if (rc != 0)
643                 goto done; /* error message displayed already */
644
645         rc = get_params__post_init(sc);
646         if (rc != 0)
647                 goto done; /* error message displayed already */
648
649         rc = set_params__post_init(sc);
650         if (rc != 0)
651                 goto done; /* error message displayed already */
652
653         rc = map_bar_2(sc);
654         if (rc != 0)
655                 goto done; /* error message displayed already */
656
657         rc = t4_create_dma_tag(sc);
658         if (rc != 0)
659                 goto done; /* error message displayed already */
660
661         /*
662          * First pass over all the ports - allocate VIs and initialize some
663          * basic parameters like mac address, port type, etc.  We also figure
664          * out whether a port is 10G or 1G and use that information when
665          * calculating how many interrupts to attempt to allocate.
666          */
667         n10g = n1g = 0;
668         for_each_port(sc, i) {
669                 struct port_info *pi;
670
671                 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
672                 sc->port[i] = pi;
673
674                 /* These must be set before t4_port_init */
675                 pi->adapter = sc;
676                 pi->port_id = i;
677
678                 /* Allocate the vi and initialize parameters like mac addr */
679                 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
680                 if (rc != 0) {
681                         device_printf(dev, "unable to initialize port %d: %d\n",
682                             i, rc);
683                         free(pi, M_CXGBE);
684                         sc->port[i] = NULL;
685                         goto done;
686                 }
687
688                 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
689                     device_get_nameunit(dev), i);
690                 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
691                 sc->chan_map[pi->tx_chan] = i;
692
693                 if (is_10G_port(pi) || is_40G_port(pi)) {
694                         n10g++;
695                         pi->tmr_idx = t4_tmr_idx_10g;
696                         pi->pktc_idx = t4_pktc_idx_10g;
697                 } else {
698                         n1g++;
699                         pi->tmr_idx = t4_tmr_idx_1g;
700                         pi->pktc_idx = t4_pktc_idx_1g;
701                 }
702
703                 pi->xact_addr_filt = -1;
704                 pi->linkdnrc = -1;
705
706                 pi->qsize_rxq = t4_qsize_rxq;
707                 pi->qsize_txq = t4_qsize_txq;
708
709                 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
710                 if (pi->dev == NULL) {
711                         device_printf(dev,
712                             "failed to add device for port %d.\n", i);
713                         rc = ENXIO;
714                         goto done;
715                 }
716                 device_set_softc(pi->dev, pi);
717         }
718
719         /*
720          * Interrupt type, # of interrupts, # of rx/tx queues, etc.
721          */
722         rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
723         if (rc != 0)
724                 goto done; /* error message displayed already */
725
726         sc->intr_type = iaq.intr_type;
727         sc->intr_count = iaq.nirq;
728         sc->flags |= iaq.intr_flags;
729
730         s = &sc->sge;
731         s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
732         s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
733         s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
734         s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
735         s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
736
737 #ifdef TCP_OFFLOAD
738         if (is_offload(sc)) {
739
740                 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
741                 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
742                 s->neq += s->nofldtxq + s->nofldrxq;
743                 s->niq += s->nofldrxq;
744
745                 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
746                     M_CXGBE, M_ZERO | M_WAITOK);
747                 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
748                     M_CXGBE, M_ZERO | M_WAITOK);
749         }
750 #endif
751
752         s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
753             M_ZERO | M_WAITOK);
754         s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
755             M_ZERO | M_WAITOK);
756         s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
757             M_ZERO | M_WAITOK);
758         s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
759             M_ZERO | M_WAITOK);
760         s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
761             M_ZERO | M_WAITOK);
762
763         sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
764             M_ZERO | M_WAITOK);
765
766         t4_init_l2t(sc, M_WAITOK);
767
768         /*
769          * Second pass over the ports.  This time we know the number of rx and
770          * tx queues that each port should get.
771          */
772         rqidx = tqidx = 0;
773 #ifdef TCP_OFFLOAD
774         ofld_rqidx = ofld_tqidx = 0;
775 #endif
776         for_each_port(sc, i) {
777                 struct port_info *pi = sc->port[i];
778
779                 if (pi == NULL)
780                         continue;
781
782                 pi->first_rxq = rqidx;
783                 pi->first_txq = tqidx;
784                 if (is_10G_port(pi) || is_40G_port(pi)) {
785                         pi->nrxq = iaq.nrxq10g;
786                         pi->ntxq = iaq.ntxq10g;
787                 } else {
788                         pi->nrxq = iaq.nrxq1g;
789                         pi->ntxq = iaq.ntxq1g;
790                 }
791
792                 if (pi->ntxq > 1)
793                         pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
794                 else
795                         pi->rsrv_noflowq = 0;
796
797                 rqidx += pi->nrxq;
798                 tqidx += pi->ntxq;
799
800 #ifdef TCP_OFFLOAD
801                 if (is_offload(sc)) {
802                         pi->first_ofld_rxq = ofld_rqidx;
803                         pi->first_ofld_txq = ofld_tqidx;
804                         if (is_10G_port(pi) || is_40G_port(pi)) {
805                                 pi->nofldrxq = iaq.nofldrxq10g;
806                                 pi->nofldtxq = iaq.nofldtxq10g;
807                         } else {
808                                 pi->nofldrxq = iaq.nofldrxq1g;
809                                 pi->nofldtxq = iaq.nofldtxq1g;
810                         }
811                         ofld_rqidx += pi->nofldrxq;
812                         ofld_tqidx += pi->nofldtxq;
813                 }
814 #endif
815         }
816
817         rc = setup_intr_handlers(sc);
818         if (rc != 0) {
819                 device_printf(dev,
820                     "failed to setup interrupt handlers: %d\n", rc);
821                 goto done;
822         }
823
824         rc = bus_generic_attach(dev);
825         if (rc != 0) {
826                 device_printf(dev,
827                     "failed to attach all child ports: %d\n", rc);
828                 goto done;
829         }
830
831         device_printf(dev,
832             "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
833             sc->params.pci.width, sc->params.nports, sc->intr_count,
834             sc->intr_type == INTR_MSIX ? "MSI-X" :
835             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
836             sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
837
838         t4_set_desc(sc);
839
840 done:
841         if (rc != 0 && sc->cdev) {
842                 /* cdev was created and so cxgbetool works; recover that way. */
843                 device_printf(dev,
844                     "error during attach, adapter is now in recovery mode.\n");
845                 rc = 0;
846         }
847
848         if (rc != 0)
849                 t4_detach(dev);
850         else
851                 t4_sysctls(sc);
852
853         return (rc);
854 }
855
856 /*
857  * Idempotent
858  */
859 static int
860 t4_detach(device_t dev)
861 {
862         struct adapter *sc;
863         struct port_info *pi;
864         int i, rc;
865
866         sc = device_get_softc(dev);
867
868         if (sc->flags & FULL_INIT_DONE)
869                 t4_intr_disable(sc);
870
871         if (sc->cdev) {
872                 destroy_dev(sc->cdev);
873                 sc->cdev = NULL;
874         }
875
876         rc = bus_generic_detach(dev);
877         if (rc) {
878                 device_printf(dev,
879                     "failed to detach child devices: %d\n", rc);
880                 return (rc);
881         }
882
883         for (i = 0; i < sc->intr_count; i++)
884                 t4_free_irq(sc, &sc->irq[i]);
885
886         for (i = 0; i < MAX_NPORTS; i++) {
887                 pi = sc->port[i];
888                 if (pi) {
889                         t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
890                         if (pi->dev)
891                                 device_delete_child(dev, pi->dev);
892
893                         mtx_destroy(&pi->pi_lock);
894                         free(pi, M_CXGBE);
895                 }
896         }
897
898         if (sc->flags & FULL_INIT_DONE)
899                 adapter_full_uninit(sc);
900
901         if (sc->flags & FW_OK)
902                 t4_fw_bye(sc, sc->mbox);
903
904         if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
905                 pci_release_msi(dev);
906
907         if (sc->regs_res)
908                 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
909                     sc->regs_res);
910
911         if (sc->udbs_res)
912                 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
913                     sc->udbs_res);
914
915         if (sc->msix_res)
916                 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
917                     sc->msix_res);
918
919         if (sc->l2t)
920                 t4_free_l2t(sc->l2t);
921
922 #ifdef TCP_OFFLOAD
923         free(sc->sge.ofld_rxq, M_CXGBE);
924         free(sc->sge.ofld_txq, M_CXGBE);
925 #endif
926         free(sc->irq, M_CXGBE);
927         free(sc->sge.rxq, M_CXGBE);
928         free(sc->sge.txq, M_CXGBE);
929         free(sc->sge.ctrlq, M_CXGBE);
930         free(sc->sge.iqmap, M_CXGBE);
931         free(sc->sge.eqmap, M_CXGBE);
932         free(sc->tids.ftid_tab, M_CXGBE);
933         t4_destroy_dma_tag(sc);
934         if (mtx_initialized(&sc->sc_lock)) {
935                 sx_xlock(&t4_list_lock);
936                 SLIST_REMOVE(&t4_list, sc, adapter, link);
937                 sx_xunlock(&t4_list_lock);
938                 mtx_destroy(&sc->sc_lock);
939         }
940
941         if (mtx_initialized(&sc->tids.ftid_lock))
942                 mtx_destroy(&sc->tids.ftid_lock);
943         if (mtx_initialized(&sc->sfl_lock))
944                 mtx_destroy(&sc->sfl_lock);
945         if (mtx_initialized(&sc->ifp_lock))
946                 mtx_destroy(&sc->ifp_lock);
947
948         bzero(sc, sizeof(*sc));
949
950         return (0);
951 }
952
953
954 static int
955 cxgbe_probe(device_t dev)
956 {
957         char buf[128];
958         struct port_info *pi = device_get_softc(dev);
959
960         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
961         device_set_desc_copy(dev, buf);
962
963         return (BUS_PROBE_DEFAULT);
964 }
965
/*
 * Interface capabilities advertised by every cxgbe port: VLAN offloads,
 * IPv4/IPv6 checksum offload, TSO, jumbo frames, LRO, link-state reporting,
 * and hardware statistics.  All of them are enabled by default.
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
970
/* Newbus attach for a cxgbe port: create and register its ifnet. */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	/* Wire up the standard ifnet entry points. */
	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	/* Advertise and enable all supported capabilities. */
	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	/* Register a handler for VLAN configuration events on this ifnet. */
	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	/* Report the queue configuration chosen during adapter attach. */
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
1028
/* Newbus detach for a cxgbe port: tear down its ifnet and related state. */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait for any in-flight synchronized operation to finish. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout and make sure it is no longer running. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release the busy marker taken above and wake any waiters. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1078
1079 static void
1080 cxgbe_init(void *arg)
1081 {
1082         struct port_info *pi = arg;
1083         struct adapter *sc = pi->adapter;
1084
1085         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1086                 return;
1087         cxgbe_init_synchronized(pi);
1088         end_synchronized_op(sc, 0);
1089 }
1090
/*
 * ifnet if_ioctl entry point.  Most commands are serialized against other
 * configuration operations with begin/end_synchronized_op.
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		/* Validate the requested MTU before touching anything. */
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the MAC if
				 * PROMISC or ALLMULTI actually changed.
				 */
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:	
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO4 requires tx checksumming; drop it if needed. */
			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			/* Likewise, TSO6 requires IPv6 tx checksumming. */
			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			/* Propagate the new LRO setting to every rx queue. */
			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
1264
/*
 * ifnet if_transmit entry point.  Returns 0 if the mbuf was transmitted or
 * queued, or an errno (in which case the mbuf has been freed).
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/*
	 * Pick a tx queue by flowid.  The first rsrv_noflowq queue(s) are
	 * kept out of the flowid hash (they serve traffic without a flowid).
	 */
	if (m->m_flags & M_FLOWID)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
		    + pi->rsrv_noflowq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1332
1333 static void
1334 cxgbe_qflush(struct ifnet *ifp)
1335 {
1336         struct port_info *pi = ifp->if_softc;
1337         struct sge_txq *txq;
1338         int i;
1339         struct mbuf *m;
1340
1341         /* queues do not exist if !PORT_INIT_DONE. */
1342         if (pi->flags & PORT_INIT_DONE) {
1343                 for_each_txq(pi, i, txq) {
1344                         TXQ_LOCK(txq);
1345                         m_freem(txq->m);
1346                         txq->m = NULL;
1347                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1348                                 m_freem(m);
1349                         TXQ_UNLOCK(txq);
1350                 }
1351         }
1352         if_qflush(ifp);
1353 }
1354
1355 static int
1356 cxgbe_media_change(struct ifnet *ifp)
1357 {
1358         struct port_info *pi = ifp->if_softc;
1359
1360         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1361
1362         return (EOPNOTSUPP);
1363 }
1364
1365 static void
1366 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1367 {
1368         struct port_info *pi = ifp->if_softc;
1369         struct ifmedia_entry *cur = pi->media.ifm_cur;
1370         int speed = pi->link_cfg.speed;
1371         int data = (pi->port_type << 8) | pi->mod_type;
1372
1373         if (cur->ifm_data != data) {
1374                 build_medialist(pi);
1375                 cur = pi->media.ifm_cur;
1376         }
1377
1378         ifmr->ifm_status = IFM_AVALID;
1379         if (!pi->link_cfg.link_ok)
1380                 return;
1381
1382         ifmr->ifm_status |= IFM_ACTIVE;
1383
1384         /* active and current will differ iff current media is autoselect. */
1385         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1386                 return;
1387
1388         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1389         if (speed == SPEED_10000)
1390                 ifmr->ifm_active |= IFM_10G_T;
1391         else if (speed == SPEED_1000)
1392                 ifmr->ifm_active |= IFM_1000_T;
1393         else if (speed == SPEED_100)
1394                 ifmr->ifm_active |= IFM_100_TX;
1395         else if (speed == SPEED_10)
1396                 ifmr->ifm_active |= IFM_10_T;
1397         else
1398                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1399                             speed));
1400 }
1401
/*
 * Handle a fatal adapter error: stop the SGE (clear GLOBALENABLE), mask all
 * interrupts, and log an emergency message.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1410
/* Map BAR0 (register window) and BAR4 (MSI-X) of the adapter. */
static int
map_bars_0_and_4(struct adapter *sc)
{
	/* BAR0: the main register window. */
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* Kernel doorbells (KDB) are usable once BAR0 is mapped. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	/* BAR4: MSI-X. */
	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1436
/* Map BAR2, the userspace doorbell BAR (skipped on T4 when RDMA is off). */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC enabled: prefer WC doorbells over UDB. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1492
/*
 * Memory window base addresses and aperture sizes programmed into the PCIe
 * memory access decoders (see setup_memwin).  T4 and T5 differ in window 2.
 */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1504
1505 static void
1506 setup_memwin(struct adapter *sc)
1507 {
1508         const struct memwin *mw;
1509         int i, n;
1510         uint32_t bar0;
1511
1512         if (is_t4(sc)) {
1513                 /*
1514                  * Read low 32b of bar0 indirectly via the hardware backdoor
1515                  * mechanism.  Works from within PCI passthrough environments
1516                  * too, where rman_get_start() can return a different value.  We
1517                  * need to program the T4 memory window decoders with the actual
1518                  * addresses that will be coming across the PCIe link.
1519                  */
1520                 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1521                 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1522
1523                 mw = &t4_memwin[0];
1524                 n = nitems(t4_memwin);
1525         } else {
1526                 /* T5 uses the relative offset inside the PCIe BAR */
1527                 bar0 = 0;
1528
1529                 mw = &t5_memwin[0];
1530                 n = nitems(t5_memwin);
1531         }
1532
1533         for (i = 0; i < n; i++, mw++) {
1534                 t4_write_reg(sc,
1535                     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1536                     (mw->base + bar0) | V_BIR(0) |
1537                     V_WINDOW(ilog2(mw->aperture) - 10));
1538         }
1539
1540         /* flush */
1541         t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1542 }
1543
1544 /*
1545  * Verify that the memory range specified by the addr/len pair is valid and lies
1546  * entirely within a single region (EDCx or MCx).
1547  */
1548 static int
1549 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1550 {
1551         uint32_t em, addr_len, maddr, mlen;
1552
1553         /* Memory can only be accessed in naturally aligned 4 byte units */
1554         if (addr & 3 || len & 3 || len == 0)
1555                 return (EINVAL);
1556
1557         /* Enabled memories */
1558         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1559         if (em & F_EDRAM0_ENABLE) {
1560                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1561                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1562                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1563                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1564                     addr + len <= maddr + mlen)
1565                         return (0);
1566         }
1567         if (em & F_EDRAM1_ENABLE) {
1568                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1569                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1570                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1571                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1572                     addr + len <= maddr + mlen)
1573                         return (0);
1574         }
1575         if (em & F_EXT_MEM_ENABLE) {
1576                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1577                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1578                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1579                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1580                     addr + len <= maddr + mlen)
1581                         return (0);
1582         }
1583         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1584                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1585                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1586                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1587                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1588                     addr + len <= maddr + mlen)
1589                         return (0);
1590         }
1591
1592         return (EFAULT);
1593 }
1594
1595 static int
1596 fwmtype_to_hwmtype(int mtype)
1597 {
1598
1599         switch (mtype) {
1600         case FW_MEMTYPE_EDC0:
1601                 return (MEM_EDC0);
1602         case FW_MEMTYPE_EDC1:
1603                 return (MEM_EDC1);
1604         case FW_MEMTYPE_EXTMEM:
1605                 return (MEM_MC0);
1606         case FW_MEMTYPE_EXTMEM1:
1607                 return (MEM_MC1);
1608         default:
1609                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1610         }
1611 }
1612
1613 /*
1614  * Verify that the memory range specified by the memtype/offset/len pair is
1615  * valid and lies entirely within the memtype specified.  The global address of
1616  * the start of the range is returned in addr.
1617  */
1618 static int
1619 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1620     uint32_t *addr)
1621 {
1622         uint32_t em, addr_len, maddr, mlen;
1623
1624         /* Memory can only be accessed in naturally aligned 4 byte units */
1625         if (off & 3 || len & 3 || len == 0)
1626                 return (EINVAL);
1627
1628         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1629         switch (fwmtype_to_hwmtype(mtype)) {
1630         case MEM_EDC0:
1631                 if (!(em & F_EDRAM0_ENABLE))
1632                         return (EINVAL);
1633                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1634                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1635                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1636                 break;
1637         case MEM_EDC1:
1638                 if (!(em & F_EDRAM1_ENABLE))
1639                         return (EINVAL);
1640                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1641                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1642                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1643                 break;
1644         case MEM_MC:
1645                 if (!(em & F_EXT_MEM_ENABLE))
1646                         return (EINVAL);
1647                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1648                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1649                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1650                 break;
1651         case MEM_MC1:
1652                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1653                         return (EINVAL);
1654                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1655                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1656                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1657                 break;
1658         default:
1659                 return (EINVAL);
1660         }
1661
1662         if (mlen > 0 && off < mlen && off + len <= mlen) {
1663                 *addr = maddr + off;    /* global address */
1664                 return (0);
1665         }
1666
1667         return (EFAULT);
1668 }
1669
1670 static void
1671 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1672 {
1673         const struct memwin *mw;
1674
1675         if (is_t4(sc)) {
1676                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1677                     ("%s: incorrect memwin# (%d)", __func__, win));
1678                 mw = &t4_memwin[win];
1679         } else {
1680                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1681                     ("%s: incorrect memwin# (%d)", __func__, win));
1682                 mw = &t5_memwin[win];
1683         }
1684
1685         if (base != NULL)
1686                 *base = mw->base;
1687         if (aperture != NULL)
1688                 *aperture = mw->aperture;
1689 }
1690
1691 /*
1692  * Positions the memory window such that it can be used to access the specified
1693  * address in the chip's address space.  The return value is the offset of addr
1694  * from the start of the window.
1695  */
1696 static uint32_t
1697 position_memwin(struct adapter *sc, int n, uint32_t addr)
1698 {
1699         uint32_t start, pf;
1700         uint32_t reg;
1701
1702         KASSERT(n >= 0 && n <= 3,
1703             ("%s: invalid window %d.", __func__, n));
1704         KASSERT((addr & 3) == 0,
1705             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1706
1707         if (is_t4(sc)) {
1708                 pf = 0;
1709                 start = addr & ~0xf;    /* start must be 16B aligned */
1710         } else {
1711                 pf = V_PFNUM(sc->pf);
1712                 start = addr & ~0x7f;   /* start must be 128B aligned */
1713         }
1714         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1715
1716         t4_write_reg(sc, reg, start | pf);
1717         t4_read_reg(sc, reg);
1718
1719         return (addr - start);
1720 }
1721
/*
 * Decide on the interrupt type (MSI-X, MSI, or INTx) and the number of NIC
 * and offload rx queues per 10G and 1G port.  Starts from what the tunables
 * request and progressively downsizes until the kernel can actually provide
 * enough interrupt vectors.  On success the vectors are allocated, *iaq
 * describes the final configuration, and 0 is returned.
 */
static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	/* Start with whatever the tunables ask for. */
	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
	iaq->rsrv_noflowq = t4_rsrv_noflowq;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	/* Try MSI-X first, then MSI, then INTx (most to least preferred). */
	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		/* MSI vector counts must be a power of 2 per the PCI spec. */
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				/*
				 * Hand out additional vectors one per 10G
				 * port at a time, as long as all 10G ports
				 * can be bumped together.
				 */
				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				/* Same distribution scheme for the 1G ports. */
				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

		device_printf(sc->dev,
		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
		    itype, rc, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type.  "
	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));

	return (ENXIO);
}
1880
/*
 * Pack the per-chip firmware MAJOR/MINOR/MICRO/BUILD constants into a single
 * 32b version word ('chip' is the T4/T5 token pasted onto the constant names).
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface version constant for the given chip and firmware interface. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)

/*
 * Per-chip firmware bundle information: the KLD that carries the default
 * config file, the module with the firmware image, and the firmware header
 * this driver was compiled against.  Looked up via find_fw_info().
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1928
1929 static struct fw_info *
1930 find_fw_info(int chip)
1931 {
1932         int i;
1933
1934         for (i = 0; i < nitems(fw_info); i++) {
1935                 if (fw_info[i].chip == chip)
1936                         return (&fw_info[i]);
1937         }
1938         return (NULL);
1939 }
1940
1941 /*
1942  * Is the given firmware API compatible with the one the driver was compiled
1943  * with?
1944  */
1945 static int
1946 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1947 {
1948
1949         /* short circuit if it's the exact same firmware version */
1950         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1951                 return (1);
1952
1953         /*
1954          * XXX: Is this too conservative?  Perhaps I should limit this to the
1955          * features that are supported in the driver.
1956          */
1957 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1958         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1959             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1960             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1961                 return (1);
1962 #undef SAME_INTF
1963
1964         return (0);
1965 }
1966
1967 /*
1968  * The firmware in the KLD is usable, but should it be installed?  This routine
1969  * explains itself in detail if it indicates the KLD firmware should be
1970  * installed.
1971  */
1972 static int
1973 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1974 {
1975         const char *reason;
1976
1977         if (!card_fw_usable) {
1978                 reason = "incompatible or unusable";
1979                 goto install;
1980         }
1981
1982         if (k > c) {
1983                 reason = "older than the version bundled with this driver";
1984                 goto install;
1985         }
1986
1987         if (t4_fw_install == 2 && k != c) {
1988                 reason = "different than the version bundled with this driver";
1989                 goto install;
1990         }
1991
1992         return (0);
1993
1994 install:
1995         if (t4_fw_install == 0) {
1996                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1997                     "but the driver is prohibited from installing a different "
1998                     "firmware on the card.\n",
1999                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2000                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
2001
2002                 return (0);
2003         }
2004
2005         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2006             "installing firmware %u.%u.%u.%u on card.\n",
2007             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2008             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2009             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2010             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2011
2012         return (1);
2013 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	/* A non-negative return from t4_fw_hello is the master's mbox/PF. */
	pf = rc;
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		/* Flash the KLD's firmware onto the card. */
		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		/* Nothing workable; report all three versions and give up. */
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		/* Don't say bye if the mailbox may still be in use. */
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Someone else configured the chip; record that fact. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2194
/* Build the query id for a device-wide firmware parameter. */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
/* Build the query id for a per-PF/VF firmware parameter. */
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2201
/*
 * Partition chip resources for use between various PFs, VFs, etc.  This is
 * done via the config file: one is selected (user tunable, card-specific
 * default, or the copy on the card's flash), uploaded to card memory if it
 * came from a module, and then handed to the firmware to pre-process and
 * apply.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name is "<kld_name>_<profile>". */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			/* Fall back to the default config, then to flash. */
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/*
		 * Copy the config file into card memory through memory
		 * window 2, repositioning the window as needed.
		 */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/* Tell the firmware where the config file is and pre-process it. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	/* Warn (but proceed) if the firmware's checksum doesn't match. */
	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

/* Mask a capability field down to what the corresponding tunable allows. */
#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	/* Write the (possibly reduced) capabilities back to the firmware. */
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2387
/*
 * Retrieve parameters that are needed (or nice to have) very early.
 */
static int
get_params__pre_init(struct adapter *sc)
{
        int rc;
        uint32_t param[2], val[2];
        struct fw_devlog_cmd cmd;
        struct devlog_params *dlog = &sc->params.devlog;

        /* Port bitmap and core clock frequency, via the firmware mailbox. */
        param[0] = FW_PARAM_DEV(PORTVEC);
        param[1] = FW_PARAM_DEV(CCLK);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (pre_init): %d.\n", rc);
                return (rc);
        }

        sc->params.portvec = val[0];
        sc->params.nports = bitcount32(val[0]); /* one bit per port */
        sc->params.vpd.cclk = val[1];

        /* Read device log parameters. */
        bzero(&cmd, sizeof(cmd));
        cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        cmd.retval_len16 = htobe32(FW_LEN16(cmd));
        rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get devlog parameters: %d.\n", rc);
                bzero(dlog, sizeof (*dlog));
                rc = 0; /* devlog isn't critical for device operation */
        } else {
                val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
                dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
                /* Address field is in 16B units; convert to bytes. */
                dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
                dlog->size = be32toh(cmd.memsize_devlog);
        }

        return (rc);
}
2432
2433 /*
2434  * Retrieve various parameters that are of interest to the driver.  The device
2435  * has been initialized by the firmware at this point.
2436  */
2437 static int
2438 get_params__post_init(struct adapter *sc)
2439 {
2440         int rc;
2441         uint32_t param[7], val[7];
2442         struct fw_caps_config_cmd caps;
2443
2444         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2445         param[1] = FW_PARAM_PFVF(EQ_START);
2446         param[2] = FW_PARAM_PFVF(FILTER_START);
2447         param[3] = FW_PARAM_PFVF(FILTER_END);
2448         param[4] = FW_PARAM_PFVF(L2T_START);
2449         param[5] = FW_PARAM_PFVF(L2T_END);
2450         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2451         if (rc != 0) {
2452                 device_printf(sc->dev,
2453                     "failed to query parameters (post_init): %d.\n", rc);
2454                 return (rc);
2455         }
2456
2457         sc->sge.iq_start = val[0];
2458         sc->sge.eq_start = val[1];
2459         sc->tids.ftid_base = val[2];
2460         sc->tids.nftids = val[3] - val[2] + 1;
2461         sc->params.ftid_min = val[2];
2462         sc->params.ftid_max = val[3];
2463         sc->vres.l2t.start = val[4];
2464         sc->vres.l2t.size = val[5] - val[4] + 1;
2465         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2466             ("%s: L2 table size (%u) larger than expected (%u)",
2467             __func__, sc->vres.l2t.size, L2T_SIZE));
2468
2469         /* get capabilites */
2470         bzero(&caps, sizeof(caps));
2471         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2472             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2473         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2474         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2475         if (rc != 0) {
2476                 device_printf(sc->dev,
2477                     "failed to get card capabilities: %d.\n", rc);
2478                 return (rc);
2479         }
2480
2481 #define READ_CAPS(x) do { \
2482         sc->x = htobe16(caps.x); \
2483 } while (0)
2484         READ_CAPS(linkcaps);
2485         READ_CAPS(niccaps);
2486         READ_CAPS(toecaps);
2487         READ_CAPS(rdmacaps);
2488         READ_CAPS(iscsicaps);
2489         READ_CAPS(fcoecaps);
2490
2491         if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2492                 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2493                 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2494                 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2495                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2496                 if (rc != 0) {
2497                         device_printf(sc->dev,
2498                             "failed to query NIC parameters: %d.\n", rc);
2499                         return (rc);
2500                 }
2501                 sc->tids.etid_base = val[0];
2502                 sc->params.etid_min = val[0];
2503                 sc->tids.netids = val[1] - val[0] + 1;
2504                 sc->params.netids = sc->tids.netids;
2505                 sc->params.eo_wr_cred = val[2];
2506                 sc->params.ethoffload = 1;
2507         }
2508
2509         if (sc->toecaps) {
2510                 /* query offload-related parameters */
2511                 param[0] = FW_PARAM_DEV(NTID);
2512                 param[1] = FW_PARAM_PFVF(SERVER_START);
2513                 param[2] = FW_PARAM_PFVF(SERVER_END);
2514                 param[3] = FW_PARAM_PFVF(TDDP_START);
2515                 param[4] = FW_PARAM_PFVF(TDDP_END);
2516                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2517                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2518                 if (rc != 0) {
2519                         device_printf(sc->dev,
2520                             "failed to query TOE parameters: %d.\n", rc);
2521                         return (rc);
2522                 }
2523                 sc->tids.ntids = val[0];
2524                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2525                 sc->tids.stid_base = val[1];
2526                 sc->tids.nstids = val[2] - val[1] + 1;
2527                 sc->vres.ddp.start = val[3];
2528                 sc->vres.ddp.size = val[4] - val[3] + 1;
2529                 sc->params.ofldq_wr_cred = val[5];
2530                 sc->params.offload = 1;
2531         }
2532         if (sc->rdmacaps) {
2533                 param[0] = FW_PARAM_PFVF(STAG_START);
2534                 param[1] = FW_PARAM_PFVF(STAG_END);
2535                 param[2] = FW_PARAM_PFVF(RQ_START);
2536                 param[3] = FW_PARAM_PFVF(RQ_END);
2537                 param[4] = FW_PARAM_PFVF(PBL_START);
2538                 param[5] = FW_PARAM_PFVF(PBL_END);
2539                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2540                 if (rc != 0) {
2541                         device_printf(sc->dev,
2542                             "failed to query RDMA parameters(1): %d.\n", rc);
2543                         return (rc);
2544                 }
2545                 sc->vres.stag.start = val[0];
2546                 sc->vres.stag.size = val[1] - val[0] + 1;
2547                 sc->vres.rq.start = val[2];
2548                 sc->vres.rq.size = val[3] - val[2] + 1;
2549                 sc->vres.pbl.start = val[4];
2550                 sc->vres.pbl.size = val[5] - val[4] + 1;
2551
2552                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2553                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2554                 param[2] = FW_PARAM_PFVF(CQ_START);
2555                 param[3] = FW_PARAM_PFVF(CQ_END);
2556                 param[4] = FW_PARAM_PFVF(OCQ_START);
2557                 param[5] = FW_PARAM_PFVF(OCQ_END);
2558                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2559                 if (rc != 0) {
2560                         device_printf(sc->dev,
2561                             "failed to query RDMA parameters(2): %d.\n", rc);
2562                         return (rc);
2563                 }
2564                 sc->vres.qp.start = val[0];
2565                 sc->vres.qp.size = val[1] - val[0] + 1;
2566                 sc->vres.cq.start = val[2];
2567                 sc->vres.cq.size = val[3] - val[2] + 1;
2568                 sc->vres.ocq.start = val[4];
2569                 sc->vres.ocq.size = val[5] - val[4] + 1;
2570         }
2571         if (sc->iscsicaps) {
2572                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2573                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2574                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2575                 if (rc != 0) {
2576                         device_printf(sc->dev,
2577                             "failed to query iSCSI parameters: %d.\n", rc);
2578                         return (rc);
2579                 }
2580                 sc->vres.iscsi.start = val[0];
2581                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2582         }
2583
2584         /*
2585          * We've got the params we wanted to query via the firmware.  Now grab
2586          * some others directly from the chip.
2587          */
2588         rc = t4_read_chip_settings(sc);
2589
2590         return (rc);
2591 }
2592
2593 static int
2594 set_params__post_init(struct adapter *sc)
2595 {
2596         uint32_t param, val;
2597
2598         /* ask for encapsulated CPLs */
2599         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2600         val = 1;
2601         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2602
2603         return (0);
2604 }
2605
2606 #undef FW_PARAM_PFVF
2607 #undef FW_PARAM_DEV
2608
2609 static void
2610 t4_set_desc(struct adapter *sc)
2611 {
2612         char buf[128];
2613         struct adapter_params *p = &sc->params;
2614
2615         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2616             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2617             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2618
2619         device_set_desc_copy(sc->dev, buf);
2620 }
2621
/*
 * Rebuild the port's ifmedia list to match its current port and module type.
 * Called at attach time and whenever a transceiver module change is detected.
 */
static void
build_medialist(struct port_info *pi)
{
        struct ifmedia *media = &pi->media;
        int data, m;

        PORT_LOCK(pi);

        /* Start from scratch; the module may have been swapped. */
        ifmedia_removeall(media);

        m = IFM_ETHER | IFM_FDX;
        /* Encode port and module type so ifmedia callbacks can recover them. */
        data = (pi->port_type << 8) | pi->mod_type;

        switch(pi->port_type) {
        case FW_PORT_TYPE_BT_XFI:
                /* NOTE(review): no ifmedia_set here, unlike other cases --
                 * confirm whether the current media is set elsewhere. */
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                break;

        case FW_PORT_TYPE_BT_XAUI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                /* fall through */

        case FW_PORT_TYPE_BT_SGMII:
                /* BASE-T ports: advertise all rates and default to auto. */
                ifmedia_add(media, m | IFM_1000_T, data, NULL);
                ifmedia_add(media, m | IFM_100_TX, data, NULL);
                ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
                ifmedia_set(media, IFM_ETHER | IFM_AUTO);
                break;

        case FW_PORT_TYPE_CX4:
                ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
                ifmedia_set(media, m | IFM_10G_CX4);
                break;

        case FW_PORT_TYPE_QSFP_10G:
        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
                /* 10G pluggable: media depends on the inserted module. */
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_10G_LR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LR);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_10G_SR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_SR);
                        break;

                case FW_PORT_MOD_TYPE_LRM:
                        ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LRM);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
                        ifmedia_set(media, m | IFM_10G_TWINAX);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no duplex, report no media. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                case FW_PORT_MOD_TYPE_NA:
                case FW_PORT_MOD_TYPE_ER:
                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        case FW_PORT_TYPE_QSFP:
                /* 40G pluggable: media depends on the inserted module. */
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_LR4);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_SR4);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_CR4);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no duplex, report no media. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        default:
                device_printf(pi->dev,
                    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
                    pi->mod_type);
                ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                ifmedia_set(media, m | IFM_UNKNOWN);
                break;
        }

        PORT_UNLOCK(pi);
}
2747
2748 #define FW_MAC_EXACT_CHUNK      7
2749
2750 /*
2751  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2752  * indicates which parameters should be programmed (the rest are left alone).
2753  */
2754 static int
2755 update_mac_settings(struct port_info *pi, int flags)
2756 {
2757         int rc;
2758         struct ifnet *ifp = pi->ifp;
2759         struct adapter *sc = pi->adapter;
2760         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2761
2762         ASSERT_SYNCHRONIZED_OP(sc);
2763         KASSERT(flags, ("%s: not told what to update.", __func__));
2764
2765         if (flags & XGMAC_MTU)
2766                 mtu = ifp->if_mtu;
2767
2768         if (flags & XGMAC_PROMISC)
2769                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2770
2771         if (flags & XGMAC_ALLMULTI)
2772                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2773
2774         if (flags & XGMAC_VLANEX)
2775                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2776
2777         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2778             vlanex, false);
2779         if (rc) {
2780                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2781                 return (rc);
2782         }
2783
2784         if (flags & XGMAC_UCADDR) {
2785                 uint8_t ucaddr[ETHER_ADDR_LEN];
2786
2787                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2788                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2789                     ucaddr, true, true);
2790                 if (rc < 0) {
2791                         rc = -rc;
2792                         if_printf(ifp, "change_mac failed: %d\n", rc);
2793                         return (rc);
2794                 } else {
2795                         pi->xact_addr_filt = rc;
2796                         rc = 0;
2797                 }
2798         }
2799
2800         if (flags & XGMAC_MCADDRS) {
2801                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2802                 int del = 1;
2803                 uint64_t hash = 0;
2804                 struct ifmultiaddr *ifma;
2805                 int i = 0, j;
2806
2807                 if_maddr_rlock(ifp);
2808                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2809                         if (ifma->ifma_addr->sa_family != AF_LINK)
2810                                 continue;
2811                         mcaddr[i++] =
2812                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2813
2814                         if (i == FW_MAC_EXACT_CHUNK) {
2815                                 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2816                                     del, i, mcaddr, NULL, &hash, 0);
2817                                 if (rc < 0) {
2818                                         rc = -rc;
2819                                         for (j = 0; j < i; j++) {
2820                                                 if_printf(ifp,
2821                                                     "failed to add mc address"
2822                                                     " %02x:%02x:%02x:"
2823                                                     "%02x:%02x:%02x rc=%d\n",
2824                                                     mcaddr[j][0], mcaddr[j][1],
2825                                                     mcaddr[j][2], mcaddr[j][3],
2826                                                     mcaddr[j][4], mcaddr[j][5],
2827                                                     rc);
2828                                         }
2829                                         goto mcfail;
2830                                 }
2831                                 del = 0;
2832                                 i = 0;
2833                         }
2834                 }
2835                 if (i > 0) {
2836                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2837                             del, i, mcaddr, NULL, &hash, 0);
2838                         if (rc < 0) {
2839                                 rc = -rc;
2840                                 for (j = 0; j < i; j++) {
2841                                         if_printf(ifp,
2842                                             "failed to add mc address"
2843                                             " %02x:%02x:%02x:"
2844                                             "%02x:%02x:%02x rc=%d\n",
2845                                             mcaddr[j][0], mcaddr[j][1],
2846                                             mcaddr[j][2], mcaddr[j][3],
2847                                             mcaddr[j][4], mcaddr[j][5],
2848                                             rc);
2849                                 }
2850                                 goto mcfail;
2851                         }
2852                 }
2853
2854                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2855                 if (rc != 0)
2856                         if_printf(ifp, "failed to set mc address hash: %d", rc);
2857 mcfail:
2858                 if_maddr_runlock(ifp);
2859         }
2860
2861         return (rc);
2862 }
2863
2864 int
2865 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2866     char *wmesg)
2867 {
2868         int rc, pri;
2869
2870 #ifdef WITNESS
2871         /* the caller thinks it's ok to sleep, but is it really? */
2872         if (flags & SLEEP_OK)
2873                 pause("t4slptst", 1);
2874 #endif
2875
2876         if (INTR_OK)
2877                 pri = PCATCH;
2878         else
2879                 pri = 0;
2880
2881         ADAPTER_LOCK(sc);
2882         for (;;) {
2883
2884                 if (pi && IS_DOOMED(pi)) {
2885                         rc = ENXIO;
2886                         goto done;
2887                 }
2888
2889                 if (!IS_BUSY(sc)) {
2890                         rc = 0;
2891                         break;
2892                 }
2893
2894                 if (!(flags & SLEEP_OK)) {
2895                         rc = EBUSY;
2896                         goto done;
2897                 }
2898
2899                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2900                         rc = EINTR;
2901                         goto done;
2902                 }
2903         }
2904
2905         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2906         SET_BUSY(sc);
2907 #ifdef INVARIANTS
2908         sc->last_op = wmesg;
2909         sc->last_op_thr = curthread;
2910 #endif
2911
2912 done:
2913         if (!(flags & HOLD_LOCK) || rc)
2914                 ADAPTER_UNLOCK(sc);
2915
2916         return (rc);
2917 }
2918
/*
 * Release exclusive access to the adapter acquired by begin_synchronized_op.
 * Pass LOCK_HELD if begin_synchronized_op was called with HOLD_LOCK (i.e. the
 * caller still owns the adapter lock); otherwise the lock is taken here.
 * Wakes any thread sleeping in begin_synchronized_op.
 */
void
end_synchronized_op(struct adapter *sc, int flags)
{

        if (flags & LOCK_HELD)
                ADAPTER_LOCK_ASSERT_OWNED(sc);
        else
                ADAPTER_LOCK(sc);

        KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
        CLR_BUSY(sc);
        wakeup(&sc->flags);     /* matches mtx_sleep in begin_synchronized_op */
        ADAPTER_UNLOCK(sc);
}
2933
/*
 * Bring the port up: full adapter/port init if not yet done, program the MAC,
 * start the link, and enable the virtual interface.  Must be called with the
 * synchronized-op "lock" held.  On any failure the port is torn back down via
 * cxgbe_uninit_synchronized.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* Adapter-wide init must precede per-port init. */
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        if (!(pi->flags & PORT_INIT_DONE) &&
            ((rc = port_full_init(pi)) != 0))
                return (rc); /* error message displayed already */

        rc = update_mac_settings(pi, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
        if (rc != 0) {
                if_printf(ifp, "start_link failed: %d\n", rc);
                goto done;
        }

        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /*
         * The first iq of the first port to come up is used for tracing.
         */
        if (sc->traceq < 0) {
                sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
                /* T4 and T5 keep this control register at different offsets. */
                t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
                    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
                    V_QUEUENUMBER(sc->traceq));
                pi->flags |= HAS_TRACEQ;
        }

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Start the 1Hz port tick (stats, link watchdog). */
        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);

        return (rc);
}
2997
/*
 * Idempotent.
 *
 * Take the port down: disable the VI, mark the interface not-running, and
 * report the link as down.  Queues, interrupts, and the port tick are left
 * intact (see comment below).  Must hold the synchronized-op "lock".
 */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc;

        ASSERT_SYNCHRONIZED_OP(sc);

        /*
         * Disable the VI so that all its data in either direction is discarded
         * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
         * tick) intact as the TP can deliver negative advice or data that it's
         * holding in its RAM (for an offloaded connection) even after the VI is
         * disabled.
         */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
        if (rc) {
                if_printf(ifp, "disable_vi failed: %d\n", rc);
                return (rc);
        }

        clrbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Report the link as down; -1 = no link-down reason recorded. */
        pi->link_cfg.link_ok = 0;
        pi->link_cfg.speed = 0;
        pi->linkdnrc = -1;
        t4_os_link_changed(sc, pi->port_id, 0, -1);

        return (0);
}
3035
/*
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 */
static int
setup_intr_handlers(struct adapter *sc)
{
        int rc, rid, p, q;
        char s[8];
        struct irq *irq;
        struct port_info *pi;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif

        /*
         * Setup interrupts.
         */
        irq = &sc->irq[0];
        /* INTx uses rid 0; MSI/MSI-X resource ids start at 1. */
        rid = sc->intr_type == INTR_INTX ? 0 : 1;
        if (sc->intr_count == 1) {
                /* Single vector: one handler services everything. */
                KASSERT(!(sc->flags & INTR_DIRECT),
                    ("%s: single interrupt && INTR_DIRECT?", __func__));

                rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
                if (rc != 0)
                        return (rc);
        } else {
                /* Multiple interrupts. */
                KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));

                /* The first one is always error intr */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /* The second one is always the firmware event queue */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
                    "evt");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /*
                 * Note that if INTR_DIRECT is not set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queueus will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, p) {
                        pi = sc->port[p];

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        /* One vector per NIC rx queue, named "port.queue". */
                        rxq = &sc->sge.rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                snprintf(s, sizeof(s), "%d.%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
                                    s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT))
                                continue;
ofld_queues:
                        /*
                         * Offload rx queues use "port,queue" (comma, not dot)
                         * to distinguish them from NIC queues in vmstat -i.
                         */
                        ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                snprintf(s, sizeof(s), "%d,%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr,
                                    ofld_rxq, s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }
#endif
                }
        }

        return (0);
}
3139
/*
 * One-time adapter-wide initialization: set up the adapter queues, create the
 * driver task queues, and enable interrupts.  Sets FULL_INIT_DONE on success;
 * on failure everything done so far is undone via adapter_full_uninit.
 */
static int
adapter_full_init(struct adapter *sc)
{
        int rc, i;

        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
        KASSERT((sc->flags & FULL_INIT_DONE) == 0,
            ("%s: FULL_INIT_DONE already", __func__));

        /*
         * queues that belong to the adapter (not any particular port).
         */
        rc = t4_setup_adapter_queues(sc);
        if (rc != 0)
                goto done;

        /* One taskqueue (with one thread each) per entry in sc->tq. */
        for (i = 0; i < nitems(sc->tq); i++) {
                sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
                    taskqueue_thread_enqueue, &sc->tq[i]);
                if (sc->tq[i] == NULL) {
                        device_printf(sc->dev,
                            "failed to allocate task queue %d\n", i);
                        rc = ENOMEM;
                        goto done;
                }
                taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
                    device_get_nameunit(sc->dev), i);
        }

        t4_intr_enable(sc);
        sc->flags |= FULL_INIT_DONE;
done:
        if (rc != 0)
                adapter_full_uninit(sc);

        return (rc);
}
3177
3178 static int
3179 adapter_full_uninit(struct adapter *sc)
3180 {
3181         int i;
3182
3183         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3184
3185         t4_teardown_adapter_queues(sc);
3186
3187         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3188                 taskqueue_free(sc->tq[i]);
3189                 sc->tq[i] = NULL;
3190         }
3191
3192         sc->flags &= ~FULL_INIT_DONE;
3193
3194         return (0);
3195 }
3196
3197 static int
3198 port_full_init(struct port_info *pi)
3199 {
3200         struct adapter *sc = pi->adapter;
3201         struct ifnet *ifp = pi->ifp;
3202         uint16_t *rss;
3203         struct sge_rxq *rxq;
3204         int rc, i, j;
3205
3206         ASSERT_SYNCHRONIZED_OP(sc);
3207         KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3208             ("%s: PORT_INIT_DONE already", __func__));
3209
3210         sysctl_ctx_init(&pi->ctx);
3211         pi->flags |= PORT_SYSCTL_CTX;
3212
3213         /*
3214          * Allocate tx/rx/fl queues for this port.
3215          */
3216         rc = t4_setup_port_queues(pi);
3217         if (rc != 0)
3218                 goto done;      /* error message displayed already */
3219
3220         /*
3221          * Setup RSS for this port.  Save a copy of the RSS table for later use.
3222          */
3223         rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3224         for (i = 0; i < pi->rss_size;) {
3225                 for_each_rxq(pi, j, rxq) {
3226                         rss[i++] = rxq->iq.abs_id;
3227                         if (i == pi->rss_size)
3228                                 break;
3229                 }
3230         }
3231
3232         rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3233             pi->rss_size);
3234         if (rc != 0) {
3235                 if_printf(ifp, "rss_config failed: %d\n", rc);
3236                 goto done;
3237         }
3238
3239         pi->rss = rss;
3240         pi->flags |= PORT_INIT_DONE;
3241 done:
3242         if (rc != 0)
3243                 port_full_uninit(pi);
3244
3245         return (rc);
3246 }
3247
3248 /*
3249  * Idempotent.
3250  */
3251 static int
3252 port_full_uninit(struct port_info *pi)
3253 {
3254         struct adapter *sc = pi->adapter;
3255         int i;
3256         struct sge_rxq *rxq;
3257         struct sge_txq *txq;
3258 #ifdef TCP_OFFLOAD
3259         struct sge_ofld_rxq *ofld_rxq;
3260         struct sge_wrq *ofld_txq;
3261 #endif
3262
3263         if (pi->flags & PORT_INIT_DONE) {
3264
3265                 /* Need to quiesce queues.  XXX: ctrl queues? */
3266
3267                 for_each_txq(pi, i, txq) {
3268                         quiesce_eq(sc, &txq->eq);
3269                 }
3270
3271 #ifdef TCP_OFFLOAD
3272                 for_each_ofld_txq(pi, i, ofld_txq) {
3273                         quiesce_eq(sc, &ofld_txq->eq);
3274                 }
3275 #endif
3276
3277                 for_each_rxq(pi, i, rxq) {
3278                         quiesce_iq(sc, &rxq->iq);
3279                         quiesce_fl(sc, &rxq->fl);
3280                 }
3281
3282 #ifdef TCP_OFFLOAD
3283                 for_each_ofld_rxq(pi, i, ofld_rxq) {
3284                         quiesce_iq(sc, &ofld_rxq->iq);
3285                         quiesce_fl(sc, &ofld_rxq->fl);
3286                 }
3287 #endif
3288                 free(pi->rss, M_CXGBE);
3289         }
3290
3291         t4_teardown_port_queues(pi);
3292         pi->flags &= ~PORT_INIT_DONE;
3293
3294         return (0);
3295 }
3296
/*
 * Quiesce an egress queue: mark it doomed, wait out any pending credit
 * flush, and drain the queue's callout and task so nothing is left
 * running against it.  Sleeps; must not be called with the eq locked.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3316
3317 static void
3318 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3319 {
3320         (void) sc;      /* unused */
3321
3322         /* Synchronize with the interrupt handler */
3323         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3324                 pause("iqfree", 1);
3325 }
3326
/*
 * Quiesce a freelist: mark it doomed (sfl_lock taken before the fl lock,
 * matching the starvation handling elsewhere in the driver) and drain the
 * adapter's starving-freelist callout.  The fl must no longer be starving
 * once the callout has drained.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3340
3341 static int
3342 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3343     driver_intr_t *handler, void *arg, char *name)
3344 {
3345         int rc;
3346
3347         irq->rid = rid;
3348         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3349             RF_SHAREABLE | RF_ACTIVE);
3350         if (irq->res == NULL) {
3351                 device_printf(sc->dev,
3352                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3353                 return (ENOMEM);
3354         }
3355
3356         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3357             NULL, handler, arg, &irq->tag);
3358         if (rc != 0) {
3359                 device_printf(sc->dev,
3360                     "failed to setup interrupt for rid %d, name %s: %d\n",
3361                     rid, name, rc);
3362         } else if (name)
3363                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3364
3365         return (rc);
3366 }
3367
3368 static int
3369 t4_free_irq(struct adapter *sc, struct irq *irq)
3370 {
3371         if (irq->tag)
3372                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3373         if (irq->res)
3374                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3375
3376         bzero(irq, sizeof(*irq));
3377
3378         return (0);
3379 }
3380
3381 static void
3382 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3383     unsigned int end)
3384 {
3385         uint32_t *p = (uint32_t *)(buf + start);
3386
3387         for ( ; start <= end; start += sizeof(uint32_t))
3388                 *p++ = t4_read_reg(sc, start);
3389 }
3390
/*
 * Fill buf with a dump of the chip's registers (for the ioctl that backs
 * cxgbetool's regdump).  The version field encodes the chip id and rev so
 * userland can pick the right register definitions.  The tables below are
 * {start, end} pairs (both inclusive) of register address ranges that are
 * safe to read on T4 and T5 respectively; buf is assumed to be large
 * enough for the highest address in the selected table.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* Readable register ranges on T4 chips. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11110,
		0x11118, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* Readable register ranges on T5 chips. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x11110,
		0x11118, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d00, 0x30d00,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e00, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32200, 0x32200,
		0x32208, 0x32240,
		0x32248, 0x32280,
		0x32288, 0x322c0,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d00, 0x34d00,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e00, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36200, 0x36200,
		0x36208, 0x36240,
		0x36248, 0x36280,
		0x36288, 0x362c0,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d00, 0x38d00,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e00, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a200, 0x3a200,
		0x3a208, 0x3a240,
		0x3a248, 0x3a280,
		0x3a288, 0x3a2c0,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd00, 0x3cd00,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de00, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e200, 0x3e200,
		0x3e208, 0x3e240,
		0x3e248, 0x3e280,
		0x3e288, 0x3e2c0,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Pick the range table that matches the chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	/* Chip id in the low bits, revision above it. */
	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	/* Each table entry is a {start, end} pair; dump each range. */
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4068
4069 static void
4070 cxgbe_tick(void *arg)
4071 {
4072         struct port_info *pi = arg;
4073         struct adapter *sc = pi->adapter;
4074         struct ifnet *ifp = pi->ifp;
4075         struct sge_txq *txq;
4076         int i, drops;
4077         struct port_stats *s = &pi->stats;
4078
4079         PORT_LOCK(pi);
4080         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4081                 PORT_UNLOCK(pi);
4082                 return; /* without scheduling another callout */
4083         }
4084
4085         t4_get_port_stats(sc, pi->tx_chan, s);
4086
4087         ifp->if_opackets = s->tx_frames - s->tx_pause;
4088         ifp->if_ipackets = s->rx_frames - s->rx_pause;
4089         ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4090         ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4091         ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4092         ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4093         ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4094             s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4095             s->rx_trunc3;
4096         for (i = 0; i < 4; i++) {
4097                 if (pi->rx_chan_map & (1 << i)) {
4098                         uint32_t v;
4099
4100                         /*
4101                          * XXX: indirect reads from the same ADDR/DATA pair can
4102                          * race with each other.
4103                          */
4104                         t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
4105                             1, A_TP_MIB_TNL_CNG_DROP_0 + i);
4106                         ifp->if_iqdrops += v;
4107                 }
4108         }
4109
4110         drops = s->tx_drop;
4111         for_each_txq(pi, i, txq)
4112                 drops += txq->br->br_drops;
4113         ifp->if_snd.ifq_drops = drops;
4114
4115         ifp->if_oerrors = s->tx_error_frames;
4116         ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4117             s->rx_fcs_err + s->rx_len_err;
4118
4119         callout_schedule(&pi->tick, hz);
4120         PORT_UNLOCK(pi);
4121 }
4122
4123 static void
4124 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4125 {
4126         struct ifnet *vlan;
4127
4128         if (arg != ifp || ifp->if_type != IFT_ETHER)
4129                 return;
4130
4131         vlan = VLAN_DEVAT(ifp, vid);
4132         VLAN_SETCOOKIE(vlan, ifp);
4133 }
4134
/*
 * Default CPL message handler.  Installed in every slot of the adapter's CPL
 * dispatch table that has no real handler (see t4_register_cpl_handler).
 * Reaching it means an unexpected opcode arrived: panic under INVARIANTS,
 * otherwise log and drop the payload.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
        panic("%s: opcode 0x%02x on iq %p with payload %p",
            __func__, rss->opcode, iq, m);
#else
        log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
            __func__, rss->opcode, iq, m);
        m_freem(m);
#endif
        return (EDOOFUS);
}
4149
4150 int
4151 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4152 {
4153         uintptr_t *loc, new;
4154
4155         if (opcode >= nitems(sc->cpl_handler))
4156                 return (EINVAL);
4157
4158         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4159         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4160         atomic_store_rel_ptr(loc, new);
4161
4162         return (0);
4163 }
4164
/*
 * Default async-notification handler (see t4_register_an_handler).  Reaching
 * it means an async notification arrived with no consumer registered: panic
 * under INVARIANTS, otherwise just log it.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
        panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
        log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
            __func__, iq, ctrl);
#endif
        return (EDOOFUS);
}
4177
4178 int
4179 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4180 {
4181         uintptr_t *loc, new;
4182
4183         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4184         loc = (uintptr_t *) &sc->an_handler;
4185         atomic_store_rel_ptr(loc, new);
4186
4187         return (0);
4188 }
4189
/*
 * Default firmware-message handler (see t4_register_fw_msg_handler).  The
 * rpl pointer is the data[] member of the containing cpl_fw6_msg; recover
 * the full CPL to report its type.  Panic under INVARIANTS, otherwise log.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
        const struct cpl_fw6_msg *cpl =
            __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
        panic("%s: fw_msg type %d", __func__, cpl->type);
#else
        log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
        return (EDOOFUS);
}
4203
4204 int
4205 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4206 {
4207         uintptr_t *loc, new;
4208
4209         if (type >= nitems(sc->fw_msg_handler))
4210                 return (EINVAL);
4211
4212         /*
4213          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4214          * handler dispatch table.  Reject any attempt to install a handler for
4215          * this subtype.
4216          */
4217         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4218                 return (EINVAL);
4219
4220         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4221         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4222         atomic_store_rel_ptr(loc, new);
4223
4224         return (0);
4225 }
4226
4227 static int
4228 t4_sysctls(struct adapter *sc)
4229 {
4230         struct sysctl_ctx_list *ctx;
4231         struct sysctl_oid *oid;
4232         struct sysctl_oid_list *children, *c0;
4233         static char *caps[] = {
4234                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4235                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4236                     "\6HASHFILTER\7ETHOFLD",
4237                 "\20\1TOE",                             /* caps[2] toecaps */
4238                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4239                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4240                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4241                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4242                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4243                     "\4PO_INITIAOR\5PO_TARGET"
4244         };
4245         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4246
4247         ctx = device_get_sysctl_ctx(sc->dev);
4248
4249         /*
4250          * dev.t4nex.X.
4251          */
4252         oid = device_get_sysctl_tree(sc->dev);
4253         c0 = children = SYSCTL_CHILDREN(oid);
4254
4255         sc->sc_do_rxcopy = 1;
4256         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4257             &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4258
4259         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4260             sc->params.nports, "# of ports");
4261
4262         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4263             NULL, chip_rev(sc), "chip hardware revision");
4264
4265         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4266             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4267
4268         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4269             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4270
4271         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4272             sc->cfcsum, "config file checksum");
4273
4274         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4275             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4276             sysctl_bitfield, "A", "available doorbells");
4277
4278         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4279             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4280             sysctl_bitfield, "A", "available link capabilities");
4281
4282         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4283             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4284             sysctl_bitfield, "A", "available NIC capabilities");
4285
4286         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4287             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4288             sysctl_bitfield, "A", "available TCP offload capabilities");
4289
4290         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4291             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4292             sysctl_bitfield, "A", "available RDMA capabilities");
4293
4294         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4295             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4296             sysctl_bitfield, "A", "available iSCSI capabilities");
4297
4298         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4299             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4300             sysctl_bitfield, "A", "available FCoE capabilities");
4301
4302         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4303             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4304
4305         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4306             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4307             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4308             "interrupt holdoff timer values (us)");
4309
4310         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4311             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4312             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4313             "interrupt holdoff packet counter values");
4314
4315         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4316             NULL, sc->tids.nftids, "number of filters");
4317
4318         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4319             CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4320             "chip temperature (in Celsius)");
4321
4322         t4_sge_sysctls(sc, ctx, children);
4323
4324         sc->lro_timeout = 100;
4325         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4326             &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4327
4328 #ifdef SBUF_DRAIN
4329         /*
4330          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4331          */
4332         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4333             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4334             "logs and miscellaneous information");
4335         children = SYSCTL_CHILDREN(oid);
4336
4337         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4338             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4339             sysctl_cctrl, "A", "congestion control");
4340
4341         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4342             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4343             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4344
4345         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4346             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4347             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4348
4349         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4350             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4351             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4352
4353         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4354             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4355             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4356
4357         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4358             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4359             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4360
4361         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4362             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4363             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4364
4365         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4366             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4367             sysctl_cim_la, "A", "CIM logic analyzer");
4368
4369         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4370             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4371             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4372
4373         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4374             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4375             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4376
4377         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4378             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4379             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4380
4381         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4382             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4383             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4384
4385         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4386             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4387             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4388
4389         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4390             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4391             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4392
4393         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4394             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4395             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4396
4397         if (is_t5(sc)) {
4398                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4399                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4400                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4401
4402                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4403                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4404                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4405         }
4406
4407         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4408             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4409             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4410
4411         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4412             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4413             sysctl_cim_qcfg, "A", "CIM queue configuration");
4414
4415         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4416             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4417             sysctl_cpl_stats, "A", "CPL statistics");
4418
4419         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4420             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4421             sysctl_ddp_stats, "A", "DDP statistics");
4422
4423         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4424             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4425             sysctl_devlog, "A", "firmware's device log");
4426
4427         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4428             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4429             sysctl_fcoe_stats, "A", "FCoE statistics");
4430
4431         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4432             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4433             sysctl_hw_sched, "A", "hardware scheduler ");
4434
4435         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4436             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4437             sysctl_l2t, "A", "hardware L2 table");
4438
4439         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4440             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4441             sysctl_lb_stats, "A", "loopback statistics");
4442
4443         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4444             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4445             sysctl_meminfo, "A", "memory regions");
4446
4447         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4448             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4449             sysctl_mps_tcam, "A", "MPS TCAM entries");
4450
4451         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4452             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4453             sysctl_path_mtus, "A", "path MTUs");
4454
4455         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4456             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4457             sysctl_pm_stats, "A", "PM statistics");
4458
4459         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4460             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4461             sysctl_rdma_stats, "A", "RDMA statistics");
4462
4463         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4464             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4465             sysctl_tcp_stats, "A", "TCP statistics");
4466
4467         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4468             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4469             sysctl_tids, "A", "TID information");
4470
4471         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4472             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4473             sysctl_tp_err_stats, "A", "TP error statistics");
4474
4475         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4476             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4477             sysctl_tp_la, "A", "TP logic analyzer");
4478
4479         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4480             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4481             sysctl_tx_rate, "A", "Tx rate");
4482
4483         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4484             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4485             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4486
4487         if (is_t5(sc)) {
4488                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4489                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4490                     sysctl_wcwr_stats, "A", "write combined work requests");
4491         }
4492 #endif
4493
4494 #ifdef TCP_OFFLOAD
4495         if (is_offload(sc)) {
4496                 /*
4497                  * dev.t4nex.X.toe.
4498                  */
4499                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4500                     NULL, "TOE parameters");
4501                 children = SYSCTL_CHILDREN(oid);
4502
4503                 sc->tt.sndbuf = 256 * 1024;
4504                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4505                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4506
4507                 sc->tt.ddp = 0;
4508                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4509                     &sc->tt.ddp, 0, "DDP allowed");
4510
4511                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4512                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4513                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4514
4515                 sc->tt.ddp_thres =
4516                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4517                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4518                     &sc->tt.ddp_thres, 0, "DDP threshold");
4519
4520                 sc->tt.rx_coalesce = 1;
4521                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4522                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4523         }
4524 #endif
4525
4526
4527         return (0);
4528 }
4529
/*
 * Attach the per-port sysctls under dev.cxgbe.X (queue knobs) and
 * dev.cxgbe.X.stats (MAC counters).  The sysctl context belongs to the
 * port device, so everything is torn down automatically at detach.
 * Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;
        struct adapter *sc = pi->adapter;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
           CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
        /* BT PHYs expose extra diagnostics via sysctl_btphy (arg2 selects). */
        if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
                    "PHY temperature (in Celsius)");
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
                    "PHY firmware version");
        }
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
            CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
            "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &pi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &pi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &pi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif

        /* Tunable interrupt holdoff and queue sizing (writable). */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Each stat below reads a 64-bit MPS port counter register on demand via
 * sysctl_handle_t4_reg64; arg2 is the register offset for this port's
 * tx channel.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/*
 * These expose the software copies in pi->stats, which cxgbe_tick refreshes
 * about once a second.
 */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /* We get these from port_stats and they may be stale by upto 1s */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
4757
4758 static int
4759 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4760 {
4761         int rc, *i;
4762         struct sbuf sb;
4763
4764         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4765         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4766                 sbuf_printf(&sb, "%d ", *i);
4767         sbuf_trim(&sb);
4768         sbuf_finish(&sb);
4769         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4770         sbuf_delete(&sb);
4771         return (rc);
4772 }
4773
4774 static int
4775 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4776 {
4777         int rc;
4778         struct sbuf *sb;
4779
4780         rc = sysctl_wire_old_buffer(req, 0);
4781         if (rc != 0)
4782                 return(rc);
4783
4784         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4785         if (sb == NULL)
4786                 return (ENOMEM);
4787
4788         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4789         rc = sbuf_finish(sb);
4790         sbuf_delete(sb);
4791
4792         return (rc);
4793 }
4794
/*
 * Read a register from the port's BASE-T PHY over MDIO and report it.
 * arg2 (op) selects the register: 0 reads 0xc820 and keeps only the
 * high byte, non-zero reads 0x20.  NOTE(review): register meanings are
 * not visible here (see the XXX below) — confirm against the PHY
 * datasheet before relying on them.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        int op = arg2;
        struct adapter *sc = pi->adapter;
        u_int v;
        int rc;

        rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
        if (rc)
                return (rc);
        /* XXX: magic numbers */
        rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
            &v);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);
        /* for op 0 the value of interest is in the high byte */
        if (op == 0)
                v /= 256;

        rc = sysctl_handle_int(oidp, &v, 0, req);
        return (rc);
}
4819
4820 static int
4821 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4822 {
4823         struct port_info *pi = arg1;
4824         int rc, val;
4825
4826         val = pi->rsrv_noflowq;
4827         rc = sysctl_handle_int(oidp, &val, 0, req);
4828         if (rc != 0 || req->newptr == NULL)
4829                 return (rc);
4830
4831         if ((val >= 1) && (pi->ntxq > 1))
4832                 pi->rsrv_noflowq = 1;
4833         else
4834                 pi->rsrv_noflowq = 0;
4835
4836         return (rc);
4837 }
4838
/*
 * Interrupt holdoff timer index for this port's rx queues (and its
 * offload rx queues when TCP_OFFLOAD is compiled in).  Valid range is
 * 0 to SGE_NTIMERS - 1.  Unlike pktc_idx, this may be changed while
 * the queues exist: the new value is pushed into every queue's
 * intr_params.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        struct adapter *sc = pi->adapter;
        int idx, rc, i;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif
        uint8_t v;

        idx = pi->tmr_idx;

        rc = sysctl_handle_int(oidp, &idx, 0, req);
        if (rc != 0 || req->newptr == NULL)
                return (rc);

        if (idx < 0 || idx >= SGE_NTIMERS)
                return (EINVAL);

        rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4tmr");
        if (rc)
                return (rc);

        /* keep the packet-count threshold enabled iff pktc_idx is set */
        v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
        for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
                /* no 8-bit release store on this arch; plain store */
                rxq->iq.intr_params = v;
#endif
        }
#ifdef TCP_OFFLOAD
        for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
                ofld_rxq->iq.intr_params = v;
#endif
        }
#endif
        pi->tmr_idx = idx;

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
4887
4888 static int
4889 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4890 {
4891         struct port_info *pi = arg1;
4892         struct adapter *sc = pi->adapter;
4893         int idx, rc;
4894
4895         idx = pi->pktc_idx;
4896
4897         rc = sysctl_handle_int(oidp, &idx, 0, req);
4898         if (rc != 0 || req->newptr == NULL)
4899                 return (rc);
4900
4901         if (idx < -1 || idx >= SGE_NCOUNTERS)
4902                 return (EINVAL);
4903
4904         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4905             "t4pktc");
4906         if (rc)
4907                 return (rc);
4908
4909         if (pi->flags & PORT_INIT_DONE)
4910                 rc = EBUSY; /* cannot be changed once the queues are created */
4911         else
4912                 pi->pktc_idx = idx;
4913
4914         end_synchronized_op(sc, LOCK_HELD);
4915         return (rc);
4916 }
4917
4918 static int
4919 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4920 {
4921         struct port_info *pi = arg1;
4922         struct adapter *sc = pi->adapter;
4923         int qsize, rc;
4924
4925         qsize = pi->qsize_rxq;
4926
4927         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4928         if (rc != 0 || req->newptr == NULL)
4929                 return (rc);
4930
4931         if (qsize < 128 || (qsize & 7))
4932                 return (EINVAL);
4933
4934         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4935             "t4rxqs");
4936         if (rc)
4937                 return (rc);
4938
4939         if (pi->flags & PORT_INIT_DONE)
4940                 rc = EBUSY; /* cannot be changed once the queues are created */
4941         else
4942                 pi->qsize_rxq = qsize;
4943
4944         end_synchronized_op(sc, LOCK_HELD);
4945         return (rc);
4946 }
4947
4948 static int
4949 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4950 {
4951         struct port_info *pi = arg1;
4952         struct adapter *sc = pi->adapter;
4953         int qsize, rc;
4954
4955         qsize = pi->qsize_txq;
4956
4957         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4958         if (rc != 0 || req->newptr == NULL)
4959                 return (rc);
4960
4961         /* bufring size must be powerof2 */
4962         if (qsize < 128 || !powerof2(qsize))
4963                 return (EINVAL);
4964
4965         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4966             "t4txqs");
4967         if (rc)
4968                 return (rc);
4969
4970         if (pi->flags & PORT_INIT_DONE)
4971                 rc = EBUSY; /* cannot be changed once the queues are created */
4972         else
4973                 pi->qsize_txq = qsize;
4974
4975         end_synchronized_op(sc, LOCK_HELD);
4976         return (rc);
4977 }
4978
4979 static int
4980 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4981 {
4982         struct adapter *sc = arg1;
4983         int reg = arg2;
4984         uint64_t val;
4985
4986         val = t4_read_reg64(sc, reg);
4987
4988         return (sysctl_handle_64(oidp, &val, 0, req));
4989 }
4990
4991 static int
4992 sysctl_temperature(SYSCTL_HANDLER_ARGS)
4993 {
4994         struct adapter *sc = arg1;
4995         int rc, t;
4996         uint32_t param, val;
4997
4998         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4999         if (rc)
5000                 return (rc);
5001         param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5002             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
5003             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
5004         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
5005         end_synchronized_op(sc, 0);
5006         if (rc)
5007                 return (rc);
5008
5009         /* unknown is returned as 0 but we display -1 in that case */
5010         t = val == 0 ? -1 : val;
5011
5012         rc = sysctl_handle_int(oidp, &t, 0, req);
5013         return (rc);
5014 }
5015
5016 #ifdef SBUF_DRAIN
/*
 * Dump the congestion-control table: for each of the NCCTRL_WIN
 * windows, the 16 per-MTU increment values (8 per output line) plus
 * the a_wnd parameter and the decrement factor (b_wnd indexes the
 * dec_fac[] name table).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t incr[NMTUS][NCCTRL_WIN];
        static const char *dec_fac[] = {
                "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
                "0.9375"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_read_cong_tbl(sc, incr);

        for (i = 0; i < NCCTRL_WIN; ++i) {
                sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
                    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
                    incr[5][i], incr[6][i], incr[7][i]);
                sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
                    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
                    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
                    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5054
/*
 * Names of the CIM queues: the IBQs first, then the OBQs.  Indexed by
 * the un-rebased qid that sysctl_cim_ibq_obq() receives in arg2.
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
        "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",   /* ibq's */
        "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
        "SGE0-RX", "SGE1-RX"    /* additional obq's (T5 onwards) */
};
5060
5061 static int
5062 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5063 {
5064         struct adapter *sc = arg1;
5065         struct sbuf *sb;
5066         int rc, i, n, qid = arg2;
5067         uint32_t *buf, *p;
5068         char *qtype;
5069         u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5070
5071         KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5072             ("%s: bad qid %d\n", __func__, qid));
5073
5074         if (qid < CIM_NUM_IBQ) {
5075                 /* inbound queue */
5076                 qtype = "IBQ";
5077                 n = 4 * CIM_IBQ_SIZE;
5078                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5079                 rc = t4_read_cim_ibq(sc, qid, buf, n);
5080         } else {
5081                 /* outbound queue */
5082                 qtype = "OBQ";
5083                 qid -= CIM_NUM_IBQ;
5084                 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5085                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5086                 rc = t4_read_cim_obq(sc, qid, buf, n);
5087         }
5088
5089         if (rc < 0) {
5090                 rc = -rc;
5091                 goto done;
5092         }
5093         n = rc * sizeof(uint32_t);      /* rc has # of words actually read */
5094
5095         rc = sysctl_wire_old_buffer(req, 0);
5096         if (rc != 0)
5097                 goto done;
5098
5099         sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5100         if (sb == NULL) {
5101                 rc = ENOMEM;
5102                 goto done;
5103         }
5104
5105         sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
5106         for (i = 0, p = buf; i < n; i += 16, p += 4)
5107                 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5108                     p[2], p[3]);
5109
5110         rc = sbuf_finish(sb);
5111         sbuf_delete(sb);
5112 done:
5113         free(buf, M_CXGBE);
5114         return (rc);
5115 }
5116
5117 static int
5118 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5119 {
5120         struct adapter *sc = arg1;
5121         u_int cfg;
5122         struct sbuf *sb;
5123         uint32_t *buf, *p;
5124         int rc;
5125
5126         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5127         if (rc != 0)
5128                 return (rc);
5129
5130         rc = sysctl_wire_old_buffer(req, 0);
5131         if (rc != 0)
5132                 return (rc);
5133
5134         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5135         if (sb == NULL)
5136                 return (ENOMEM);
5137
5138         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5139             M_ZERO | M_WAITOK);
5140
5141         rc = -t4_cim_read_la(sc, buf, NULL);
5142         if (rc != 0)
5143                 goto done;
5144
5145         sbuf_printf(sb, "Status   Data      PC%s",
5146             cfg & F_UPDBGLACAPTPCONLY ? "" :
5147             "     LS0Stat  LS0Addr             LS0Data");
5148
5149         KASSERT((sc->params.cim_la_size & 7) == 0,
5150             ("%s: p will walk off the end of buf", __func__));
5151
5152         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5153                 if (cfg & F_UPDBGLACAPTPCONLY) {
5154                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5155                             p[6], p[7]);
5156                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5157                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5158                             p[4] & 0xff, p[5] >> 8);
5159                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5160                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5161                             p[1] & 0xf, p[2] >> 4);
5162                 } else {
5163                         sbuf_printf(sb,
5164                             "\n  %02x   %x%07x %x%07x %08x %08x "
5165                             "%08x%08x%08x%08x",
5166                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5167                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5168                             p[6], p[7]);
5169                 }
5170         }
5171
5172         rc = sbuf_finish(sb);
5173         sbuf_delete(sb);
5174 done:
5175         free(buf, M_CXGBE);
5176         return (rc);
5177 }
5178
/*
 * Dump the CIM MA logic analyzer.  t4_cim_read_ma_la() fills two
 * buffers of CIM_MALA_SIZE entries, 5 words each: the first is printed
 * raw, the second decoded into Cnt/ID/Tag/UE/Data/RDY/VLD columns.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        u_int i;
        struct sbuf *sb;
        uint32_t *buf, *p;
        int rc;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
        p = buf;

        /* first capture: raw 160-bit entries, high word first */
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
                    p[1], p[0]);
        }

        /* second capture: decoded fields (p continues into buffer 2) */
        sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
                    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
                    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
                    (p[1] >> 2) | ((p[2] & 3) << 30),
                    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
                    p[0] & 1);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
5222
5223 static int
5224 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5225 {
5226         struct adapter *sc = arg1;
5227         u_int i;
5228         struct sbuf *sb;
5229         uint32_t *buf, *p;
5230         int rc;
5231
5232         rc = sysctl_wire_old_buffer(req, 0);
5233         if (rc != 0)
5234                 return (rc);
5235
5236         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5237         if (sb == NULL)
5238                 return (ENOMEM);
5239
5240         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5241             M_ZERO | M_WAITOK);
5242
5243         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5244         p = buf;
5245
5246         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5247         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5248                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5249                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5250                     p[4], p[3], p[2], p[1], p[0]);
5251         }
5252
5253         sbuf_printf(sb, "\n\nCntl ID               Data");
5254         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5255                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5256                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5257         }
5258
5259         rc = sbuf_finish(sb);
5260         sbuf_delete(sb);
5261         free(buf, M_CXGBE);
5262         return (rc);
5263 }
5264
/*
 * Summarize CIM queue configuration: base/size/threshold (from
 * t4_read_cimq_cfg) plus live pointer state read from the UP — 4 words
 * per queue starting at ibq_rdaddr, and 2 words per OBQ of write
 * pointers.  The shadow register addresses differ between T4 and T5.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t thres[CIM_NUM_IBQ];
        uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
        uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
        u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

        if (is_t4(sc)) {
                cim_num_obq = CIM_NUM_OBQ;
                ibq_rdaddr = A_UP_IBQ_0_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_REALADDR;
        } else {
                cim_num_obq = CIM_NUM_OBQ_T5;
                ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
        }
        nq = CIM_NUM_IBQ + cim_num_obq;

        rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
        if (rc == 0)
                rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
        if (rc != 0)
                return (rc);

        t4_read_cimq_cfg(sc, base, size, thres);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

        /* IBQs first (they have thresholds), then the OBQs */
        for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
                sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
                    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
                    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);
        for ( ; i < nq; i++, p += 4, wr += 2)
                sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
                    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
                    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5323
5324 static int
5325 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5326 {
5327         struct adapter *sc = arg1;
5328         struct sbuf *sb;
5329         int rc;
5330         struct tp_cpl_stats stats;
5331
5332         rc = sysctl_wire_old_buffer(req, 0);
5333         if (rc != 0)
5334                 return (rc);
5335
5336         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5337         if (sb == NULL)
5338                 return (ENOMEM);
5339
5340         t4_tp_get_cpl_stats(sc, &stats);
5341
5342         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5343             "channel 3\n");
5344         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5345                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5346         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5347                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5348
5349         rc = sbuf_finish(sb);
5350         sbuf_delete(sb);
5351
5352         return (rc);
5353 }
5354
5355 static int
5356 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5357 {
5358         struct adapter *sc = arg1;
5359         struct sbuf *sb;
5360         int rc;
5361         struct tp_usm_stats stats;
5362
5363         rc = sysctl_wire_old_buffer(req, 0);
5364         if (rc != 0)
5365                 return(rc);
5366
5367         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5368         if (sb == NULL)
5369                 return (ENOMEM);
5370
5371         t4_get_usm_stats(sc, &stats);
5372
5373         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5374         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5375         sbuf_printf(sb, "Drops:  %u", stats.drops);
5376
5377         rc = sbuf_finish(sb);
5378         sbuf_delete(sb);
5379
5380         return (rc);
5381 }
5382
/* Names for firmware devlog severity levels, indexed by FW_DEVLOG_LEVEL_*. */
const char *devlog_level_strings[] = {
        [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
        [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
        [FW_DEVLOG_LEVEL_ERR]           = "ERR",
        [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
        [FW_DEVLOG_LEVEL_INFO]          = "INFO",
        [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
};
5391
/* Names for firmware devlog facilities, indexed by FW_DEVLOG_FACILITY_*. */
const char *devlog_facility_strings[] = {
        [FW_DEVLOG_FACILITY_CORE]       = "CORE",
        [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
        [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
        [FW_DEVLOG_FACILITY_RES]        = "RES",
        [FW_DEVLOG_FACILITY_HW]         = "HW",
        [FW_DEVLOG_FACILITY_FLR]        = "FLR",
        [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
        [FW_DEVLOG_FACILITY_PHY]        = "PHY",
        [FW_DEVLOG_FACILITY_MAC]        = "MAC",
        [FW_DEVLOG_FACILITY_PORT]       = "PORT",
        [FW_DEVLOG_FACILITY_VI]         = "VI",
        [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
        [FW_DEVLOG_FACILITY_ACL]        = "ACL",
        [FW_DEVLOG_FACILITY_TM]         = "TM",
        [FW_DEVLOG_FACILITY_QFC]        = "QFC",
        [FW_DEVLOG_FACILITY_DCB]        = "DCB",
        [FW_DEVLOG_FACILITY_ETH]        = "ETH",
        [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
        [FW_DEVLOG_FACILITY_RI]         = "RI",
        [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
        [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
        [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
        [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
};
5417
/*
 * Dump the firmware's device log.  The log lives in card memory as a
 * circular array of fw_devlog_e entries; the entry with the smallest
 * timestamp is the oldest, so printing starts there and wraps around.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct devlog_params *dparams = &sc->params.devlog;
        struct fw_devlog_e *buf, *e;
        int i, j, rc, nentries, first = 0, m;
        struct sbuf *sb;
        uint64_t ftstamp = UINT64_MAX;

        if (dparams->start == 0) {
                /*
                 * Devlog parameters weren't discovered; fall back to
                 * hardcoded defaults (EDC0 @ 0x84000, 32KB).
                 * NOTE(review): assumes the usual firmware layout —
                 * verify against newer firmware.
                 */
                dparams->memtype = FW_MEMTYPE_EDC0;
                dparams->start = 0x84000;
                dparams->size = 32768;
        }

        nentries = dparams->size / sizeof(struct fw_devlog_e);

        buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
        if (buf == NULL)
                return (ENOMEM);

        m = fwmtype_to_hwmtype(dparams->memtype);
        rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
        if (rc != 0)
                goto done;

        /* Byte-swap every used entry and find the oldest one. */
        for (i = 0; i < nentries; i++) {
                e = &buf[i];

                if (e->timestamp == 0)
                        break;  /* end */

                e->timestamp = be64toh(e->timestamp);
                e->seqno = be32toh(e->seqno);
                for (j = 0; j < 8; j++)
                        e->params[j] = be32toh(e->params[j]);

                if (e->timestamp < ftstamp) {
                        ftstamp = e->timestamp;
                        first = i;
                }
        }

        if (buf[first].timestamp == 0)
                goto done;      /* nothing in the log */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }
        sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
            "Seq#", "Tstamp", "Level", "Facility", "Message");

        /* Walk the ring starting at the oldest entry. */
        i = first;
        do {
                e = &buf[i];
                if (e->timestamp == 0)
                        break;  /* end */

                sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
                    e->seqno, e->timestamp,
                    (e->level < nitems(devlog_level_strings) ?
                        devlog_level_strings[e->level] : "UNKNOWN"),
                    (e->facility < nitems(devlog_facility_strings) ?
                        devlog_facility_strings[e->facility] : "UNKNOWN"));
                /* format string and params come straight from the firmware */
                sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
                    e->params[2], e->params[3], e->params[4],
                    e->params[5], e->params[6], e->params[7]);

                if (++i == nentries)
                        i = 0;
        } while (i != first);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
5503
5504 static int
5505 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5506 {
5507         struct adapter *sc = arg1;
5508         struct sbuf *sb;
5509         int rc;
5510         struct tp_fcoe_stats stats[4];
5511
5512         rc = sysctl_wire_old_buffer(req, 0);
5513         if (rc != 0)
5514                 return (rc);
5515
5516         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5517         if (sb == NULL)
5518                 return (ENOMEM);
5519
5520         t4_get_fcoe_stats(sc, 0, &stats[0]);
5521         t4_get_fcoe_stats(sc, 1, &stats[1]);
5522         t4_get_fcoe_stats(sc, 2, &stats[2]);
5523         t4_get_fcoe_stats(sc, 3, &stats[3]);
5524
5525         sbuf_printf(sb, "                   channel 0        channel 1        "
5526             "channel 2        channel 3\n");
5527         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5528             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5529             stats[3].octetsDDP);
5530         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5531             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5532         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5533             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5534             stats[3].framesDrop);
5535
5536         rc = sbuf_finish(sb);
5537         sbuf_delete(sb);
5538
5539         return (rc);
5540 }
5541
/*
 * TX scheduler state: per-scheduler mode (class vs flow), the channel
 * it is mapped to (2 bits per scheduler in the request map), rate, and
 * the class/flow inter-packet gaps from the pace table.
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        unsigned int map, kbps, ipg, mode;
        unsigned int pace_tab[NTX_SCHED];

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
        mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
        t4_read_pace_tbl(sc, pace_tab);

        sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
            "Class IPG (0.1 ns)   Flow IPG (us)");

        /* map holds 2 channel bits per scheduler, low bits first */
        for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
                t4_get_tx_sched(sc, i, &kbps, &ipg);
                sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
                    (mode & (1 << i)) ? "flow" : "class", map & 3);
                if (kbps)
                        sbuf_printf(sb, "%9u     ", kbps);
                else
                        sbuf_printf(sb, " disabled     ");

                if (ipg)
                        sbuf_printf(sb, "%13u        ", ipg);
                else
                        sbuf_printf(sb, "     disabled        ");

                if (pace_tab[i])
                        sbuf_printf(sb, "%10u", pace_tab[i]);
                else
                        sbuf_printf(sb, "  disabled");
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5591
/*
 * Loopback port statistics, printed two loopback "ports" per pass.
 * NOTE(review): the p0/p1 walk assumes the fields of struct
 * lb_port_stats are uint64_t and laid out in exactly the order of
 * stat_name[], starting at .octets — confirm if that struct changes.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, j;
        uint64_t *p0, *p1;
        struct lb_port_stats s[2];
        static const char *stat_name[] = {
                "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
                "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
                "Frames128To255:", "Frames256To511:", "Frames512To1023:",
                "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
                "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
                "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
                "BG2FramesTrunc:", "BG3FramesTrunc:"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        memset(s, 0, sizeof(s));

        /* two loopback ports per iteration: i and i + 1 */
        for (i = 0; i < 4; i += 2) {
                t4_get_lb_stats(sc, i, &s[0]);
                t4_get_lb_stats(sc, i + 1, &s[1]);

                p0 = &s[0].octets;
                p1 = &s[1].octets;
                sbuf_printf(sb, "%s                       Loopback %u"
                    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

                for (j = 0; j < nitems(stat_name); j++)
                        sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
                                   *p0++, *p1++);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5639
5640 static int
5641 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5642 {
5643         int rc = 0;
5644         struct port_info *pi = arg1;
5645         struct sbuf *sb;
5646         static const char *linkdnreasons[] = {
5647                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5648                 "PHY overheated", "unknown", "rx los", "reserved7"
5649         };
5650
5651         rc = sysctl_wire_old_buffer(req, 0);
5652         if (rc != 0)
5653                 return(rc);
5654         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5655         if (sb == NULL)
5656                 return (ENOMEM);
5657
5658         if (pi->linkdnrc < 0)
5659                 sbuf_printf(sb, "n/a");
5660         else if (pi->linkdnrc < nitems(linkdnreasons))
5661                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5662         else
5663                 sbuf_printf(sb, "%d", pi->linkdnrc);
5664
5665         rc = sbuf_finish(sb);
5666         sbuf_delete(sb);
5667
5668         return (rc);
5669 }
5670
/*
 * A contiguous card-memory range [base, limit], plus an index into a name
 * table describing what the range holds (set past the end of that table to
 * hide the entry from display).
 */
struct mem_desc {
        unsigned int base;
        unsigned int limit;
        unsigned int idx;
};

/*
 * qsort(3) comparator: order mem_desc entries by ascending base address.
 *
 * Compare explicitly instead of returning "a->base - b->base": the bases
 * are unsigned, so the subtraction wraps and, once converted to int, can
 * report the wrong order whenever the two bases differ by 2GB or more.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
        unsigned int ba = ((const struct mem_desc *)a)->base;
        unsigned int bb = ((const struct mem_desc *)b)->base;

        return (ba < bb ? -1 : (ba > bb ? 1 : 0));
}
5683
/*
 * Append one formatted line describing the memory region [from, to] to the
 * sbuf.  An empty region (to == from - 1, so size wraps to 0) is skipped.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        const unsigned int size = to - from + 1;

        if (size != 0) {
                /*
                 * XXX: need humanize_number(3) in libkern for a more
                 * readable 'size'.
                 */
                sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
        }
}
5697
5698 static int
5699 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5700 {
5701         struct adapter *sc = arg1;
5702         struct sbuf *sb;
5703         int rc, i, n;
5704         uint32_t lo, hi, used, alloc;
5705         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5706         static const char *region[] = {
5707                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5708                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5709                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5710                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5711                 "RQUDP region:", "PBL region:", "TXPBL region:",
5712                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5713                 "On-chip queues:"
5714         };
5715         struct mem_desc avail[4];
5716         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5717         struct mem_desc *md = mem;
5718
5719         rc = sysctl_wire_old_buffer(req, 0);
5720         if (rc != 0)
5721                 return (rc);
5722
5723         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5724         if (sb == NULL)
5725                 return (ENOMEM);
5726
5727         for (i = 0; i < nitems(mem); i++) {
5728                 mem[i].limit = 0;
5729                 mem[i].idx = i;
5730         }
5731
5732         /* Find and sort the populated memory ranges */
5733         i = 0;
5734         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5735         if (lo & F_EDRAM0_ENABLE) {
5736                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5737                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5738                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5739                 avail[i].idx = 0;
5740                 i++;
5741         }
5742         if (lo & F_EDRAM1_ENABLE) {
5743                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5744                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5745                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5746                 avail[i].idx = 1;
5747                 i++;
5748         }
5749         if (lo & F_EXT_MEM_ENABLE) {
5750                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5751                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5752                 avail[i].limit = avail[i].base +
5753                     (G_EXT_MEM_SIZE(hi) << 20);
5754                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5755                 i++;
5756         }
5757         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5758                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5759                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5760                 avail[i].limit = avail[i].base +
5761                     (G_EXT_MEM1_SIZE(hi) << 20);
5762                 avail[i].idx = 4;
5763                 i++;
5764         }
5765         if (!i)                                    /* no memory available */
5766                 return 0;
5767         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5768
5769         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5770         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5771         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5772         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5773         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5774         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5775         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5776         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5777         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5778
5779         /* the next few have explicit upper bounds */
5780         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5781         md->limit = md->base - 1 +
5782                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5783                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5784         md++;
5785
5786         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5787         md->limit = md->base - 1 +
5788                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5789                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5790         md++;
5791
5792         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5793                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5794                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5795                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5796         } else {
5797                 md->base = 0;
5798                 md->idx = nitems(region);  /* hide it */
5799         }
5800         md++;
5801
5802 #define ulp_region(reg) \
5803         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5804         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5805
5806         ulp_region(RX_ISCSI);
5807         ulp_region(RX_TDDP);
5808         ulp_region(TX_TPT);
5809         ulp_region(RX_STAG);
5810         ulp_region(RX_RQ);
5811         ulp_region(RX_RQUDP);
5812         ulp_region(RX_PBL);
5813         ulp_region(TX_PBL);
5814 #undef ulp_region
5815
5816         md->base = 0;
5817         md->idx = nitems(region);
5818         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5819                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5820                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5821                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5822         }
5823         md++;
5824
5825         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5826         md->limit = md->base + sc->tids.ntids - 1;
5827         md++;
5828         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5829         md->limit = md->base + sc->tids.ntids - 1;
5830         md++;
5831
5832         md->base = sc->vres.ocq.start;
5833         if (sc->vres.ocq.size)
5834                 md->limit = md->base + sc->vres.ocq.size - 1;
5835         else
5836                 md->idx = nitems(region);  /* hide it */
5837         md++;
5838
5839         /* add any address-space holes, there can be up to 3 */
5840         for (n = 0; n < i - 1; n++)
5841                 if (avail[n].limit < avail[n + 1].base)
5842                         (md++)->base = avail[n].limit;
5843         if (avail[n].limit)
5844                 (md++)->base = avail[n].limit;
5845
5846         n = md - mem;
5847         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5848
5849         for (lo = 0; lo < i; lo++)
5850                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5851                                 avail[lo].limit - 1);
5852
5853         sbuf_printf(sb, "\n");
5854         for (i = 0; i < n; i++) {
5855                 if (mem[i].idx >= nitems(region))
5856                         continue;                        /* skip holes */
5857                 if (!mem[i].limit)
5858                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5859                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5860                                 mem[i].limit);
5861         }
5862
5863         sbuf_printf(sb, "\n");
5864         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5865         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5866         mem_region_show(sb, "uP RAM:", lo, hi);
5867
5868         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5869         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5870         mem_region_show(sb, "uP Extmem2:", lo, hi);
5871
5872         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5873         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5874                    G_PMRXMAXPAGE(lo),
5875                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5876                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5877
5878         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5879         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5880         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5881                    G_PMTXMAXPAGE(lo),
5882                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5883                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5884         sbuf_printf(sb, "%u p-structs\n",
5885                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5886
5887         for (i = 0; i < 4; i++) {
5888                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5889                 if (is_t4(sc)) {
5890                         used = G_USED(lo);
5891                         alloc = G_ALLOC(lo);
5892                 } else {
5893                         used = G_T5_USED(lo);
5894                         alloc = G_T5_ALLOC(lo);
5895                 }
5896                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5897                            i, used, alloc);
5898         }
5899         for (i = 0; i < 4; i++) {
5900                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5901                 if (is_t4(sc)) {
5902                         used = G_USED(lo);
5903                         alloc = G_ALLOC(lo);
5904                 } else {
5905                         used = G_T5_USED(lo);
5906                         alloc = G_T5_ALLOC(lo);
5907                 }
5908                 sbuf_printf(sb,
5909                            "\nLoopback %d using %u pages out of %u allocated",
5910                            i, used, alloc);
5911         }
5912
5913         rc = sbuf_finish(sb);
5914         sbuf_delete(sb);
5915
5916         return (rc);
5917 }
5918
/*
 * Decode the MPS TCAM (x, y) pair into an Ethernet address and its mask.
 * The mask is x | y; the 48-bit address is the low 6 bytes of y, stored
 * into addr[] most-significant byte first (network order), exactly as the
 * original htobe64+memcpy did but without depending on host endianness
 * helpers.
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
        int i;

        *mask = x | y;
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                addr[i] = (y >> (8 * (ETHER_ADDR_LEN - 1 - i))) & 0xff;
}
5926
/*
 * Sysctl handler: dump the MPS TCAM (Ethernet address classification table).
 *
 * For each valid entry it prints the address, mask, validity, port map,
 * PF/VF, optional replication map (fetched from the firmware), per-port
 * priorities, and multi-listen bits.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* T5 has a larger classification SRAM than T4. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* A bit set in both x and y marks an unused/invalid entry. */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /* Ask the firmware for this entry's replication map. */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        if (rc != 0) {
                                /*
                                 * Mailbox failure is reported inline for this
                                 * entry only; the dump continues.
                                 */
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /* rc != 0 only if begin_synchronized_op failed; still flush the sbuf. */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6016
6017 static int
6018 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
6019 {
6020         struct adapter *sc = arg1;
6021         struct sbuf *sb;
6022         int rc;
6023         uint16_t mtus[NMTUS];
6024
6025         rc = sysctl_wire_old_buffer(req, 0);
6026         if (rc != 0)
6027                 return (rc);
6028
6029         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6030         if (sb == NULL)
6031                 return (ENOMEM);
6032
6033         t4_read_mtu_tbl(sc, mtus, NULL);
6034
6035         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6036             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6037             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6038             mtus[14], mtus[15]);
6039
6040         rc = sbuf_finish(sb);
6041         sbuf_delete(sb);
6042
6043         return (rc);
6044 }
6045
6046 static int
6047 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6048 {
6049         struct adapter *sc = arg1;
6050         struct sbuf *sb;
6051         int rc, i;
6052         uint32_t cnt[PM_NSTATS];
6053         uint64_t cyc[PM_NSTATS];
6054         static const char *rx_stats[] = {
6055                 "Read:", "Write bypass:", "Write mem:", "Flush:"
6056         };
6057         static const char *tx_stats[] = {
6058                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6059         };
6060
6061         rc = sysctl_wire_old_buffer(req, 0);
6062         if (rc != 0)
6063                 return (rc);
6064
6065         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6066         if (sb == NULL)
6067                 return (ENOMEM);
6068
6069         t4_pmtx_get_stats(sc, cnt, cyc);
6070         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6071         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6072                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6073                     cyc[i]);
6074
6075         t4_pmrx_get_stats(sc, cnt, cyc);
6076         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6077         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6078                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6079                     cyc[i]);
6080
6081         rc = sbuf_finish(sb);
6082         sbuf_delete(sb);
6083
6084         return (rc);
6085 }
6086
6087 static int
6088 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6089 {
6090         struct adapter *sc = arg1;
6091         struct sbuf *sb;
6092         int rc;
6093         struct tp_rdma_stats stats;
6094
6095         rc = sysctl_wire_old_buffer(req, 0);
6096         if (rc != 0)
6097                 return (rc);
6098
6099         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6100         if (sb == NULL)
6101                 return (ENOMEM);
6102
6103         t4_tp_get_rdma_stats(sc, &stats);
6104         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6105         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6106
6107         rc = sbuf_finish(sb);
6108         sbuf_delete(sb);
6109
6110         return (rc);
6111 }
6112
6113 static int
6114 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6115 {
6116         struct adapter *sc = arg1;
6117         struct sbuf *sb;
6118         int rc;
6119         struct tp_tcp_stats v4, v6;
6120
6121         rc = sysctl_wire_old_buffer(req, 0);
6122         if (rc != 0)
6123                 return (rc);
6124
6125         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6126         if (sb == NULL)
6127                 return (ENOMEM);
6128
6129         t4_tp_get_tcp_stats(sc, &v4, &v6);
6130         sbuf_printf(sb,
6131             "                                IP                 IPv6\n");
6132         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6133             v4.tcpOutRsts, v6.tcpOutRsts);
6134         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6135             v4.tcpInSegs, v6.tcpInSegs);
6136         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6137             v4.tcpOutSegs, v6.tcpOutSegs);
6138         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6139             v4.tcpRetransSegs, v6.tcpRetransSegs);
6140
6141         rc = sbuf_finish(sb);
6142         sbuf_delete(sb);
6143
6144         return (rc);
6145 }
6146
6147 static int
6148 sysctl_tids(SYSCTL_HANDLER_ARGS)
6149 {
6150         struct adapter *sc = arg1;
6151         struct sbuf *sb;
6152         int rc;
6153         struct tid_info *t = &sc->tids;
6154
6155         rc = sysctl_wire_old_buffer(req, 0);
6156         if (rc != 0)
6157                 return (rc);
6158
6159         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6160         if (sb == NULL)
6161                 return (ENOMEM);
6162
6163         if (t->natids) {
6164                 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6165                     t->atids_in_use);
6166         }
6167
6168         if (t->ntids) {
6169                 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6170                         uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6171
6172                         if (b) {
6173                                 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6174                                     t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6175                                     t->ntids - 1);
6176                         } else {
6177                                 sbuf_printf(sb, "TID range: %u-%u",
6178                                     t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6179                                     t->ntids - 1);
6180                         }
6181                 } else
6182                         sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6183                 sbuf_printf(sb, ", in use: %u\n",
6184                     atomic_load_acq_int(&t->tids_in_use));
6185         }
6186
6187         if (t->nstids) {
6188                 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6189                     t->stid_base + t->nstids - 1, t->stids_in_use);
6190         }
6191
6192         if (t->nftids) {
6193                 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6194                     t->ftid_base + t->nftids - 1);
6195         }
6196
6197         if (t->netids) {
6198                 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
6199                     t->etid_base + t->netids - 1);
6200         }
6201
6202         sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6203             t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6204             t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6205
6206         rc = sbuf_finish(sb);
6207         sbuf_delete(sb);
6208
6209         return (rc);
6210 }
6211
/*
 * Sysctl handler: TP error statistics, per channel (columns 0-3) plus the
 * two channel-independent offload counters at the end.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_err_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_err_stats(sc, &stats);

        sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
                      "channel 3\n");
        sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
            stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
            stats.macInErrs[3]);
        sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
            stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
            stats.hdrInErrs[3]);
        sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
            stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
            stats.tcpInErrs[3]);
        sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
            stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
            stats.tcp6InErrs[3]);
        sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
            stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
            stats.tnlCongDrops[3]);
        sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
            stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
            stats.tnlTxDrops[3]);
        sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
            stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
            stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
        sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
            stats.ofldChanDrops[0], stats.ofldChanDrops[1],
            stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
        /* These two are totals, not per-channel. */
        sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
            stats.ofldNoNeigh, stats.ofldCongDefer);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6264
/*
 * One bit-field within a 64-bit logic-analyzer word: 'width' bits starting
 * at bit 'start', printed under 'name'.  Arrays of these are terminated by
 * an entry whose name is NULL (see field_desc_show()).
 */
struct field_desc {
        const char *name;
        u_int start;
        u_int width;
};
6270
6271 static void
6272 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6273 {
6274         char buf[32];
6275         int line_size = 0;
6276
6277         while (f->name) {
6278                 uint64_t mask = (1ULL << f->width) - 1;
6279                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6280                     ((uintmax_t)v >> f->start) & mask);
6281
6282                 if (line_size + len >= 79) {
6283                         line_size = 8;
6284                         sbuf_printf(sb, "\n        ");
6285                 }
6286                 sbuf_printf(sb, "%s ", buf);
6287                 line_size += len + 1;
6288                 f++;
6289         }
6290         sbuf_printf(sb, "\n");
6291 }
6292
6293 static struct field_desc tp_la0[] = {
6294         { "RcfOpCodeOut", 60, 4 },
6295         { "State", 56, 4 },
6296         { "WcfState", 52, 4 },
6297         { "RcfOpcSrcOut", 50, 2 },
6298         { "CRxError", 49, 1 },
6299         { "ERxError", 48, 1 },
6300         { "SanityFailed", 47, 1 },
6301         { "SpuriousMsg", 46, 1 },
6302         { "FlushInputMsg", 45, 1 },
6303         { "FlushInputCpl", 44, 1 },
6304         { "RssUpBit", 43, 1 },
6305         { "RssFilterHit", 42, 1 },
6306         { "Tid", 32, 10 },
6307         { "InitTcb", 31, 1 },
6308         { "LineNumber", 24, 7 },
6309         { "Emsg", 23, 1 },
6310         { "EdataOut", 22, 1 },
6311         { "Cmsg", 21, 1 },
6312         { "CdataOut", 20, 1 },
6313         { "EreadPdu", 19, 1 },
6314         { "CreadPdu", 18, 1 },
6315         { "TunnelPkt", 17, 1 },
6316         { "RcfPeerFin", 16, 1 },
6317         { "RcfReasonOut", 12, 4 },
6318         { "TxCchannel", 10, 2 },
6319         { "RcfTxChannel", 8, 2 },
6320         { "RxEchannel", 6, 2 },
6321         { "RcfRxChannel", 5, 1 },
6322         { "RcfDataOutSrdy", 4, 1 },
6323         { "RxDvld", 3, 1 },
6324         { "RxOoDvld", 2, 1 },
6325         { "RxCongestion", 1, 1 },
6326         { "TxCongestion", 0, 1 },
6327         { NULL }
6328 };
6329
6330 static struct field_desc tp_la1[] = {
6331         { "CplCmdIn", 56, 8 },
6332         { "CplCmdOut", 48, 8 },
6333         { "ESynOut", 47, 1 },
6334         { "EAckOut", 46, 1 },
6335         { "EFinOut", 45, 1 },
6336         { "ERstOut", 44, 1 },
6337         { "SynIn", 43, 1 },
6338         { "AckIn", 42, 1 },
6339         { "FinIn", 41, 1 },
6340         { "RstIn", 40, 1 },
6341         { "DataIn", 39, 1 },
6342         { "DataInVld", 38, 1 },
6343         { "PadIn", 37, 1 },
6344         { "RxBufEmpty", 36, 1 },
6345         { "RxDdp", 35, 1 },
6346         { "RxFbCongestion", 34, 1 },
6347         { "TxFbCongestion", 33, 1 },
6348         { "TxPktSumSrdy", 32, 1 },
6349         { "RcfUlpType", 28, 4 },
6350         { "Eread", 27, 1 },
6351         { "Ebypass", 26, 1 },
6352         { "Esave", 25, 1 },
6353         { "Static0", 24, 1 },
6354         { "Cread", 23, 1 },
6355         { "Cbypass", 22, 1 },
6356         { "Csave", 21, 1 },
6357         { "CPktOut", 20, 1 },
6358         { "RxPagePoolFull", 18, 2 },
6359         { "RxLpbkPkt", 17, 1 },
6360         { "TxLpbkPkt", 16, 1 },
6361         { "RxVfValid", 15, 1 },
6362         { "SynLearned", 14, 1 },
6363         { "SetDelEntry", 13, 1 },
6364         { "SetInvEntry", 12, 1 },
6365         { "CpcmdDvld", 11, 1 },
6366         { "CpcmdSave", 10, 1 },
6367         { "RxPstructsFull", 8, 2 },
6368         { "EpcmdDvld", 7, 1 },
6369         { "EpcmdFlush", 6, 1 },
6370         { "EpcmdTrimPrefix", 5, 1 },
6371         { "EpcmdTrimPostfix", 4, 1 },
6372         { "ERssIp4Pkt", 3, 1 },
6373         { "ERssIp6Pkt", 2, 1 },
6374         { "ERssTcpUdpPkt", 1, 1 },
6375         { "ERssFceFipPkt", 0, 1 },
6376         { NULL }
6377 };
6378
6379 static struct field_desc tp_la2[] = {
6380         { "CplCmdIn", 56, 8 },
6381         { "MpsVfVld", 55, 1 },
6382         { "MpsPf", 52, 3 },
6383         { "MpsVf", 44, 8 },
6384         { "SynIn", 43, 1 },
6385         { "AckIn", 42, 1 },
6386         { "FinIn", 41, 1 },
6387         { "RstIn", 40, 1 },
6388         { "DataIn", 39, 1 },
6389         { "DataInVld", 38, 1 },
6390         { "PadIn", 37, 1 },
6391         { "RxBufEmpty", 36, 1 },
6392         { "RxDdp", 35, 1 },
6393         { "RxFbCongestion", 34, 1 },
6394         { "TxFbCongestion", 33, 1 },
6395         { "TxPktSumSrdy", 32, 1 },
6396         { "RcfUlpType", 28, 4 },
6397         { "Eread", 27, 1 },
6398         { "Ebypass", 26, 1 },
6399         { "Esave", 25, 1 },
6400         { "Static0", 24, 1 },
6401         { "Cread", 23, 1 },
6402         { "Cbypass", 22, 1 },
6403         { "Csave", 21, 1 },
6404         { "CPktOut", 20, 1 },
6405         { "RxPagePoolFull", 18, 2 },
6406         { "RxLpbkPkt", 17, 1 },
6407         { "TxLpbkPkt", 16, 1 },
6408         { "RxVfValid", 15, 1 },
6409         { "SynLearned", 14, 1 },
6410         { "SetDelEntry", 13, 1 },
6411         { "SetInvEntry", 12, 1 },
6412         { "CpcmdDvld", 11, 1 },
6413         { "CpcmdSave", 10, 1 },
6414         { "RxPstructsFull", 8, 2 },
6415         { "EpcmdDvld", 7, 1 },
6416         { "EpcmdFlush", 6, 1 },
6417         { "EpcmdTrimPrefix", 5, 1 },
6418         { "EpcmdTrimPostfix", 4, 1 },
6419         { "ERssIp4Pkt", 3, 1 },
6420         { "ERssIp6Pkt", 2, 1 },
6421         { "ERssTcpUdpPkt", 1, 1 },
6422         { "ERssFceFipPkt", 0, 1 },
6423         { NULL }
6424 };
6425
/*
 * Display a single-word TP LA entry (capture modes other than 2/3).
 * 'idx' is unused here, but the signature must match the show_func
 * function pointer used by sysctl_tp_la().
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

        field_desc_show(sb, *p, tp_la0);
}
6432
6433 static void
6434 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6435 {
6436
6437         if (idx)
6438                 sbuf_printf(sb, "\n");
6439         field_desc_show(sb, p[0], tp_la0);
6440         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6441                 field_desc_show(sb, p[1], tp_la0);
6442 }
6443
6444 static void
6445 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6446 {
6447
6448         if (idx)
6449                 sbuf_printf(sb, "\n");
6450         field_desc_show(sb, p[0], tp_la0);
6451         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6452                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6453 }
6454
6455 static int
6456 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6457 {
6458         struct adapter *sc = arg1;
6459         struct sbuf *sb;
6460         uint64_t *buf, *p;
6461         int rc;
6462         u_int i, inc;
6463         void (*show_func)(struct sbuf *, uint64_t *, int);
6464
6465         rc = sysctl_wire_old_buffer(req, 0);
6466         if (rc != 0)
6467                 return (rc);
6468
6469         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6470         if (sb == NULL)
6471                 return (ENOMEM);
6472
6473         buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6474
6475         t4_tp_read_la(sc, buf, NULL);
6476         p = buf;
6477
6478         switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6479         case 2:
6480                 inc = 2;
6481                 show_func = tp_la_show2;
6482                 break;
6483         case 3:
6484                 inc = 2;
6485                 show_func = tp_la_show3;
6486                 break;
6487         default:
6488                 inc = 1;
6489                 show_func = tp_la_show;
6490         }
6491
6492         for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6493                 (*show_func)(sb, p, i);
6494
6495         rc = sbuf_finish(sb);
6496         sbuf_delete(sb);
6497         free(buf, M_CXGBE);
6498         return (rc);
6499 }
6500
6501 static int
6502 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6503 {
6504         struct adapter *sc = arg1;
6505         struct sbuf *sb;
6506         int rc;
6507         u64 nrate[NCHAN], orate[NCHAN];
6508
6509         rc = sysctl_wire_old_buffer(req, 0);
6510         if (rc != 0)
6511                 return (rc);
6512
6513         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6514         if (sb == NULL)
6515                 return (ENOMEM);
6516
6517         t4_get_chan_txrate(sc, nrate, orate);
6518         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6519                  "channel 3\n");
6520         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6521             nrate[0], nrate[1], nrate[2], nrate[3]);
6522         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6523             orate[0], orate[1], orate[2], orate[3]);
6524
6525         rc = sbuf_finish(sb);
6526         sbuf_delete(sb);
6527
6528         return (rc);
6529 }
6530
6531 static int
6532 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6533 {
6534         struct adapter *sc = arg1;
6535         struct sbuf *sb;
6536         uint32_t *buf, *p;
6537         int rc, i;
6538
6539         rc = sysctl_wire_old_buffer(req, 0);
6540         if (rc != 0)
6541                 return (rc);
6542
6543         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6544         if (sb == NULL)
6545                 return (ENOMEM);
6546
6547         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6548             M_ZERO | M_WAITOK);
6549
6550         t4_ulprx_read_la(sc, buf);
6551         p = buf;
6552
6553         sbuf_printf(sb, "      Pcmd        Type   Message"
6554             "                Data");
6555         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6556                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6557                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6558         }
6559
6560         rc = sbuf_finish(sb);
6561         sbuf_delete(sb);
6562         free(buf, M_CXGBE);
6563         return (rc);
6564 }
6565
6566 static int
6567 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6568 {
6569         struct adapter *sc = arg1;
6570         struct sbuf *sb;
6571         int rc, v;
6572
6573         rc = sysctl_wire_old_buffer(req, 0);
6574         if (rc != 0)
6575                 return (rc);
6576
6577         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6578         if (sb == NULL)
6579                 return (ENOMEM);
6580
6581         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6582         if (G_STATSOURCE_T5(v) == 7) {
6583                 if (G_STATMODE(v) == 0) {
6584                         sbuf_printf(sb, "total %d, incomplete %d",
6585                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6586                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6587                 } else if (G_STATMODE(v) == 1) {
6588                         sbuf_printf(sb, "total %d, data overflow %d",
6589                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6590                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6591                 }
6592         }
6593         rc = sbuf_finish(sb);
6594         sbuf_delete(sb);
6595
6596         return (rc);
6597 }
6598 #endif
6599
6600 static inline void
6601 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6602 {
6603         struct buf_ring *br;
6604         struct mbuf *m;
6605
6606         TXQ_LOCK_ASSERT_OWNED(txq);
6607
6608         br = txq->br;
6609         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6610         if (m)
6611                 t4_eth_tx(ifp, txq, m);
6612 }
6613
6614 void
6615 t4_tx_callout(void *arg)
6616 {
6617         struct sge_eq *eq = arg;
6618         struct adapter *sc;
6619
6620         if (EQ_TRYLOCK(eq) == 0)
6621                 goto reschedule;
6622
6623         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6624                 EQ_UNLOCK(eq);
6625 reschedule:
6626                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6627                         callout_schedule(&eq->tx_callout, 1);
6628                 return;
6629         }
6630
6631         EQ_LOCK_ASSERT_OWNED(eq);
6632
6633         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6634
6635                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6636                         struct sge_txq *txq = arg;
6637                         struct port_info *pi = txq->ifp->if_softc;
6638
6639                         sc = pi->adapter;
6640                 } else {
6641                         struct sge_wrq *wrq = arg;
6642
6643                         sc = wrq->adapter;
6644                 }
6645
6646                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6647         }
6648
6649         EQ_UNLOCK(eq);
6650 }
6651
6652 void
6653 t4_tx_task(void *arg, int count)
6654 {
6655         struct sge_eq *eq = arg;
6656
6657         EQ_LOCK(eq);
6658         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6659                 struct sge_txq *txq = arg;
6660                 txq_start(txq->ifp, txq);
6661         } else {
6662                 struct sge_wrq *wrq = arg;
6663                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6664         }
6665         EQ_UNLOCK(eq);
6666 }
6667
6668 static uint32_t
6669 fconf_to_mode(uint32_t fconf)
6670 {
6671         uint32_t mode;
6672
6673         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6674             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6675
6676         if (fconf & F_FRAGMENTATION)
6677                 mode |= T4_FILTER_IP_FRAGMENT;
6678
6679         if (fconf & F_MPSHITTYPE)
6680                 mode |= T4_FILTER_MPS_HIT_TYPE;
6681
6682         if (fconf & F_MACMATCH)
6683                 mode |= T4_FILTER_MAC_IDX;
6684
6685         if (fconf & F_ETHERTYPE)
6686                 mode |= T4_FILTER_ETH_TYPE;
6687
6688         if (fconf & F_PROTOCOL)
6689                 mode |= T4_FILTER_IP_PROTO;
6690
6691         if (fconf & F_TOS)
6692                 mode |= T4_FILTER_IP_TOS;
6693
6694         if (fconf & F_VLAN)
6695                 mode |= T4_FILTER_VLAN;
6696
6697         if (fconf & F_VNIC_ID)
6698                 mode |= T4_FILTER_VNIC;
6699
6700         if (fconf & F_PORT)
6701                 mode |= T4_FILTER_PORT;
6702
6703         if (fconf & F_FCOE)
6704                 mode |= T4_FILTER_FCoE;
6705
6706         return (mode);
6707 }
6708
6709 static uint32_t
6710 mode_to_fconf(uint32_t mode)
6711 {
6712         uint32_t fconf = 0;
6713
6714         if (mode & T4_FILTER_IP_FRAGMENT)
6715                 fconf |= F_FRAGMENTATION;
6716
6717         if (mode & T4_FILTER_MPS_HIT_TYPE)
6718                 fconf |= F_MPSHITTYPE;
6719
6720         if (mode & T4_FILTER_MAC_IDX)
6721                 fconf |= F_MACMATCH;
6722
6723         if (mode & T4_FILTER_ETH_TYPE)
6724                 fconf |= F_ETHERTYPE;
6725
6726         if (mode & T4_FILTER_IP_PROTO)
6727                 fconf |= F_PROTOCOL;
6728
6729         if (mode & T4_FILTER_IP_TOS)
6730                 fconf |= F_TOS;
6731
6732         if (mode & T4_FILTER_VLAN)
6733                 fconf |= F_VLAN;
6734
6735         if (mode & T4_FILTER_VNIC)
6736                 fconf |= F_VNIC_ID;
6737
6738         if (mode & T4_FILTER_PORT)
6739                 fconf |= F_PORT;
6740
6741         if (mode & T4_FILTER_FCoE)
6742                 fconf |= F_FCOE;
6743
6744         return (fconf);
6745 }
6746
6747 static uint32_t
6748 fspec_to_fconf(struct t4_filter_specification *fs)
6749 {
6750         uint32_t fconf = 0;
6751
6752         if (fs->val.frag || fs->mask.frag)
6753                 fconf |= F_FRAGMENTATION;
6754
6755         if (fs->val.matchtype || fs->mask.matchtype)
6756                 fconf |= F_MPSHITTYPE;
6757
6758         if (fs->val.macidx || fs->mask.macidx)
6759                 fconf |= F_MACMATCH;
6760
6761         if (fs->val.ethtype || fs->mask.ethtype)
6762                 fconf |= F_ETHERTYPE;
6763
6764         if (fs->val.proto || fs->mask.proto)
6765                 fconf |= F_PROTOCOL;
6766
6767         if (fs->val.tos || fs->mask.tos)
6768                 fconf |= F_TOS;
6769
6770         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6771                 fconf |= F_VLAN;
6772
6773         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6774                 fconf |= F_VNIC_ID;
6775
6776         if (fs->val.iport || fs->mask.iport)
6777                 fconf |= F_PORT;
6778
6779         if (fs->val.fcoe || fs->mask.fcoe)
6780                 fconf |= F_FCOE;
6781
6782         return (fconf);
6783 }
6784
/*
 * Read the current global filter mode from hardware (TP_VLAN_PRI_MAP via
 * indirect TP PIO access) and translate it into the T4_FILTER_* bitmap
 * exposed to userland.  Resyncs the cached copy in sc->params if the
 * hardware disagrees with it.
 *
 * Returns 0 on success, or an errno if the synchronized op could not be
 * entered (e.g. interrupted sleep).
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	/* Serialize against other configuration changes on this adapter. */
	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* The cached value should match the hardware; warn and resync if not. */
	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6811
/*
 * Set the global filter mode.  Refused (EBUSY) while any filters are in
 * use or (with TCP_OFFLOAD) while any port has offload active, because
 * changing TP_VLAN_PRI_MAP would invalidate existing matches.
 *
 * NOTE(review): the actual hardware update is under "#ifdef notyet", so
 * this currently always returns ENOTSUP after the validation checks.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	/* Can't change the mode while filters are installed. */
	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	/* Nor while any port is running TOE. */
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6849
6850 static inline uint64_t
6851 get_filter_hits(struct adapter *sc, uint32_t fid)
6852 {
6853         uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6854         uint64_t hits;
6855
6856         memwin_info(sc, 0, &mw_base, NULL);
6857         off = position_memwin(sc, 0,
6858             tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6859         if (is_t4(sc)) {
6860                 hits = t4_read_reg64(sc, mw_base + off + 16);
6861                 hits = be64toh(hits);
6862         } else {
6863                 hits = t4_read_reg(sc, mw_base + off + 24);
6864                 hits = be32toh(hits);
6865         }
6866
6867         return (hits);
6868 }
6869
/*
 * Return the first valid filter at or after t->idx, filling in its index,
 * L2T/SMT indices, hit count, and specification.  t->idx is set to
 * 0xffffffff when no valid filter is found.  Once the synchronized op has
 * been entered this always returns 0; "not found" is reported via idx.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	/* No filters installed, no table, or index out of range. */
	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			/* Hit counts exist only if the filter asked for them. */
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6908
/*
 * Program a hardware filter at t->idx according to t->fs.  Validates the
 * request against the adapter's filter mode and resources, allocates the
 * filter table on first use, then sends a FW_FILTER_WR.  On successful
 * submission, sleeps until the firmware reply (t4_filter_rpl) marks the
 * entry valid or reports failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* The filter table (and its lock) is allocated lazily on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/* An IPv6 filter occupies 4 slots; check every slot it will use. */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for the firmware reply; f->pending is cleared by
		 * t4_filter_rpl.  An interrupted sleep reports EINPROGRESS
		 * (the operation may still complete asynchronously).
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
7023
/*
 * Remove the hardware filter at t->idx.  Sends a filter-delete work
 * request and, if one was sent, sleeps until the firmware reply
 * (t4_filter_rpl) clears the entry.  t->fs is filled in with the deleted
 * filter's specification as extra information for the caller.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* Another operation on this filter is still in flight. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;  /* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for the firmware reply; note the inverted sense vs
		 * set_filter: success means the entry is no longer valid.
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
7091
7092 static void
7093 clear_filter(struct filter_entry *f)
7094 {
7095         if (f->l2t)
7096                 t4_l2t_release(f->l2t);
7097
7098         bzero(f, sizeof (*f));
7099 }
7100
/*
 * Build and send the FW_FILTER_WR that installs filter 'fidx' in hardware.
 * Allocates a switching L2T entry first if the filter rewrites the dmac or
 * vlan.  The reply arrives asynchronously via t4_filter_rpl, which clears
 * f->pending.  Caller must be inside a synchronized op.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Hardware filter tid = position in the firmware's filter region. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Translate the filter specification into the firmware's WR layout. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies are steered to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Cleared by t4_filter_rpl when the firmware responds. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7203
7204 static int
7205 del_filter_wr(struct adapter *sc, int fidx)
7206 {
7207         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7208         struct wrqe *wr;
7209         struct fw_filter_wr *fwr;
7210         unsigned int ftid;
7211
7212         ftid = sc->tids.ftid_base + fidx;
7213
7214         wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7215         if (wr == NULL)
7216                 return (ENOMEM);
7217         fwr = wrtod(wr);
7218         bzero(fwr, sizeof (*fwr));
7219
7220         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7221
7222         f->pending = 1;
7223         t4_wrq_tx(sc, wr);
7224         return (0);
7225 }
7226
/*
 * Handler for the firmware's reply to a filter work request (delivered as
 * a CPL_SET_TCB_RPL).  Updates the corresponding filter_entry and wakes
 * any thread sleeping in set_filter/del_filter on &sc->tids.ftid_tab.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Only replies for tids in the filter region are handled here. */
	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		/* The firmware's status is carried in the reply cookie. */
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted or failed: release resources and zero it. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7269
7270 static int
7271 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7272 {
7273         int rc;
7274
7275         if (cntxt->cid > M_CTXTQID)
7276                 return (EINVAL);
7277
7278         if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7279             cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7280                 return (EINVAL);
7281
7282         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7283         if (rc)
7284                 return (rc);
7285
7286         if (sc->flags & FW_OK) {
7287                 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7288                     &cntxt->data[0]);
7289                 if (rc == 0)
7290                         goto done;
7291         }
7292
7293         /*
7294          * Read via firmware failed or wasn't even attempted.  Read directly via
7295          * the backdoor.
7296          */
7297         rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7298 done:
7299         end_synchronized_op(sc, 0);
7300         return (rc);
7301 }
7302
/*
 * Flash a firmware image supplied by userland onto the card.  Refused
 * (EBUSY) once the adapter is fully initialized.
 *
 * NOTE(review): fw->len comes from userland and is passed to malloc and
 * copyin unvalidated here; t4_load_fw is presumably the layer that rejects
 * bad sizes — confirm.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	/* The firmware can only be flashed before the adapter is up. */
	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7333
/*
 * Copy a range of card memory out to userland through memory window
 * 'win'.  The range is validated first, then read 32 bits at a time via
 * the window and copied out one aperture-sized chunk at a time.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	/* The bounce buffer never needs to be larger than one aperture. */
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7374
7375 static int
7376 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7377 {
7378         int rc;
7379
7380         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7381                 return (EINVAL);
7382
7383         if (i2cd->len > 1) {
7384                 /* XXX: need fw support for longer reads in one go */
7385                 return (ENOTSUP);
7386         }
7387
7388         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7389         if (rc)
7390                 return (rc);
7391         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7392             i2cd->offset, &i2cd->data[0]);
7393         end_synchronized_op(sc, 0);
7394
7395         return (rc);
7396 }
7397
/*
 * Range check with a wildcard: a negative value means "unspecified" and
 * always passes; otherwise the value must lie in [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{
	if (val < 0)
		return (1);
	return (lo <= val && val <= hi);
}
7404
/*
 * Configure a TX scheduler class via the firmware, on behalf of the
 * CHELSIO_T4_SCHED_CLASS ioctl.  Translates the cxgbetool-level request in
 * *p into firmware nomenclature, validates it, and passes it on.
 * Returns 0 or an errno.  May modify *p (unset fields are normalized to 0).
 */
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	/* Scheduler configuration needs a fully initialized adapter. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
	 * sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	/* Only packet scheduling is supported here. */
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ..*/
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ...*/
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		/* Map the level/mode/rate unit/rate mode enums 1:1 onto the
		 * firmware's constants; anything unrecognized is EINVAL. */
		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		/* NOTE(review): the T5 upper class bound of 16 (vs 15 on T4)
		 * looks asymmetric — confirm against the firmware's class
		 * count before changing it. */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		/* maxrate is mandatory for the rate-limiting levels. */
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		/* weight is mandatory for weighted round robin. */
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		/* pktsize is mandatory for the rate-limiting levels. */
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7553
/*
 * Bind TX queue(s) of a port to a scheduler class, on behalf of the
 * CHELSIO_T4_SCHED_QUEUE ioctl.  p->queue < 0 means "all TX queues of the
 * port"; p->cl < 0 unbinds (0xffffffff is passed to the firmware).
 * Returns 0 or an errno.
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	/* TX queues only exist once the adapter is fully initialized. */
	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	/* Negative queue/cl mean "all queues" / "unbind" (see in_range). */
	pi = sc->port[p->port];
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If op.queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7619
7620 int
7621 t4_os_find_pci_capability(struct adapter *sc, int cap)
7622 {
7623         int i;
7624
7625         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7626 }
7627
7628 int
7629 t4_os_pci_save_state(struct adapter *sc)
7630 {
7631         device_t dev;
7632         struct pci_devinfo *dinfo;
7633
7634         dev = sc->dev;
7635         dinfo = device_get_ivars(dev);
7636
7637         pci_cfg_save(dev, dinfo, 0);
7638         return (0);
7639 }
7640
7641 int
7642 t4_os_pci_restore_state(struct adapter *sc)
7643 {
7644         device_t dev;
7645         struct pci_devinfo *dinfo;
7646
7647         dev = sc->dev;
7648         dinfo = device_get_ivars(dev);
7649
7650         pci_cfg_restore(dev, dinfo);
7651         return (0);
7652 }
7653
7654 void
7655 t4_os_portmod_changed(const struct adapter *sc, int idx)
7656 {
7657         struct port_info *pi = sc->port[idx];
7658         static const char *mod_str[] = {
7659                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7660         };
7661
7662         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7663                 if_printf(pi->ifp, "transceiver unplugged.\n");
7664         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7665                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7666         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7667                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7668         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7669                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7670                     mod_str[pi->mod_type]);
7671         } else {
7672                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7673                     pi->mod_type);
7674         }
7675 }
7676
7677 void
7678 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7679 {
7680         struct port_info *pi = sc->port[idx];
7681         struct ifnet *ifp = pi->ifp;
7682
7683         if (link_stat) {
7684                 pi->linkdnrc = -1;
7685                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7686                 if_link_state_change(ifp, LINK_STATE_UP);
7687         } else {
7688                 if (reason >= 0)
7689                         pi->linkdnrc = reason;
7690                 if_link_state_change(ifp, LINK_STATE_DOWN);
7691         }
7692 }
7693
7694 void
7695 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7696 {
7697         struct adapter *sc;
7698
7699         sx_slock(&t4_list_lock);
7700         SLIST_FOREACH(sc, &t4_list, link) {
7701                 /*
7702                  * func should not make any assumptions about what state sc is
7703                  * in - the only guarantee is that sc->sc_lock is a valid lock.
7704                  */
7705                 func(sc, arg);
7706         }
7707         sx_sunlock(&t4_list_lock);
7708 }
7709
/* Control device open handler: no per-open state, always succeeds. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7715
/* Control device close handler: nothing to tear down, always succeeds. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7721
/*
 * ioctl handler for the nexus control device.  Every command requires
 * PRIV_DRIVER.  This is mostly a dispatcher: the bulk of the work is done
 * by per-command helpers; the register and stats commands are handled
 * inline.  Returns 0 or an errno.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Offset must be 4-byte aligned and within the register BAR. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry a value > 32 bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	/* Filter, SGE context, firmware load, memory and i2c commands are
	 * delegated to their helpers. */
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);

		/* Software queue counters exist only after port init. */
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* Control queue for this port. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
7881
7882 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TCP offload (TOE) on a port.  Must be called inside a
 * synchronized operation.  Tracks per-port state in sc->offload_map and
 * activates the TOM ULD on first use.  Returns 0 or an errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* Bring the adapter up first if it isn't already. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port: nothing to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* First port to enable TOE activates the TOM ULD. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
7931
7932 /*
7933  * Add an upper layer driver to the global list.
7934  */
7935 int
7936 t4_register_uld(struct uld_info *ui)
7937 {
7938         int rc = 0;
7939         struct uld_info *u;
7940
7941         sx_xlock(&t4_uld_list_lock);
7942         SLIST_FOREACH(u, &t4_uld_list, link) {
7943             if (u->uld_id == ui->uld_id) {
7944                     rc = EEXIST;
7945                     goto done;
7946             }
7947         }
7948
7949         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7950         ui->refcount = 0;
7951 done:
7952         sx_xunlock(&t4_uld_list_lock);
7953         return (rc);
7954 }
7955
7956 int
7957 t4_unregister_uld(struct uld_info *ui)
7958 {
7959         int rc = EINVAL;
7960         struct uld_info *u;
7961
7962         sx_xlock(&t4_uld_list_lock);
7963
7964         SLIST_FOREACH(u, &t4_uld_list, link) {
7965             if (u == ui) {
7966                     if (ui->refcount > 0) {
7967                             rc = EBUSY;
7968                             goto done;
7969                     }
7970
7971                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7972                     rc = 0;
7973                     goto done;
7974             }
7975         }
7976 done:
7977         sx_xunlock(&t4_uld_list_lock);
7978         return (rc);
7979 }
7980
7981 int
7982 t4_activate_uld(struct adapter *sc, int id)
7983 {
7984         int rc = EAGAIN;
7985         struct uld_info *ui;
7986
7987         ASSERT_SYNCHRONIZED_OP(sc);
7988
7989         sx_slock(&t4_uld_list_lock);
7990
7991         SLIST_FOREACH(ui, &t4_uld_list, link) {
7992                 if (ui->uld_id == id) {
7993                         rc = ui->activate(sc);
7994                         if (rc == 0)
7995                                 ui->refcount++;
7996                         goto done;
7997                 }
7998         }
7999 done:
8000         sx_sunlock(&t4_uld_list_lock);
8001
8002         return (rc);
8003 }
8004
8005 int
8006 t4_deactivate_uld(struct adapter *sc, int id)
8007 {
8008         int rc = EINVAL;
8009         struct uld_info *ui;
8010
8011         ASSERT_SYNCHRONIZED_OP(sc);
8012
8013         sx_slock(&t4_uld_list_lock);
8014
8015         SLIST_FOREACH(ui, &t4_uld_list, link) {
8016                 if (ui->uld_id == id) {
8017                         rc = ui->deactivate(sc);
8018                         if (rc == 0)
8019                                 ui->refcount--;
8020                         goto done;
8021                 }
8022         }
8023 done:
8024         sx_sunlock(&t4_uld_list_lock);
8025
8026         return (rc);
8027 }
8028 #endif
8029
8030 /*
8031  * Come up with reasonable defaults for some of the tunables, provided they're
8032  * not set by the user (in which case we'll use the values as is).
8033  */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;      /* our snapshot of the number of CPUs */

	/* Default queue counts: one per CPU, capped at the per-speed limit. */
	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	/* TOE capability defaults on only when the kernel supports it. */
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	/* Clamp interrupt holdoff timer/counter indices to valid ranges
	 * (-1 is allowed for the packet counters and means "disabled"). */
	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	/* Queue sizes: at least 128 descriptors; rx queue size is rounded
	 * up to a multiple of 8. */
	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	/* Strip any unknown bits from the interrupt-type tunable. */
	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
8093
/*
 * Module load/unload handler shared by the t4nex and t5nex drivers.  The
 * 'loaded' counter ensures global state is initialized exactly once (on
 * the first load) and torn down exactly once (on the last unload).
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;	/* number of nexus modules currently loaded */

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first load does the global setup. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		sx_init(&t4_list_lock, "T4/T5 adapters");
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload does the global teardown. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		sx_slock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_uld_list_lock);
			break;
		}
		sx_sunlock(&t4_uld_list_lock);
		sx_destroy(&t4_uld_list_lock);
#endif
		/* Refuse to unload while any adapter is still attached. */
		sx_slock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_list_lock);
			break;
		}
		sx_sunlock(&t4_list_lock);
		sx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
8142
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus driver (PCI attachment); mod_event is shared with t5nex. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus driver (PCI attachment). */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers that attach to their nexus. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);