FreeBSD releng/9.3 — sys/dev/cxgbe/t4_main.c (Chelsio T4/T5 Ethernet driver).
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus method table for the T4 nexus device. */
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
/* The softc for a t4nex device is the entire adapter structure. */
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/*
 * Character device for the T4 nexus.  Gives userland tools register,
 * memory, and ioctl access to the adapter (works even in recovery mode,
 * see t4_attach).
 */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
/* T5 bus driver interface */
static int t5_probe(device_t);
/* T5 shares attach/detach with T4; only the probe (device-id table) differs. */
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface: same methods as cxgbe, different driver name. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 nexus char device; shares all handlers with the T4 one. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

/* malloc(9) type used for all of this driver's allocations. */
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;			/* protects t4_list */
static SLIST_HEAD(, adapter) t4_list;		/* all attached adapters */
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;		/* protects t4_uld_list */
static SLIST_HEAD(, uld_info) t4_uld_list;	/* registered upper-layer drivers */
#endif
169
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

/* Nonzero reserves NIC txq 0 of a port; see rsrv_noflowq in t4_attach. */
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.  These become the per-port
 * tmr_idx/pktc_idx defaults in t4_attach.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 * All three are allowed by default; cfg_itype_and_nqueues() picks one.
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

/* -1: let tweak_tunables() choose the default. */
static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-only tunable (hw.cxl namespace). */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
296
/*
 * Interrupt vector type/count and per-port queue counts, filled in by
 * cfg_itype_and_nqueues() and consumed by t4_attach.
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

/*
 * Software state for one hardware filter (see get_filter/set_filter/
 * del_filter and the *_filter_wr helpers declared below).
 */
struct filter_entry {
        uint32_t valid:1;	/* filter allocated and valid */
        uint32_t locked:1;	/* filter is administratively locked */
        uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;	/* the filter specification */
};

/*
 * NOTE(review): presumably flags telling update_mac_settings() which MAC
 * parameters to reprogram — confirm against its call sites.
 */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff	/* all of the above */
};
334
335 static int map_bars_0_and_4(struct adapter *);
336 static int map_bar_2(struct adapter *);
337 static void setup_memwin(struct adapter *);
338 static int validate_mem_range(struct adapter *, uint32_t, int);
339 static int fwmtype_to_hwmtype(int);
340 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
341     uint32_t *);
342 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
343 static uint32_t position_memwin(struct adapter *, int, uint32_t);
344 static int cfg_itype_and_nqueues(struct adapter *, int, int,
345     struct intrs_and_queues *);
346 static int prep_firmware(struct adapter *);
347 static int partition_resources(struct adapter *, const struct firmware *,
348     const char *);
349 static int get_params__pre_init(struct adapter *);
350 static int get_params__post_init(struct adapter *);
351 static int set_params__post_init(struct adapter *);
352 static void t4_set_desc(struct adapter *);
353 static void build_medialist(struct port_info *);
354 static int update_mac_settings(struct port_info *, int);
355 static int cxgbe_init_synchronized(struct port_info *);
356 static int cxgbe_uninit_synchronized(struct port_info *);
357 static int setup_intr_handlers(struct adapter *);
358 static int adapter_full_init(struct adapter *);
359 static int adapter_full_uninit(struct adapter *);
360 static int port_full_init(struct port_info *);
361 static int port_full_uninit(struct port_info *);
362 static void quiesce_eq(struct adapter *, struct sge_eq *);
363 static void quiesce_iq(struct adapter *, struct sge_iq *);
364 static void quiesce_fl(struct adapter *, struct sge_fl *);
365 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
366     driver_intr_t *, void *, char *);
367 static int t4_free_irq(struct adapter *, struct irq *);
368 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
369     unsigned int);
370 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
371 static void cxgbe_tick(void *);
372 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
373 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
374     struct mbuf *);
375 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
376 static int fw_msg_not_handled(struct adapter *, const __be64 *);
377 static int t4_sysctls(struct adapter *);
378 static int cxgbe_sysctls(struct port_info *);
379 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
380 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
381 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
382 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
383 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
384 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
385 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
386 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
387 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
388 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
389 #ifdef SBUF_DRAIN
390 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
391 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
392 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
393 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
394 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
395 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
396 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
398 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
399 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
400 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
401 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
403 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
404 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
405 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
406 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
407 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
408 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
409 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
410 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
411 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
412 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
413 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
414 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
415 #endif
416 static inline void txq_start(struct ifnet *, struct sge_txq *);
417 static uint32_t fconf_to_mode(uint32_t);
418 static uint32_t mode_to_fconf(uint32_t);
419 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
420 static int get_filter_mode(struct adapter *, uint32_t *);
421 static int set_filter_mode(struct adapter *, uint32_t);
422 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
423 static int get_filter(struct adapter *, struct t4_filter *);
424 static int set_filter(struct adapter *, struct t4_filter *);
425 static int del_filter(struct adapter *, struct t4_filter *);
426 static void clear_filter(struct filter_entry *);
427 static int set_filter_wr(struct adapter *, int);
428 static int del_filter_wr(struct adapter *, int);
429 static int get_sge_context(struct adapter *, struct t4_sge_context *);
430 static int load_fw(struct adapter *, struct t4_data *);
431 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
432 static int read_i2c(struct adapter *, struct t4_i2c_data *);
433 static int set_sched_class(struct adapter *, struct t4_sched_params *);
434 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
435 #ifdef TCP_OFFLOAD
436 static int toe_capability(struct port_info *, int);
437 #endif
438 static int mod_event(module_t, int, void *);
439
/*
 * PCI device ids recognized by t4_probe() and t5_probe() respectively;
 * the matching description is set on the device at probe time.
 */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

/* Compile-time check: cluster_metadata must fit in CL_METADATA_SIZE. */
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
498
499 static int
500 t4_probe(device_t dev)
501 {
502         int i;
503         uint16_t v = pci_get_vendor(dev);
504         uint16_t d = pci_get_device(dev);
505         uint8_t f = pci_get_function(dev);
506
507         if (v != PCI_VENDOR_ID_CHELSIO)
508                 return (ENXIO);
509
510         /* Attach only to PF0 of the FPGA */
511         if (d == 0xa000 && f != 0)
512                 return (ENXIO);
513
514         for (i = 0; i < nitems(t4_pciids); i++) {
515                 if (d == t4_pciids[i].device) {
516                         device_set_desc(dev, t4_pciids[i].desc);
517                         return (BUS_PROBE_DEFAULT);
518                 }
519         }
520
521         return (ENXIO);
522 }
523
524 static int
525 t5_probe(device_t dev)
526 {
527         int i;
528         uint16_t v = pci_get_vendor(dev);
529         uint16_t d = pci_get_device(dev);
530         uint8_t f = pci_get_function(dev);
531
532         if (v != PCI_VENDOR_ID_CHELSIO)
533                 return (ENXIO);
534
535         /* Attach only to PF0 of the FPGA */
536         if (d == 0xb000 && f != 0)
537                 return (ENXIO);
538
539         for (i = 0; i < nitems(t5_pciids); i++) {
540                 if (d == t5_pciids[i].device) {
541                         device_set_desc(dev, t5_pciids[i].desc);
542                         return (BUS_PROBE_DEFAULT);
543                 }
544         }
545
546         return (ENXIO);
547 }
548
/*
 * Attach for the T4/T5 nexus device (t5nex reuses this via t5_methods).
 *
 * Sequence: enable PCI bus mastering, map BARs, install default message
 * handlers, prepare the adapter and firmware, pick an interrupt type and
 * queue counts, allocate SGE queue arrays, and add one child device
 * (cxgbe or cxl) per port.  If anything fails after the nexus character
 * device was created, the error is absorbed (rc forced to 0) and the
 * adapter is left in "recovery mode" so userland tools can still reach it.
 */
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		/* Largest read request plus relaxed ordering on PCIe devices. */
		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	/* Publish on the global adapter list (t4_list_lock is first in order). */
	mtx_lock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	mtx_unlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	/* Install catch-all handlers; real ones are registered as needed. */
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		/* 40G ports get the 10G holdoff defaults. */
		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	/* Total SGE queue counts across all ports. */
	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	/* M_WAITOK allocations cannot fail; no NULL checks needed. */
	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		/* Reserving txq 0 only makes sense with more than one txq. */
		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	/* Attach the per-port child devices added in the first pass. */
	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	/* t4_detach is idempotent and unwinds a partial attach. */
	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}
847
/*
 * Idempotent.
 *
 * Also used by t4_attach to tear down a partially attached adapter, so every
 * release below is guarded by a check that the resource actually exists.
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	/* Quiesce the hardware's interrupts before tearing anything down. */
	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	/* Remove the control device node so no new ioctls can arrive. */
	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	/* Detach the child (port) devices first; bail out if any refuses. */
	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	/* Free each port's virtual interface and its softc. */
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	/* Tell the firmware this PF is going away. */
	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	/* free(NULL, ...) is a no-op, so these are safe if never allocated. */
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	/* Unlink from the global adapter list before destroying the lock. */
	if (mtx_initialized(&sc->sc_lock)) {
		mtx_lock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);

	/* Scrub the softc so a future attach starts from a clean slate. */
	bzero(sc, sizeof(*sc));

	return (0);
}
942
943
944 static int
945 cxgbe_probe(device_t dev)
946 {
947         char buf[128];
948         struct port_info *pi = device_get_softc(dev);
949
950         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
951         device_set_desc_copy(dev, buf);
952
953         return (BUS_PROBE_DEFAULT);
954 }
955
956 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
957     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
958     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
959 #define T4_CAP_ENABLE (T4_CAP)
960
/*
 * Attach routine for a single port: allocates and configures the ifnet,
 * registers media and VLAN handlers, and attaches to the ethernet layer.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	/* Driver entry points for the network stack. */
	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	/* Advertise capabilities; TOE only if the adapter supports offload. */
	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	/* Report the queue configuration chosen during adapter attach. */
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
1018
/*
 * Detach routine for a single port.  Marks the port doomed, waits its turn
 * for the adapter-wide busy marker, then unwinds what cxgbe_attach did.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait until any in-flight synchronized operation is finished. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout under the port lock, then drain outside it. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release the busy marker taken above and wake any waiters. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1063
1064 static void
1065 cxgbe_init(void *arg)
1066 {
1067         struct port_info *pi = arg;
1068         struct adapter *sc = pi->adapter;
1069
1070         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1071                 return;
1072         cxgbe_init_synchronized(pi);
1073         end_synchronized_op(sc, 0);
1074 }
1075
1076 static int
1077 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1078 {
1079         int rc = 0, mtu, flags;
1080         struct port_info *pi = ifp->if_softc;
1081         struct adapter *sc = pi->adapter;
1082         struct ifreq *ifr = (struct ifreq *)data;
1083         uint32_t mask;
1084
1085         switch (cmd) {
1086         case SIOCSIFMTU:
1087                 mtu = ifr->ifr_mtu;
1088                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1089                         return (EINVAL);
1090
1091                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1092                 if (rc)
1093                         return (rc);
1094                 ifp->if_mtu = mtu;
1095                 if (pi->flags & PORT_INIT_DONE) {
1096                         t4_update_fl_bufsize(ifp);
1097                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1098                                 rc = update_mac_settings(pi, XGMAC_MTU);
1099                 }
1100                 end_synchronized_op(sc, 0);
1101                 break;
1102
1103         case SIOCSIFFLAGS:
1104                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1105                 if (rc)
1106                         return (rc);
1107
1108                 if (ifp->if_flags & IFF_UP) {
1109                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1110                                 flags = pi->if_flags;
1111                                 if ((ifp->if_flags ^ flags) &
1112                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1113                                         rc = update_mac_settings(pi,
1114                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1115                                 }
1116                         } else
1117                                 rc = cxgbe_init_synchronized(pi);
1118                         pi->if_flags = ifp->if_flags;
1119                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1120                         rc = cxgbe_uninit_synchronized(pi);
1121                 end_synchronized_op(sc, 0);
1122                 break;
1123
1124         case SIOCADDMULTI:      
1125         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1126                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1127                 if (rc)
1128                         return (rc);
1129                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1130                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
1131                 end_synchronized_op(sc, LOCK_HELD);
1132                 break;
1133
1134         case SIOCSIFCAP:
1135                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1136                 if (rc)
1137                         return (rc);
1138
1139                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1140                 if (mask & IFCAP_TXCSUM) {
1141                         ifp->if_capenable ^= IFCAP_TXCSUM;
1142                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1143
1144                         if (IFCAP_TSO4 & ifp->if_capenable &&
1145                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1146                                 ifp->if_capenable &= ~IFCAP_TSO4;
1147                                 if_printf(ifp,
1148                                     "tso4 disabled due to -txcsum.\n");
1149                         }
1150                 }
1151                 if (mask & IFCAP_TXCSUM_IPV6) {
1152                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1153                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1154
1155                         if (IFCAP_TSO6 & ifp->if_capenable &&
1156                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1157                                 ifp->if_capenable &= ~IFCAP_TSO6;
1158                                 if_printf(ifp,
1159                                     "tso6 disabled due to -txcsum6.\n");
1160                         }
1161                 }
1162                 if (mask & IFCAP_RXCSUM)
1163                         ifp->if_capenable ^= IFCAP_RXCSUM;
1164                 if (mask & IFCAP_RXCSUM_IPV6)
1165                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1166
1167                 /*
1168                  * Note that we leave CSUM_TSO alone (it is always set).  The
1169                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1170                  * sending a TSO request our way, so it's sufficient to toggle
1171                  * IFCAP_TSOx only.
1172                  */
1173                 if (mask & IFCAP_TSO4) {
1174                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1175                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1176                                 if_printf(ifp, "enable txcsum first.\n");
1177                                 rc = EAGAIN;
1178                                 goto fail;
1179                         }
1180                         ifp->if_capenable ^= IFCAP_TSO4;
1181                 }
1182                 if (mask & IFCAP_TSO6) {
1183                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1184                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1185                                 if_printf(ifp, "enable txcsum6 first.\n");
1186                                 rc = EAGAIN;
1187                                 goto fail;
1188                         }
1189                         ifp->if_capenable ^= IFCAP_TSO6;
1190                 }
1191                 if (mask & IFCAP_LRO) {
1192 #if defined(INET) || defined(INET6)
1193                         int i;
1194                         struct sge_rxq *rxq;
1195
1196                         ifp->if_capenable ^= IFCAP_LRO;
1197                         for_each_rxq(pi, i, rxq) {
1198                                 if (ifp->if_capenable & IFCAP_LRO)
1199                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1200                                 else
1201                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1202                         }
1203 #endif
1204                 }
1205 #ifdef TCP_OFFLOAD
1206                 if (mask & IFCAP_TOE) {
1207                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1208
1209                         rc = toe_capability(pi, enable);
1210                         if (rc != 0)
1211                                 goto fail;
1212
1213                         ifp->if_capenable ^= mask;
1214                 }
1215 #endif
1216                 if (mask & IFCAP_VLAN_HWTAGGING) {
1217                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1218                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1219                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
1220                 }
1221                 if (mask & IFCAP_VLAN_MTU) {
1222                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1223
1224                         /* Need to find out how to disable auto-mtu-inflation */
1225                 }
1226                 if (mask & IFCAP_VLAN_HWTSO)
1227                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1228                 if (mask & IFCAP_VLAN_HWCSUM)
1229                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1230
1231 #ifdef VLAN_CAPABILITIES
1232                 VLAN_CAPABILITIES(ifp);
1233 #endif
1234 fail:
1235                 end_synchronized_op(sc, 0);
1236                 break;
1237
1238         case SIOCSIFMEDIA:
1239         case SIOCGIFMEDIA:
1240                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1241                 break;
1242
1243         default:
1244                 rc = ether_ioctl(ifp, cmd, data);
1245         }
1246
1247         return (rc);
1248 }
1249
/*
 * if_transmit handler.  Selects a tx queue from the mbuf's flowid, then
 * either transmits directly (queue lock acquired) or enqueues on the
 * queue's buf_ring for later service.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	/* Drop everything while the link is down. */
	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/*
	 * Spread flows across the queues after the first rsrv_noflowq, which
	 * are reserved for traffic without a flowid.
	 */
	if (m->m_flags & M_FLOWID)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
		    + pi->rsrv_noflowq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0; /* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1317
1318 static void
1319 cxgbe_qflush(struct ifnet *ifp)
1320 {
1321         struct port_info *pi = ifp->if_softc;
1322         struct sge_txq *txq;
1323         int i;
1324         struct mbuf *m;
1325
1326         /* queues do not exist if !PORT_INIT_DONE. */
1327         if (pi->flags & PORT_INIT_DONE) {
1328                 for_each_txq(pi, i, txq) {
1329                         TXQ_LOCK(txq);
1330                         m_freem(txq->m);
1331                         txq->m = NULL;
1332                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1333                                 m_freem(m);
1334                         TXQ_UNLOCK(txq);
1335                 }
1336         }
1337         if_qflush(ifp);
1338 }
1339
1340 static int
1341 cxgbe_media_change(struct ifnet *ifp)
1342 {
1343         struct port_info *pi = ifp->if_softc;
1344
1345         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1346
1347         return (EOPNOTSUPP);
1348 }
1349
1350 static void
1351 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1352 {
1353         struct port_info *pi = ifp->if_softc;
1354         struct ifmedia_entry *cur = pi->media.ifm_cur;
1355         int speed = pi->link_cfg.speed;
1356         int data = (pi->port_type << 8) | pi->mod_type;
1357
1358         if (cur->ifm_data != data) {
1359                 build_medialist(pi);
1360                 cur = pi->media.ifm_cur;
1361         }
1362
1363         ifmr->ifm_status = IFM_AVALID;
1364         if (!pi->link_cfg.link_ok)
1365                 return;
1366
1367         ifmr->ifm_status |= IFM_ACTIVE;
1368
1369         /* active and current will differ iff current media is autoselect. */
1370         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1371                 return;
1372
1373         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1374         if (speed == SPEED_10000)
1375                 ifmr->ifm_active |= IFM_10G_T;
1376         else if (speed == SPEED_1000)
1377                 ifmr->ifm_active |= IFM_1000_T;
1378         else if (speed == SPEED_100)
1379                 ifmr->ifm_active |= IFM_100_TX;
1380         else if (speed == SPEED_10)
1381                 ifmr->ifm_active |= IFM_10_T;
1382         else
1383                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1384                             speed));
1385 }
1386
/*
 * Fatal error handler: stops the SGE, masks all interrupts, and logs the
 * event at emergency priority.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1395
/*
 * Map BAR0 (device registers) and BAR4 (MSI-X).  Both mappings are mandatory;
 * a failure of either is fatal to attach.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
	/* BAR0: the main register window. */
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* The kernel doorbell is always available once BAR0 is mapped. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	/* BAR4: MSI-X. */
	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1421
/*
 * Map BAR2 (userspace doorbells).  On T5, also try to enable write combining
 * on the mapping and record which doorbell mechanisms are usable.
 */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC works: prefer it over the plain UDB. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1477
/* Memory window base/aperture pairs for T4 chips. */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};
1483
/* Memory window base/aperture pairs for T5 chips (window 2 differs). */
static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1489
/*
 * Program the chip's PCIe memory window decoders from the per-chip memwin
 * table so adapter memory can be accessed through the register BAR.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	/* Window size is encoded as log2(aperture) - 10. */
	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1528
1529 /*
1530  * Verify that the memory range specified by the addr/len pair is valid and lies
1531  * entirely within a single region (EDCx or MCx).
1532  */
1533 static int
1534 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1535 {
1536         uint32_t em, addr_len, maddr, mlen;
1537
1538         /* Memory can only be accessed in naturally aligned 4 byte units */
1539         if (addr & 3 || len & 3 || len == 0)
1540                 return (EINVAL);
1541
1542         /* Enabled memories */
1543         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1544         if (em & F_EDRAM0_ENABLE) {
1545                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1546                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1547                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1548                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549                     addr + len <= maddr + mlen)
1550                         return (0);
1551         }
1552         if (em & F_EDRAM1_ENABLE) {
1553                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1554                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1555                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1556                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1557                     addr + len <= maddr + mlen)
1558                         return (0);
1559         }
1560         if (em & F_EXT_MEM_ENABLE) {
1561                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1562                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1563                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1564                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1565                     addr + len <= maddr + mlen)
1566                         return (0);
1567         }
1568         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1569                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1570                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1571                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1572                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1573                     addr + len <= maddr + mlen)
1574                         return (0);
1575         }
1576
1577         return (EFAULT);
1578 }
1579
/*
 * Translate a firmware memory type (FW_MEMTYPE_*) to the driver's memory
 * type (MEM_*).  Panics on an unknown firmware type.
 */
static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}
1597
/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Look up the base/size of the requested memory, if it is enabled. */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		/*
		 * NOTE(review): fwmtype_to_hwmtype() returns MEM_MC0 for
		 * FW_MEMTYPE_EXTMEM; presumably MEM_MC0 is an alias of
		 * MEM_MC — confirm against the MEM_* enum definition.
		 */
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		/* MC1 exists on T5 and later only. */
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}
1654
1655 static void
1656 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1657 {
1658         const struct memwin *mw;
1659
1660         if (is_t4(sc)) {
1661                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1662                     ("%s: incorrect memwin# (%d)", __func__, win));
1663                 mw = &t4_memwin[win];
1664         } else {
1665                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1666                     ("%s: incorrect memwin# (%d)", __func__, win));
1667                 mw = &t5_memwin[win];
1668         }
1669
1670         if (base != NULL)
1671                 *base = mw->base;
1672         if (aperture != NULL)
1673                 *aperture = mw->aperture;
1674 }
1675
1676 /*
1677  * Positions the memory window such that it can be used to access the specified
1678  * address in the chip's address space.  The return value is the offset of addr
1679  * from the start of the window.
1680  */
1681 static uint32_t
1682 position_memwin(struct adapter *sc, int n, uint32_t addr)
1683 {
1684         uint32_t start, pf;
1685         uint32_t reg;
1686
1687         KASSERT(n >= 0 && n <= 3,
1688             ("%s: invalid window %d.", __func__, n));
1689         KASSERT((addr & 3) == 0,
1690             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1691
1692         if (is_t4(sc)) {
1693                 pf = 0;
1694                 start = addr & ~0xf;    /* start must be 16B aligned */
1695         } else {
1696                 pf = V_PFNUM(sc->pf);
1697                 start = addr & ~0x7f;   /* start must be 128B aligned */
1698         }
1699         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1700
1701         t4_write_reg(sc, reg, start | pf);
1702         t4_read_reg(sc, reg);
1703
1704         return (addr - start);
1705 }
1706
1707 static int
1708 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1709     struct intrs_and_queues *iaq)
1710 {
1711         int rc, itype, navail, nrxq10g, nrxq1g, n;
1712         int nofldrxq10g = 0, nofldrxq1g = 0;
1713
1714         bzero(iaq, sizeof(*iaq));
1715
1716         iaq->ntxq10g = t4_ntxq10g;
1717         iaq->ntxq1g = t4_ntxq1g;
1718         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1719         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1720         iaq->rsrv_noflowq = t4_rsrv_noflowq;
1721 #ifdef TCP_OFFLOAD
1722         if (is_offload(sc)) {
1723                 iaq->nofldtxq10g = t4_nofldtxq10g;
1724                 iaq->nofldtxq1g = t4_nofldtxq1g;
1725                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1726                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1727         }
1728 #endif
1729
1730         for (itype = INTR_MSIX; itype; itype >>= 1) {
1731
1732                 if ((itype & t4_intr_types) == 0)
1733                         continue;       /* not allowed */
1734
1735                 if (itype == INTR_MSIX)
1736                         navail = pci_msix_count(sc->dev);
1737                 else if (itype == INTR_MSI)
1738                         navail = pci_msi_count(sc->dev);
1739                 else
1740                         navail = 1;
1741 restart:
1742                 if (navail == 0)
1743                         continue;
1744
1745                 iaq->intr_type = itype;
1746                 iaq->intr_flags = 0;
1747
1748                 /*
1749                  * Best option: an interrupt vector for errors, one for the
1750                  * firmware event queue, and one each for each rxq (NIC as well
1751                  * as offload).
1752                  */
1753                 iaq->nirq = T4_EXTRA_INTR;
1754                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1755                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1756                 if (iaq->nirq <= navail &&
1757                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1758                         iaq->intr_flags |= INTR_DIRECT;
1759                         goto allocate;
1760                 }
1761
1762                 /*
1763                  * Second best option: an interrupt vector for errors, one for
1764                  * the firmware event queue, and one each for either NIC or
1765                  * offload rxq's.
1766                  */
1767                 iaq->nirq = T4_EXTRA_INTR;
1768                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1769                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1770                 if (iaq->nirq <= navail &&
1771                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1772                         goto allocate;
1773
1774                 /*
1775                  * Next best option: an interrupt vector for errors, one for the
1776                  * firmware event queue, and at least one per port.  At this
1777                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1778                  * what's available to us.
1779                  */
1780                 iaq->nirq = T4_EXTRA_INTR;
1781                 iaq->nirq += n10g + n1g;
1782                 if (iaq->nirq <= navail) {
1783                         int leftover = navail - iaq->nirq;
1784
1785                         if (n10g > 0) {
1786                                 int target = max(nrxq10g, nofldrxq10g);
1787
1788                                 n = 1;
1789                                 while (n < target && leftover >= n10g) {
1790                                         leftover -= n10g;
1791                                         iaq->nirq += n10g;
1792                                         n++;
1793                                 }
1794                                 iaq->nrxq10g = min(n, nrxq10g);
1795 #ifdef TCP_OFFLOAD
1796                                 if (is_offload(sc))
1797                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1798 #endif
1799                         }
1800
1801                         if (n1g > 0) {
1802                                 int target = max(nrxq1g, nofldrxq1g);
1803
1804                                 n = 1;
1805                                 while (n < target && leftover >= n1g) {
1806                                         leftover -= n1g;
1807                                         iaq->nirq += n1g;
1808                                         n++;
1809                                 }
1810                                 iaq->nrxq1g = min(n, nrxq1g);
1811 #ifdef TCP_OFFLOAD
1812                                 if (is_offload(sc))
1813                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1814 #endif
1815                         }
1816
1817                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1818                                 goto allocate;
1819                 }
1820
1821                 /*
1822                  * Least desirable option: one interrupt vector for everything.
1823                  */
1824                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1825 #ifdef TCP_OFFLOAD
1826                 if (is_offload(sc))
1827                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1828 #endif
1829
1830 allocate:
1831                 navail = iaq->nirq;
1832                 rc = 0;
1833                 if (itype == INTR_MSIX)
1834                         rc = pci_alloc_msix(sc->dev, &navail);
1835                 else if (itype == INTR_MSI)
1836                         rc = pci_alloc_msi(sc->dev, &navail);
1837
1838                 if (rc == 0) {
1839                         if (navail == iaq->nirq)
1840                                 return (0);
1841
1842                         /*
1843                          * Didn't get the number requested.  Use whatever number
1844                          * the kernel is willing to allocate (it's in navail).
1845                          */
1846                         device_printf(sc->dev, "fewer vectors than requested, "
1847                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1848                             itype, iaq->nirq, navail);
1849                         pci_release_msi(sc->dev);
1850                         goto restart;
1851                 }
1852
1853                 device_printf(sc->dev,
1854                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1855                     itype, rc, iaq->nirq, navail);
1856         }
1857
1858         device_printf(sc->dev,
1859             "failed to find a usable interrupt type.  "
1860             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1861             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1862
1863         return (ENXIO);
1864 }
1865
1866 #define FW_VERSION(chip) ( \
1867     V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1868     V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1869     V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1870     V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1871 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1872
/*
 * Per-chip firmware information.  fw_hdr records the firmware version and
 * interface versions this driver was compiled against (see FW_VERSION and
 * FW_INTFVER above); fw_compatible() compares the firmware on the card and
 * in the KLD against it.
 */
struct fw_info {
        uint8_t chip;           /* chip family id (CHELSIO_T4/CHELSIO_T5) */
        char *kld_name;         /* KLD that also holds the default cfg file */
        char *fw_mod_name;      /* firmware module within that KLD */
        struct fw_hdr fw_hdr;   /* XXX: waste of space, need a sparse struct */
} fw_info[] = {
        {
                .chip = CHELSIO_T4,
                .kld_name = "t4fw_cfg",
                .fw_mod_name = "t4fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T4,
                        .fw_ver = htobe32_const(FW_VERSION(T4)),
                        .intfver_nic = FW_INTFVER(T4, NIC),
                        .intfver_vnic = FW_INTFVER(T4, VNIC),
                        .intfver_ofld = FW_INTFVER(T4, OFLD),
                        .intfver_ri = FW_INTFVER(T4, RI),
                        .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
                },
        }, {
                .chip = CHELSIO_T5,
                .kld_name = "t5fw_cfg",
                .fw_mod_name = "t5fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T5,
                        .fw_ver = htobe32_const(FW_VERSION(T5)),
                        .intfver_nic = FW_INTFVER(T5, NIC),
                        .intfver_vnic = FW_INTFVER(T5, VNIC),
                        .intfver_ofld = FW_INTFVER(T5, OFLD),
                        .intfver_ri = FW_INTFVER(T5, RI),
                        .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
                },
        }
};
1913
1914 static struct fw_info *
1915 find_fw_info(int chip)
1916 {
1917         int i;
1918
1919         for (i = 0; i < nitems(fw_info); i++) {
1920                 if (fw_info[i].chip == chip)
1921                         return (&fw_info[i]);
1922         }
1923         return (NULL);
1924 }
1925
1926 /*
1927  * Is the given firmware API compatible with the one the driver was compiled
1928  * with?
1929  */
1930 static int
1931 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1932 {
1933
1934         /* short circuit if it's the exact same firmware version */
1935         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1936                 return (1);
1937
1938         /*
1939          * XXX: Is this too conservative?  Perhaps I should limit this to the
1940          * features that are supported in the driver.
1941          */
1942 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1943         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1944             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1945             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1946                 return (1);
1947 #undef SAME_INTF
1948
1949         return (0);
1950 }
1951
1952 /*
1953  * The firmware in the KLD is usable, but should it be installed?  This routine
1954  * explains itself in detail if it indicates the KLD firmware should be
1955  * installed.
1956  */
1957 static int
1958 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1959 {
1960         const char *reason;
1961
1962         if (!card_fw_usable) {
1963                 reason = "incompatible or unusable";
1964                 goto install;
1965         }
1966
1967         if (k > c) {
1968                 reason = "older than the version bundled with this driver";
1969                 goto install;
1970         }
1971
1972         if (t4_fw_install == 2 && k != c) {
1973                 reason = "different than the version bundled with this driver";
1974                 goto install;
1975         }
1976
1977         return (0);
1978
1979 install:
1980         if (t4_fw_install == 0) {
1981                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1982                     "but the driver is prohibited from installing a different "
1983                     "firmware on the card.\n",
1984                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1985                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1986
1987                 return (0);
1988         }
1989
1990         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1991             "installing firmware %u.%u.%u.%u on card.\n",
1992             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1993             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1994             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1995             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1996
1997         return (1);
1998 }
1999 /*
2000  * Establish contact with the firmware and determine if we are the master driver
2001  * or not, and whether we are responsible for chip initialization.
2002  */
static int
prep_firmware(struct adapter *sc)
{
        const struct firmware *fw = NULL, *default_cfg;
        int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
        enum dev_state state;
        struct fw_info *fw_info;
        struct fw_hdr *card_fw;         /* fw on the card */
        const struct fw_hdr *kld_fw;    /* fw in the KLD */
        const struct fw_hdr *drv_fw;    /* fw header the driver was compiled
                                           against */

        /* Contact firmware. */
        rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
        if (rc < 0 || state == DEV_STATE_ERR) {
                rc = -rc;
                device_printf(sc->dev,
                    "failed to connect to the firmware: %d, %d.\n", rc, state);
                return (rc);
        }
        pf = rc;        /* the master's PF; we are master iff it's our mbox */
        if (pf == sc->mbox)
                sc->flags |= MASTER_PF;
        else if (state == DEV_STATE_UNINIT) {
                /*
                 * We didn't get to be the master so we definitely won't be
                 * configuring the chip.  It's a bug if someone else hasn't
                 * configured it already.
                 */
                device_printf(sc->dev, "couldn't be master(%d), "
                    "device not already initialized either(%d).\n", rc, state);
                return (EDOOFUS);
        }

        /* This is the firmware whose headers the driver was compiled against */
        fw_info = find_fw_info(chip_id(sc));
        if (fw_info == NULL) {
                device_printf(sc->dev,
                    "unable to look up firmware information for chip %d.\n",
                    chip_id(sc));
                return (EINVAL);
        }
        drv_fw = &fw_info->fw_hdr;

        /*
         * The firmware KLD contains many modules.  The KLD name is also the
         * name of the module that contains the default config file.
         */
        default_cfg = firmware_get(fw_info->kld_name);

        /* Read the header of the firmware on the card */
        card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_read_flash(sc, FLASH_FW_START,
            sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
        if (rc == 0)
                card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
        else {
                device_printf(sc->dev,
                    "Unable to read card's firmware header: %d\n", rc);
                card_fw_usable = 0;
        }

        /* This is the firmware in the KLD */
        fw = firmware_get(fw_info->fw_mod_name);
        if (fw != NULL) {
                kld_fw = (const void *)fw->data;
                kld_fw_usable = fw_compatible(drv_fw, kld_fw);
        } else {
                kld_fw = NULL;
                kld_fw_usable = 0;
        }

        if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
            (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
                /*
                 * Common case: the firmware on the card is an exact match and
                 * the KLD is an exact match too, or the KLD is
                 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
                 * here -- use cxgbetool loadfw if you want to reinstall the
                 * same firmware as the one on the card.
                 */
        } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
            should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
            be32toh(card_fw->fw_ver))) {

                /* Only the master on an uninitialized chip installs fw. */
                rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to install firmware: %d\n", rc);
                        goto done;
                }

                /* Installed successfully, update the cached header too. */
                memcpy(card_fw, kld_fw, sizeof(*card_fw));
                card_fw_usable = 1;
                need_fw_reset = 0;      /* already reset as part of load_fw */
        }

        if (!card_fw_usable) {
                /* Nothing usable anywhere; report all three versions. */
                uint32_t d, c, k;

                d = ntohl(drv_fw->fw_ver);
                c = ntohl(card_fw->fw_ver);
                k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

                device_printf(sc->dev, "Cannot find a usable firmware: "
                    "fw_install %d, chip state %d, "
                    "driver compiled with %d.%d.%d.%d, "
                    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
                    t4_fw_install, state,
                    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
                    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
                    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
                    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
                    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
                    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
                rc = EINVAL;
                goto done;
        }

        /* We're using whatever's on the card and it's known to be good. */
        sc->params.fw_vers = ntohl(card_fw->fw_ver);
        snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
            G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
            G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
        t4_get_tp_version(sc, &sc->params.tp_vers);

        /* Reset device */
        if (need_fw_reset &&
            (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
                device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
                if (rc != ETIMEDOUT && rc != EIO)
                        t4_fw_bye(sc, sc->mbox);
                goto done;
        }
        sc->flags |= FW_OK;

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /* Partition adapter resources as specified in the config file. */
        if (state == DEV_STATE_UNINIT) {

                KASSERT(sc->flags & MASTER_PF,
                    ("%s: trying to change chip settings when not master.",
                    __func__));

                rc = partition_resources(sc, default_cfg, fw_info->kld_name);
                if (rc != 0)
                        goto done;      /* error message displayed already */

                t4_tweak_chip_settings(sc);

                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        device_printf(sc->dev, "fw init failed: %d.\n", rc);
                        goto done;
                }
        } else {
                /* Chip already set up by PF 'pf'; just record that fact. */
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
                sc->cfcsum = 0;
        }

done:
        /* Everything needed has been copied out; release the images. */
        free(card_fw, M_CXGBE);
        if (fw != NULL)
                firmware_put(fw, FIRMWARE_UNLOAD);
        if (default_cfg != NULL)
                firmware_put(default_cfg, FIRMWARE_UNLOAD);

        return (rc);
}
2179
2180 #define FW_PARAM_DEV(param) \
2181         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2182          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2183 #define FW_PARAM_PFVF(param) \
2184         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2185          V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2186
2187 /*
2188  * Partition chip resources for use between various PFs, VFs, etc.
2189  */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
        const struct firmware *cfg = NULL;
        int rc = 0;
        struct fw_caps_config_cmd caps;
        uint32_t mtype, moff, finicsum, cfcsum;

        /*
         * Figure out what configuration file to use.  Pick the default config
         * file for the card if the user hasn't specified one explicitly.
         */
        snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
        if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
                /* Card specific overrides go here. */
                if (pci_get_device(sc->dev) == 0x440a)
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
                if (is_fpga(sc))
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
        }

        /*
         * We need to load another module if the profile is anything except
         * "default" or "flash".
         */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
            strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                char s[32];

                /* Module name is "<kld name>_<profile>". */
                snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
                cfg = firmware_get(s);
                if (cfg == NULL) {
                        /* Fall back to the default cfg, or flash if there
                           isn't one. */
                        if (default_cfg != NULL) {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the default config file instead.\n",
                                    s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", DEFAULT_CF);
                        } else {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the config file on the card's flash "
                                    "instead.\n", s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", FLASH_CF);
                        }
                }
        }

        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
            default_cfg == NULL) {
                device_printf(sc->dev,
                    "default config file not available, will use the config "
                    "file on the card's flash instead.\n");
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
        }

        /* Upload the config file to card memory, unless using flash. */
        if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                u_int cflen, i, n;
                const uint32_t *cfdata;
                uint32_t param, val, addr, off, mw_base, mw_aperture;

                KASSERT(cfg != NULL || default_cfg != NULL,
                    ("%s: no config to upload", __func__));

                /*
                 * Ask the firmware where it wants us to upload the config file.
                 */
                param = FW_PARAM_DEV(CF);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* No support for config file?  Shouldn't happen. */
                        device_printf(sc->dev,
                            "failed to query config file location: %d.\n", rc);
                        goto done;
                }
                mtype = G_FW_PARAMS_PARAM_Y(val);
                moff = G_FW_PARAMS_PARAM_Z(val) << 16;

                /*
                 * XXX: sheer laziness.  We deliberately added 4 bytes of
                 * useless stuffing/comments at the end of the config file so
                 * it's ok to simply throw away the last remaining bytes when
                 * the config file is not an exact multiple of 4.  This also
                 * helps with the validate_mt_off_len check.
                 */
                if (cfg != NULL) {
                        cflen = cfg->datasize & ~3;
                        cfdata = cfg->data;
                } else {
                        cflen = default_cfg->datasize & ~3;
                        cfdata = default_cfg->data;
                }

                if (cflen > FLASH_CFG_MAX_SIZE) {
                        device_printf(sc->dev,
                            "config file too long (%d, max allowed is %d).  "
                            "Will try to use the config on the card, if any.\n",
                            cflen, FLASH_CFG_MAX_SIZE);
                        goto use_config_on_flash;
                }

                rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                            "Will try to use the config on the card, if any.\n",
                            __func__, mtype, moff, cflen, rc);
                        goto use_config_on_flash;
                }

                /* Copy the file through memory window 2, one aperture's
                   worth at a time. */
                memwin_info(sc, 2, &mw_base, &mw_aperture);
                while (cflen) {
                        off = position_memwin(sc, 2, addr);
                        n = min(cflen, mw_aperture - off);
                        for (i = 0; i < n; i += 4)
                                t4_write_reg(sc, mw_base + off + i, *cfdata++);
                        cflen -= n;
                        addr += n;
                }
        } else {
use_config_on_flash:
                mtype = FW_MEMTYPE_FLASH;
                moff = t4_flash_cfg_addr(sc);
        }

        /*
         * Point the firmware at the config file (CFVALID + its location)
         * and read back the capabilities it grants.
         */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to pre-process config file: %d "
                    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
                goto done;
        }

        finicsum = be32toh(caps.finicsum);
        cfcsum = be32toh(caps.cfcsum);
        if (finicsum != cfcsum) {
                device_printf(sc->dev,
                    "WARNING: config file checksum mismatch: %08x %08x\n",
                    finicsum, cfcsum);
        }
        sc->cfcsum = cfcsum;

#define LIMIT_CAPS(x) do { \
        caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

        /*
         * Let the firmware know what features will (not) be used so it can tune
         * things accordingly.
         */
        LIMIT_CAPS(linkcaps);
        LIMIT_CAPS(niccaps);
        LIMIT_CAPS(toecaps);
        LIMIT_CAPS(rdmacaps);
        LIMIT_CAPS(iscsicaps);
        LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

        /* Write the (possibly reduced) capabilities back to the firmware. */
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to process config file: %d.\n", rc);
        }
done:
        if (cfg != NULL)
                firmware_put(cfg, FIRMWARE_UNLOAD);
        return (rc);
}
2372
2373 /*
2374  * Retrieve parameters that are needed (or nice to have) very early.
2375  */
static int
get_params__pre_init(struct adapter *sc)
{
        int rc;
        uint32_t param[2], val[2];
        struct fw_devlog_cmd cmd;
        struct devlog_params *dlog = &sc->params.devlog;

        /* Query the port vector and core clock in one mailbox command. */
        param[0] = FW_PARAM_DEV(PORTVEC);
        param[1] = FW_PARAM_DEV(CCLK);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (pre_init): %d.\n", rc);
                return (rc);
        }

        sc->params.portvec = val[0];
        sc->params.nports = bitcount32(val[0]); /* one bit set per port */
        sc->params.vpd.cclk = val[1];

        /* Read device log parameters. */
        bzero(&cmd, sizeof(cmd));
        cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        cmd.retval_len16 = htobe32(FW_LEN16(cmd));
        rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get devlog parameters: %d.\n", rc);
                bzero(dlog, sizeof (*dlog));
                rc = 0; /* devlog isn't critical for device operation */
        } else {
                /* memtype/start (16B units) and size are packed in the
                   reply. */
                val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
                dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
                dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
                dlog->size = be32toh(cmd.memsize_devlog);
        }

        return (rc);
}
2417
2418 /*
2419  * Retrieve various parameters that are of interest to the driver.  The device
2420  * has been initialized by the firmware at this point.
2421  */
2422 static int
2423 get_params__post_init(struct adapter *sc)
2424 {
2425         int rc;
2426         uint32_t param[7], val[7];
2427         struct fw_caps_config_cmd caps;
2428
2429         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2430         param[1] = FW_PARAM_PFVF(EQ_START);
2431         param[2] = FW_PARAM_PFVF(FILTER_START);
2432         param[3] = FW_PARAM_PFVF(FILTER_END);
2433         param[4] = FW_PARAM_PFVF(L2T_START);
2434         param[5] = FW_PARAM_PFVF(L2T_END);
2435         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2436         if (rc != 0) {
2437                 device_printf(sc->dev,
2438                     "failed to query parameters (post_init): %d.\n", rc);
2439                 return (rc);
2440         }
2441
2442         sc->sge.iq_start = val[0];
2443         sc->sge.eq_start = val[1];
2444         sc->tids.ftid_base = val[2];
2445         sc->tids.nftids = val[3] - val[2] + 1;
2446         sc->params.ftid_min = val[2];
2447         sc->params.ftid_max = val[3];
2448         sc->vres.l2t.start = val[4];
2449         sc->vres.l2t.size = val[5] - val[4] + 1;
2450         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2451             ("%s: L2 table size (%u) larger than expected (%u)",
2452             __func__, sc->vres.l2t.size, L2T_SIZE));
2453
2454         /* get capabilites */
2455         bzero(&caps, sizeof(caps));
2456         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2457             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2458         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2459         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2460         if (rc != 0) {
2461                 device_printf(sc->dev,
2462                     "failed to get card capabilities: %d.\n", rc);
2463                 return (rc);
2464         }
2465
2466 #define READ_CAPS(x) do { \
2467         sc->x = htobe16(caps.x); \
2468 } while (0)
2469         READ_CAPS(linkcaps);
2470         READ_CAPS(niccaps);
2471         READ_CAPS(toecaps);
2472         READ_CAPS(rdmacaps);
2473         READ_CAPS(iscsicaps);
2474         READ_CAPS(fcoecaps);
2475
2476         if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2477                 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2478                 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2479                 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2480                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2481                 if (rc != 0) {
2482                         device_printf(sc->dev,
2483                             "failed to query NIC parameters: %d.\n", rc);
2484                         return (rc);
2485                 }
2486                 sc->tids.etid_base = val[0];
2487                 sc->params.etid_min = val[0];
2488                 sc->tids.netids = val[1] - val[0] + 1;
2489                 sc->params.netids = sc->tids.netids;
2490                 sc->params.eo_wr_cred = val[2];
2491                 sc->params.ethoffload = 1;
2492         }
2493
2494         if (sc->toecaps) {
2495                 /* query offload-related parameters */
2496                 param[0] = FW_PARAM_DEV(NTID);
2497                 param[1] = FW_PARAM_PFVF(SERVER_START);
2498                 param[2] = FW_PARAM_PFVF(SERVER_END);
2499                 param[3] = FW_PARAM_PFVF(TDDP_START);
2500                 param[4] = FW_PARAM_PFVF(TDDP_END);
2501                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2502                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2503                 if (rc != 0) {
2504                         device_printf(sc->dev,
2505                             "failed to query TOE parameters: %d.\n", rc);
2506                         return (rc);
2507                 }
2508                 sc->tids.ntids = val[0];
2509                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2510                 sc->tids.stid_base = val[1];
2511                 sc->tids.nstids = val[2] - val[1] + 1;
2512                 sc->vres.ddp.start = val[3];
2513                 sc->vres.ddp.size = val[4] - val[3] + 1;
2514                 sc->params.ofldq_wr_cred = val[5];
2515                 sc->params.offload = 1;
2516         }
2517         if (sc->rdmacaps) {
2518                 param[0] = FW_PARAM_PFVF(STAG_START);
2519                 param[1] = FW_PARAM_PFVF(STAG_END);
2520                 param[2] = FW_PARAM_PFVF(RQ_START);
2521                 param[3] = FW_PARAM_PFVF(RQ_END);
2522                 param[4] = FW_PARAM_PFVF(PBL_START);
2523                 param[5] = FW_PARAM_PFVF(PBL_END);
2524                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2525                 if (rc != 0) {
2526                         device_printf(sc->dev,
2527                             "failed to query RDMA parameters(1): %d.\n", rc);
2528                         return (rc);
2529                 }
2530                 sc->vres.stag.start = val[0];
2531                 sc->vres.stag.size = val[1] - val[0] + 1;
2532                 sc->vres.rq.start = val[2];
2533                 sc->vres.rq.size = val[3] - val[2] + 1;
2534                 sc->vres.pbl.start = val[4];
2535                 sc->vres.pbl.size = val[5] - val[4] + 1;
2536
2537                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2538                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2539                 param[2] = FW_PARAM_PFVF(CQ_START);
2540                 param[3] = FW_PARAM_PFVF(CQ_END);
2541                 param[4] = FW_PARAM_PFVF(OCQ_START);
2542                 param[5] = FW_PARAM_PFVF(OCQ_END);
2543                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2544                 if (rc != 0) {
2545                         device_printf(sc->dev,
2546                             "failed to query RDMA parameters(2): %d.\n", rc);
2547                         return (rc);
2548                 }
2549                 sc->vres.qp.start = val[0];
2550                 sc->vres.qp.size = val[1] - val[0] + 1;
2551                 sc->vres.cq.start = val[2];
2552                 sc->vres.cq.size = val[3] - val[2] + 1;
2553                 sc->vres.ocq.start = val[4];
2554                 sc->vres.ocq.size = val[5] - val[4] + 1;
2555         }
2556         if (sc->iscsicaps) {
2557                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2558                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2559                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2560                 if (rc != 0) {
2561                         device_printf(sc->dev,
2562                             "failed to query iSCSI parameters: %d.\n", rc);
2563                         return (rc);
2564                 }
2565                 sc->vres.iscsi.start = val[0];
2566                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2567         }
2568
2569         /*
2570          * We've got the params we wanted to query via the firmware.  Now grab
2571          * some others directly from the chip.
2572          */
2573         rc = t4_read_chip_settings(sc);
2574
2575         return (rc);
2576 }
2577
2578 static int
2579 set_params__post_init(struct adapter *sc)
2580 {
2581         uint32_t param, val;
2582
2583         /* ask for encapsulated CPLs */
2584         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2585         val = 1;
2586         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2587
2588         return (0);
2589 }
2590
2591 #undef FW_PARAM_PFVF
2592 #undef FW_PARAM_DEV
2593
2594 static void
2595 t4_set_desc(struct adapter *sc)
2596 {
2597         char buf[128];
2598         struct adapter_params *p = &sc->params;
2599
2600         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2601             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2602             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2603
2604         device_set_desc_copy(sc->dev, buf);
2605 }
2606
2607 static void
2608 build_medialist(struct port_info *pi)
2609 {
2610         struct ifmedia *media = &pi->media;
2611         int data, m;
2612
2613         PORT_LOCK(pi);
2614
2615         ifmedia_removeall(media);
2616
2617         m = IFM_ETHER | IFM_FDX;
2618         data = (pi->port_type << 8) | pi->mod_type;
2619
2620         switch(pi->port_type) {
2621         case FW_PORT_TYPE_BT_XFI:
2622                 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2623                 break;
2624
2625         case FW_PORT_TYPE_BT_XAUI:
2626                 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2627                 /* fall through */
2628
2629         case FW_PORT_TYPE_BT_SGMII:
2630                 ifmedia_add(media, m | IFM_1000_T, data, NULL);
2631                 ifmedia_add(media, m | IFM_100_TX, data, NULL);
2632                 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2633                 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2634                 break;
2635
2636         case FW_PORT_TYPE_CX4:
2637                 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2638                 ifmedia_set(media, m | IFM_10G_CX4);
2639                 break;
2640
2641         case FW_PORT_TYPE_QSFP_10G:
2642         case FW_PORT_TYPE_SFP:
2643         case FW_PORT_TYPE_FIBER_XFI:
2644         case FW_PORT_TYPE_FIBER_XAUI:
2645                 switch (pi->mod_type) {
2646
2647                 case FW_PORT_MOD_TYPE_LR:
2648                         ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2649                         ifmedia_set(media, m | IFM_10G_LR);
2650                         break;
2651
2652                 case FW_PORT_MOD_TYPE_SR:
2653                         ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2654                         ifmedia_set(media, m | IFM_10G_SR);
2655                         break;
2656
2657                 case FW_PORT_MOD_TYPE_LRM:
2658                         ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2659                         ifmedia_set(media, m | IFM_10G_LRM);
2660                         break;
2661
2662                 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2663                 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2664                         ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2665                         ifmedia_set(media, m | IFM_10G_TWINAX);
2666                         break;
2667
2668                 case FW_PORT_MOD_TYPE_NONE:
2669                         m &= ~IFM_FDX;
2670                         ifmedia_add(media, m | IFM_NONE, data, NULL);
2671                         ifmedia_set(media, m | IFM_NONE);
2672                         break;
2673
2674                 case FW_PORT_MOD_TYPE_NA:
2675                 case FW_PORT_MOD_TYPE_ER:
2676                 default:
2677                         device_printf(pi->dev,
2678                             "unknown port_type (%d), mod_type (%d)\n",
2679                             pi->port_type, pi->mod_type);
2680                         ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2681                         ifmedia_set(media, m | IFM_UNKNOWN);
2682                         break;
2683                 }
2684                 break;
2685
2686         case FW_PORT_TYPE_QSFP:
2687                 switch (pi->mod_type) {
2688
2689                 case FW_PORT_MOD_TYPE_LR:
2690                         ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2691                         ifmedia_set(media, m | IFM_40G_LR4);
2692                         break;
2693
2694                 case FW_PORT_MOD_TYPE_SR:
2695                         ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2696                         ifmedia_set(media, m | IFM_40G_SR4);
2697                         break;
2698
2699                 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2700                 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2701                         ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2702                         ifmedia_set(media, m | IFM_40G_CR4);
2703                         break;
2704
2705                 case FW_PORT_MOD_TYPE_NONE:
2706                         m &= ~IFM_FDX;
2707                         ifmedia_add(media, m | IFM_NONE, data, NULL);
2708                         ifmedia_set(media, m | IFM_NONE);
2709                         break;
2710
2711                 default:
2712                         device_printf(pi->dev,
2713                             "unknown port_type (%d), mod_type (%d)\n",
2714                             pi->port_type, pi->mod_type);
2715                         ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2716                         ifmedia_set(media, m | IFM_UNKNOWN);
2717                         break;
2718                 }
2719                 break;
2720
2721         default:
2722                 device_printf(pi->dev,
2723                     "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2724                     pi->mod_type);
2725                 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2726                 ifmedia_set(media, m | IFM_UNKNOWN);
2727                 break;
2728         }
2729
2730         PORT_UNLOCK(pi);
2731 }
2732
2733 #define FW_MAC_EXACT_CHUNK      7
2734
2735 /*
2736  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2737  * indicates which parameters should be programmed (the rest are left alone).
2738  */
2739 static int
2740 update_mac_settings(struct port_info *pi, int flags)
2741 {
2742         int rc;
2743         struct ifnet *ifp = pi->ifp;
2744         struct adapter *sc = pi->adapter;
2745         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2746
2747         ASSERT_SYNCHRONIZED_OP(sc);
2748         KASSERT(flags, ("%s: not told what to update.", __func__));
2749
2750         if (flags & XGMAC_MTU)
2751                 mtu = ifp->if_mtu;
2752
2753         if (flags & XGMAC_PROMISC)
2754                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2755
2756         if (flags & XGMAC_ALLMULTI)
2757                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2758
2759         if (flags & XGMAC_VLANEX)
2760                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2761
2762         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2763             vlanex, false);
2764         if (rc) {
2765                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2766                 return (rc);
2767         }
2768
2769         if (flags & XGMAC_UCADDR) {
2770                 uint8_t ucaddr[ETHER_ADDR_LEN];
2771
2772                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2773                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2774                     ucaddr, true, true);
2775                 if (rc < 0) {
2776                         rc = -rc;
2777                         if_printf(ifp, "change_mac failed: %d\n", rc);
2778                         return (rc);
2779                 } else {
2780                         pi->xact_addr_filt = rc;
2781                         rc = 0;
2782                 }
2783         }
2784
2785         if (flags & XGMAC_MCADDRS) {
2786                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2787                 int del = 1;
2788                 uint64_t hash = 0;
2789                 struct ifmultiaddr *ifma;
2790                 int i = 0, j;
2791
2792                 if_maddr_rlock(ifp);
2793                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2794                         if (ifma->ifma_addr->sa_family != AF_LINK)
2795                                 continue;
2796                         mcaddr[i++] =
2797                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2798
2799                         if (i == FW_MAC_EXACT_CHUNK) {
2800                                 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2801                                     del, i, mcaddr, NULL, &hash, 0);
2802                                 if (rc < 0) {
2803                                         rc = -rc;
2804                                         for (j = 0; j < i; j++) {
2805                                                 if_printf(ifp,
2806                                                     "failed to add mc address"
2807                                                     " %02x:%02x:%02x:"
2808                                                     "%02x:%02x:%02x rc=%d\n",
2809                                                     mcaddr[j][0], mcaddr[j][1],
2810                                                     mcaddr[j][2], mcaddr[j][3],
2811                                                     mcaddr[j][4], mcaddr[j][5],
2812                                                     rc);
2813                                         }
2814                                         goto mcfail;
2815                                 }
2816                                 del = 0;
2817                                 i = 0;
2818                         }
2819                 }
2820                 if (i > 0) {
2821                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2822                             del, i, mcaddr, NULL, &hash, 0);
2823                         if (rc < 0) {
2824                                 rc = -rc;
2825                                 for (j = 0; j < i; j++) {
2826                                         if_printf(ifp,
2827                                             "failed to add mc address"
2828                                             " %02x:%02x:%02x:"
2829                                             "%02x:%02x:%02x rc=%d\n",
2830                                             mcaddr[j][0], mcaddr[j][1],
2831                                             mcaddr[j][2], mcaddr[j][3],
2832                                             mcaddr[j][4], mcaddr[j][5],
2833                                             rc);
2834                                 }
2835                                 goto mcfail;
2836                         }
2837                 }
2838
2839                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2840                 if (rc != 0)
2841                         if_printf(ifp, "failed to set mc address hash: %d", rc);
2842 mcfail:
2843                 if_maddr_runlock(ifp);
2844         }
2845
2846         return (rc);
2847 }
2848
2849 int
2850 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2851     char *wmesg)
2852 {
2853         int rc, pri;
2854
2855 #ifdef WITNESS
2856         /* the caller thinks it's ok to sleep, but is it really? */
2857         if (flags & SLEEP_OK)
2858                 pause("t4slptst", 1);
2859 #endif
2860
2861         if (INTR_OK)
2862                 pri = PCATCH;
2863         else
2864                 pri = 0;
2865
2866         ADAPTER_LOCK(sc);
2867         for (;;) {
2868
2869                 if (pi && IS_DOOMED(pi)) {
2870                         rc = ENXIO;
2871                         goto done;
2872                 }
2873
2874                 if (!IS_BUSY(sc)) {
2875                         rc = 0;
2876                         break;
2877                 }
2878
2879                 if (!(flags & SLEEP_OK)) {
2880                         rc = EBUSY;
2881                         goto done;
2882                 }
2883
2884                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2885                         rc = EINTR;
2886                         goto done;
2887                 }
2888         }
2889
2890         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2891         SET_BUSY(sc);
2892 #ifdef INVARIANTS
2893         sc->last_op = wmesg;
2894         sc->last_op_thr = curthread;
2895 #endif
2896
2897 done:
2898         if (!(flags & HOLD_LOCK) || rc)
2899                 ADAPTER_UNLOCK(sc);
2900
2901         return (rc);
2902 }
2903
2904 void
2905 end_synchronized_op(struct adapter *sc, int flags)
2906 {
2907
2908         if (flags & LOCK_HELD)
2909                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2910         else
2911                 ADAPTER_LOCK(sc);
2912
2913         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2914         CLR_BUSY(sc);
2915         wakeup(&sc->flags);
2916         ADAPTER_UNLOCK(sc);
2917 }
2918
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0;

        ASSERT_SYNCHRONIZED_OP(sc);

        /* A set bit in open_device_map means this port is already up. */
        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* Lazily initialize adapter-wide and then per-port resources. */
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        if (!(pi->flags & PORT_INIT_DONE) &&
            ((rc = port_full_init(pi)) != 0))
                return (rc); /* error message displayed already */

        /* Program every XGMAC parameter from the current ifnet state. */
        rc = update_mac_settings(pi, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
        if (rc != 0) {
                if_printf(ifp, "start_link failed: %d\n", rc);
                goto done;
        }

        /* Enable the virtual interface for both rx and tx. */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Start the 1 Hz per-port housekeeping callout. */
        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        /* On any failure undo whatever was done above (uninit is idempotent). */
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);

        return (rc);
}
2971
2972 /*
2973  * Idempotent.
2974  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc;

        ASSERT_SYNCHRONIZED_OP(sc);

        /*
         * Disable the VI so that all its data in either direction is discarded
         * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
         * tick) intact as the TP can deliver negative advice or data that it's
         * holding in its RAM (for an offloaded connection) even after the VI is
         * disabled.
         */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
        if (rc) {
                if_printf(ifp, "disable_vi failed: %d\n", rc);
                return (rc);
        }

        /* Mark the port closed and clear IFF_DRV_RUNNING together. */
        clrbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Report link down; -1 means "no link-down reason recorded". */
        pi->link_cfg.link_ok = 0;
        pi->link_cfg.speed = 0;
        pi->linkdnrc = -1;
        t4_os_link_changed(sc, pi->port_id, 0, -1);

        return (0);
}
3009
3010 /*
3011  * It is ok for this function to fail midway and return right away.  t4_detach
3012  * will walk the entire sc->irq list and clean up whatever is valid.
3013  */
static int
setup_intr_handlers(struct adapter *sc)
{
        int rc, rid, p, q;
        char s[8];
        struct irq *irq;
        struct port_info *pi;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif

        /*
         * Setup interrupts.
         */
        irq = &sc->irq[0];
        /* INTx uses resource id 0; MSI/MSI-X vectors start at rid 1. */
        rid = sc->intr_type == INTR_INTX ? 0 : 1;
        if (sc->intr_count == 1) {
                KASSERT(!(sc->flags & INTR_DIRECT),
                    ("%s: single interrupt && INTR_DIRECT?", __func__));

                /* A single shared handler services everything. */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
                if (rc != 0)
                        return (rc);
        } else {
                /* Multiple interrupts. */
                KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));

                /* The first one is always error intr */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /* The second one is always the firmware event queue */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
                    "evt");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /*
                 * Note that if INTR_DIRECT is not set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queueus will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, p) {
                        pi = sc->port[p];

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        /* One vector per NIC rx queue, named "port.queue". */
                        rxq = &sc->sge.rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                snprintf(s, sizeof(s), "%d.%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
                                    s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT))
                                continue;
ofld_queues:
                        /*
                         * One vector per offload rx queue; note the ','
                         * separator (vs '.' above) — presumably intended to
                         * distinguish offload from NIC queues in listings.
                         */
                        ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                snprintf(s, sizeof(s), "%d,%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr,
                                    ofld_rxq, s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }
#endif
                }
        }

        return (0);
}
3113
3114 static int
3115 adapter_full_init(struct adapter *sc)
3116 {
3117         int rc, i;
3118
3119         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3120         KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3121             ("%s: FULL_INIT_DONE already", __func__));
3122
3123         /*
3124          * queues that belong to the adapter (not any particular port).
3125          */
3126         rc = t4_setup_adapter_queues(sc);
3127         if (rc != 0)
3128                 goto done;
3129
3130         for (i = 0; i < nitems(sc->tq); i++) {
3131                 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3132                     taskqueue_thread_enqueue, &sc->tq[i]);
3133                 if (sc->tq[i] == NULL) {
3134                         device_printf(sc->dev,
3135                             "failed to allocate task queue %d\n", i);
3136                         rc = ENOMEM;
3137                         goto done;
3138                 }
3139                 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3140                     device_get_nameunit(sc->dev), i);
3141         }
3142
3143         t4_intr_enable(sc);
3144         sc->flags |= FULL_INIT_DONE;
3145 done:
3146         if (rc != 0)
3147                 adapter_full_uninit(sc);
3148
3149         return (rc);
3150 }
3151
3152 static int
3153 adapter_full_uninit(struct adapter *sc)
3154 {
3155         int i;
3156
3157         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3158
3159         t4_teardown_adapter_queues(sc);
3160
3161         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3162                 taskqueue_free(sc->tq[i]);
3163                 sc->tq[i] = NULL;
3164         }
3165
3166         sc->flags &= ~FULL_INIT_DONE;
3167
3168         return (0);
3169 }
3170
static int
port_full_init(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        uint16_t *rss;
        struct sge_rxq *rxq;
        int rc, i, j;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT((pi->flags & PORT_INIT_DONE) == 0,
            ("%s: PORT_INIT_DONE already", __func__));

        sysctl_ctx_init(&pi->ctx);
        pi->flags |= PORT_SYSCTL_CTX;

        /*
         * Allocate tx/rx/fl queues for this port.
         */
        rc = t4_setup_port_queues(pi);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * Setup RSS for this port.  Save a copy of the RSS table for later use.
         */
        /* M_WAITOK: this allocation cannot fail (sleeps until satisfied). */
        rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
        /* Fill the table by cycling through the port's rx queues round-robin. */
        for (i = 0; i < pi->rss_size;) {
                for_each_rxq(pi, j, rxq) {
                        rss[i++] = rxq->iq.abs_id;
                        if (i == pi->rss_size)
                                break;
                }
        }

        rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
            pi->rss_size);
        if (rc != 0) {
                if_printf(ifp, "rss_config failed: %d\n", rc);
                goto done;
        }

        /* Ownership of the table transfers to pi->rss on success. */
        pi->rss = rss;
        pi->flags |= PORT_INIT_DONE;
done:
        /* Failures (including the rss table) are unwound by uninit. */
        if (rc != 0)
                port_full_uninit(pi);

        return (rc);
}
3221
3222 /*
3223  * Idempotent.
3224  */
3225 static int
3226 port_full_uninit(struct port_info *pi)
3227 {
3228         struct adapter *sc = pi->adapter;
3229         int i;
3230         struct sge_rxq *rxq;
3231         struct sge_txq *txq;
3232 #ifdef TCP_OFFLOAD
3233         struct sge_ofld_rxq *ofld_rxq;
3234         struct sge_wrq *ofld_txq;
3235 #endif
3236
3237         if (pi->flags & PORT_INIT_DONE) {
3238
3239                 /* Need to quiesce queues.  XXX: ctrl queues? */
3240
3241                 for_each_txq(pi, i, txq) {
3242                         quiesce_eq(sc, &txq->eq);
3243                 }
3244
3245 #ifdef TCP_OFFLOAD
3246                 for_each_ofld_txq(pi, i, ofld_txq) {
3247                         quiesce_eq(sc, &ofld_txq->eq);
3248                 }
3249 #endif
3250
3251                 for_each_rxq(pi, i, rxq) {
3252                         quiesce_iq(sc, &rxq->iq);
3253                         quiesce_fl(sc, &rxq->fl);
3254                 }
3255
3256 #ifdef TCP_OFFLOAD
3257                 for_each_ofld_rxq(pi, i, ofld_rxq) {
3258                         quiesce_iq(sc, &ofld_rxq->iq);
3259                         quiesce_fl(sc, &ofld_rxq->fl);
3260                 }
3261 #endif
3262                 free(pi->rss, M_CXGBE);
3263         }
3264
3265         t4_teardown_port_queues(pi);
3266         pi->flags &= ~PORT_INIT_DONE;
3267
3268         return (0);
3269 }
3270
/*
 * Quiesce a tx event queue: mark it doomed, wait out any pending credit
 * flush, then drain its callout and tx task so nothing references it after
 * this returns.  Must not be called with the eq lock held.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
        EQ_LOCK(eq);
        eq->flags |= EQ_DOOMED;		/* no new work after this point */

        /*
         * Wait for the response to a credit flush if one's
         * pending.
         */
        while (eq->flags & EQ_CRFLUSHED)
                mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
        EQ_UNLOCK(eq);

        callout_drain(&eq->tx_callout); /* XXX: iffy */
        pause("callout", 10);           /* Still iffy */

        taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3290
3291 static void
3292 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3293 {
3294         (void) sc;      /* unused */
3295
3296         /* Synchronize with the interrupt handler */
3297         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3298                 pause("iqfree", 1);
3299 }
3300
/*
 * Quiesce a freelist: mark it doomed (under both the adapter's starving-fl
 * lock and the fl lock, in that order) and then drain the adapter-wide
 * starvation callout so it can no longer touch this fl.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        mtx_unlock(&sc->sfl_lock);

        callout_drain(&sc->sfl_callout);
        /* A doomed fl must not still be on the starving list. */
        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
3314
3315 static int
3316 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3317     driver_intr_t *handler, void *arg, char *name)
3318 {
3319         int rc;
3320
3321         irq->rid = rid;
3322         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3323             RF_SHAREABLE | RF_ACTIVE);
3324         if (irq->res == NULL) {
3325                 device_printf(sc->dev,
3326                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3327                 return (ENOMEM);
3328         }
3329
3330         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3331             NULL, handler, arg, &irq->tag);
3332         if (rc != 0) {
3333                 device_printf(sc->dev,
3334                     "failed to setup interrupt for rid %d, name %s: %d\n",
3335                     rid, name, rc);
3336         } else if (name)
3337                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3338
3339         return (rc);
3340 }
3341
3342 static int
3343 t4_free_irq(struct adapter *sc, struct irq *irq)
3344 {
3345         if (irq->tag)
3346                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3347         if (irq->res)
3348                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3349
3350         bzero(irq, sizeof(*irq));
3351
3352         return (0);
3353 }
3354
3355 static void
3356 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3357     unsigned int end)
3358 {
3359         uint32_t *p = (uint32_t *)(buf + start);
3360
3361         for ( ; start <= end; start += sizeof(uint32_t))
3362                 *p++ = t4_read_reg(sc, start);
3363 }
3364
/*
 * Fill in a register dump for the chip: pick the range table that matches
 * the chip generation (T4 vs T5), record the chip id/revision in
 * regs->version, and read every listed register range into buf.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i, n;
        const unsigned int *reg_ranges;
        /*
         * T4 register ranges.  Entries are consumed in pairs: each pair is
         * an inclusive [start, end] range of 32-bit register addresses.
         */
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11110,
                0x11118, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };
        /*
         * T5 register ranges, same pairwise [start, end] layout as above.
         */
        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b94, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x11110,
                0x11118, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e408, 0x1e40c,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e690,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e808, 0x1e80c,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea90,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec08, 0x1ec0c,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee90,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f008, 0x1f00c,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f290,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f408, 0x1f40c,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f690,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f808, 0x1f80c,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa90,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc08, 0x1fc0c,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe90,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30030,
                0x30100, 0x30144,
                0x30190, 0x301d0,
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
                0x30a00, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
                0x30d00, 0x30d00,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
                0x31600, 0x3160c,
                0x31a00, 0x31a1c,
                0x31e00, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
                0x32200, 0x32200,
                0x32208, 0x32240,
                0x32248, 0x32280,
                0x32288, 0x322c0,
                0x322c8, 0x322fc,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
                0x33000, 0x33048,
                0x33060, 0x3309c,
                0x330f0, 0x33148,
                0x33160, 0x3319c,
                0x331f0, 0x332e4,
                0x332f8, 0x333e4,
                0x333f8, 0x33448,
                0x33460, 0x3349c,
                0x334f0, 0x33548,
                0x33560, 0x3359c,
                0x335f0, 0x336e4,
                0x336f8, 0x337e4,
                0x337f8, 0x337fc,
                0x33814, 0x33814,
                0x3382c, 0x3382c,
                0x33880, 0x3388c,
                0x338e8, 0x338ec,
                0x33900, 0x33948,
                0x33960, 0x3399c,
                0x339f0, 0x33ae4,
                0x33af8, 0x33b10,
                0x33b28, 0x33b28,
                0x33b3c, 0x33b50,
                0x33bf0, 0x33c10,
                0x33c28, 0x33c28,
                0x33c3c, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34030,
                0x34100, 0x34144,
                0x34190, 0x341d0,
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
                0x34a00, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
                0x34d00, 0x34d00,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
                0x35600, 0x3560c,
                0x35a00, 0x35a1c,
                0x35e00, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
                0x36200, 0x36200,
                0x36208, 0x36240,
                0x36248, 0x36280,
                0x36288, 0x362c0,
                0x362c8, 0x362fc,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
                0x36b00, 0x36b70,
                0x37000, 0x37048,
                0x37060, 0x3709c,
                0x370f0, 0x37148,
                0x37160, 0x3719c,
                0x371f0, 0x372e4,
                0x372f8, 0x373e4,
                0x373f8, 0x37448,
                0x37460, 0x3749c,
                0x374f0, 0x37548,
                0x37560, 0x3759c,
                0x375f0, 0x376e4,
                0x376f8, 0x377e4,
                0x377f8, 0x377fc,
                0x37814, 0x37814,
                0x3782c, 0x3782c,
                0x37880, 0x3788c,
                0x378e8, 0x378ec,
                0x37900, 0x37948,
                0x37960, 0x3799c,
                0x379f0, 0x37ae4,
                0x37af8, 0x37b10,
                0x37b28, 0x37b28,
                0x37b3c, 0x37b50,
                0x37bf0, 0x37c10,
                0x37c28, 0x37c28,
                0x37c3c, 0x37c50,
                0x37cf0, 0x37cfc,
                0x38000, 0x38030,
                0x38100, 0x38144,
                0x38190, 0x381d0,
                0x38200, 0x38318,
                0x38400, 0x3852c,
                0x38540, 0x3861c,
                0x38800, 0x38834,
                0x388c0, 0x38908,
                0x38910, 0x389ac,
                0x38a00, 0x38a2c,
                0x38a44, 0x38a50,
                0x38a74, 0x38c24,
                0x38d00, 0x38d00,
                0x38d08, 0x38d14,
                0x38d1c, 0x38d20,
                0x38d3c, 0x38d50,
                0x39200, 0x3920c,
                0x39220, 0x39220,
                0x39240, 0x39240,
                0x39600, 0x3960c,
                0x39a00, 0x39a1c,
                0x39e00, 0x39e20,
                0x39e38, 0x39e3c,
                0x39e80, 0x39e80,
                0x39e88, 0x39ea8,
                0x39eb0, 0x39eb4,
                0x39ec8, 0x39ed4,
                0x39fb8, 0x3a004,
                0x3a200, 0x3a200,
                0x3a208, 0x3a240,
                0x3a248, 0x3a280,
                0x3a288, 0x3a2c0,
                0x3a2c8, 0x3a2fc,
                0x3a600, 0x3a630,
                0x3aa00, 0x3aabc,
                0x3ab00, 0x3ab70,
                0x3b000, 0x3b048,
                0x3b060, 0x3b09c,
                0x3b0f0, 0x3b148,
                0x3b160, 0x3b19c,
                0x3b1f0, 0x3b2e4,
                0x3b2f8, 0x3b3e4,
                0x3b3f8, 0x3b448,
                0x3b460, 0x3b49c,
                0x3b4f0, 0x3b548,
                0x3b560, 0x3b59c,
                0x3b5f0, 0x3b6e4,
                0x3b6f8, 0x3b7e4,
                0x3b7f8, 0x3b7fc,
                0x3b814, 0x3b814,
                0x3b82c, 0x3b82c,
                0x3b880, 0x3b88c,
                0x3b8e8, 0x3b8ec,
                0x3b900, 0x3b948,
                0x3b960, 0x3b99c,
                0x3b9f0, 0x3bae4,
                0x3baf8, 0x3bb10,
                0x3bb28, 0x3bb28,
                0x3bb3c, 0x3bb50,
                0x3bbf0, 0x3bc10,
                0x3bc28, 0x3bc28,
                0x3bc3c, 0x3bc50,
                0x3bcf0, 0x3bcfc,
                0x3c000, 0x3c030,
                0x3c100, 0x3c144,
                0x3c190, 0x3c1d0,
                0x3c200, 0x3c318,
                0x3c400, 0x3c52c,
                0x3c540, 0x3c61c,
                0x3c800, 0x3c834,
                0x3c8c0, 0x3c908,
                0x3c910, 0x3c9ac,
                0x3ca00, 0x3ca2c,
                0x3ca44, 0x3ca50,
                0x3ca74, 0x3cc24,
                0x3cd00, 0x3cd00,
                0x3cd08, 0x3cd14,
                0x3cd1c, 0x3cd20,
                0x3cd3c, 0x3cd50,
                0x3d200, 0x3d20c,
                0x3d220, 0x3d220,
                0x3d240, 0x3d240,
                0x3d600, 0x3d60c,
                0x3da00, 0x3da1c,
                0x3de00, 0x3de20,
                0x3de38, 0x3de3c,
                0x3de80, 0x3de80,
                0x3de88, 0x3dea8,
                0x3deb0, 0x3deb4,
                0x3dec8, 0x3ded4,
                0x3dfb8, 0x3e004,
                0x3e200, 0x3e200,
                0x3e208, 0x3e240,
                0x3e248, 0x3e280,
                0x3e288, 0x3e2c0,
                0x3e2c8, 0x3e2fc,
                0x3e600, 0x3e630,
                0x3ea00, 0x3eabc,
                0x3eb00, 0x3eb70,
                0x3f000, 0x3f048,
                0x3f060, 0x3f09c,
                0x3f0f0, 0x3f148,
                0x3f160, 0x3f19c,
                0x3f1f0, 0x3f2e4,
                0x3f2f8, 0x3f3e4,
                0x3f3f8, 0x3f448,
                0x3f460, 0x3f49c,
                0x3f4f0, 0x3f548,
                0x3f560, 0x3f59c,
                0x3f5f0, 0x3f6e4,
                0x3f6f8, 0x3f7e4,
                0x3f7f8, 0x3f7fc,
                0x3f814, 0x3f814,
                0x3f82c, 0x3f82c,
                0x3f880, 0x3f88c,
                0x3f8e8, 0x3f8ec,
                0x3f900, 0x3f948,
                0x3f960, 0x3f99c,
                0x3f9f0, 0x3fae4,
                0x3faf8, 0x3fb10,
                0x3fb28, 0x3fb28,
                0x3fb3c, 0x3fb50,
                0x3fbf0, 0x3fc10,
                0x3fc28, 0x3fc28,
                0x3fc3c, 0x3fc50,
                0x3fcf0, 0x3fcfc,
                0x40000, 0x4000c,
                0x40040, 0x40068,
                0x4007c, 0x40144,
                0x40180, 0x4018c,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
                0x403f8, 0x403fc,
                0x41304, 0x413c4,
                0x41400, 0x4141c,
                0x41480, 0x414d0,
                0x44000, 0x44078,
                0x440c0, 0x44278,
                0x442c0, 0x44478,
                0x444c0, 0x44678,
                0x446c0, 0x44878,
                0x448c0, 0x449fc,
                0x45000, 0x45068,
                0x45080, 0x45084,
                0x450a0, 0x450b0,
                0x45200, 0x45268,
                0x45280, 0x45284,
                0x452a0, 0x452b0,
                0x460c0, 0x460e4,
                0x47000, 0x4708c,
                0x47200, 0x47250,
                0x47400, 0x47420,
                0x47600, 0x47618,
                0x47800, 0x47814,
                0x48000, 0x4800c,
                0x48040, 0x48068,
                0x4807c, 0x48144,
                0x48180, 0x4818c,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
                0x483f8, 0x483fc,
                0x49304, 0x493c4,
                0x49400, 0x4941c,
                0x49480, 0x494d0,
                0x4c000, 0x4c078,
                0x4c0c0, 0x4c278,
                0x4c2c0, 0x4c478,
                0x4c4c0, 0x4c678,
                0x4c6c0, 0x4c878,
                0x4c8c0, 0x4c9fc,
                0x4d000, 0x4d068,
                0x4d080, 0x4d084,
                0x4d0a0, 0x4d0b0,
                0x4d200, 0x4d268,
                0x4d280, 0x4d284,
                0x4d2a0, 0x4d2b0,
                0x4e0c0, 0x4e0e4,
                0x4f000, 0x4f08c,
                0x4f200, 0x4f250,
                0x4f400, 0x4f420,
                0x4f600, 0x4f618,
                0x4f800, 0x4f814,
                0x50000, 0x500cc,
                0x50400, 0x50400,
                0x50800, 0x508cc,
                0x50c00, 0x50c00,
                0x51000, 0x5101c,
                0x51300, 0x51308,
        };

        /* Select the range table for this chip generation. */
        if (is_t4(sc)) {
                reg_ranges = &t4_reg_ranges[0];
                n = nitems(t4_reg_ranges);
        } else {
                reg_ranges = &t5_reg_ranges[0];
                n = nitems(t5_reg_ranges);
        }

        /* Version encodes the chip id with the revision in bits 10+. */
        regs->version = chip_id(sc) | chip_rev(sc) << 10;
        /* Ranges are stored pairwise, hence the stride of 2. */
        for (i = 0; i < n; i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4042
/*
 * Periodic per-port callout: refreshes the ifnet statistics from the
 * hardware port counters and reschedules itself every hz ticks.  Stops
 * rescheduling (without rearming) once the interface is no longer running.
 */
static void
cxgbe_tick(void *arg)
{
        struct port_info *pi = arg;
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;

        PORT_LOCK(pi);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return; /* without scheduling another callout */
        }

        /* Pull the latest hardware counters for this port's tx channel. */
        t4_get_port_stats(sc, pi->tx_chan, s);

        /*
         * Pause frames are excluded from the packet/byte counts.  The
         * "* 64" presumably accounts for the size of a pause frame —
         * TODO confirm against the T4 counter definitions.
         */
        ifp->if_opackets = s->tx_frames - s->tx_pause;
        ifp->if_ipackets = s->rx_frames - s->rx_pause;
        ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
        ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
        ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
        ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
        /* Input drops: overflow and truncation counters on all 4 channels. */
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
            s->rx_trunc3;
        for (i = 0; i < 4; i++) {
                if (pi->rx_chan_map & (1 << i)) {
                        uint32_t v;

                        /*
                         * XXX: indirect reads from the same ADDR/DATA pair can
                         * race with each other.
                         */
                        t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
                            1, A_TP_MIB_TNL_CNG_DROP_0 + i);
                        ifp->if_iqdrops += v;
                }
        }

        /* Output drops: hardware drops plus each txq's buf_ring drops. */
        drops = s->tx_drop;
        for_each_txq(pi, i, txq)
                drops += txq->br->br_drops;
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        /* Rearm for the next tick (once per second). */
        callout_schedule(&pi->tick, hz);
        PORT_UNLOCK(pi);
}
4096
4097 static void
4098 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4099 {
4100         struct ifnet *vlan;
4101
4102         if (arg != ifp || ifp->if_type != IFT_ETHER)
4103                 return;
4104
4105         vlan = VLAN_DEVAT(ifp, vid);
4106         VLAN_SETCOOKIE(vlan, ifp);
4107 }
4108
/*
 * Default handler for CPL messages nobody has registered for.  On
 * INVARIANTS kernels an unexpected opcode is a panic; otherwise it is
 * logged and the payload mbuf (if any) is freed.  Returns EDOOFUS to
 * flag a driver bug.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);
#endif
	return (EDOOFUS);
}
4123
4124 int
4125 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4126 {
4127         uintptr_t *loc, new;
4128
4129         if (opcode >= nitems(sc->cpl_handler))
4130                 return (EINVAL);
4131
4132         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4133         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4134         atomic_store_rel_ptr(loc, new);
4135
4136         return (0);
4137 }
4138
/*
 * Default handler for firmware async notifications when none has been
 * registered.  Panic on INVARIANTS kernels, log otherwise.  Returns
 * EDOOFUS to flag a driver bug.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4151
4152 int
4153 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4154 {
4155         uintptr_t *loc, new;
4156
4157         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4158         loc = (uintptr_t *) &sc->an_handler;
4159         atomic_store_rel_ptr(loc, new);
4160
4161         return (0);
4162 }
4163
/*
 * Default handler for firmware messages with an unclaimed type.  rpl
 * points at the data[] member of the enclosing cpl_fw6_msg; recover the
 * containing CPL to report its type.  Panic on INVARIANTS kernels, log
 * otherwise.  Returns EDOOFUS to flag a driver bug.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4177
4178 int
4179 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4180 {
4181         uintptr_t *loc, new;
4182
4183         if (type >= nitems(sc->fw_msg_handler))
4184                 return (EINVAL);
4185
4186         /*
4187          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4188          * handler dispatch table.  Reject any attempt to install a handler for
4189          * this subtype.
4190          */
4191         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4192                 return (EINVAL);
4193
4194         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4195         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4196         atomic_store_rel_ptr(loc, new);
4197
4198         return (0);
4199 }
4200
4201 static int
4202 t4_sysctls(struct adapter *sc)
4203 {
4204         struct sysctl_ctx_list *ctx;
4205         struct sysctl_oid *oid;
4206         struct sysctl_oid_list *children, *c0;
4207         static char *caps[] = {
4208                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4209                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4210                     "\6HASHFILTER\7ETHOFLD",
4211                 "\20\1TOE",                             /* caps[2] toecaps */
4212                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4213                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4214                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4215                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4216                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4217                     "\4PO_INITIAOR\5PO_TARGET"
4218         };
4219         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4220
4221         ctx = device_get_sysctl_ctx(sc->dev);
4222
4223         /*
4224          * dev.t4nex.X.
4225          */
4226         oid = device_get_sysctl_tree(sc->dev);
4227         c0 = children = SYSCTL_CHILDREN(oid);
4228
4229         sc->sc_do_rxcopy = 1;
4230         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4231             &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4232
4233         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4234             sc->params.nports, "# of ports");
4235
4236         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4237             NULL, chip_rev(sc), "chip hardware revision");
4238
4239         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4240             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4241
4242         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4243             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4244
4245         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4246             sc->cfcsum, "config file checksum");
4247
4248         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4249             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4250             sysctl_bitfield, "A", "available doorbells");
4251
4252         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4253             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4254             sysctl_bitfield, "A", "available link capabilities");
4255
4256         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4257             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4258             sysctl_bitfield, "A", "available NIC capabilities");
4259
4260         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4261             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4262             sysctl_bitfield, "A", "available TCP offload capabilities");
4263
4264         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4265             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4266             sysctl_bitfield, "A", "available RDMA capabilities");
4267
4268         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4269             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4270             sysctl_bitfield, "A", "available iSCSI capabilities");
4271
4272         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4273             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4274             sysctl_bitfield, "A", "available FCoE capabilities");
4275
4276         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4277             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4278
4279         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4280             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4281             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4282             "interrupt holdoff timer values (us)");
4283
4284         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4285             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4286             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4287             "interrupt holdoff packet counter values");
4288
4289         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4290             NULL, sc->tids.nftids, "number of filters");
4291
4292         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4293             CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4294             "chip temperature (in Celsius)");
4295
4296         t4_sge_sysctls(sc, ctx, children);
4297
4298 #ifdef SBUF_DRAIN
4299         /*
4300          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4301          */
4302         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4303             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4304             "logs and miscellaneous information");
4305         children = SYSCTL_CHILDREN(oid);
4306
4307         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4308             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4309             sysctl_cctrl, "A", "congestion control");
4310
4311         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4312             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4313             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4314
4315         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4316             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4317             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4318
4319         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4320             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4321             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4322
4323         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4324             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4325             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4326
4327         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4328             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4329             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4330
4331         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4332             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4333             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4334
4335         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4336             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4337             sysctl_cim_la, "A", "CIM logic analyzer");
4338
4339         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4340             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4341             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4342
4343         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4344             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4345             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4346
4347         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4348             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4349             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4350
4351         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4352             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4353             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4354
4355         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4356             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4357             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4358
4359         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4360             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4361             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4362
4363         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4364             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4365             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4366
4367         if (is_t5(sc)) {
4368                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4369                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4370                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4371
4372                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4373                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4374                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4375         }
4376
4377         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4378             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4379             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4380
4381         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4382             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4383             sysctl_cim_qcfg, "A", "CIM queue configuration");
4384
4385         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4386             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4387             sysctl_cpl_stats, "A", "CPL statistics");
4388
4389         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4390             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4391             sysctl_ddp_stats, "A", "DDP statistics");
4392
4393         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4394             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4395             sysctl_devlog, "A", "firmware's device log");
4396
4397         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4398             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4399             sysctl_fcoe_stats, "A", "FCoE statistics");
4400
4401         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4402             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4403             sysctl_hw_sched, "A", "hardware scheduler ");
4404
4405         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4406             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4407             sysctl_l2t, "A", "hardware L2 table");
4408
4409         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4410             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4411             sysctl_lb_stats, "A", "loopback statistics");
4412
4413         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4414             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4415             sysctl_meminfo, "A", "memory regions");
4416
4417         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4418             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4419             sysctl_mps_tcam, "A", "MPS TCAM entries");
4420
4421         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4422             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4423             sysctl_path_mtus, "A", "path MTUs");
4424
4425         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4426             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4427             sysctl_pm_stats, "A", "PM statistics");
4428
4429         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4430             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4431             sysctl_rdma_stats, "A", "RDMA statistics");
4432
4433         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4434             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4435             sysctl_tcp_stats, "A", "TCP statistics");
4436
4437         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4438             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4439             sysctl_tids, "A", "TID information");
4440
4441         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4442             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4443             sysctl_tp_err_stats, "A", "TP error statistics");
4444
4445         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4446             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4447             sysctl_tp_la, "A", "TP logic analyzer");
4448
4449         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4450             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4451             sysctl_tx_rate, "A", "Tx rate");
4452
4453         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4454             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4455             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4456
4457         if (is_t5(sc)) {
4458                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4459                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4460                     sysctl_wcwr_stats, "A", "write combined work requests");
4461         }
4462 #endif
4463
4464 #ifdef TCP_OFFLOAD
4465         if (is_offload(sc)) {
4466                 /*
4467                  * dev.t4nex.X.toe.
4468                  */
4469                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4470                     NULL, "TOE parameters");
4471                 children = SYSCTL_CHILDREN(oid);
4472
4473                 sc->tt.sndbuf = 256 * 1024;
4474                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4475                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4476
4477                 sc->tt.ddp = 0;
4478                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4479                     &sc->tt.ddp, 0, "DDP allowed");
4480
4481                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4482                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4483                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4484
4485                 sc->tt.ddp_thres =
4486                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4487                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4488                     &sc->tt.ddp_thres, 0, "DDP threshold");
4489
4490                 sc->tt.rx_coalesce = 1;
4491                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4492                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4493         }
4494 #endif
4495
4496
4497         return (0);
4498 }
4499
/*
 * Create the per-port sysctl tree (dev.cxgbe.X.*): queue configuration,
 * holdoff tunables, and the dev.cxgbe.X.stats subtree of MAC counters.
 * Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct adapter *sc = pi->adapter;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
	/* Extra PHY sysctls for 10GBASE-T ports only. */
	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
		    "PHY temperature (in Celsius)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
		    "PHY firmware version");
	}
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
	    "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Each stat below reads a 64-bit MPS port-statistics register directly;
 * arg2 carries the register offset for sysctl_handle_t4_reg64.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
4727
4728 static int
4729 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4730 {
4731         int rc, *i;
4732         struct sbuf sb;
4733
4734         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4735         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4736                 sbuf_printf(&sb, "%d ", *i);
4737         sbuf_trim(&sb);
4738         sbuf_finish(&sb);
4739         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4740         sbuf_delete(&sb);
4741         return (rc);
4742 }
4743
4744 static int
4745 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4746 {
4747         int rc;
4748         struct sbuf *sb;
4749
4750         rc = sysctl_wire_old_buffer(req, 0);
4751         if (rc != 0)
4752                 return(rc);
4753
4754         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4755         if (sb == NULL)
4756                 return (ENOMEM);
4757
4758         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4759         rc = sbuf_finish(sb);
4760         sbuf_delete(sb);
4761
4762         return (rc);
4763 }
4764
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        int op = arg2;
        struct adapter *sc = pi->adapter;
        u_int v;
        int rc;

        /*
         * Read a register of the port's BASE-T PHY over MDIO and report it.
         * arg2 selects the register: nonzero reads 0x20, zero reads 0xc820
         * (both in MMD 0x1e).
         */
        rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
        if (rc)
                return (rc);
        /* XXX: magic numbers */
        rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
            &v);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);
        if (op == 0)
                v /= 256;       /* discard the low byte of register 0xc820 */

        rc = sysctl_handle_int(oidp, &v, 0, req);
        return (rc);
}
4789
4790 static int
4791 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4792 {
4793         struct port_info *pi = arg1;
4794         int rc, val;
4795
4796         val = pi->rsrv_noflowq;
4797         rc = sysctl_handle_int(oidp, &val, 0, req);
4798         if (rc != 0 || req->newptr == NULL)
4799                 return (rc);
4800
4801         if ((val >= 1) && (pi->ntxq > 1))
4802                 pi->rsrv_noflowq = 1;
4803         else
4804                 pi->rsrv_noflowq = 0;
4805
4806         return (rc);
4807 }
4808
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        struct adapter *sc = pi->adapter;
        int idx, rc, i;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif
        uint8_t v;

        /*
         * Report or change the port's interrupt holdoff timer index.  A new
         * value is validated and then pushed to every rx queue on the port
         * (including offload rx queues when TCP_OFFLOAD is compiled in).
         */
        idx = pi->tmr_idx;

        rc = sysctl_handle_int(oidp, &idx, 0, req);
        if (rc != 0 || req->newptr == NULL)
                return (rc);    /* error, or read-only access */

        if (idx < 0 || idx >= SGE_NTIMERS)
                return (EINVAL);

        rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4tmr");
        if (rc)
                return (rc);

        /* Keep the packet-count enable bit in sync with pktc_idx. */
        v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
        for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
                rxq->iq.intr_params = v;        /* no 8-bit atomic store here */
#endif
        }
#ifdef TCP_OFFLOAD
        for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
                ofld_rxq->iq.intr_params = v;
#endif
        }
#endif
        pi->tmr_idx = idx;

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
4857
4858 static int
4859 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4860 {
4861         struct port_info *pi = arg1;
4862         struct adapter *sc = pi->adapter;
4863         int idx, rc;
4864
4865         idx = pi->pktc_idx;
4866
4867         rc = sysctl_handle_int(oidp, &idx, 0, req);
4868         if (rc != 0 || req->newptr == NULL)
4869                 return (rc);
4870
4871         if (idx < -1 || idx >= SGE_NCOUNTERS)
4872                 return (EINVAL);
4873
4874         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4875             "t4pktc");
4876         if (rc)
4877                 return (rc);
4878
4879         if (pi->flags & PORT_INIT_DONE)
4880                 rc = EBUSY; /* cannot be changed once the queues are created */
4881         else
4882                 pi->pktc_idx = idx;
4883
4884         end_synchronized_op(sc, LOCK_HELD);
4885         return (rc);
4886 }
4887
4888 static int
4889 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4890 {
4891         struct port_info *pi = arg1;
4892         struct adapter *sc = pi->adapter;
4893         int qsize, rc;
4894
4895         qsize = pi->qsize_rxq;
4896
4897         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4898         if (rc != 0 || req->newptr == NULL)
4899                 return (rc);
4900
4901         if (qsize < 128 || (qsize & 7))
4902                 return (EINVAL);
4903
4904         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4905             "t4rxqs");
4906         if (rc)
4907                 return (rc);
4908
4909         if (pi->flags & PORT_INIT_DONE)
4910                 rc = EBUSY; /* cannot be changed once the queues are created */
4911         else
4912                 pi->qsize_rxq = qsize;
4913
4914         end_synchronized_op(sc, LOCK_HELD);
4915         return (rc);
4916 }
4917
4918 static int
4919 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4920 {
4921         struct port_info *pi = arg1;
4922         struct adapter *sc = pi->adapter;
4923         int qsize, rc;
4924
4925         qsize = pi->qsize_txq;
4926
4927         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4928         if (rc != 0 || req->newptr == NULL)
4929                 return (rc);
4930
4931         /* bufring size must be powerof2 */
4932         if (qsize < 128 || !powerof2(qsize))
4933                 return (EINVAL);
4934
4935         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4936             "t4txqs");
4937         if (rc)
4938                 return (rc);
4939
4940         if (pi->flags & PORT_INIT_DONE)
4941                 rc = EBUSY; /* cannot be changed once the queues are created */
4942         else
4943                 pi->qsize_txq = qsize;
4944
4945         end_synchronized_op(sc, LOCK_HELD);
4946         return (rc);
4947 }
4948
4949 static int
4950 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4951 {
4952         struct adapter *sc = arg1;
4953         int reg = arg2;
4954         uint64_t val;
4955
4956         val = t4_read_reg64(sc, reg);
4957
4958         return (sysctl_handle_64(oidp, &val, 0, req));
4959 }
4960
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        int rc, t;
        uint32_t param, val;

        /*
         * Query the firmware's diagnostics temperature parameter and report
         * it via sysctl.
         */
        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
        if (rc)
                return (rc);
        param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
            V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);

        /* unknown is returned as 0 but we display -1 in that case */
        t = val == 0 ? -1 : val;

        rc = sysctl_handle_int(oidp, &t, 0, req);
        return (rc);
}
4985
4986 #ifdef SBUF_DRAIN
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t incr[NMTUS][NCCTRL_WIN];
        /* Decrement factor strings, indexed by the window's b_wnd value. */
        static const char *dec_fac[] = {
                "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
                "0.9375"
        };

        /*
         * Dump the congestion control table: for each window, the additive
         * increment for every MTU, followed by a_wnd and the decrement
         * factor.
         */
        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_read_cong_tbl(sc, incr);

        for (i = 0; i < NCCTRL_WIN; ++i) {
                /* Two lines per window: 8 MTU columns each. */
                sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
                    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
                    incr[5][i], incr[6][i], incr[7][i]);
                sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
                    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
                    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
                    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5024
/*
 * Names of the CIM queues, indexed the same way as the qid argument of
 * sysctl_cim_ibq_obq: IBQs first, then OBQs.
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
        "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",   /* ibq's */
        "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
        "SGE0-RX", "SGE1-RX"    /* additional obq's (T5 onwards) */
};
5030
5031 static int
5032 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5033 {
5034         struct adapter *sc = arg1;
5035         struct sbuf *sb;
5036         int rc, i, n, qid = arg2;
5037         uint32_t *buf, *p;
5038         char *qtype;
5039         u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5040
5041         KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5042             ("%s: bad qid %d\n", __func__, qid));
5043
5044         if (qid < CIM_NUM_IBQ) {
5045                 /* inbound queue */
5046                 qtype = "IBQ";
5047                 n = 4 * CIM_IBQ_SIZE;
5048                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5049                 rc = t4_read_cim_ibq(sc, qid, buf, n);
5050         } else {
5051                 /* outbound queue */
5052                 qtype = "OBQ";
5053                 qid -= CIM_NUM_IBQ;
5054                 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5055                 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5056                 rc = t4_read_cim_obq(sc, qid, buf, n);
5057         }
5058
5059         if (rc < 0) {
5060                 rc = -rc;
5061                 goto done;
5062         }
5063         n = rc * sizeof(uint32_t);      /* rc has # of words actually read */
5064
5065         rc = sysctl_wire_old_buffer(req, 0);
5066         if (rc != 0)
5067                 goto done;
5068
5069         sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5070         if (sb == NULL) {
5071                 rc = ENOMEM;
5072                 goto done;
5073         }
5074
5075         sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
5076         for (i = 0, p = buf; i < n; i += 16, p += 4)
5077                 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5078                     p[2], p[3]);
5079
5080         rc = sbuf_finish(sb);
5081         sbuf_delete(sb);
5082 done:
5083         free(buf, M_CXGBE);
5084         return (rc);
5085 }
5086
5087 static int
5088 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5089 {
5090         struct adapter *sc = arg1;
5091         u_int cfg;
5092         struct sbuf *sb;
5093         uint32_t *buf, *p;
5094         int rc;
5095
5096         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5097         if (rc != 0)
5098                 return (rc);
5099
5100         rc = sysctl_wire_old_buffer(req, 0);
5101         if (rc != 0)
5102                 return (rc);
5103
5104         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5105         if (sb == NULL)
5106                 return (ENOMEM);
5107
5108         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5109             M_ZERO | M_WAITOK);
5110
5111         rc = -t4_cim_read_la(sc, buf, NULL);
5112         if (rc != 0)
5113                 goto done;
5114
5115         sbuf_printf(sb, "Status   Data      PC%s",
5116             cfg & F_UPDBGLACAPTPCONLY ? "" :
5117             "     LS0Stat  LS0Addr             LS0Data");
5118
5119         KASSERT((sc->params.cim_la_size & 7) == 0,
5120             ("%s: p will walk off the end of buf", __func__));
5121
5122         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5123                 if (cfg & F_UPDBGLACAPTPCONLY) {
5124                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5125                             p[6], p[7]);
5126                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5127                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5128                             p[4] & 0xff, p[5] >> 8);
5129                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5130                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5131                             p[1] & 0xf, p[2] >> 4);
5132                 } else {
5133                         sbuf_printf(sb,
5134                             "\n  %02x   %x%07x %x%07x %08x %08x "
5135                             "%08x%08x%08x%08x",
5136                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5137                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5138                             p[6], p[7]);
5139                 }
5140         }
5141
5142         rc = sbuf_finish(sb);
5143         sbuf_delete(sb);
5144 done:
5145         free(buf, M_CXGBE);
5146         return (rc);
5147 }
5148
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        u_int i;
        struct sbuf *sb;
        uint32_t *buf, *p;
        int rc;

        /*
         * Dump the CIM MA logic analyzer: two groups of CIM_MALA_SIZE
         * entries, 5 words (160 bits) each, read back to back into a single
         * buffer.
         */
        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
        p = buf;

        /* First group: each entry dumped raw, most-significant word first. */
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
                    p[1], p[0]);
        }

        /* Second group: entries decoded into individual fields. */
        sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
                    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
                    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
                    (p[1] >> 2) | ((p[2] & 3) << 30),
                    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
                    p[0] & 1);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
5192
5193 static int
5194 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5195 {
5196         struct adapter *sc = arg1;
5197         u_int i;
5198         struct sbuf *sb;
5199         uint32_t *buf, *p;
5200         int rc;
5201
5202         rc = sysctl_wire_old_buffer(req, 0);
5203         if (rc != 0)
5204                 return (rc);
5205
5206         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5207         if (sb == NULL)
5208                 return (ENOMEM);
5209
5210         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5211             M_ZERO | M_WAITOK);
5212
5213         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5214         p = buf;
5215
5216         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5217         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5218                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5219                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5220                     p[4], p[3], p[2], p[1], p[0]);
5221         }
5222
5223         sbuf_printf(sb, "\n\nCntl ID               Data");
5224         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5225                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5226                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5227         }
5228
5229         rc = sbuf_finish(sb);
5230         sbuf_delete(sb);
5231         free(buf, M_CXGBE);
5232         return (rc);
5233 }
5234
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t thres[CIM_NUM_IBQ];
        uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
        uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
        u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

        /*
         * Summarize the configuration and state of every CIM queue.  T4 and
         * T5 differ in the number of OBQs and in which registers expose the
         * queue pointers.
         */
        if (is_t4(sc)) {
                cim_num_obq = CIM_NUM_OBQ;
                ibq_rdaddr = A_UP_IBQ_0_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_REALADDR;
        } else {
                cim_num_obq = CIM_NUM_OBQ_T5;
                ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
        }
        nq = CIM_NUM_IBQ + cim_num_obq;

        /* 4 status words per queue, plus 2 write-pointer words per OBQ. */
        rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
        if (rc == 0)
                rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
        if (rc != 0)
                return (rc);

        t4_read_cimq_cfg(sc, base, size, thres);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

        /* Inbound queues first (they have a threshold column) ... */
        for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
                sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
                    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
                    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);
        /* ... then outbound queues. */
        for ( ; i < nq; i++, p += 4, wr += 2)
                sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
                    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
                    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5293
5294 static int
5295 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5296 {
5297         struct adapter *sc = arg1;
5298         struct sbuf *sb;
5299         int rc;
5300         struct tp_cpl_stats stats;
5301
5302         rc = sysctl_wire_old_buffer(req, 0);
5303         if (rc != 0)
5304                 return (rc);
5305
5306         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5307         if (sb == NULL)
5308                 return (ENOMEM);
5309
5310         t4_tp_get_cpl_stats(sc, &stats);
5311
5312         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5313             "channel 3\n");
5314         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5315                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5316         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5317                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5318
5319         rc = sbuf_finish(sb);
5320         sbuf_delete(sb);
5321
5322         return (rc);
5323 }
5324
5325 static int
5326 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5327 {
5328         struct adapter *sc = arg1;
5329         struct sbuf *sb;
5330         int rc;
5331         struct tp_usm_stats stats;
5332
5333         rc = sysctl_wire_old_buffer(req, 0);
5334         if (rc != 0)
5335                 return(rc);
5336
5337         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5338         if (sb == NULL)
5339                 return (ENOMEM);
5340
5341         t4_get_usm_stats(sc, &stats);
5342
5343         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5344         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5345         sbuf_printf(sb, "Drops:  %u", stats.drops);
5346
5347         rc = sbuf_finish(sb);
5348         sbuf_delete(sb);
5349
5350         return (rc);
5351 }
5352
/* Human-readable names for firmware devlog levels, indexed by level. */
const char *devlog_level_strings[] = {
        [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
        [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
        [FW_DEVLOG_LEVEL_ERR]           = "ERR",
        [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
        [FW_DEVLOG_LEVEL_INFO]          = "INFO",
        [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
};
5361
/* Human-readable names for firmware devlog facilities, indexed by facility. */
const char *devlog_facility_strings[] = {
        [FW_DEVLOG_FACILITY_CORE]       = "CORE",
        [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
        [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
        [FW_DEVLOG_FACILITY_RES]        = "RES",
        [FW_DEVLOG_FACILITY_HW]         = "HW",
        [FW_DEVLOG_FACILITY_FLR]        = "FLR",
        [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
        [FW_DEVLOG_FACILITY_PHY]        = "PHY",
        [FW_DEVLOG_FACILITY_MAC]        = "MAC",
        [FW_DEVLOG_FACILITY_PORT]       = "PORT",
        [FW_DEVLOG_FACILITY_VI]         = "VI",
        [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
        [FW_DEVLOG_FACILITY_ACL]        = "ACL",
        [FW_DEVLOG_FACILITY_TM]         = "TM",
        [FW_DEVLOG_FACILITY_QFC]        = "QFC",
        [FW_DEVLOG_FACILITY_DCB]        = "DCB",
        [FW_DEVLOG_FACILITY_ETH]        = "ETH",
        [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
        [FW_DEVLOG_FACILITY_RI]         = "RI",
        [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
        [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
        [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
        [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
};
5387
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct devlog_params *dparams = &sc->params.devlog;
        struct fw_devlog_e *buf, *e;
        int i, j, rc, nentries, first = 0, m;
        struct sbuf *sb;
        uint64_t ftstamp = UINT64_MAX;

        /*
         * Dump the firmware's device log (a circular buffer in card memory)
         * in chronological order.
         */
        if (dparams->start == 0) {
                /*
                 * Fall back to hard-coded log location/size; presumably the
                 * historical defaults for when the firmware didn't report
                 * them -- TODO confirm against the firmware interface.
                 */
                dparams->memtype = FW_MEMTYPE_EDC0;
                dparams->start = 0x84000;
                dparams->size = 32768;
        }

        nentries = dparams->size / sizeof(struct fw_devlog_e);

        buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
        if (buf == NULL)
                return (ENOMEM);

        m = fwmtype_to_hwmtype(dparams->memtype);
        rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
        if (rc != 0)
                goto done;

        /*
         * Byte-swap every used entry and find the oldest one (smallest
         * timestamp); that's where the chronological dump begins.
         */
        for (i = 0; i < nentries; i++) {
                e = &buf[i];

                if (e->timestamp == 0)
                        break;  /* end */

                e->timestamp = be64toh(e->timestamp);
                e->seqno = be32toh(e->seqno);
                for (j = 0; j < 8; j++)
                        e->params[j] = be32toh(e->params[j]);

                if (e->timestamp < ftstamp) {
                        ftstamp = e->timestamp;
                        first = i;
                }
        }

        if (buf[first].timestamp == 0)
                goto done;      /* nothing in the log */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }
        sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
            "Seq#", "Tstamp", "Level", "Facility", "Message");

        /* Walk the ring exactly once, starting at the oldest entry. */
        i = first;
        do {
                e = &buf[i];
                if (e->timestamp == 0)
                        break;  /* end */

                sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
                    e->seqno, e->timestamp,
                    (e->level < nitems(devlog_level_strings) ?
                        devlog_level_strings[e->level] : "UNKNOWN"),
                    (e->facility < nitems(devlog_facility_strings) ?
                        devlog_facility_strings[e->facility] : "UNKNOWN"));
                sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
                    e->params[2], e->params[3], e->params[4],
                    e->params[5], e->params[6], e->params[7]);

                if (++i == nentries)
                        i = 0;  /* wrap around */
        } while (i != first);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
5473
5474 static int
5475 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5476 {
5477         struct adapter *sc = arg1;
5478         struct sbuf *sb;
5479         int rc;
5480         struct tp_fcoe_stats stats[4];
5481
5482         rc = sysctl_wire_old_buffer(req, 0);
5483         if (rc != 0)
5484                 return (rc);
5485
5486         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5487         if (sb == NULL)
5488                 return (ENOMEM);
5489
5490         t4_get_fcoe_stats(sc, 0, &stats[0]);
5491         t4_get_fcoe_stats(sc, 1, &stats[1]);
5492         t4_get_fcoe_stats(sc, 2, &stats[2]);
5493         t4_get_fcoe_stats(sc, 3, &stats[3]);
5494
5495         sbuf_printf(sb, "                   channel 0        channel 1        "
5496             "channel 2        channel 3\n");
5497         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5498             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5499             stats[3].octetsDDP);
5500         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5501             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5502         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5503             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5504             stats[3].framesDrop);
5505
5506         rc = sbuf_finish(sb);
5507         sbuf_delete(sb);
5508
5509         return (rc);
5510 }
5511
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        unsigned int map, kbps, ipg, mode;
        unsigned int pace_tab[NTX_SCHED];

        /*
         * Show the hardware tx schedulers: mode, channel mapping, rate, and
         * inter-packet gaps, read from the TP registers and the pace table.
         */
        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
        mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
        t4_read_pace_tbl(sc, pace_tab);

        sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
            "Class IPG (0.1 ns)   Flow IPG (us)");

        for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
                t4_get_tx_sched(sc, i, &kbps, &ipg);
                /* The low two bits of map give this scheduler's channel. */
                sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
                    (mode & (1 << i)) ? "flow" : "class", map & 3);
                /* A value of 0 means the corresponding limit is disabled. */
                if (kbps)
                        sbuf_printf(sb, "%9u     ", kbps);
                else
                        sbuf_printf(sb, " disabled     ");

                if (ipg)
                        sbuf_printf(sb, "%13u        ", ipg);
                else
                        sbuf_printf(sb, "     disabled        ");

                if (pace_tab[i])
                        sbuf_printf(sb, "%10u", pace_tab[i]);
                else
                        sbuf_printf(sb, "  disabled");
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5561
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, j;
        uint64_t *p0, *p1;
        struct lb_port_stats s[2];
        /*
         * Row labels.  The rows are printed by walking s[] as a flat array
         * of uint64_t starting at .octets, so this order must match the
         * field order of struct lb_port_stats.
         */
        static const char *stat_name[] = {
                "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
                "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
                "Frames128To255:", "Frames256To511:", "Frames512To1023:",
                "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
                "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
                "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
                "BG2FramesTrunc:", "BG3FramesTrunc:"
        };

        /* Dump loopback port statistics, two loopback ports per table. */
        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        memset(s, 0, sizeof(s));

        for (i = 0; i < 4; i += 2) {
                t4_get_lb_stats(sc, i, &s[0]);
                t4_get_lb_stats(sc, i + 1, &s[1]);

                p0 = &s[0].octets;
                p1 = &s[1].octets;
                sbuf_printf(sb, "%s                       Loopback %u"
                    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

                for (j = 0; j < nitems(stat_name); j++)
                        sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
                                   *p0++, *p1++);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5609
5610 static int
5611 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5612 {
5613         int rc = 0;
5614         struct port_info *pi = arg1;
5615         struct sbuf *sb;
5616         static const char *linkdnreasons[] = {
5617                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5618                 "PHY overheated", "unknown", "rx los", "reserved7"
5619         };
5620
5621         rc = sysctl_wire_old_buffer(req, 0);
5622         if (rc != 0)
5623                 return(rc);
5624         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5625         if (sb == NULL)
5626                 return (ENOMEM);
5627
5628         if (pi->linkdnrc < 0)
5629                 sbuf_printf(sb, "n/a");
5630         else if (pi->linkdnrc < nitems(linkdnreasons))
5631                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5632         else
5633                 sbuf_printf(sb, "%d", pi->linkdnrc);
5634
5635         rc = sbuf_finish(sb);
5636         sbuf_delete(sb);
5637
5638         return (rc);
5639 }
5640
/*
 * One memory region, used to build the adapter memory map printed by
 * sysctl_meminfo.
 */
struct mem_desc {
	unsigned int base;	/* start address of the region */
	unsigned int limit;	/* last address; 0 if not yet known */
	unsigned int idx;	/* index into the region name table */
};

/*
 * qsort(3) comparator ordering mem_desc entries by ascending base
 * address.  Compare explicitly rather than returning the difference:
 * the bases are full 32-bit addresses, so 'a->base - b->base' can wrap
 * past INT_MAX and convert to an int of the wrong sign, corrupting the
 * sort order.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
	const struct mem_desc *d1 = a;
	const struct mem_desc *d2 = b;

	if (d1->base < d2->base)
		return (-1);
	if (d1->base > d2->base)
		return (1);
	return (0);
}
5653
/*
 * Append one "name  start-end [size]" line describing a memory region
 * to the sbuf.  Bounds are inclusive; a region whose computed size is
 * 0 (to == from - 1) is skipped entirely.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size;

	/* Inclusive range, so a one-byte span has size 1. */
	size = to - from + 1;
	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
5667
5668 static int
5669 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5670 {
5671         struct adapter *sc = arg1;
5672         struct sbuf *sb;
5673         int rc, i, n;
5674         uint32_t lo, hi, used, alloc;
5675         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5676         static const char *region[] = {
5677                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5678                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5679                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5680                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5681                 "RQUDP region:", "PBL region:", "TXPBL region:",
5682                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5683                 "On-chip queues:"
5684         };
5685         struct mem_desc avail[4];
5686         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5687         struct mem_desc *md = mem;
5688
5689         rc = sysctl_wire_old_buffer(req, 0);
5690         if (rc != 0)
5691                 return (rc);
5692
5693         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5694         if (sb == NULL)
5695                 return (ENOMEM);
5696
5697         for (i = 0; i < nitems(mem); i++) {
5698                 mem[i].limit = 0;
5699                 mem[i].idx = i;
5700         }
5701
5702         /* Find and sort the populated memory ranges */
5703         i = 0;
5704         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5705         if (lo & F_EDRAM0_ENABLE) {
5706                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5707                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5708                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5709                 avail[i].idx = 0;
5710                 i++;
5711         }
5712         if (lo & F_EDRAM1_ENABLE) {
5713                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5714                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5715                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5716                 avail[i].idx = 1;
5717                 i++;
5718         }
5719         if (lo & F_EXT_MEM_ENABLE) {
5720                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5721                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5722                 avail[i].limit = avail[i].base +
5723                     (G_EXT_MEM_SIZE(hi) << 20);
5724                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5725                 i++;
5726         }
5727         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5728                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5729                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5730                 avail[i].limit = avail[i].base +
5731                     (G_EXT_MEM1_SIZE(hi) << 20);
5732                 avail[i].idx = 4;
5733                 i++;
5734         }
5735         if (!i)                                    /* no memory available */
5736                 return 0;
5737         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5738
5739         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5740         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5741         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5742         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5743         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5744         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5745         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5746         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5747         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5748
5749         /* the next few have explicit upper bounds */
5750         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5751         md->limit = md->base - 1 +
5752                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5753                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5754         md++;
5755
5756         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5757         md->limit = md->base - 1 +
5758                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5759                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5760         md++;
5761
5762         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5763                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5764                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5765                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5766         } else {
5767                 md->base = 0;
5768                 md->idx = nitems(region);  /* hide it */
5769         }
5770         md++;
5771
5772 #define ulp_region(reg) \
5773         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5774         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5775
5776         ulp_region(RX_ISCSI);
5777         ulp_region(RX_TDDP);
5778         ulp_region(TX_TPT);
5779         ulp_region(RX_STAG);
5780         ulp_region(RX_RQ);
5781         ulp_region(RX_RQUDP);
5782         ulp_region(RX_PBL);
5783         ulp_region(TX_PBL);
5784 #undef ulp_region
5785
5786         md->base = 0;
5787         md->idx = nitems(region);
5788         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5789                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5790                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5791                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5792         }
5793         md++;
5794
5795         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5796         md->limit = md->base + sc->tids.ntids - 1;
5797         md++;
5798         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5799         md->limit = md->base + sc->tids.ntids - 1;
5800         md++;
5801
5802         md->base = sc->vres.ocq.start;
5803         if (sc->vres.ocq.size)
5804                 md->limit = md->base + sc->vres.ocq.size - 1;
5805         else
5806                 md->idx = nitems(region);  /* hide it */
5807         md++;
5808
5809         /* add any address-space holes, there can be up to 3 */
5810         for (n = 0; n < i - 1; n++)
5811                 if (avail[n].limit < avail[n + 1].base)
5812                         (md++)->base = avail[n].limit;
5813         if (avail[n].limit)
5814                 (md++)->base = avail[n].limit;
5815
5816         n = md - mem;
5817         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5818
5819         for (lo = 0; lo < i; lo++)
5820                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5821                                 avail[lo].limit - 1);
5822
5823         sbuf_printf(sb, "\n");
5824         for (i = 0; i < n; i++) {
5825                 if (mem[i].idx >= nitems(region))
5826                         continue;                        /* skip holes */
5827                 if (!mem[i].limit)
5828                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5829                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5830                                 mem[i].limit);
5831         }
5832
5833         sbuf_printf(sb, "\n");
5834         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5835         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5836         mem_region_show(sb, "uP RAM:", lo, hi);
5837
5838         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5839         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5840         mem_region_show(sb, "uP Extmem2:", lo, hi);
5841
5842         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5843         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5844                    G_PMRXMAXPAGE(lo),
5845                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5846                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5847
5848         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5849         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5850         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5851                    G_PMTXMAXPAGE(lo),
5852                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5853                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5854         sbuf_printf(sb, "%u p-structs\n",
5855                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5856
5857         for (i = 0; i < 4; i++) {
5858                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5859                 if (is_t4(sc)) {
5860                         used = G_USED(lo);
5861                         alloc = G_ALLOC(lo);
5862                 } else {
5863                         used = G_T5_USED(lo);
5864                         alloc = G_T5_ALLOC(lo);
5865                 }
5866                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5867                            i, used, alloc);
5868         }
5869         for (i = 0; i < 4; i++) {
5870                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5871                 if (is_t4(sc)) {
5872                         used = G_USED(lo);
5873                         alloc = G_ALLOC(lo);
5874                 } else {
5875                         used = G_T5_USED(lo);
5876                         alloc = G_T5_ALLOC(lo);
5877                 }
5878                 sbuf_printf(sb,
5879                            "\nLoopback %d using %u pages out of %u allocated",
5880                            i, used, alloc);
5881         }
5882
5883         rc = sbuf_finish(sb);
5884         sbuf_delete(sb);
5885
5886         return (rc);
5887 }
5888
/*
 * Convert a TCAM (x, y) register pair into an Ethernet address and a
 * match mask.  The mask is the OR of the X and Y halves; the address
 * is the low 48 bits of y, written out most-significant byte first
 * (network order).
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	int i;

	*mask = x | y;
	/* Emit the 6 address bytes in big-endian order. */
	for (i = 0; i < 6; i++)
		addr[i] = (y >> (8 * (5 - i))) & 0xff;
}
5896
/*
 * Sysctl handler that dumps the MPS classification TCAM: one line per
 * populated entry showing the Ethernet address, match mask, valid bit,
 * port map, PF/VF owner, the replication vector (fetched from the
 * firmware), per-port SRAM priorities, and multi-listen bits.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	/* T4 and T5 have different TCAM depths. */
	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	for (i = 0; i < n; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		/* Skip entries whose X/Y halves overlap (presumably unused). */
		if (tcamx & tcamy)
			continue;

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
			   addr[3], addr[4], addr[5], (uintmax_t)mask,
			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
			   G_PORTMAP(cls_hi), G_PF(cls_lo),
			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/*
			 * The replication vector lives in firmware; fetch it
			 * with an LDST read against the MPS address space.
			 */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_CTL(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Embed the error in the output, keep going. */
				sbuf_printf(sb,
				    " ------------ error %3u ------------", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* rc != 0 here means begin_synchronized_op failed mid-loop. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5986
5987 static int
5988 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5989 {
5990         struct adapter *sc = arg1;
5991         struct sbuf *sb;
5992         int rc;
5993         uint16_t mtus[NMTUS];
5994
5995         rc = sysctl_wire_old_buffer(req, 0);
5996         if (rc != 0)
5997                 return (rc);
5998
5999         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6000         if (sb == NULL)
6001                 return (ENOMEM);
6002
6003         t4_read_mtu_tbl(sc, mtus, NULL);
6004
6005         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6006             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6007             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6008             mtus[14], mtus[15]);
6009
6010         rc = sbuf_finish(sb);
6011         sbuf_delete(sb);
6012
6013         return (rc);
6014 }
6015
/*
 * Sysctl handler that reports the PM (payload memory) statistics for
 * the Tx and Rx directions: per the column headers, a 32-bit command
 * count and a 64-bit byte count for each statistic.
 */
static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t cnt[PM_NSTATS];
	uint64_t cyc[PM_NSTATS];
	static const char *rx_stats[] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:"
	};
	static const char *tx_stats[] = {
		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Tx block first, then Rx; cnt/cyc are reused between them. */
	t4_pmtx_get_stats(sc, cnt, cyc);
	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
		    cyc[i]);

	t4_pmrx_get_stats(sc, cnt, cyc);
	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
		    cyc[i]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6056
6057 static int
6058 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6059 {
6060         struct adapter *sc = arg1;
6061         struct sbuf *sb;
6062         int rc;
6063         struct tp_rdma_stats stats;
6064
6065         rc = sysctl_wire_old_buffer(req, 0);
6066         if (rc != 0)
6067                 return (rc);
6068
6069         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6070         if (sb == NULL)
6071                 return (ENOMEM);
6072
6073         t4_tp_get_rdma_stats(sc, &stats);
6074         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6075         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6076
6077         rc = sbuf_finish(sb);
6078         sbuf_delete(sb);
6079
6080         return (rc);
6081 }
6082
/*
 * Sysctl handler that reports TP's aggregate TCP statistics, with the
 * IPv4 and IPv6 counters in side-by-side columns.
 */
static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_tcp_stats v4, v6;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_tcp_stats(sc, &v4, &v6);
	sbuf_printf(sb,
	    "                                IP                 IPv6\n");
	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
	    v4.tcpOutRsts, v6.tcpOutRsts);
	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
	    v4.tcpInSegs, v6.tcpInSegs);
	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
	    v4.tcpOutSegs, v6.tcpOutSegs);
	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
	    v4.tcpRetransSegs, v6.tcpRetransSegs);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6116
/*
 * Sysctl handler that summarizes TID usage: the configured ATID, TID,
 * STID, FTID, and ETID ranges, how many of each are in use, and the
 * hardware's active-TID counts for IPv4 and IPv6.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/*
			 * LE hash enabled: the TID space is split around the
			 * hash region, so up to two sub-ranges are printed.
			 */
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
		    t->etid_base + t->netids - 1);
	}

	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6181
/*
 * Sysctl handler that dumps TP's error and drop statistics: one row
 * per counter with a column for each of the four channels, followed by
 * the two adapter-wide offload counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
		      "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	/* These two are adapter-wide, not per-channel. */
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6234
/*
 * Describes one bit-field within a 64-bit logic-analyzer word.  Tables
 * of these are terminated by an entry with a NULL name.
 */
struct field_desc {
	const char *name;	/* field name; NULL terminates a table */
	u_int start;		/* least significant bit of the field */
	u_int width;		/* field width in bits */
};
6240
6241 static void
6242 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6243 {
6244         char buf[32];
6245         int line_size = 0;
6246
6247         while (f->name) {
6248                 uint64_t mask = (1ULL << f->width) - 1;
6249                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6250                     ((uintmax_t)v >> f->start) & mask);
6251
6252                 if (line_size + len >= 79) {
6253                         line_size = 8;
6254                         sbuf_printf(sb, "\n        ");
6255                 }
6256                 sbuf_printf(sb, "%s ", buf);
6257                 line_size += len + 1;
6258                 f++;
6259         }
6260         sbuf_printf(sb, "\n");
6261 }
6262
6263 static struct field_desc tp_la0[] = {
6264         { "RcfOpCodeOut", 60, 4 },
6265         { "State", 56, 4 },
6266         { "WcfState", 52, 4 },
6267         { "RcfOpcSrcOut", 50, 2 },
6268         { "CRxError", 49, 1 },
6269         { "ERxError", 48, 1 },
6270         { "SanityFailed", 47, 1 },
6271         { "SpuriousMsg", 46, 1 },
6272         { "FlushInputMsg", 45, 1 },
6273         { "FlushInputCpl", 44, 1 },
6274         { "RssUpBit", 43, 1 },
6275         { "RssFilterHit", 42, 1 },
6276         { "Tid", 32, 10 },
6277         { "InitTcb", 31, 1 },
6278         { "LineNumber", 24, 7 },
6279         { "Emsg", 23, 1 },
6280         { "EdataOut", 22, 1 },
6281         { "Cmsg", 21, 1 },
6282         { "CdataOut", 20, 1 },
6283         { "EreadPdu", 19, 1 },
6284         { "CreadPdu", 18, 1 },
6285         { "TunnelPkt", 17, 1 },
6286         { "RcfPeerFin", 16, 1 },
6287         { "RcfReasonOut", 12, 4 },
6288         { "TxCchannel", 10, 2 },
6289         { "RcfTxChannel", 8, 2 },
6290         { "RxEchannel", 6, 2 },
6291         { "RcfRxChannel", 5, 1 },
6292         { "RcfDataOutSrdy", 4, 1 },
6293         { "RxDvld", 3, 1 },
6294         { "RxOoDvld", 2, 1 },
6295         { "RxCongestion", 1, 1 },
6296         { "TxCongestion", 0, 1 },
6297         { NULL }
6298 };
6299
6300 static struct field_desc tp_la1[] = {
6301         { "CplCmdIn", 56, 8 },
6302         { "CplCmdOut", 48, 8 },
6303         { "ESynOut", 47, 1 },
6304         { "EAckOut", 46, 1 },
6305         { "EFinOut", 45, 1 },
6306         { "ERstOut", 44, 1 },
6307         { "SynIn", 43, 1 },
6308         { "AckIn", 42, 1 },
6309         { "FinIn", 41, 1 },
6310         { "RstIn", 40, 1 },
6311         { "DataIn", 39, 1 },
6312         { "DataInVld", 38, 1 },
6313         { "PadIn", 37, 1 },
6314         { "RxBufEmpty", 36, 1 },
6315         { "RxDdp", 35, 1 },
6316         { "RxFbCongestion", 34, 1 },
6317         { "TxFbCongestion", 33, 1 },
6318         { "TxPktSumSrdy", 32, 1 },
6319         { "RcfUlpType", 28, 4 },
6320         { "Eread", 27, 1 },
6321         { "Ebypass", 26, 1 },
6322         { "Esave", 25, 1 },
6323         { "Static0", 24, 1 },
6324         { "Cread", 23, 1 },
6325         { "Cbypass", 22, 1 },
6326         { "Csave", 21, 1 },
6327         { "CPktOut", 20, 1 },
6328         { "RxPagePoolFull", 18, 2 },
6329         { "RxLpbkPkt", 17, 1 },
6330         { "TxLpbkPkt", 16, 1 },
6331         { "RxVfValid", 15, 1 },
6332         { "SynLearned", 14, 1 },
6333         { "SetDelEntry", 13, 1 },
6334         { "SetInvEntry", 12, 1 },
6335         { "CpcmdDvld", 11, 1 },
6336         { "CpcmdSave", 10, 1 },
6337         { "RxPstructsFull", 8, 2 },
6338         { "EpcmdDvld", 7, 1 },
6339         { "EpcmdFlush", 6, 1 },
6340         { "EpcmdTrimPrefix", 5, 1 },
6341         { "EpcmdTrimPostfix", 4, 1 },
6342         { "ERssIp4Pkt", 3, 1 },
6343         { "ERssIp6Pkt", 2, 1 },
6344         { "ERssTcpUdpPkt", 1, 1 },
6345         { "ERssFceFipPkt", 0, 1 },
6346         { NULL }
6347 };
6348
6349 static struct field_desc tp_la2[] = {
6350         { "CplCmdIn", 56, 8 },
6351         { "MpsVfVld", 55, 1 },
6352         { "MpsPf", 52, 3 },
6353         { "MpsVf", 44, 8 },
6354         { "SynIn", 43, 1 },
6355         { "AckIn", 42, 1 },
6356         { "FinIn", 41, 1 },
6357         { "RstIn", 40, 1 },
6358         { "DataIn", 39, 1 },
6359         { "DataInVld", 38, 1 },
6360         { "PadIn", 37, 1 },
6361         { "RxBufEmpty", 36, 1 },
6362         { "RxDdp", 35, 1 },
6363         { "RxFbCongestion", 34, 1 },
6364         { "TxFbCongestion", 33, 1 },
6365         { "TxPktSumSrdy", 32, 1 },
6366         { "RcfUlpType", 28, 4 },
6367         { "Eread", 27, 1 },
6368         { "Ebypass", 26, 1 },
6369         { "Esave", 25, 1 },
6370         { "Static0", 24, 1 },
6371         { "Cread", 23, 1 },
6372         { "Cbypass", 22, 1 },
6373         { "Csave", 21, 1 },
6374         { "CPktOut", 20, 1 },
6375         { "RxPagePoolFull", 18, 2 },
6376         { "RxLpbkPkt", 17, 1 },
6377         { "TxLpbkPkt", 16, 1 },
6378         { "RxVfValid", 15, 1 },
6379         { "SynLearned", 14, 1 },
6380         { "SetDelEntry", 13, 1 },
6381         { "SetInvEntry", 12, 1 },
6382         { "CpcmdDvld", 11, 1 },
6383         { "CpcmdSave", 10, 1 },
6384         { "RxPstructsFull", 8, 2 },
6385         { "EpcmdDvld", 7, 1 },
6386         { "EpcmdFlush", 6, 1 },
6387         { "EpcmdTrimPrefix", 5, 1 },
6388         { "EpcmdTrimPostfix", 4, 1 },
6389         { "ERssIp4Pkt", 3, 1 },
6390         { "ERssIp6Pkt", 2, 1 },
6391         { "ERssTcpUdpPkt", 1, 1 },
6392         { "ERssFceFipPkt", 0, 1 },
6393         { NULL }
6394 };
6395
/*
 * Display a single one-word TP LA entry.  'idx' is unused here but
 * keeps the signature identical to the other show functions so they
 * can share a function pointer in sysctl_tp_la.
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

	field_desc_show(sb, *p, tp_la0);
}
6402
/*
 * Display one two-word TP LA entry (DBGLAMODE 2); both words decode
 * with the tp_la0 layout.  The second word of the final entry is
 * suppressed when it is all-ones (presumably an unused slot).
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");	/* blank line between entries */
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], tp_la0);
}
6413
/*
 * Display one two-word TP LA entry (DBGLAMODE 3).  The first word uses
 * the tp_la0 layout; the second decodes as tp_la2 when bit 17
 * (TunnelPkt in tp_la0) of the first word is set, otherwise as tp_la1.
 * The all-ones second word of the final entry is suppressed.
 */
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");	/* blank line between entries */
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}
6424
/*
 * Sysctl handler that dumps the TP logic-analyzer capture buffer.  The
 * current DBGLAMODE selects whether entries are one or two 64-bit
 * words and which decode routine applies.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint64_t *buf, *p;
	int rc;
	u_int i, inc;
	void (*show_func)(struct sbuf *, uint64_t *, int);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* M_WAITOK: this allocation cannot fail, no NULL check needed. */
	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

	t4_tp_read_la(sc, buf, NULL);
	p = buf;

	/* Modes 2 and 3 capture two words per entry, all others one. */
	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
	case 2:
		inc = 2;
		show_func = tp_la_show2;
		break;
	case 3:
		inc = 2;
		show_func = tp_la_show3;
		break;
	default:
		inc = 1;
		show_func = tp_la_show;
	}

	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
		(*show_func)(sb, p, i);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6470
6471 static int
6472 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6473 {
6474         struct adapter *sc = arg1;
6475         struct sbuf *sb;
6476         int rc;
6477         u64 nrate[NCHAN], orate[NCHAN];
6478
6479         rc = sysctl_wire_old_buffer(req, 0);
6480         if (rc != 0)
6481                 return (rc);
6482
6483         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6484         if (sb == NULL)
6485                 return (ENOMEM);
6486
6487         t4_get_chan_txrate(sc, nrate, orate);
6488         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6489                  "channel 3\n");
6490         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6491             nrate[0], nrate[1], nrate[2], nrate[3]);
6492         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6493             orate[0], orate[1], orate[2], orate[3]);
6494
6495         rc = sbuf_finish(sb);
6496         sbuf_delete(sb);
6497
6498         return (rc);
6499 }
6500
6501 static int
6502 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6503 {
6504         struct adapter *sc = arg1;
6505         struct sbuf *sb;
6506         uint32_t *buf, *p;
6507         int rc, i;
6508
6509         rc = sysctl_wire_old_buffer(req, 0);
6510         if (rc != 0)
6511                 return (rc);
6512
6513         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6514         if (sb == NULL)
6515                 return (ENOMEM);
6516
6517         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6518             M_ZERO | M_WAITOK);
6519
6520         t4_ulprx_read_la(sc, buf);
6521         p = buf;
6522
6523         sbuf_printf(sb, "      Pcmd        Type   Message"
6524             "                Data");
6525         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6526                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6527                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6528         }
6529
6530         rc = sbuf_finish(sb);
6531         sbuf_delete(sb);
6532         free(buf, M_CXGBE);
6533         return (rc);
6534 }
6535
6536 static int
6537 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6538 {
6539         struct adapter *sc = arg1;
6540         struct sbuf *sb;
6541         int rc, v;
6542
6543         rc = sysctl_wire_old_buffer(req, 0);
6544         if (rc != 0)
6545                 return (rc);
6546
6547         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6548         if (sb == NULL)
6549                 return (ENOMEM);
6550
6551         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6552         if (G_STATSOURCE_T5(v) == 7) {
6553                 if (G_STATMODE(v) == 0) {
6554                         sbuf_printf(sb, "total %d, incomplete %d",
6555                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6556                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6557                 } else if (G_STATMODE(v) == 1) {
6558                         sbuf_printf(sb, "total %d, data overflow %d",
6559                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6560                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6561                 }
6562         }
6563         rc = sbuf_finish(sb);
6564         sbuf_delete(sb);
6565
6566         return (rc);
6567 }
6568 #endif
6569
6570 static inline void
6571 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6572 {
6573         struct buf_ring *br;
6574         struct mbuf *m;
6575
6576         TXQ_LOCK_ASSERT_OWNED(txq);
6577
6578         br = txq->br;
6579         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6580         if (m)
6581                 t4_eth_tx(ifp, txq, m);
6582 }
6583
6584 void
6585 t4_tx_callout(void *arg)
6586 {
6587         struct sge_eq *eq = arg;
6588         struct adapter *sc;
6589
6590         if (EQ_TRYLOCK(eq) == 0)
6591                 goto reschedule;
6592
6593         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6594                 EQ_UNLOCK(eq);
6595 reschedule:
6596                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6597                         callout_schedule(&eq->tx_callout, 1);
6598                 return;
6599         }
6600
6601         EQ_LOCK_ASSERT_OWNED(eq);
6602
6603         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6604
6605                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6606                         struct sge_txq *txq = arg;
6607                         struct port_info *pi = txq->ifp->if_softc;
6608
6609                         sc = pi->adapter;
6610                 } else {
6611                         struct sge_wrq *wrq = arg;
6612
6613                         sc = wrq->adapter;
6614                 }
6615
6616                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6617         }
6618
6619         EQ_UNLOCK(eq);
6620 }
6621
6622 void
6623 t4_tx_task(void *arg, int count)
6624 {
6625         struct sge_eq *eq = arg;
6626
6627         EQ_LOCK(eq);
6628         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6629                 struct sge_txq *txq = arg;
6630                 txq_start(txq->ifp, txq);
6631         } else {
6632                 struct sge_wrq *wrq = arg;
6633                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6634         }
6635         EQ_UNLOCK(eq);
6636 }
6637
6638 static uint32_t
6639 fconf_to_mode(uint32_t fconf)
6640 {
6641         uint32_t mode;
6642
6643         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6644             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6645
6646         if (fconf & F_FRAGMENTATION)
6647                 mode |= T4_FILTER_IP_FRAGMENT;
6648
6649         if (fconf & F_MPSHITTYPE)
6650                 mode |= T4_FILTER_MPS_HIT_TYPE;
6651
6652         if (fconf & F_MACMATCH)
6653                 mode |= T4_FILTER_MAC_IDX;
6654
6655         if (fconf & F_ETHERTYPE)
6656                 mode |= T4_FILTER_ETH_TYPE;
6657
6658         if (fconf & F_PROTOCOL)
6659                 mode |= T4_FILTER_IP_PROTO;
6660
6661         if (fconf & F_TOS)
6662                 mode |= T4_FILTER_IP_TOS;
6663
6664         if (fconf & F_VLAN)
6665                 mode |= T4_FILTER_VLAN;
6666
6667         if (fconf & F_VNIC_ID)
6668                 mode |= T4_FILTER_VNIC;
6669
6670         if (fconf & F_PORT)
6671                 mode |= T4_FILTER_PORT;
6672
6673         if (fconf & F_FCOE)
6674                 mode |= T4_FILTER_FCoE;
6675
6676         return (mode);
6677 }
6678
6679 static uint32_t
6680 mode_to_fconf(uint32_t mode)
6681 {
6682         uint32_t fconf = 0;
6683
6684         if (mode & T4_FILTER_IP_FRAGMENT)
6685                 fconf |= F_FRAGMENTATION;
6686
6687         if (mode & T4_FILTER_MPS_HIT_TYPE)
6688                 fconf |= F_MPSHITTYPE;
6689
6690         if (mode & T4_FILTER_MAC_IDX)
6691                 fconf |= F_MACMATCH;
6692
6693         if (mode & T4_FILTER_ETH_TYPE)
6694                 fconf |= F_ETHERTYPE;
6695
6696         if (mode & T4_FILTER_IP_PROTO)
6697                 fconf |= F_PROTOCOL;
6698
6699         if (mode & T4_FILTER_IP_TOS)
6700                 fconf |= F_TOS;
6701
6702         if (mode & T4_FILTER_VLAN)
6703                 fconf |= F_VLAN;
6704
6705         if (mode & T4_FILTER_VNIC)
6706                 fconf |= F_VNIC_ID;
6707
6708         if (mode & T4_FILTER_PORT)
6709                 fconf |= F_PORT;
6710
6711         if (mode & T4_FILTER_FCoE)
6712                 fconf |= F_FCOE;
6713
6714         return (fconf);
6715 }
6716
6717 static uint32_t
6718 fspec_to_fconf(struct t4_filter_specification *fs)
6719 {
6720         uint32_t fconf = 0;
6721
6722         if (fs->val.frag || fs->mask.frag)
6723                 fconf |= F_FRAGMENTATION;
6724
6725         if (fs->val.matchtype || fs->mask.matchtype)
6726                 fconf |= F_MPSHITTYPE;
6727
6728         if (fs->val.macidx || fs->mask.macidx)
6729                 fconf |= F_MACMATCH;
6730
6731         if (fs->val.ethtype || fs->mask.ethtype)
6732                 fconf |= F_ETHERTYPE;
6733
6734         if (fs->val.proto || fs->mask.proto)
6735                 fconf |= F_PROTOCOL;
6736
6737         if (fs->val.tos || fs->mask.tos)
6738                 fconf |= F_TOS;
6739
6740         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6741                 fconf |= F_VLAN;
6742
6743         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6744                 fconf |= F_VNIC_ID;
6745
6746         if (fs->val.iport || fs->mask.iport)
6747                 fconf |= F_PORT;
6748
6749         if (fs->val.fcoe || fs->mask.fcoe)
6750                 fconf |= F_FCOE;
6751
6752         return (fconf);
6753 }
6754
/*
 * Report the current filter mode as a T4_FILTER_* bitmap.  Reads
 * TP_VLAN_PRI_MAP directly from the chip and resyncs the driver's cached
 * copy if it has drifted.  Returns 0, or an error from
 * begin_synchronized_op (e.g. if the sleep was interrupted).
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	/* Read the authoritative value from the hardware. */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	/* Warn and fix up if the cached copy is stale. */
	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6781
/*
 * Change the global filter mode.  Refused (EBUSY) while any filters are in
 * use or while offload is active, since the mode change would invalidate
 * them.  The actual hardware update is under "#ifdef notyet", so this
 * currently always fails with ENOTSUP after the checks pass.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	/* Existing filters would be interpreted under the wrong mode. */
	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	/* Active offloaded connections also depend on the current mode. */
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6819
/*
 * Read the hit counter for filter 'fid' straight out of its TCB through
 * memory window 0.  The count is a big-endian 64-bit value at TCB offset 16
 * on T4, and a big-endian 32-bit value at offset 24 on T5 (offsets as coded
 * here — confirm against the TCB layout if the firmware changes).  Note
 * this repositions memory window 0 as a side effect.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	/* Point window 0 at this filter's TCB. */
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
6839
/*
 * Return (in *t) the first valid filter at or after t->idx.  t->idx is set
 * to 0xffffffff when there is no such filter.  Always returns 0 once the
 * synchronized op has been entered; only begin_synchronized_op failures
 * produce a nonzero return.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	/* Nothing to report if no filters exist or idx is out of range. */
	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	/* Scan forward from t->idx for the next valid entry. */
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6878
/*
 * Program a new filter at t->idx according to the specification in t->fs.
 * Validates the request against the global filter mode, port count, and
 * IPv6 alignment rules, lazily allocates the filter table, sends the
 * filter work request, and then sleeps until the firmware reply
 * (t4_filter_rpl) resolves it.  Returns 0 on success, EINPROGRESS if the
 * wait was interrupted (the WR is still outstanding), or another errno.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	/* A switch filter must point at a real egress port. */
	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Allocate the filter table (and its lock) on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter (type == 1) occupies 4 consecutive slots; check all
	 * of them.  For IPv4 only the first slot is checked (early break).
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for the firmware reply; t4_filter_rpl clears
		 * f->pending and does the wakeup on ftid_tab.
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
6993
/*
 * Delete the filter at t->idx.  Sends the delete work request and sleeps
 * until the firmware reply (t4_filter_rpl) clears the entry.  A delete of
 * an entry that isn't valid is a silent no-op (rc stays 0 and the wait
 * loop sees pending == 0 immediately).  Returns 0 on success, EINPROGRESS
 * if the wait was interrupted, or another errno.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* A set or delete is already outstanding on this entry. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		/*
		 * Wait for the firmware reply; note the success condition is
		 * inverted vs. set_filter: the entry must end up NOT valid.
		 */
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
7061
7062 static void
7063 clear_filter(struct filter_entry *f)
7064 {
7065         if (f->l2t)
7066                 t4_l2t_release(f->l2t);
7067
7068         bzero(f, sizeof (*f));
7069 }
7070
/*
 * Build and send the FW_FILTER_WR that programs filter 'fidx' into the
 * hardware.  Allocates an L2T switching entry first when the filter
 * rewrites the destination MAC or VLAN.  Marks the entry pending; the
 * firmware reply (t4_filter_rpl) completes or fails it asynchronously.
 * Returns 0 if the WR was queued, EAGAIN/ENOMEM on resource shortage.
 * Caller must be inside a synchronized op.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Filter region tids start at ftid_base within the overall tid space. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Header: opcode, length, tid, and the iq to steer to (if any). */
	fwr->op_pkd = htobe32(V_FW_FILTER_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	/* Action and rewrite controls derived from the filter spec. */
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	/* Match values and masks, field by field. */
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Have the firmware reply land on the adapter's fw event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Busy until the firmware reply arrives (see t4_filter_rpl). */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7173
/*
 * Build and send the work request that deletes filter 'fidx'.  Marks the
 * entry pending; the firmware reply (t4_filter_rpl) clears it.  Returns 0
 * if the WR was queued, ENOMEM if a wrqe couldn't be allocated.
 */
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Common code fills in the delete WR; reply goes to the fw queue. */
	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	t4_wrq_tx(sc, wr);
	return (0);
}
7196
/*
 * Handler for the firmware's reply to a filter set/delete work request.
 * Resolves the pending state of the affected filter entry and wakes up any
 * thread sleeping in set_filter/del_filter.  Replies for tids outside the
 * filter region are ignored.  Always returns 0.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_ftid(sc, idx)) {

		/* Convert the absolute tid to an index into ftid_tab. */
		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		/* The firmware's status code rides in the reply cookie. */
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Either a clean delete or a failure: entry is free. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Wake any thread blocked in set_filter/del_filter. */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7239
/*
 * Read an SGE context (egress, ingress, freelist manager, or congestion
 * manager) for the ioctl path.  Prefers the firmware mailbox read; falls
 * back to a direct backdoor register read if the firmware isn't up or the
 * mailbox read failed.  Returns 0 or an errno (firmware/backdoor errors
 * are negated into errno space).
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7272
/*
 * Load a new firmware image supplied by userland onto the card.  Refused
 * (EBUSY) once the adapter is fully initialized — the card must be idle.
 * Returns 0 or an errno (t4_load_fw errors are negated into errno space).
 * NOTE(review): fw->len comes from userland and is passed straight to
 * malloc(M_WAITOK) — an oversized request can tie up kernel memory.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	/* Defensive only: an M_WAITOK allocation does not return NULL. */
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7303
/*
 * Copy a range of card memory out to userland through memory window 'win'.
 * The range is validated first; the copy proceeds one window aperture at a
 * time, staging up to one aperture's worth of 32-bit reads in a kernel
 * buffer before each copyout.  mr->addr/mr->len are assumed 4-byte aligned
 * (validate_mem_range is expected to enforce this — the read loop steps by
 * 4).  Repositions the memory window as a side effect.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	/* Staging buffer: one aperture's worth, or less for short reads. */
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7344
/*
 * Read a byte from an i2c device (e.g. a transceiver EEPROM) behind one of
 * the ports, via the firmware.  Only single-byte reads are supported until
 * the firmware grows a multi-byte read.  Returns 0 or an errno.
 */
static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > 1) {
		/* XXX: need fw support for longer reads in one go */
		return (ENOTSUP);
	}

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}
7367
/*
 * Range check with a "don't care" escape: a negative val means the caller
 * didn't specify the parameter and it always passes; otherwise val must
 * fall within [lo, hi] inclusive.
 */
static int
in_range(int val, int lo, int hi)
{

	return (val < 0 || (lo <= val && val <= hi));
}
7374
/*
 * Program the firmware TX scheduler on behalf of the CHELSIO_T4_SCHED_CLASS
 * ioctl.  Handles the "config" sub-command (global min/max enable) and the
 * "params" sub-command (per-class level/mode/rate/weight settings) by
 * translating the cxgbetool request in *p into FW_SCHED firmware values.
 * Negative fields in *p mean the caller left them unset.  Returns 0 on
 * success or an errno.
 */
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
        int fw_subcmd, fw_type, rc;

        /* Serialize against other synchronized operations on the adapter. */
        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
        if (rc)
                return (rc);

        /* Firmware scheduler configuration requires a fully-initialized NIC. */
        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        /*
         * Translate the cxgbetool parameters into T4 firmware parameters.  (The
         * sub-command and type are in common locations.)
         */
        if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
                fw_subcmd = FW_SCHED_SC_CONFIG;
        else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
                fw_subcmd = FW_SCHED_SC_PARAMS;
        else {
                rc = EINVAL;
                goto done;
        }
        /* Only packet scheduling is supported here. */
        if (p->type == SCHED_CLASS_TYPE_PACKET)
                fw_type = FW_SCHED_TYPE_PKTSCHED;
        else {
                rc = EINVAL;
                goto done;
        }

        if (fw_subcmd == FW_SCHED_SC_CONFIG) {
                /* Vet our parameters ..*/
                if (p->u.config.minmax < 0) {
                        rc = EINVAL;
                        goto done;
                }

                /* And pass the request to the firmware ...*/
                rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
                goto done;
        }

        if (fw_subcmd == FW_SCHED_SC_PARAMS) {
                int fw_level;
                int fw_mode;
                int fw_rateunit;
                int fw_ratemode;

                /* Scheduler hierarchy level: class rate-limit, class WRR,
                 * or channel rate-limit. */
                if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
                        fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
                else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
                        fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
                else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
                        fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
                else {
                        rc = EINVAL;
                        goto done;
                }

                /* Rate-limit applies per class or per flow. */
                if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
                        fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
                else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
                        fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
                else {
                        rc = EINVAL;
                        goto done;
                }

                /* Rates are expressed in bits/s or packets/s. */
                if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
                        fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
                else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
                        fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
                else {
                        rc = EINVAL;
                        goto done;
                }

                /* Rates are relative (percentage) or absolute. */
                if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
                        fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
                else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
                        fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
                else {
                        rc = EINVAL;
                        goto done;
                }

                /* Vet our parameters ... */
                /*
                 * NOTE(review): the class upper bound is 15 on T4 but 16 on
                 * T5 here; confirm against the chip's actual scheduler-class
                 * count (an inclusive bound of 16 admits 17 classes).
                 */
                if (!in_range(p->u.params.channel, 0, 3) ||
                    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
                    !in_range(p->u.params.minrate, 0, 10000000) ||
                    !in_range(p->u.params.maxrate, 0, 10000000) ||
                    !in_range(p->u.params.weight, 0, 100)) {
                        rc = ERANGE;
                        goto done;
                }

                /*
                 * Translate any unset parameters into the firmware's
                 * nomenclature and/or fail the call if the parameters
                 * are required ...
                 */
                if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
                    p->u.params.channel < 0 || p->u.params.cl < 0) {
                        rc = EINVAL;
                        goto done;
                }
                /* minrate is optional; 0 means "no minimum". */
                if (p->u.params.minrate < 0)
                        p->u.params.minrate = 0;
                /* maxrate is mandatory for the rate-limit levels. */
                if (p->u.params.maxrate < 0) {
                        if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
                            p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
                                rc = EINVAL;
                                goto done;
                        } else
                                p->u.params.maxrate = 0;
                }
                /* weight is mandatory for weighted round-robin. */
                if (p->u.params.weight < 0) {
                        if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
                                rc = EINVAL;
                                goto done;
                        } else
                                p->u.params.weight = 0;
                }
                /* pktsize is mandatory for the rate-limit levels. */
                if (p->u.params.pktsize < 0) {
                        if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
                            p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
                                rc = EINVAL;
                                goto done;
                        } else
                                p->u.params.pktsize = 0;
                }

                /* See what the firmware thinks of the request ... */
                rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
                    fw_rateunit, fw_ratemode, p->u.params.channel,
                    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
                    p->u.params.weight, p->u.params.pktsize);
                goto done;
        }

        /* Unreachable given the fw_subcmd translation above, but be safe. */
        rc = EINVAL;
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7523
/*
 * Bind TX queue(s) of a port to a firmware TX scheduling class on behalf
 * of the CHELSIO_T4_SCHED_QUEUE ioctl.  p->queue < 0 means all TX queues
 * of the port; p->cl < 0 maps to class 0xffffffff (presumably "unbind" —
 * verify against the firmware spec).  Returns 0 on success or an errno.
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
        struct port_info *pi = NULL;
        struct sge_txq *txq;
        uint32_t fw_mnem, fw_queue, fw_class;
        int i, rc;

        /* Serialize against other synchronized operations on the adapter. */
        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
        if (rc)
                return (rc);

        /* The TX queues exist only after full initialization. */
        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        /* Validate the port index ... */
        if (p->port >= sc->params.nports) {
                rc = EINVAL;
                goto done;
        }

        /* ... and the queue/class values (negative means "all"/"unset"). */
        pi = sc->port[p->port];
        if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
                rc = EINVAL;
                goto done;
        }

        /*
         * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
         * Scheduling Class in this case).
         */
        fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
        fw_class = p->cl < 0 ? 0xffffffff : p->cl;

        /*
         * If op.queue is non-negative, then we're only changing the scheduling
         * on a single specified TX queue.
         */
        if (p->queue >= 0) {
                txq = &sc->sge.txq[pi->first_txq + p->queue];
                fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
                    &fw_class);
                goto done;
        }

        /*
         * Change the scheduling on all the TX queues for the
         * interface.
         */
        for_each_txq(pi, i, txq) {
                fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
                    &fw_class);
                if (rc)
                        goto done;      /* partial updates may have applied */
        }

        rc = 0;
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7589
7590 int
7591 t4_os_find_pci_capability(struct adapter *sc, int cap)
7592 {
7593         int i;
7594
7595         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7596 }
7597
7598 int
7599 t4_os_pci_save_state(struct adapter *sc)
7600 {
7601         device_t dev;
7602         struct pci_devinfo *dinfo;
7603
7604         dev = sc->dev;
7605         dinfo = device_get_ivars(dev);
7606
7607         pci_cfg_save(dev, dinfo, 0);
7608         return (0);
7609 }
7610
7611 int
7612 t4_os_pci_restore_state(struct adapter *sc)
7613 {
7614         device_t dev;
7615         struct pci_devinfo *dinfo;
7616
7617         dev = sc->dev;
7618         dinfo = device_get_ivars(dev);
7619
7620         pci_cfg_restore(dev, dinfo);
7621         return (0);
7622 }
7623
7624 void
7625 t4_os_portmod_changed(const struct adapter *sc, int idx)
7626 {
7627         struct port_info *pi = sc->port[idx];
7628         static const char *mod_str[] = {
7629                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7630         };
7631
7632         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7633                 if_printf(pi->ifp, "transceiver unplugged.\n");
7634         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7635                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7636         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7637                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7638         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7639                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7640                     mod_str[pi->mod_type]);
7641         } else {
7642                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7643                     pi->mod_type);
7644         }
7645 }
7646
7647 void
7648 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7649 {
7650         struct port_info *pi = sc->port[idx];
7651         struct ifnet *ifp = pi->ifp;
7652
7653         if (link_stat) {
7654                 pi->linkdnrc = -1;
7655                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7656                 if_link_state_change(ifp, LINK_STATE_UP);
7657         } else {
7658                 if (reason >= 0)
7659                         pi->linkdnrc = reason;
7660                 if_link_state_change(ifp, LINK_STATE_DOWN);
7661         }
7662 }
7663
/*
 * Invoke 'func(sc, arg)' once for every adapter on the global adapter
 * list.  The whole walk runs with t4_list_lock (a mutex) held, so 'func'
 * must be safe to call in that context.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
        struct adapter *sc;

        mtx_lock(&t4_list_lock);
        SLIST_FOREACH(sc, &t4_list, link) {
                /*
                 * func should not make any assumptions about what state sc is
                 * in - the only guarantee is that sc->sc_lock is a valid lock.
                 */
                func(sc, arg);
        }
        mtx_unlock(&t4_list_lock);
}
7679
/* cdev open handler: no per-open state is kept, so this always succeeds. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

        return (0);
}
7685
/* cdev close handler: nothing to release, so this always succeeds. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

        return (0);
}
7691
/*
 * ioctl handler for the nexus cdev (used by cxgbetool).  All commands
 * require PRIV_DRIVER.  'data' has already been copied in/out by the
 * kernel per the ioctl's IOR/IOW encoding.  Returns 0 or an errno.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        int rc;
        struct adapter *sc = dev->si_drv1;

        /* Every command here can reconfigure hardware; gate on privilege. */
        rc = priv_check(td, PRIV_DRIVER);
        if (rc != 0)
                return (rc);

        switch (cmd) {
        case CHELSIO_T4_GETREG: {       /* read a 4/8 byte adapter register */
                struct t4_reg *edata = (struct t4_reg *)data;

                /* Address must be 4-byte aligned and within the BAR. */
                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4)
                        edata->val = t4_read_reg(sc, edata->addr);
                else if (edata->size == 8)
                        edata->val = t4_read_reg64(sc, edata->addr);
                else
                        return (EINVAL);

                break;
        }
        case CHELSIO_T4_SETREG: {       /* write a 4/8 byte adapter register */
                struct t4_reg *edata = (struct t4_reg *)data;

                /* Address must be 4-byte aligned and within the BAR. */
                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4) {
                        /* Reject values that don't fit in 32 bits. */
                        if (edata->val & 0xffffffff00000000)
                                return (EINVAL);
                        t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
                } else if (edata->size == 8)
                        t4_write_reg64(sc, edata->addr, edata->val);
                else
                        return (EINVAL);
                break;
        }
        case CHELSIO_T4_REGDUMP: {      /* dump all adapter registers */
                struct t4_regdump *regs = (struct t4_regdump *)data;
                int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
                uint8_t *buf;

                if (regs->len < reglen) {
                        regs->len = reglen; /* hint to the caller */
                        return (ENOBUFS);
                }

                regs->len = reglen;
                buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
                t4_get_regs(sc, regs, buf);
                rc = copyout(buf, regs->data, reglen);
                free(buf, M_CXGBE);
                break;
        }
        case CHELSIO_T4_GET_FILTER_MODE:
                rc = get_filter_mode(sc, (uint32_t *)data);
                break;
        case CHELSIO_T4_SET_FILTER_MODE:
                rc = set_filter_mode(sc, *(uint32_t *)data);
                break;
        case CHELSIO_T4_GET_FILTER:
                rc = get_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_SET_FILTER:
                rc = set_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_DEL_FILTER:
                rc = del_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_GET_SGE_CONTEXT:
                rc = get_sge_context(sc, (struct t4_sge_context *)data);
                break;
        case CHELSIO_T4_LOAD_FW:
                rc = load_fw(sc, (struct t4_data *)data);
                break;
        case CHELSIO_T4_GET_MEM:
                rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
                break;
        case CHELSIO_T4_GET_I2C:
                rc = read_i2c(sc, (struct t4_i2c_data *)data);
                break;
        case CHELSIO_T4_CLEAR_STATS: {  /* zero MAC and queue statistics */
                int i;
                u_int port_id = *(uint32_t *)data;
                struct port_info *pi;

                if (port_id >= sc->params.nports)
                        return (EINVAL);
                pi = sc->port[port_id];

                /* MAC stats */
                t4_clr_port_stats(sc, pi->tx_chan);

                /* Software queue counters exist only once queues are set up. */
                if (pi->flags & PORT_INIT_DONE) {
                        struct sge_rxq *rxq;
                        struct sge_txq *txq;
                        struct sge_wrq *wrq;

                        for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
                                rxq->lro.lro_queued = 0;
                                rxq->lro.lro_flushed = 0;
#endif
                                rxq->rxcsum = 0;
                                rxq->vlan_extraction = 0;
                        }

                        for_each_txq(pi, i, txq) {
                                txq->txcsum = 0;
                                txq->tso_wrs = 0;
                                txq->vlan_insertion = 0;
                                txq->imm_wrs = 0;
                                txq->sgl_wrs = 0;
                                txq->txpkt_wrs = 0;
                                txq->txpkts_wrs = 0;
                                txq->txpkts_pkts = 0;
                                txq->br->br_drops = 0;
                                txq->no_dmamap = 0;
                                txq->no_desc = 0;
                        }

#ifdef TCP_OFFLOAD
                        /* nothing to clear for each ofld_rxq */

                        for_each_ofld_txq(pi, i, wrq) {
                                wrq->tx_wrs = 0;
                                wrq->no_desc = 0;
                        }
#endif
                        /* The control queue for this port. */
                        wrq = &sc->sge.ctrlq[pi->port_id];
                        wrq->tx_wrs = 0;
                        wrq->no_desc = 0;
                }
                break;
        }
        case CHELSIO_T4_SCHED_CLASS:
                rc = set_sched_class(sc, (struct t4_sched_params *)data);
                break;
        case CHELSIO_T4_SCHED_QUEUE:
                rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
                break;
        default:
                rc = EINVAL;
        }

        return (rc);
}
7845
7846 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TCP offload (TOE) on a port.  Enabling may trigger
 * full adapter initialization and activation of the TOM upper layer
 * driver (t4_tom.ko).  Must be called inside a synchronized operation.
 * Returns 0 on success or an errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
        int rc;
        struct adapter *sc = pi->adapter;

        ASSERT_SYNCHRONIZED_OP(sc);

        /* The adapter must be offload-capable at all. */
        if (!is_offload(sc))
                return (ENODEV);

        if (enable) {
                /* TOE needs the adapter fully up; bring it up if needed. */
                if (!(sc->flags & FULL_INIT_DONE)) {
                        rc = cxgbe_init_synchronized(pi);
                        if (rc)
                                return (rc);
                }

                /* Already enabled on this port: nothing to do. */
                if (isset(&sc->offload_map, pi->port_id))
                        return (0);

                /* Activate the TOM ULD once per adapter. */
                if (!(sc->flags & TOM_INIT_DONE)) {
                        rc = t4_activate_uld(sc, ULD_TOM);
                        if (rc == EAGAIN) {
                                /* EAGAIN from t4_activate_uld means the ULD
                                 * isn't registered, i.e. module not loaded. */
                                log(LOG_WARNING,
                                    "You must kldload t4_tom.ko before trying "
                                    "to enable TOE on a cxgbe interface.\n");
                        }
                        if (rc != 0)
                                return (rc);
                        KASSERT(sc->tom_softc != NULL,
                            ("%s: TOM activated but softc NULL", __func__));
                        KASSERT(sc->flags & TOM_INIT_DONE,
                            ("%s: TOM activated but flag not set", __func__));
                }

                setbit(&sc->offload_map, pi->port_id);
        } else {
                /* Already disabled on this port: nothing to do. */
                if (!isset(&sc->offload_map, pi->port_id))
                        return (0);

                KASSERT(sc->flags & TOM_INIT_DONE,
                    ("%s: TOM never initialized?", __func__));
                clrbit(&sc->offload_map, pi->port_id);
        }

        return (0);
}
7895
7896 /*
7897  * Add an upper layer driver to the global list.
7898  */
7899 int
7900 t4_register_uld(struct uld_info *ui)
7901 {
7902         int rc = 0;
7903         struct uld_info *u;
7904
7905         mtx_lock(&t4_uld_list_lock);
7906         SLIST_FOREACH(u, &t4_uld_list, link) {
7907             if (u->uld_id == ui->uld_id) {
7908                     rc = EEXIST;
7909                     goto done;
7910             }
7911         }
7912
7913         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7914         ui->refcount = 0;
7915 done:
7916         mtx_unlock(&t4_uld_list_lock);
7917         return (rc);
7918 }
7919
7920 int
7921 t4_unregister_uld(struct uld_info *ui)
7922 {
7923         int rc = EINVAL;
7924         struct uld_info *u;
7925
7926         mtx_lock(&t4_uld_list_lock);
7927
7928         SLIST_FOREACH(u, &t4_uld_list, link) {
7929             if (u == ui) {
7930                     if (ui->refcount > 0) {
7931                             rc = EBUSY;
7932                             goto done;
7933                     }
7934
7935                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7936                     rc = 0;
7937                     goto done;
7938             }
7939         }
7940 done:
7941         mtx_unlock(&t4_uld_list_lock);
7942         return (rc);
7943 }
7944
7945 int
7946 t4_activate_uld(struct adapter *sc, int id)
7947 {
7948         int rc = EAGAIN;
7949         struct uld_info *ui;
7950
7951         ASSERT_SYNCHRONIZED_OP(sc);
7952
7953         mtx_lock(&t4_uld_list_lock);
7954
7955         SLIST_FOREACH(ui, &t4_uld_list, link) {
7956                 if (ui->uld_id == id) {
7957                         rc = ui->activate(sc);
7958                         if (rc == 0)
7959                                 ui->refcount++;
7960                         goto done;
7961                 }
7962         }
7963 done:
7964         mtx_unlock(&t4_uld_list_lock);
7965
7966         return (rc);
7967 }
7968
7969 int
7970 t4_deactivate_uld(struct adapter *sc, int id)
7971 {
7972         int rc = EINVAL;
7973         struct uld_info *ui;
7974
7975         ASSERT_SYNCHRONIZED_OP(sc);
7976
7977         mtx_lock(&t4_uld_list_lock);
7978
7979         SLIST_FOREACH(ui, &t4_uld_list, link) {
7980                 if (ui->uld_id == id) {
7981                         rc = ui->deactivate(sc);
7982                         if (rc == 0)
7983                                 ui->refcount--;
7984                         goto done;
7985                 }
7986         }
7987 done:
7988         mtx_unlock(&t4_uld_list_lock);
7989
7990         return (rc);
7991 }
7992 #endif
7993
7994 /*
7995  * Come up with reasonable defaults for some of the tunables, provided they're
7996  * not set by the user (in which case we'll use the values as is).
7997  */
7998 static void
7999 tweak_tunables(void)
8000 {
8001         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
8002
8003         if (t4_ntxq10g < 1)
8004                 t4_ntxq10g = min(nc, NTXQ_10G);
8005
8006         if (t4_ntxq1g < 1)
8007                 t4_ntxq1g = min(nc, NTXQ_1G);
8008
8009         if (t4_nrxq10g < 1)
8010                 t4_nrxq10g = min(nc, NRXQ_10G);
8011
8012         if (t4_nrxq1g < 1)
8013                 t4_nrxq1g = min(nc, NRXQ_1G);
8014
8015 #ifdef TCP_OFFLOAD
8016         if (t4_nofldtxq10g < 1)
8017                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
8018
8019         if (t4_nofldtxq1g < 1)
8020                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
8021
8022         if (t4_nofldrxq10g < 1)
8023                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
8024
8025         if (t4_nofldrxq1g < 1)
8026                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
8027
8028         if (t4_toecaps_allowed == -1)
8029                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
8030 #else
8031         if (t4_toecaps_allowed == -1)
8032                 t4_toecaps_allowed = 0;
8033 #endif
8034
8035         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
8036                 t4_tmr_idx_10g = TMR_IDX_10G;
8037
8038         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
8039                 t4_pktc_idx_10g = PKTC_IDX_10G;
8040
8041         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
8042                 t4_tmr_idx_1g = TMR_IDX_1G;
8043
8044         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
8045                 t4_pktc_idx_1g = PKTC_IDX_1G;
8046
8047         if (t4_qsize_txq < 128)
8048                 t4_qsize_txq = 128;
8049
8050         if (t4_qsize_rxq < 128)
8051                 t4_qsize_rxq = 128;
8052         while (t4_qsize_rxq & 7)
8053                 t4_qsize_rxq++;
8054
8055         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
8056 }
8057
/*
 * Module event handler shared by the t4nex and t5nex drivers.  'loaded'
 * counts how many of the two driver modules are attached so the global
 * state (adapter list, ULD list, locks) is set up exactly once and torn
 * down only when the last one unloads.  Returns 0 or an errno.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
        int rc = 0;
        static int loaded = 0;

        switch (cmd) {
        case MOD_LOAD:
                /* Only the first load does the one-time initialization. */
                if (atomic_fetchadd_int(&loaded, 1))
                        break;
                t4_sge_modload();
                mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
                SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
                mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
                SLIST_INIT(&t4_uld_list);
#endif
                tweak_tunables();
                break;

        case MOD_UNLOAD:
                /* Only the last unload tears the global state down. */
                if (atomic_fetchadd_int(&loaded, -1) > 1)
                        break;
                /*
                 * NOTE(review): if either list is non-empty we return EBUSY,
                 * but 'loaded' has already been decremented above — confirm
                 * whether a failed unload leaves the count inconsistent for
                 * a subsequent unload attempt.
                 */
#ifdef TCP_OFFLOAD
                mtx_lock(&t4_uld_list_lock);
                if (!SLIST_EMPTY(&t4_uld_list)) {
                        rc = EBUSY;
                        mtx_unlock(&t4_uld_list_lock);
                        break;
                }
                mtx_unlock(&t4_uld_list_lock);
                mtx_destroy(&t4_uld_list_lock);
#endif
                mtx_lock(&t4_list_lock);
                if (!SLIST_EMPTY(&t4_list)) {
                        rc = EBUSY;
                        mtx_unlock(&t4_list_lock);
                        break;
                }
                mtx_unlock(&t4_list_lock);
                mtx_destroy(&t4_list_lock);
                break;
        }

        return (rc);
}
8104
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus driver attaches to PCI; mod_event does one-time module setup. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus driver shares mod_event (and its 'loaded' count) with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers, children of the nexus devices. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);