/*
 * FreeBSD stable/10: sys/dev/cxgbe/t4_main.c
 * (MFC of r261533, r261536, r261537, and r263457)
 */
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/*
 * T4 bus driver (t4nex) interface.  This is the nexus device that owns the
 * PCI function; the per-port network devices (cxgbe) attach as its children.
 */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)	/* softc size; newbus allocates it zeroed */
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
/*
 * Character device (/dev/t4nexN) used by cxgbetool and friends for register
 * i/o, memory reads, filter management, etc. via t4_ioctl().
 */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
116
/*
 * T5 bus driver (t5nex) interface.  Only probe differs from T4; attach and
 * detach are shared with the T4 driver (t4_attach/t4_detach handle both
 * chip generations).
 */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};
131
132
/*
 * T5 port (cxl) interface.  Reuses the cxgbe method table; only the device
 * name differs so T5 ports show up as cxlN.
 */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 nexus char device; same entry points as T4, different name. */
static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};
148
/* ifnet + media interface (implemented further down in this file) */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;		/* all attached adapters */
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;	/* registered upper-layer drivers */
#endif
169
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

/* Reserve txq 0 of each port for non-flowid traffic (see sysctl_noflowq). */
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.  tmr_idx/pktc_idx index into the
 * SGE's timer/packet-count tables; -1 for pktc disables packet counting.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.  "default"/"flash"/"uwire"/"fpga" select which config
 * is used to partition adapter resources (see partition_resources()).
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;	/* -1: decided in tweak_tunables() */
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5-only: use write-combined mappings of BAR2 doorbells. */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
296
/*
 * Result of cfg_itype_and_nqueues(): the interrupt type that was allocated
 * and how many rx/tx queues each port class (10G vs 1G) should get.
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

/* Software state for one hardware filter (see get/set/del_filter below). */
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

/* Bits for update_mac_settings(): which MAC attributes to (re)program. */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};
334
/* Forward declarations: BAR mapping, memory windows, attach-time setup. */
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
/* sysctl handlers */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
/* tx, filters, and ioctl helpers */
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);
439
/* PCI device-id -> description tables used by the probe routines. */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
496
497 static int
498 t4_probe(device_t dev)
499 {
500         int i;
501         uint16_t v = pci_get_vendor(dev);
502         uint16_t d = pci_get_device(dev);
503         uint8_t f = pci_get_function(dev);
504
505         if (v != PCI_VENDOR_ID_CHELSIO)
506                 return (ENXIO);
507
508         /* Attach only to PF0 of the FPGA */
509         if (d == 0xa000 && f != 0)
510                 return (ENXIO);
511
512         for (i = 0; i < nitems(t4_pciids); i++) {
513                 if (d == t4_pciids[i].device) {
514                         device_set_desc(dev, t4_pciids[i].desc);
515                         return (BUS_PROBE_DEFAULT);
516                 }
517         }
518
519         return (ENXIO);
520 }
521
522 static int
523 t5_probe(device_t dev)
524 {
525         int i;
526         uint16_t v = pci_get_vendor(dev);
527         uint16_t d = pci_get_device(dev);
528         uint8_t f = pci_get_function(dev);
529
530         if (v != PCI_VENDOR_ID_CHELSIO)
531                 return (ENXIO);
532
533         /* Attach only to PF0 of the FPGA */
534         if (d == 0xb000 && f != 0)
535                 return (ENXIO);
536
537         for (i = 0; i < nitems(t5_pciids); i++) {
538                 if (d == t5_pciids[i].device) {
539                         device_set_desc(dev, t5_pciids[i].desc);
540                         return (BUS_PROBE_DEFAULT);
541                 }
542         }
543
544         return (ENXIO);
545 }
546
/*
 * Attach for both t4nex and t5nex.  Brings the adapter up in stages: PCI
 * setup, locks, BAR0/4 mapping, CPL handler registration, chip prep,
 * memory windows + char device, firmware prep, per-port VI allocation,
 * interrupt/queue sizing, and finally child (port) attach.  On most errors
 * after the char device exists the adapter is left in "recovery mode"
 * (attach succeeds, cxgbetool still works) instead of failing outright.
 */
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Enable bus mastering, bump MRRS, and turn on relaxed ordering. */
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	sc->traceq = -1;
	/*
	 * NOTE(review): ifp_lock is initialized before ifp_lockname is
	 * filled in below.  Presumably mtx_init() keeps the name pointer
	 * (softc is zeroed, so it's an empty string until the snprintf) —
	 * confirm, otherwise swap these two statements.
	 */
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	/* Publish this adapter on the global list. */
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	/* Install "not handled" defaults for every CPL/async/fw message. */
	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/* 40G ports get the 10G-class holdoff defaults too. */
		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	/* Adapter-wide queue totals derived from the per-class counts. */
	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		/* Reserving txq 0 only makes sense with >1 txq. */
		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	/*
	 * If anything failed after the char device was created, keep the
	 * device attached in recovery mode so cxgbetool can still be used.
	 */
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);	/* t4_detach is idempotent; full teardown */
	else
		t4_sysctls(sc);

	return (rc);
}
853
/*
 * Idempotent
 *
 * Detach the adapter: quiesce interrupts, destroy the control device, detach
 * all child port devices, release every resource acquired by t4_attach, and
 * finally zero the softc so a repeated call is harmless.
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Stop interrupt delivery before tearing anything down. */
        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        /* Remove the cxgbetool character device first so no new ioctls
         * can race with the teardown below. */
        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /* Detach the cxgbe port children; abort detach if any fail. */
        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        /* Free each port's virtual interface and delete its device. */
        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Tell the firmware this PF is done with the adapter. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        /* Remove this adapter from the global list before killing its lock. */
        if (mtx_initialized(&sc->sc_lock)) {
                sx_xlock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                sx_xunlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);
        if (mtx_initialized(&sc->ifp_lock))
                mtx_destroy(&sc->ifp_lock);

        /* Zero the softc so a second detach call finds nothing to free. */
        bzero(sc, sizeof(*sc));

        return (0);
}
950
951
952 static int
953 cxgbe_probe(device_t dev)
954 {
955         char buf[128];
956         struct port_info *pi = device_get_softc(dev);
957
958         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
959         device_set_desc_copy(dev, buf);
960
961         return (BUS_PROBE_DEFAULT);
962 }
963
964 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
965     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
966     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
967 #define T4_CAP_ENABLE (T4_CAP)
968
/*
 * Attach a single port: allocate and configure its ifnet, set up
 * capabilities, media, and the VLAN config hook, then attach the
 * interface to the network stack.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        /* Driver entry points for the stack. */
        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        /* TOE capability is advertised but not enabled by default. */
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi);

        pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        /* Everything is set up; publish the interface. */
        ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                device_printf(dev,
                    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
                    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
        } else
#endif
                device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

        cxgbe_sysctls(pi);

        return (0);
}
1026
/*
 * Detach a port: mark it doomed, win the adapter's single-operation
 * (BUSY) token, tear down the tracer/VLAN hooks and queues, and detach
 * the ifnet from the stack.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait until we hold the adapter's exclusive-operation token. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        if (pi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

        /* Stop the tick callout; drain outside the lock it takes. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release the exclusive-operation token and wake any waiters. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
1076
/*
 * if_init handler: bring the port up under the adapter's synchronized-op
 * protocol.  Silently does nothing if the synchronized op can't be begun
 * (e.g. interrupted or the port is doomed).
 */
static void
cxgbe_init(void *arg)
{
        struct port_info *pi = arg;
        struct adapter *sc = pi->adapter;

        if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
                return;
        cxgbe_init_synchronized(pi);
        end_synchronized_op(sc, 0);
}
1088
/*
 * if_ioctl handler.  Every hardware-touching operation is bracketed by
 * begin/end_synchronized_op so only one configuration change runs at a
 * time on the adapter.
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags;
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
                        return (EINVAL);

                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (pi->flags & PORT_INIT_DONE) {
                        /* Resize rx free-list buffers for the new MTU. */
                        t4_update_fl_bufsize(ifp);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(pi, XGMAC_MTU);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
                if (rc)
                        return (rc);

                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                /* Already running: only push PROMISC/ALLMULTI
                                 * changes down to the MAC. */
                                flags = pi->if_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        rc = update_mac_settings(pi,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else
                                rc = cxgbe_init_synchronized(pi);
                        pi->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = cxgbe_uninit_synchronized(pi);
                end_synchronized_op(sc, 0);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI: /* these two are called with a mutex held :-( */
                rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(pi, XGMAC_MCADDRS);
                end_synchronized_op(sc, LOCK_HELD);
                break;

        case SIOCSIFCAP:
                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
                if (rc)
                        return (rc);

                /* mask = capabilities the caller wants toggled. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        /* TSO4 requires tx checksumming; drop it if txcsum
                         * was just turned off. */
                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        /* Same dependency for the IPv6 flavor. */
                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
                 * sending a TSO request our way, so it's sufficient to toggle
                 * IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
                        int i;
                        struct sge_rxq *rxq;

                        /* Propagate the LRO setting to every rx queue. */
                        ifp->if_capenable ^= IFCAP_LRO;
                        for_each_rxq(pi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_LRO)
                                        rxq->iq.flags |= IQ_LRO_ENABLED;
                                else
                                        rxq->iq.flags &= ~IQ_LRO_ENABLED;
                        }
#endif
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

                        rc = toe_capability(pi, enable);
                        if (rc != 0)
                                goto fail;

                        ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(pi, XGMAC_VLANEX);
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;

                        /* Need to find out how to disable auto-mtu-inflation */
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
fail:
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
                break;

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}
1262
/*
 * if_transmit handler.  Picks a tx queue (honoring the flowid and the
 * reserved no-flow queue), then either transmits directly if the queue
 * lock is available or enqueues on the queue's buf_ring for later.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Spread flows across the non-reserved queues; queue 0..rsrv_noflowq-1
         * is kept for traffic without a flowid. */
        if (m->m_flags & M_FLOWID)
                txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
                    + pi->rsrv_noflowq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
1330
1331 static void
1332 cxgbe_qflush(struct ifnet *ifp)
1333 {
1334         struct port_info *pi = ifp->if_softc;
1335         struct sge_txq *txq;
1336         int i;
1337         struct mbuf *m;
1338
1339         /* queues do not exist if !PORT_INIT_DONE. */
1340         if (pi->flags & PORT_INIT_DONE) {
1341                 for_each_txq(pi, i, txq) {
1342                         TXQ_LOCK(txq);
1343                         m_freem(txq->m);
1344                         txq->m = NULL;
1345                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1346                                 m_freem(m);
1347                         TXQ_UNLOCK(txq);
1348                 }
1349         }
1350         if_qflush(ifp);
1351 }
1352
1353 static int
1354 cxgbe_media_change(struct ifnet *ifp)
1355 {
1356         struct port_info *pi = ifp->if_softc;
1357
1358         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1359
1360         return (EOPNOTSUPP);
1361 }
1362
1363 static void
1364 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1365 {
1366         struct port_info *pi = ifp->if_softc;
1367         struct ifmedia_entry *cur = pi->media.ifm_cur;
1368         int speed = pi->link_cfg.speed;
1369         int data = (pi->port_type << 8) | pi->mod_type;
1370
1371         if (cur->ifm_data != data) {
1372                 build_medialist(pi);
1373                 cur = pi->media.ifm_cur;
1374         }
1375
1376         ifmr->ifm_status = IFM_AVALID;
1377         if (!pi->link_cfg.link_ok)
1378                 return;
1379
1380         ifmr->ifm_status |= IFM_ACTIVE;
1381
1382         /* active and current will differ iff current media is autoselect. */
1383         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1384                 return;
1385
1386         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1387         if (speed == SPEED_10000)
1388                 ifmr->ifm_active |= IFM_10G_T;
1389         else if (speed == SPEED_1000)
1390                 ifmr->ifm_active |= IFM_1000_T;
1391         else if (speed == SPEED_100)
1392                 ifmr->ifm_active |= IFM_100_TX;
1393         else if (speed == SPEED_10)
1394                 ifmr->ifm_active |= IFM_10_T;
1395         else
1396                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1397                             speed));
1398 }
1399
/*
 * Fatal error handler: turn off the SGE (clear GLOBALENABLE), silence all
 * interrupts, and log the event.  The adapter is left stopped.
 */
void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
1408
/*
 * Map BAR0 (register space; also the kernel doorbell) and BAR4 (MSI-X
 * tables).  Returns ENXIO if either mapping fails; resources are released
 * by t4_detach.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        /* The kernel doorbell lives in BAR0 and is always available. */
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
1434
/*
 * Map BAR2 (userspace doorbells).  Skipped entirely on T4 when RDMA is
 * disabled.  On T5, additionally tries to enable write combining on the
 * BAR so WC doorbell writes can be used.
 */
static int
map_bar_2(struct adapter *sc)
{

        /*
         * T4: only iWARP driver uses the userspace doorbells.  There is no need
         * to map it if RDMA is disabled.
         */
        if (is_t4(sc) && sc->rdmacaps == 0)
                return (0);

        sc->udbs_rid = PCIR_BAR(2);
        sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE);
        if (sc->udbs_res == NULL) {
                device_printf(sc->dev, "cannot map doorbell BAR.\n");
                return (ENXIO);
        }
        sc->udbs_base = rman_get_virtual(sc->udbs_res);

        if (is_t5(sc)) {
                setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
                if (t5_write_combine) {
                        int rc;

                        /*
                         * Enable write combining on BAR2.  This is the
                         * userspace doorbell BAR and is split into 128B
                         * (UDBS_SEG_SIZE) doorbell regions, each associated
                         * with an egress queue.  The first 64B has the doorbell
                         * and the second 64B can be used to submit a tx work
                         * request with an implicit doorbell.
                         */

                        rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
                            rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
                        if (rc == 0) {
                                /* WC worked: prefer the WC doorbell variants
                                 * over the plain UDB one set above. */
                                clrbit(&sc->doorbells, DOORBELL_UDB);
                                setbit(&sc->doorbells, DOORBELL_WCWR);
                                setbit(&sc->doorbells, DOORBELL_UDBWC);
                        } else {
                                device_printf(sc->dev,
                                    "couldn't enable write combining: %d\n",
                                    rc);
                        }

                        t4_write_reg(sc, A_SGE_STAT_CFG,
                            V_STATSOURCE_T5(7) | V_STATMODE(0));
                }
#endif
        }

        return (0);
}
1490
/*
 * Memory window base addresses and aperture sizes, indexed by window
 * number.  Window 2 differs between T4 and T5.
 */
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1502
/*
 * Program the chip's PCIe memory window decoders from the t4/t5_memwin
 * table appropriate for this adapter.
 */
static void
setup_memwin(struct adapter *sc)
{
        const struct memwin *mw;
        int i, n;
        uint32_t bar0;

        if (is_t4(sc)) {
                /*
                 * Read low 32b of bar0 indirectly via the hardware backdoor
                 * mechanism.  Works from within PCI passthrough environments
                 * too, where rman_get_start() can return a different value.  We
                 * need to program the T4 memory window decoders with the actual
                 * addresses that will be coming across the PCIe link.
                 */
                bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
                bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

                mw = &t4_memwin[0];
                n = nitems(t4_memwin);
        } else {
                /* T5 uses the relative offset inside the PCIe BAR */
                bar0 = 0;

                mw = &t5_memwin[0];
                n = nitems(t5_memwin);
        }

        /* Window size field encodes log2(aperture) - 10. */
        for (i = 0; i < n; i++, mw++) {
                t4_write_reg(sc,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
                    (mw->base + bar0) | V_BIR(0) |
                    V_WINDOW(ilog2(mw->aperture) - 10));
        }

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1541
/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 *
 * Returns 0 on success, EINVAL for misaligned/zero-length requests, and
 * EFAULT when the range does not fit in any enabled memory region.
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
        uint32_t em, addr_len, maddr, mlen;

        /* Memory can only be accessed in naturally aligned 4 byte units */
        if (addr & 3 || len & 3 || len == 0)
                return (EINVAL);

        /* Enabled memories */
        em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
        /* Each BAR register encodes base and size in units of 1MB (<< 20). */
        if (em & F_EDRAM0_ENABLE) {
                addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
                maddr = G_EDRAM0_BASE(addr_len) << 20;
                mlen = G_EDRAM0_SIZE(addr_len) << 20;
                if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
                    addr + len <= maddr + mlen)
                        return (0);
        }
        if (em & F_EDRAM1_ENABLE) {
                addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
                maddr = G_EDRAM1_BASE(addr_len) << 20;
                mlen = G_EDRAM1_SIZE(addr_len) << 20;
                if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
                    addr + len <= maddr + mlen)
                        return (0);
        }
        if (em & F_EXT_MEM_ENABLE) {
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
                maddr = G_EXT_MEM_BASE(addr_len) << 20;
                mlen = G_EXT_MEM_SIZE(addr_len) << 20;
                if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
                    addr + len <= maddr + mlen)
                        return (0);
        }
        /* EXT_MEM1 exists on T5 and later only. */
        if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
                maddr = G_EXT_MEM1_BASE(addr_len) << 20;
                mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
                if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
                    addr + len <= maddr + mlen)
                        return (0);
        }

        return (EFAULT);
}
1592
1593 static int
1594 fwmtype_to_hwmtype(int mtype)
1595 {
1596
1597         switch (mtype) {
1598         case FW_MEMTYPE_EDC0:
1599                 return (MEM_EDC0);
1600         case FW_MEMTYPE_EDC1:
1601                 return (MEM_EDC1);
1602         case FW_MEMTYPE_EXTMEM:
1603                 return (MEM_MC0);
1604         case FW_MEMTYPE_EXTMEM1:
1605                 return (MEM_MC1);
1606         default:
1607                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1608         }
1609 }
1610
/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 *
 * Returns 0 on success (with *addr set), EINVAL for bad alignment or a
 * disabled/unknown memory type, EFAULT when the range does not fit.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
        uint32_t em, addr_len, maddr, mlen;

        /* Memory can only be accessed in naturally aligned 4 byte units */
        if (off & 3 || len & 3 || len == 0)
                return (EINVAL);

        em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
        /* NOTE(review): fwmtype_to_hwmtype() returns MEM_MC0 for EXTMEM;
         * this case label assumes MEM_MC aliases MEM_MC0 — confirm in the
         * enum definition. */
        switch (fwmtype_to_hwmtype(mtype)) {
        case MEM_EDC0:
                if (!(em & F_EDRAM0_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
                maddr = G_EDRAM0_BASE(addr_len) << 20;
                mlen = G_EDRAM0_SIZE(addr_len) << 20;
                break;
        case MEM_EDC1:
                if (!(em & F_EDRAM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
                maddr = G_EDRAM1_BASE(addr_len) << 20;
                mlen = G_EDRAM1_SIZE(addr_len) << 20;
                break;
        case MEM_MC:
                if (!(em & F_EXT_MEM_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
                maddr = G_EXT_MEM_BASE(addr_len) << 20;
                mlen = G_EXT_MEM_SIZE(addr_len) << 20;
                break;
        case MEM_MC1:
                /* Second external memory channel exists on T5+ only. */
                if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
                maddr = G_EXT_MEM1_BASE(addr_len) << 20;
                mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
                break;
        default:
                return (EINVAL);
        }

        if (mlen > 0 && off < mlen && off + len <= mlen) {
                *addr = maddr + off;    /* global address */
                return (0);
        }

        return (EFAULT);
}
1667
1668 static void
1669 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1670 {
1671         const struct memwin *mw;
1672
1673         if (is_t4(sc)) {
1674                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1675                     ("%s: incorrect memwin# (%d)", __func__, win));
1676                 mw = &t4_memwin[win];
1677         } else {
1678                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1679                     ("%s: incorrect memwin# (%d)", __func__, win));
1680                 mw = &t5_memwin[win];
1681         }
1682
1683         if (base != NULL)
1684                 *base = mw->base;
1685         if (aperture != NULL)
1686                 *aperture = mw->aperture;
1687 }
1688
1689 /*
1690  * Positions the memory window such that it can be used to access the specified
1691  * address in the chip's address space.  The return value is the offset of addr
1692  * from the start of the window.
1693  */
1694 static uint32_t
1695 position_memwin(struct adapter *sc, int n, uint32_t addr)
1696 {
1697         uint32_t start, pf;
1698         uint32_t reg;
1699
1700         KASSERT(n >= 0 && n <= 3,
1701             ("%s: invalid window %d.", __func__, n));
1702         KASSERT((addr & 3) == 0,
1703             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1704
1705         if (is_t4(sc)) {
1706                 pf = 0;
1707                 start = addr & ~0xf;    /* start must be 16B aligned */
1708         } else {
1709                 pf = V_PFNUM(sc->pf);
1710                 start = addr & ~0x7f;   /* start must be 128B aligned */
1711         }
1712         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1713
1714         t4_write_reg(sc, reg, start | pf);
1715         t4_read_reg(sc, reg);
1716
1717         return (addr - start);
1718 }
1719
1720 static int
1721 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1722     struct intrs_and_queues *iaq)
1723 {
1724         int rc, itype, navail, nrxq10g, nrxq1g, n;
1725         int nofldrxq10g = 0, nofldrxq1g = 0;
1726
1727         bzero(iaq, sizeof(*iaq));
1728
1729         iaq->ntxq10g = t4_ntxq10g;
1730         iaq->ntxq1g = t4_ntxq1g;
1731         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1732         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1733         iaq->rsrv_noflowq = t4_rsrv_noflowq;
1734 #ifdef TCP_OFFLOAD
1735         if (is_offload(sc)) {
1736                 iaq->nofldtxq10g = t4_nofldtxq10g;
1737                 iaq->nofldtxq1g = t4_nofldtxq1g;
1738                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1739                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1740         }
1741 #endif
1742
1743         for (itype = INTR_MSIX; itype; itype >>= 1) {
1744
1745                 if ((itype & t4_intr_types) == 0)
1746                         continue;       /* not allowed */
1747
1748                 if (itype == INTR_MSIX)
1749                         navail = pci_msix_count(sc->dev);
1750                 else if (itype == INTR_MSI)
1751                         navail = pci_msi_count(sc->dev);
1752                 else
1753                         navail = 1;
1754 restart:
1755                 if (navail == 0)
1756                         continue;
1757
1758                 iaq->intr_type = itype;
1759                 iaq->intr_flags = 0;
1760
1761                 /*
1762                  * Best option: an interrupt vector for errors, one for the
1763                  * firmware event queue, and one each for each rxq (NIC as well
1764                  * as offload).
1765                  */
1766                 iaq->nirq = T4_EXTRA_INTR;
1767                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1768                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1769                 if (iaq->nirq <= navail &&
1770                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1771                         iaq->intr_flags |= INTR_DIRECT;
1772                         goto allocate;
1773                 }
1774
1775                 /*
1776                  * Second best option: an interrupt vector for errors, one for
1777                  * the firmware event queue, and one each for either NIC or
1778                  * offload rxq's.
1779                  */
1780                 iaq->nirq = T4_EXTRA_INTR;
1781                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1782                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1783                 if (iaq->nirq <= navail &&
1784                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1785                         goto allocate;
1786
1787                 /*
1788                  * Next best option: an interrupt vector for errors, one for the
1789                  * firmware event queue, and at least one per port.  At this
1790                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1791                  * what's available to us.
1792                  */
1793                 iaq->nirq = T4_EXTRA_INTR;
1794                 iaq->nirq += n10g + n1g;
1795                 if (iaq->nirq <= navail) {
1796                         int leftover = navail - iaq->nirq;
1797
1798                         if (n10g > 0) {
1799                                 int target = max(nrxq10g, nofldrxq10g);
1800
1801                                 n = 1;
1802                                 while (n < target && leftover >= n10g) {
1803                                         leftover -= n10g;
1804                                         iaq->nirq += n10g;
1805                                         n++;
1806                                 }
1807                                 iaq->nrxq10g = min(n, nrxq10g);
1808 #ifdef TCP_OFFLOAD
1809                                 if (is_offload(sc))
1810                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1811 #endif
1812                         }
1813
1814                         if (n1g > 0) {
1815                                 int target = max(nrxq1g, nofldrxq1g);
1816
1817                                 n = 1;
1818                                 while (n < target && leftover >= n1g) {
1819                                         leftover -= n1g;
1820                                         iaq->nirq += n1g;
1821                                         n++;
1822                                 }
1823                                 iaq->nrxq1g = min(n, nrxq1g);
1824 #ifdef TCP_OFFLOAD
1825                                 if (is_offload(sc))
1826                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1827 #endif
1828                         }
1829
1830                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1831                                 goto allocate;
1832                 }
1833
1834                 /*
1835                  * Least desirable option: one interrupt vector for everything.
1836                  */
1837                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1838 #ifdef TCP_OFFLOAD
1839                 if (is_offload(sc))
1840                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1841 #endif
1842
1843 allocate:
1844                 navail = iaq->nirq;
1845                 rc = 0;
1846                 if (itype == INTR_MSIX)
1847                         rc = pci_alloc_msix(sc->dev, &navail);
1848                 else if (itype == INTR_MSI)
1849                         rc = pci_alloc_msi(sc->dev, &navail);
1850
1851                 if (rc == 0) {
1852                         if (navail == iaq->nirq)
1853                                 return (0);
1854
1855                         /*
1856                          * Didn't get the number requested.  Use whatever number
1857                          * the kernel is willing to allocate (it's in navail).
1858                          */
1859                         device_printf(sc->dev, "fewer vectors than requested, "
1860                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1861                             itype, iaq->nirq, navail);
1862                         pci_release_msi(sc->dev);
1863                         goto restart;
1864                 }
1865
1866                 device_printf(sc->dev,
1867                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1868                     itype, rc, iaq->nirq, navail);
1869         }
1870
1871         device_printf(sc->dev,
1872             "failed to find a usable interrupt type.  "
1873             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1874             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1875
1876         return (ENXIO);
1877 }
1878
/*
 * Assemble the 32-bit firmware version word from the per-chip
 * MAJOR/MINOR/MICRO/BUILD constants; 'chip' is the T4/T5 token pasted onto
 * the constant names.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface version constant for one firmware interface (NIC, OFLD, ...). */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)

/*
 * Per-chip firmware bookkeeping: the KLD that carries the config files, the
 * module that carries the firmware image, and the firmware header the driver
 * was compiled against (used for compatibility checks in fw_compatible()).
 */
struct fw_info {
        uint8_t chip;           /* chip id, e.g. CHELSIO_T4 */
        char *kld_name;         /* KLD with the default config file */
        char *fw_mod_name;      /* firmware image module */
        struct fw_hdr fw_hdr;   /* XXX: waste of space, need a sparse struct */
} fw_info[] = {
        {
                .chip = CHELSIO_T4,
                .kld_name = "t4fw_cfg",
                .fw_mod_name = "t4fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T4,
                        .fw_ver = htobe32_const(FW_VERSION(T4)),
                        .intfver_nic = FW_INTFVER(T4, NIC),
                        .intfver_vnic = FW_INTFVER(T4, VNIC),
                        .intfver_ofld = FW_INTFVER(T4, OFLD),
                        .intfver_ri = FW_INTFVER(T4, RI),
                        .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
                },
        }, {
                .chip = CHELSIO_T5,
                .kld_name = "t5fw_cfg",
                .fw_mod_name = "t5fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T5,
                        .fw_ver = htobe32_const(FW_VERSION(T5)),
                        .intfver_nic = FW_INTFVER(T5, NIC),
                        .intfver_vnic = FW_INTFVER(T5, VNIC),
                        .intfver_ofld = FW_INTFVER(T5, OFLD),
                        .intfver_ri = FW_INTFVER(T5, RI),
                        .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
                },
        }
};
1926
1927 static struct fw_info *
1928 find_fw_info(int chip)
1929 {
1930         int i;
1931
1932         for (i = 0; i < nitems(fw_info); i++) {
1933                 if (fw_info[i].chip == chip)
1934                         return (&fw_info[i]);
1935         }
1936         return (NULL);
1937 }
1938
1939 /*
1940  * Is the given firmware API compatible with the one the driver was compiled
1941  * with?
1942  */
1943 static int
1944 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1945 {
1946
1947         /* short circuit if it's the exact same firmware version */
1948         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1949                 return (1);
1950
1951         /*
1952          * XXX: Is this too conservative?  Perhaps I should limit this to the
1953          * features that are supported in the driver.
1954          */
1955 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1956         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1957             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1958             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1959                 return (1);
1960 #undef SAME_INTF
1961
1962         return (0);
1963 }
1964
1965 /*
1966  * The firmware in the KLD is usable, but should it be installed?  This routine
1967  * explains itself in detail if it indicates the KLD firmware should be
1968  * installed.
1969  */
1970 static int
1971 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1972 {
1973         const char *reason;
1974
1975         if (!card_fw_usable) {
1976                 reason = "incompatible or unusable";
1977                 goto install;
1978         }
1979
1980         if (k > c) {
1981                 reason = "older than the version bundled with this driver";
1982                 goto install;
1983         }
1984
1985         if (t4_fw_install == 2 && k != c) {
1986                 reason = "different than the version bundled with this driver";
1987                 goto install;
1988         }
1989
1990         return (0);
1991
1992 install:
1993         if (t4_fw_install == 0) {
1994                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1995                     "but the driver is prohibited from installing a different "
1996                     "firmware on the card.\n",
1997                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1998                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1999
2000                 return (0);
2001         }
2002
2003         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2004             "installing firmware %u.%u.%u.%u on card.\n",
2005             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2006             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2007             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2008             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2009
2010         return (1);
2011 }
2012 /*
2013  * Establish contact with the firmware and determine if we are the master driver
2014  * or not, and whether we are responsible for chip initialization.
2015  */
static int
prep_firmware(struct adapter *sc)
{
        const struct firmware *fw = NULL, *default_cfg;
        int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
        enum dev_state state;
        struct fw_info *fw_info;
        struct fw_hdr *card_fw;         /* fw on the card */
        const struct fw_hdr *kld_fw;    /* fw in the KLD */
        const struct fw_hdr *drv_fw;    /* fw header the driver was compiled
                                           against */

        /* Contact firmware. */
        rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
        if (rc < 0 || state == DEV_STATE_ERR) {
                rc = -rc;
                device_printf(sc->dev,
                    "failed to connect to the firmware: %d, %d.\n", rc, state);
                return (rc);
        }
        /* NOTE(review): a non-negative rc appears to be the master's pf
         * (it is compared against our mbox below) — confirm against
         * t4_fw_hello(). */
        pf = rc;
        if (pf == sc->mbox)
                sc->flags |= MASTER_PF;
        else if (state == DEV_STATE_UNINIT) {
                /*
                 * We didn't get to be the master so we definitely won't be
                 * configuring the chip.  It's a bug if someone else hasn't
                 * configured it already.
                 */
                device_printf(sc->dev, "couldn't be master(%d), "
                    "device not already initialized either(%d).\n", rc, state);
                return (EDOOFUS);
        }

        /* This is the firmware whose headers the driver was compiled against */
        fw_info = find_fw_info(chip_id(sc));
        if (fw_info == NULL) {
                device_printf(sc->dev,
                    "unable to look up firmware information for chip %d.\n",
                    chip_id(sc));
                return (EINVAL);
        }
        drv_fw = &fw_info->fw_hdr;

        /*
         * The firmware KLD contains many modules.  The KLD name is also the
         * name of the module that contains the default config file.
         */
        default_cfg = firmware_get(fw_info->kld_name);

        /* Read the header of the firmware on the card */
        card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_read_flash(sc, FLASH_FW_START,
            sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
        if (rc == 0)
                card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
        else {
                device_printf(sc->dev,
                    "Unable to read card's firmware header: %d\n", rc);
                card_fw_usable = 0;
        }

        /* This is the firmware in the KLD */
        fw = firmware_get(fw_info->fw_mod_name);
        if (fw != NULL) {
                kld_fw = (const void *)fw->data;
                kld_fw_usable = fw_compatible(drv_fw, kld_fw);
        } else {
                kld_fw = NULL;
                kld_fw_usable = 0;
        }

        if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
            (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
                /*
                 * Common case: the firmware on the card is an exact match and
                 * the KLD is an exact match too, or the KLD is
                 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
                 * here -- use cxgbetool loadfw if you want to reinstall the
                 * same firmware as the one on the card.
                 */
        } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
            should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
            be32toh(card_fw->fw_ver))) {

                /* Flash the KLD's firmware onto the card. */
                rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to install firmware: %d\n", rc);
                        goto done;
                }

                /* Installed successfully, update the cached header too. */
                memcpy(card_fw, kld_fw, sizeof(*card_fw));
                card_fw_usable = 1;
                need_fw_reset = 0;      /* already reset as part of load_fw */
        }

        if (!card_fw_usable) {
                /* No usable firmware anywhere; report all three versions. */
                uint32_t d, c, k;

                d = ntohl(drv_fw->fw_ver);
                c = ntohl(card_fw->fw_ver);
                k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

                device_printf(sc->dev, "Cannot find a usable firmware: "
                    "fw_install %d, chip state %d, "
                    "driver compiled with %d.%d.%d.%d, "
                    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
                    t4_fw_install, state,
                    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
                    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
                    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
                    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
                    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
                    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
                rc = EINVAL;
                goto done;
        }

        /* We're using whatever's on the card and it's known to be good. */
        sc->params.fw_vers = ntohl(card_fw->fw_ver);
        snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
            G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
            G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
        t4_get_tp_version(sc, &sc->params.tp_vers);

        /* Reset device */
        if (need_fw_reset &&
            (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
                device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
                /* Skip the goodbye on errors that suggest the fw is wedged. */
                if (rc != ETIMEDOUT && rc != EIO)
                        t4_fw_bye(sc, sc->mbox);
                goto done;
        }
        sc->flags |= FW_OK;

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /* Partition adapter resources as specified in the config file. */
        if (state == DEV_STATE_UNINIT) {

                KASSERT(sc->flags & MASTER_PF,
                    ("%s: trying to change chip settings when not master.",
                    __func__));

                rc = partition_resources(sc, default_cfg, fw_info->kld_name);
                if (rc != 0)
                        goto done;      /* error message displayed already */

                t4_tweak_chip_settings(sc);

                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        device_printf(sc->dev, "fw init failed: %d.\n", rc);
                        goto done;
                }
        } else {
                /* Someone else configured the chip; record which pf did. */
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
                sc->cfcsum = 0;
        }

done:
        /* Release the flash header copy and any firmware references taken. */
        free(card_fw, M_CXGBE);
        if (fw != NULL)
                firmware_put(fw, FIRMWARE_UNLOAD);
        if (default_cfg != NULL)
                firmware_put(default_cfg, FIRMWARE_UNLOAD);

        return (rc);
}
2192
/*
 * Convenience macros that build the mnemonic+param words used to query or set
 * device-wide (DEV) and per-function (PFVF) firmware parameters.
 */
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2199
2200 /*
2201  * Partition chip resources for use between various PFs, VFs, etc.
2202  */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
        const struct firmware *cfg = NULL;
        int rc = 0;
        struct fw_caps_config_cmd caps;
        uint32_t mtype, moff, finicsum, cfcsum;

        /*
         * Figure out what configuration file to use.  Pick the default config
         * file for the card if the user hasn't specified one explicitly.
         */
        snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
        if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
                /* Card specific overrides go here. */
                if (pci_get_device(sc->dev) == 0x440a)
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
                if (is_fpga(sc))
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
        }

        /*
         * We need to load another module if the profile is anything except
         * "default" or "flash".
         */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
            strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                char s[32];

                /* Profile modules are named "<fw kld prefix>_<profile>". */
                snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
                cfg = firmware_get(s);
                if (cfg == NULL) {
                        /* Fall back to the default config, or flash. */
                        if (default_cfg != NULL) {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the default config file instead.\n",
                                    s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", DEFAULT_CF);
                        } else {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the config file on the card's flash "
                                    "instead.\n", s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", FLASH_CF);
                        }
                }
        }

        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
            default_cfg == NULL) {
                device_printf(sc->dev,
                    "default config file not available, will use the config "
                    "file on the card's flash instead.\n");
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
        }

        /* Anything other than "flash" must be uploaded to card memory. */
        if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                u_int cflen, i, n;
                const uint32_t *cfdata;
                uint32_t param, val, addr, off, mw_base, mw_aperture;

                KASSERT(cfg != NULL || default_cfg != NULL,
                    ("%s: no config to upload", __func__));

                /*
                 * Ask the firmware where it wants us to upload the config file.
                 */
                param = FW_PARAM_DEV(CF);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* No support for config file?  Shouldn't happen. */
                        device_printf(sc->dev,
                            "failed to query config file location: %d.\n", rc);
                        goto done;
                }
                mtype = G_FW_PARAMS_PARAM_Y(val);
                moff = G_FW_PARAMS_PARAM_Z(val) << 16;

                /*
                 * XXX: sheer laziness.  We deliberately added 4 bytes of
                 * useless stuffing/comments at the end of the config file so
                 * it's ok to simply throw away the last remaining bytes when
                 * the config file is not an exact multiple of 4.  This also
                 * helps with the validate_mt_off_len check.
                 */
                if (cfg != NULL) {
                        cflen = cfg->datasize & ~3;
                        cfdata = cfg->data;
                } else {
                        cflen = default_cfg->datasize & ~3;
                        cfdata = default_cfg->data;
                }

                if (cflen > FLASH_CFG_MAX_SIZE) {
                        device_printf(sc->dev,
                            "config file too long (%d, max allowed is %d).  "
                            "Will try to use the config on the card, if any.\n",
                            cflen, FLASH_CFG_MAX_SIZE);
                        goto use_config_on_flash;
                }

                rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                            "Will try to use the config on the card, if any.\n",
                            __func__, mtype, moff, cflen, rc);
                        goto use_config_on_flash;
                }

                /*
                 * Copy the config file into card memory through memory
                 * window 2, repositioning the window whenever the remaining
                 * data won't fit in the current aperture.
                 */
                memwin_info(sc, 2, &mw_base, &mw_aperture);
                while (cflen) {
                        off = position_memwin(sc, 2, addr);
                        n = min(cflen, mw_aperture - off);
                        for (i = 0; i < n; i += 4)
                                t4_write_reg(sc, mw_base + off + i, *cfdata++);
                        cflen -= n;
                        addr += n;
                }
        } else {
use_config_on_flash:
                mtype = FW_MEMTYPE_FLASH;
                moff = t4_flash_cfg_addr(sc);
        }

        /* Tell the firmware to process the config file we placed. */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to pre-process config file: %d "
                    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
                goto done;
        }

        /* A checksum mismatch is reported but is not fatal. */
        finicsum = be32toh(caps.finicsum);
        cfcsum = be32toh(caps.cfcsum);
        if (finicsum != cfcsum) {
                device_printf(sc->dev,
                    "WARNING: config file checksum mismatch: %08x %08x\n",
                    finicsum, cfcsum);
        }
        sc->cfcsum = cfcsum;

/*
 * caps.x is big-endian on the wire, so the allowed-mask is converted with
 * htobe16 before masking.  NOTE(review): the second line reads more naturally
 * as be16toh(caps.x) (BE -> host); htobe16 and be16toh perform the identical
 * byte swap so the stored value is the same either way — confirm intent.
 */
#define LIMIT_CAPS(x) do { \
        caps.x &= htobe16(t4_##x##_allowed); \
        sc->x = htobe16(caps.x); \
} while (0)

        /*
         * Let the firmware know what features will (not) be used so it can tune
         * things accordingly.
         */
        LIMIT_CAPS(linkcaps);
        LIMIT_CAPS(niccaps);
        LIMIT_CAPS(toecaps);
        LIMIT_CAPS(rdmacaps);
        LIMIT_CAPS(iscsicaps);
        LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

        /* Write the (possibly reduced) capabilities back to the firmware. */
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to process config file: %d.\n", rc);
        }
done:
        if (cfg != NULL)
                firmware_put(cfg, FIRMWARE_UNLOAD);
        return (rc);
}
2386
2387 /*
2388  * Retrieve parameters that are needed (or nice to have) very early.
2389  */
2390 static int
2391 get_params__pre_init(struct adapter *sc)
2392 {
2393         int rc;
2394         uint32_t param[2], val[2];
2395         struct fw_devlog_cmd cmd;
2396         struct devlog_params *dlog = &sc->params.devlog;
2397
2398         param[0] = FW_PARAM_DEV(PORTVEC);
2399         param[1] = FW_PARAM_DEV(CCLK);
2400         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2401         if (rc != 0) {
2402                 device_printf(sc->dev,
2403                     "failed to query parameters (pre_init): %d.\n", rc);
2404                 return (rc);
2405         }
2406
2407         sc->params.portvec = val[0];
2408         sc->params.nports = bitcount32(val[0]);
2409         sc->params.vpd.cclk = val[1];
2410
2411         /* Read device log parameters. */
2412         bzero(&cmd, sizeof(cmd));
2413         cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2414             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2415         cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2416         rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2417         if (rc != 0) {
2418                 device_printf(sc->dev,
2419                     "failed to get devlog parameters: %d.\n", rc);
2420                 bzero(dlog, sizeof (*dlog));
2421                 rc = 0; /* devlog isn't critical for device operation */
2422         } else {
2423                 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2424                 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2425                 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2426                 dlog->size = be32toh(cmd.memsize_devlog);
2427         }
2428
2429         return (rc);
2430 }
2431
/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	/* Queue, filter, and L2 table ranges assigned to this function. */
	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;	/* *_END values are inclusive */
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));

	/* get capabilites */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		/* Half of the TID space for active opens, capped at MAX_ATIDS. */
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (caps.rdmacaps) {
		/* RDMA resource ranges, fetched in two batches of six. */
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps.iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}

	/*
	 * We've got the params we wanted to query via the firmware.  Now grab
	 * some others directly from the chip.
	 */
	rc = t4_read_chip_settings(sc);

	return (rc);
}
2561
2562 static int
2563 set_params__post_init(struct adapter *sc)
2564 {
2565         uint32_t param, val;
2566
2567         /* ask for encapsulated CPLs */
2568         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2569         val = 1;
2570         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2571
2572         return (0);
2573 }
2574
2575 #undef FW_PARAM_PFVF
2576 #undef FW_PARAM_DEV
2577
2578 static void
2579 t4_set_desc(struct adapter *sc)
2580 {
2581         char buf[128];
2582         struct adapter_params *p = &sc->params;
2583
2584         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2585             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2586             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2587
2588         device_set_desc_copy(sc->dev, buf);
2589 }
2590
/*
 * Rebuild the port's ifmedia list based on its port type and the module
 * (transceiver) currently plugged in.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	/* Start from scratch; the list depends on the current module. */
	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	/* Driver-private media word encodes both port and module type. */
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		/* BASE-T ports: advertise all supported speeds + autoneg. */
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* 10G pluggable ports: media depends on the module type. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Cage is empty; no duplex, no media. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			/* Unrecognized/unsupported module in a known port. */
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_QSFP:
		/* 40G QSFP port: media depends on the module type. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Cage is empty; no duplex, no media. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2716
2717 #define FW_MAC_EXACT_CHUNK      7
2718
/*
 * Program the port's XGMAC based on parameters in ifnet.  The caller also
 * indicates which parameters should be programmed (the rest are left alone).
 *
 * flags is a combination of the XGMAC_* bits (XGMAC_MTU, XGMAC_PROMISC,
 * XGMAC_ALLMULTI, XGMAC_VLANEX, XGMAC_UCADDR, XGMAC_MCADDRS).  Returns 0 on
 * success or a positive errno-style value from the firmware on failure.
 */
static int
update_mac_settings(struct port_info *pi, int flags)
{
	int rc;
	struct ifnet *ifp = pi->ifp;
	struct adapter *sc = pi->adapter;
	/* -1 means "leave this setting unchanged" to t4_set_rxmode. */
	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT(flags, ("%s: not told what to update.", __func__));

	if (flags & XGMAC_MTU)
		mtu = ifp->if_mtu;

	if (flags & XGMAC_PROMISC)
		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

	if (flags & XGMAC_ALLMULTI)
		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

	if (flags & XGMAC_VLANEX)
		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
	    vlanex, false);
	if (rc) {
		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
		return (rc);
	}

	if (flags & XGMAC_UCADDR) {
		uint8_t ucaddr[ETHER_ADDR_LEN];

		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
		/*
		 * t4_change_mac returns the index of the exact-match filter
		 * used (>= 0) on success, or a negative error.
		 */
		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
		    ucaddr, true, true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "change_mac failed: %d\n", rc);
			return (rc);
		} else {
			pi->xact_addr_filt = rc;	/* remember filter index */
			rc = 0;
		}
	}

	if (flags & XGMAC_MCADDRS) {
		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
		int del = 1;	/* wipe old filters on the first chunk only */
		uint64_t hash = 0;
		struct ifmultiaddr *ifma;
		int i = 0, j;

		/*
		 * Walk the interface's multicast list and program the MAC
		 * filters in chunks of FW_MAC_EXACT_CHUNK addresses.
		 */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mcaddr[i++] =
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

			if (i == FW_MAC_EXACT_CHUNK) {
				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
				    del, i, mcaddr, NULL, &hash, 0);
				if (rc < 0) {
					rc = -rc;
					for (j = 0; j < i; j++) {
						if_printf(ifp,
						    "failed to add mc address"
						    " %02x:%02x:%02x:"
						    "%02x:%02x:%02x rc=%d\n",
						    mcaddr[j][0], mcaddr[j][1],
						    mcaddr[j][2], mcaddr[j][3],
						    mcaddr[j][4], mcaddr[j][5],
						    rc);
					}
					goto mcfail;
				}
				del = 0;
				i = 0;
			}
		}
		/* Flush the final, partial chunk (if any). */
		if (i > 0) {
			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
			    del, i, mcaddr, NULL, &hash, 0);
			if (rc < 0) {
				rc = -rc;
				for (j = 0; j < i; j++) {
					if_printf(ifp,
					    "failed to add mc address"
					    " %02x:%02x:%02x:"
					    "%02x:%02x:%02x rc=%d\n",
					    mcaddr[j][0], mcaddr[j][1],
					    mcaddr[j][2], mcaddr[j][3],
					    mcaddr[j][4], mcaddr[j][5],
					    rc);
				}
				goto mcfail;
			}
		}

		/* Program the hash filter accumulated by t4_alloc_mac_filt. */
		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
		if_maddr_runlock(ifp);
	}

	return (rc);
}
2832
2833 int
2834 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2835     char *wmesg)
2836 {
2837         int rc, pri;
2838
2839 #ifdef WITNESS
2840         /* the caller thinks it's ok to sleep, but is it really? */
2841         if (flags & SLEEP_OK)
2842                 pause("t4slptst", 1);
2843 #endif
2844
2845         if (INTR_OK)
2846                 pri = PCATCH;
2847         else
2848                 pri = 0;
2849
2850         ADAPTER_LOCK(sc);
2851         for (;;) {
2852
2853                 if (pi && IS_DOOMED(pi)) {
2854                         rc = ENXIO;
2855                         goto done;
2856                 }
2857
2858                 if (!IS_BUSY(sc)) {
2859                         rc = 0;
2860                         break;
2861                 }
2862
2863                 if (!(flags & SLEEP_OK)) {
2864                         rc = EBUSY;
2865                         goto done;
2866                 }
2867
2868                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2869                         rc = EINTR;
2870                         goto done;
2871                 }
2872         }
2873
2874         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2875         SET_BUSY(sc);
2876 #ifdef INVARIANTS
2877         sc->last_op = wmesg;
2878         sc->last_op_thr = curthread;
2879 #endif
2880
2881 done:
2882         if (!(flags & HOLD_LOCK) || rc)
2883                 ADAPTER_UNLOCK(sc);
2884
2885         return (rc);
2886 }
2887
2888 void
2889 end_synchronized_op(struct adapter *sc, int flags)
2890 {
2891
2892         if (flags & LOCK_HELD)
2893                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2894         else
2895                 ADAPTER_LOCK(sc);
2896
2897         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2898         CLR_BUSY(sc);
2899         wakeup(&sc->flags);
2900         ADAPTER_UNLOCK(sc);
2901 }
2902
/*
 * Bring a port up: finish any pending adapter/port initialization, program
 * the MAC, start the link, and enable the virtual interface.  Must run
 * inside a synchronized operation.  Returns 0 if the port is (or already
 * was) running.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* First-time setup of adapter-wide queues/taskqueues, if needed. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	/* First-time setup of this port's queues and RSS, if needed. */
	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		/* T5 uses a different register for the same control. */
		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the 1Hz maintenance tick for this port. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);	/* undo partial bring-up */

	return (rc);
}
2966
/*
 * Idempotent.  Take a port down: disable its VI, mark the ifnet as no longer
 * running, and report the link as down.  Must run inside a synchronized
 * operation.
 */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Reflect link-down in the software state and notify the stack. */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->linkdnrc = -1;
	t4_os_link_changed(sc, pi->port_id, 0, -1);

	return (0);
}
3004
/*
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];	/* interrupt name, e.g. "0.1" (NIC) or "0,1" (TOE) */
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* INTx uses rid 0; MSI/MSI-X resource ids start at 1. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (sc->intr_count == 1) {
		KASSERT(!(sc->flags & INTR_DIRECT),
		    ("%s: single interrupt && INTR_DIRECT?", __func__));

		/* One interrupt shared by everything. */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
		if (rc != 0)
			return (rc);
	} else {
		/* Multiple interrupts. */
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

		/* The first one is always error intr */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/* The second one is always the firmware event queue */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
		    "evt");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/*
		 * Note that if INTR_DIRECT is not set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queueus will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, p) {
			pi = sc->port[p];

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			/* One interrupt per NIC rx queue, named "port.queue". */
			rxq = &sc->sge.rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT))
				continue;
ofld_queues:
			/* One interrupt per TOE rx queue, named "port,queue". */
			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
#endif
		}
	}

	return (0);
}
3108
/*
 * One-time, adapter-wide initialization: set up the adapter queues and the
 * driver taskqueues, then enable interrupts.  Rolls itself back via
 * adapter_full_uninit on any failure.
 */
static int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		/* One thread per taskqueue, named after the device. */
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);	/* undo partial init */

	return (rc);
}
3146
3147 static int
3148 adapter_full_uninit(struct adapter *sc)
3149 {
3150         int i;
3151
3152         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3153
3154         t4_teardown_adapter_queues(sc);
3155
3156         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3157                 taskqueue_free(sc->tq[i]);
3158                 sc->tq[i] = NULL;
3159         }
3160
3161         sc->flags &= ~FULL_INIT_DONE;
3162
3163         return (0);
3164 }
3165
/*
 * One-time, per-port initialization: allocate the port's tx/rx/fl queues and
 * program its RSS indirection table.  Rolls itself back via port_full_uninit
 * on any failure.  Must run inside a synchronized operation.
 */
static int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i, j;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;	/* so uninit knows to free the ctx */

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
	 */
	rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
	/* Fill the table round-robin with the port's rx queue ids. */
	for (i = 0; i < pi->rss_size;) {
		for_each_rxq(pi, j, rxq) {
			rss[i++] = rxq->iq.abs_id;
			if (i == pi->rss_size)
				break;
		}
	}

	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
	    pi->rss_size);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->rss = rss;
	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);	/* undo partial init */

	return (rc);
}
3216
/*
 * Idempotent.
 *
 * Quiesces and tears down all of a port's queues and releases the saved
 * RSS table.  The quiesce step runs only if port_full_init completed
 * (PORT_INIT_DONE); the flag is cleared at the end, which is what makes
 * repeated calls harmless.  Tx queues are quiesced before rx queues so
 * no new work is generated while the rx side is drained.
 */
static int
port_full_uninit(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif

	if (pi->flags & PORT_INIT_DONE) {

		/* Need to quiesce queues.  XXX: ctrl queues? */

		for_each_txq(pi, i, txq) {
			quiesce_eq(sc, &txq->eq);
		}

#ifdef TCP_OFFLOAD
		for_each_ofld_txq(pi, i, ofld_txq) {
			quiesce_eq(sc, &ofld_txq->eq);
		}
#endif

		for_each_rxq(pi, i, rxq) {
			quiesce_iq(sc, &rxq->iq);
			quiesce_fl(sc, &rxq->fl);
		}

#ifdef TCP_OFFLOAD
		for_each_ofld_rxq(pi, i, ofld_rxq) {
			quiesce_iq(sc, &ofld_rxq->iq);
			quiesce_fl(sc, &ofld_rxq->fl);
		}
#endif
		/* RSS table saved by port_full_init. */
		free(pi->rss, M_CXGBE);
	}

	t4_teardown_port_queues(pi);
	pi->flags &= ~PORT_INIT_DONE;

	return (0);
}
3265
/*
 * Quiesce an egress queue: mark it doomed so no new work is queued to it,
 * wait out any in-flight credit flush, then drain the deferred-tx callout
 * and task so nothing references the queue afterwards.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	/* The tx task may have been scheduled by the callout; drain it too. */
	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3285
3286 static void
3287 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3288 {
3289         (void) sc;      /* unused */
3290
3291         /* Synchronize with the interrupt handler */
3292         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3293                 pause("iqfree", 1);
3294 }
3295
/*
 * Quiesce a freelist: mark it doomed (under both the adapter's starving-fl
 * lock and the fl lock, matching the lock order used elsewhere) and then
 * drain the starvation callout so it can no longer touch this fl.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	/* A doomed fl must not be on the starving list any more. */
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3309
3310 static int
3311 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3312     driver_intr_t *handler, void *arg, char *name)
3313 {
3314         int rc;
3315
3316         irq->rid = rid;
3317         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3318             RF_SHAREABLE | RF_ACTIVE);
3319         if (irq->res == NULL) {
3320                 device_printf(sc->dev,
3321                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3322                 return (ENOMEM);
3323         }
3324
3325         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3326             NULL, handler, arg, &irq->tag);
3327         if (rc != 0) {
3328                 device_printf(sc->dev,
3329                     "failed to setup interrupt for rid %d, name %s: %d\n",
3330                     rid, name, rc);
3331         } else if (name)
3332                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3333
3334         return (rc);
3335 }
3336
3337 static int
3338 t4_free_irq(struct adapter *sc, struct irq *irq)
3339 {
3340         if (irq->tag)
3341                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3342         if (irq->res)
3343                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3344
3345         bzero(irq, sizeof(*irq));
3346
3347         return (0);
3348 }
3349
3350 static void
3351 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3352     unsigned int end)
3353 {
3354         uint32_t *p = (uint32_t *)(buf + start);
3355
3356         for ( ; start <= end; start += sizeof(uint32_t))
3357                 *p++ = t4_read_reg(sc, start);
3358 }
3359
/*
 * Fill in a register dump for the adapter.  Selects the chip-appropriate
 * table of (start, end) register-address pairs and copies each range into
 * 'buf' with reg_block_dump.  regs->version encodes the chip id and rev
 * so userland can interpret the dump.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* T4 register ranges worth dumping: pairs of inclusive start/end. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11110,
		0x11118, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* T5 register ranges worth dumping: pairs of inclusive start/end. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x11110,
		0x11118, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d00, 0x30d00,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e00, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32200, 0x32200,
		0x32208, 0x32240,
		0x32248, 0x32280,
		0x32288, 0x322c0,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d00, 0x34d00,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e00, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36200, 0x36200,
		0x36208, 0x36240,
		0x36248, 0x36280,
		0x36288, 0x362c0,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d00, 0x38d00,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e00, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a200, 0x3a200,
		0x3a208, 0x3a240,
		0x3a248, 0x3a280,
		0x3a288, 0x3a2c0,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd00, 0x3cd00,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de00, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e200, 0x3e200,
		0x3e208, 0x3e240,
		0x3e248, 0x3e280,
		0x3e288, 0x3e2c0,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Pick the table that matches the chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	/* Each table entry is a pair, hence the stride of 2. */
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4037
/*
 * Periodic (1 Hz) per-port callout.  Pulls the MAC statistics from the
 * hardware and publishes them via the ifnet counters.  Reschedules itself
 * unless the interface has been brought down.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	t4_get_port_stats(pi->adapter, pi->tx_chan, s);

	/*
	 * Pause frames are excluded from the packet/octet counters; each
	 * pause frame is accounted here as 64 octets.
	 */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	/* Sum of overflow and truncation drops across all four channels. */
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;

	/* Hardware tx drops plus software buf_ring drops on each txq. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
4077
4078 static void
4079 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4080 {
4081         struct ifnet *vlan;
4082
4083         if (arg != ifp || ifp->if_type != IFT_ETHER)
4084                 return;
4085
4086         vlan = VLAN_DEVAT(ifp, vid);
4087         VLAN_SETCOOKIE(vlan, ifp);
4088 }
4089
/*
 * Catch-all for CPL messages with no registered handler; installed in every
 * unused slot of sc->cpl_handler by t4_register_cpl_handler(..., NULL).
 * An unclaimed opcode is a driver bug, so this panics under INVARIANTS;
 * otherwise it logs the event and frees the payload mbuf.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);
#endif
	return (EDOOFUS);
}
4104
4105 int
4106 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4107 {
4108         uintptr_t *loc, new;
4109
4110         if (opcode >= nitems(sc->cpl_handler))
4111                 return (EINVAL);
4112
4113         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4114         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4115         atomic_store_rel_ptr(loc, new);
4116
4117         return (0);
4118 }
4119
/*
 * Catch-all for firmware async notifications; installed in sc->an_handler
 * when no real handler is registered.  An unclaimed notification is a
 * driver bug, so this panics under INVARIANTS and logs otherwise.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4132
4133 int
4134 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4135 {
4136         uintptr_t *loc, new;
4137
4138         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4139         loc = (uintptr_t *) &sc->an_handler;
4140         atomic_store_rel_ptr(loc, new);
4141
4142         return (0);
4143 }
4144
/*
 * Catch-all for firmware messages with no registered handler; installed in
 * unused slots of sc->fw_msg_handler.  rpl points at the data[] member of
 * the enclosing cpl_fw6_msg, so __containerof recovers the full message to
 * report its type.  Panics under INVARIANTS, logs otherwise.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4158
4159 int
4160 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4161 {
4162         uintptr_t *loc, new;
4163
4164         if (type >= nitems(sc->fw_msg_handler))
4165                 return (EINVAL);
4166
4167         /*
4168          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4169          * handler dispatch table.  Reject any attempt to install a handler for
4170          * this subtype.
4171          */
4172         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4173                 return (EINVAL);
4174
4175         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4176         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4177         atomic_store_rel_ptr(loc, new);
4178
4179         return (0);
4180 }
4181
4182 static int
4183 t4_sysctls(struct adapter *sc)
4184 {
4185         struct sysctl_ctx_list *ctx;
4186         struct sysctl_oid *oid;
4187         struct sysctl_oid_list *children, *c0;
4188         static char *caps[] = {
4189                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4190                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4191                     "\6HASHFILTER\7ETHOFLD",
4192                 "\20\1TOE",                             /* caps[2] toecaps */
4193                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4194                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4195                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4196                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4197                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4198                     "\4PO_INITIAOR\5PO_TARGET"
4199         };
4200         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4201
4202         ctx = device_get_sysctl_ctx(sc->dev);
4203
4204         /*
4205          * dev.t4nex.X.
4206          */
4207         oid = device_get_sysctl_tree(sc->dev);
4208         c0 = children = SYSCTL_CHILDREN(oid);
4209
4210         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4211             sc->params.nports, "# of ports");
4212
4213         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4214             NULL, chip_rev(sc), "chip hardware revision");
4215
4216         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4217             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4218
4219         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4220             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4221
4222         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4223             sc->cfcsum, "config file checksum");
4224
4225         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4226             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4227             sysctl_bitfield, "A", "available doorbells");
4228
4229         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4230             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4231             sysctl_bitfield, "A", "available link capabilities");
4232
4233         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4234             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4235             sysctl_bitfield, "A", "available NIC capabilities");
4236
4237         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4238             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4239             sysctl_bitfield, "A", "available TCP offload capabilities");
4240
4241         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4242             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4243             sysctl_bitfield, "A", "available RDMA capabilities");
4244
4245         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4246             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4247             sysctl_bitfield, "A", "available iSCSI capabilities");
4248
4249         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4250             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4251             sysctl_bitfield, "A", "available FCoE capabilities");
4252
4253         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4254             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4255
4256         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4257             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4258             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4259             "interrupt holdoff timer values (us)");
4260
4261         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4262             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4263             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4264             "interrupt holdoff packet counter values");
4265
4266         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4267             NULL, sc->tids.nftids, "number of filters");
4268
4269         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4270             CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4271             "chip temperature (in Celsius)");
4272
4273         t4_sge_sysctls(sc, ctx, children);
4274
4275         sc->lro_timeout = 100;
4276         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4277             &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4278
4279 #ifdef SBUF_DRAIN
4280         /*
4281          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4282          */
4283         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4284             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4285             "logs and miscellaneous information");
4286         children = SYSCTL_CHILDREN(oid);
4287
4288         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4289             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4290             sysctl_cctrl, "A", "congestion control");
4291
4292         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4293             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4294             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4295
4296         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4297             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4298             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4299
4300         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4301             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4302             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4303
4304         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4305             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4306             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4307
4308         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4309             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4310             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4311
4312         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4313             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4314             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4315
4316         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4317             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4318             sysctl_cim_la, "A", "CIM logic analyzer");
4319
4320         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4321             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4322             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4323
4324         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4325             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4326             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4327
4328         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4329             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4330             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4331
4332         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4333             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4334             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4335
4336         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4337             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4338             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4339
4340         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4341             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4342             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4343
4344         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4345             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4346             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4347
4348         if (is_t5(sc)) {
4349                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4350                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4351                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4352
4353                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4354                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4355                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4356         }
4357
4358         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4359             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4360             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4361
4362         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4363             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4364             sysctl_cim_qcfg, "A", "CIM queue configuration");
4365
4366         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4367             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4368             sysctl_cpl_stats, "A", "CPL statistics");
4369
4370         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4371             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4372             sysctl_ddp_stats, "A", "DDP statistics");
4373
4374         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4375             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4376             sysctl_devlog, "A", "firmware's device log");
4377
4378         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4379             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4380             sysctl_fcoe_stats, "A", "FCoE statistics");
4381
4382         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4383             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4384             sysctl_hw_sched, "A", "hardware scheduler ");
4385
4386         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4387             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4388             sysctl_l2t, "A", "hardware L2 table");
4389
4390         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4391             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4392             sysctl_lb_stats, "A", "loopback statistics");
4393
4394         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4395             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4396             sysctl_meminfo, "A", "memory regions");
4397
4398         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4399             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4400             sysctl_mps_tcam, "A", "MPS TCAM entries");
4401
4402         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4403             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4404             sysctl_path_mtus, "A", "path MTUs");
4405
4406         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4407             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4408             sysctl_pm_stats, "A", "PM statistics");
4409
4410         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4411             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4412             sysctl_rdma_stats, "A", "RDMA statistics");
4413
4414         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4415             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4416             sysctl_tcp_stats, "A", "TCP statistics");
4417
4418         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4419             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4420             sysctl_tids, "A", "TID information");
4421
4422         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4423             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4424             sysctl_tp_err_stats, "A", "TP error statistics");
4425
4426         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4427             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4428             sysctl_tp_la, "A", "TP logic analyzer");
4429
4430         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4431             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4432             sysctl_tx_rate, "A", "Tx rate");
4433
4434         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4435             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4436             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4437
4438         if (is_t5(sc)) {
4439                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4440                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4441                     sysctl_wcwr_stats, "A", "write combined work requests");
4442         }
4443 #endif
4444
4445 #ifdef TCP_OFFLOAD
4446         if (is_offload(sc)) {
4447                 /*
4448                  * dev.t4nex.X.toe.
4449                  */
4450                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4451                     NULL, "TOE parameters");
4452                 children = SYSCTL_CHILDREN(oid);
4453
4454                 sc->tt.sndbuf = 256 * 1024;
4455                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4456                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4457
4458                 sc->tt.ddp = 0;
4459                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4460                     &sc->tt.ddp, 0, "DDP allowed");
4461
4462                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4463                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4464                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4465
4466                 sc->tt.ddp_thres =
4467                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4468                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4469                     &sc->tt.ddp_thres, 0, "DDP threshold");
4470
4471                 sc->tt.rx_coalesce = 1;
4472                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4473                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4474         }
4475 #endif
4476
4477
4478         return (0);
4479 }
4480
/*
 * Set up the dev.cxgbe.X sysctl tree for a port: link/PHY information,
 * queue counts and sizes, holdoff knobs, and the per-port MAC statistics
 * (read live from hardware registers via sysctl_handle_t4_reg64, plus a
 * few software-maintained counters from pi->stats).  Returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
	/* 10GBASE-T ports expose extra PHY sensors over MDIO (sysctl_btphy). */
	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
		    "PHY temperature (in Celsius)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
		    "PHY firmware version");
	}
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
	    "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Register a 64-bit MAC statistic read directly from the given hardware
 * register (arg2) by sysctl_handle_t4_reg64.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Register a software-maintained counter from pi->stats by name. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/*
	 * These come from port_stats, which cxgbe_tick refreshes once per
	 * second, so they may be stale by up to 1s.
	 */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
4707
4708 static int
4709 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4710 {
4711         int rc, *i;
4712         struct sbuf sb;
4713
4714         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4715         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4716                 sbuf_printf(&sb, "%d ", *i);
4717         sbuf_trim(&sb);
4718         sbuf_finish(&sb);
4719         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4720         sbuf_delete(&sb);
4721         return (rc);
4722 }
4723
4724 static int
4725 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4726 {
4727         int rc;
4728         struct sbuf *sb;
4729
4730         rc = sysctl_wire_old_buffer(req, 0);
4731         if (rc != 0)
4732                 return(rc);
4733
4734         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4735         if (sb == NULL)
4736                 return (ENOMEM);
4737
4738         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4739         rc = sbuf_finish(sb);
4740         sbuf_delete(sb);
4741
4742         return (rc);
4743 }
4744
/*
 * Sysctl handler for the 10GBase-T PHY sysctls.  Reads a value from the
 * port's PHY over MDIO while holding the adapter's synchronized-op lock.
 * arg2 (op) selects which quantity to read.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        int op = arg2;
        struct adapter *sc = pi->adapter;
        u_int v;
        int rc;

        rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
        if (rc)
                return (rc);
        /* XXX: magic numbers */
        rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
            &v);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);
        /*
         * NOTE(review): for op 0 the value of interest presumably lives in
         * the upper byte of the register — confirm against the PHY docs.
         */
        if (op == 0)
                v /= 256;

        rc = sysctl_handle_int(oidp, &v, 0, req);
        return (rc);
}
4769
4770 static int
4771 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4772 {
4773         struct port_info *pi = arg1;
4774         int rc, val;
4775
4776         val = pi->rsrv_noflowq;
4777         rc = sysctl_handle_int(oidp, &val, 0, req);
4778         if (rc != 0 || req->newptr == NULL)
4779                 return (rc);
4780
4781         if ((val >= 1) && (pi->ntxq > 1))
4782                 pi->rsrv_noflowq = 1;
4783         else
4784                 pi->rsrv_noflowq = 0;
4785
4786         return (rc);
4787 }
4788
/*
 * Sysctl handler for the port's interrupt holdoff timer index.  Validates
 * the new index and pushes it out to every rx queue (NIC and, with
 * TCP_OFFLOAD, offload rx queues too) owned by the port.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        struct adapter *sc = pi->adapter;
        int idx, rc, i;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif
        uint8_t v;

        idx = pi->tmr_idx;

        rc = sysctl_handle_int(oidp, &idx, 0, req);
        if (rc != 0 || req->newptr == NULL)
                return (rc);

        /* idx selects one of the SGE's fixed set of timer values. */
        if (idx < 0 || idx >= SGE_NTIMERS)
                return (EINVAL);

        rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4tmr");
        if (rc)
                return (rc);

        /* Keep packet-count holdoff enabled iff a pktc index is in use. */
        v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
        for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
                /* Use a release store when the platform provides one. */
                atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
                rxq->iq.intr_params = v;
#endif
        }
#ifdef TCP_OFFLOAD
        for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
                ofld_rxq->iq.intr_params = v;
#endif
        }
#endif
        pi->tmr_idx = idx;

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
4837
4838 static int
4839 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4840 {
4841         struct port_info *pi = arg1;
4842         struct adapter *sc = pi->adapter;
4843         int idx, rc;
4844
4845         idx = pi->pktc_idx;
4846
4847         rc = sysctl_handle_int(oidp, &idx, 0, req);
4848         if (rc != 0 || req->newptr == NULL)
4849                 return (rc);
4850
4851         if (idx < -1 || idx >= SGE_NCOUNTERS)
4852                 return (EINVAL);
4853
4854         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4855             "t4pktc");
4856         if (rc)
4857                 return (rc);
4858
4859         if (pi->flags & PORT_INIT_DONE)
4860                 rc = EBUSY; /* cannot be changed once the queues are created */
4861         else
4862                 pi->pktc_idx = idx;
4863
4864         end_synchronized_op(sc, LOCK_HELD);
4865         return (rc);
4866 }
4867
4868 static int
4869 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4870 {
4871         struct port_info *pi = arg1;
4872         struct adapter *sc = pi->adapter;
4873         int qsize, rc;
4874
4875         qsize = pi->qsize_rxq;
4876
4877         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4878         if (rc != 0 || req->newptr == NULL)
4879                 return (rc);
4880
4881         if (qsize < 128 || (qsize & 7))
4882                 return (EINVAL);
4883
4884         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4885             "t4rxqs");
4886         if (rc)
4887                 return (rc);
4888
4889         if (pi->flags & PORT_INIT_DONE)
4890                 rc = EBUSY; /* cannot be changed once the queues are created */
4891         else
4892                 pi->qsize_rxq = qsize;
4893
4894         end_synchronized_op(sc, LOCK_HELD);
4895         return (rc);
4896 }
4897
4898 static int
4899 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4900 {
4901         struct port_info *pi = arg1;
4902         struct adapter *sc = pi->adapter;
4903         int qsize, rc;
4904
4905         qsize = pi->qsize_txq;
4906
4907         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4908         if (rc != 0 || req->newptr == NULL)
4909                 return (rc);
4910
4911         /* bufring size must be powerof2 */
4912         if (qsize < 128 || !powerof2(qsize))
4913                 return (EINVAL);
4914
4915         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4916             "t4txqs");
4917         if (rc)
4918                 return (rc);
4919
4920         if (pi->flags & PORT_INIT_DONE)
4921                 rc = EBUSY; /* cannot be changed once the queues are created */
4922         else
4923                 pi->qsize_txq = qsize;
4924
4925         end_synchronized_op(sc, LOCK_HELD);
4926         return (rc);
4927 }
4928
4929 static int
4930 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4931 {
4932         struct adapter *sc = arg1;
4933         int reg = arg2;
4934         uint64_t val;
4935
4936         val = t4_read_reg64(sc, reg);
4937
4938         return (sysctl_handle_64(oidp, &val, 0, req));
4939 }
4940
4941 static int
4942 sysctl_temperature(SYSCTL_HANDLER_ARGS)
4943 {
4944         struct adapter *sc = arg1;
4945         int rc, t;
4946         uint32_t param, val;
4947
4948         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4949         if (rc)
4950                 return (rc);
4951         param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4952             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4953             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4954         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4955         end_synchronized_op(sc, 0);
4956         if (rc)
4957                 return (rc);
4958
4959         /* unknown is returned as 0 but we display -1 in that case */
4960         t = val == 0 ? -1 : val;
4961
4962         rc = sysctl_handle_int(oidp, &t, 0, req);
4963         return (rc);
4964 }
4965
4966 #ifdef SBUF_DRAIN
/*
 * Sysctl handler that dumps the TP congestion control table: for each
 * congestion window, the increment values for all 16 MTUs plus the
 * window's a_wnd parameter and decrement factor.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t incr[NMTUS][NCCTRL_WIN];
        /* Textual forms of the eight possible b_wnd decrement encodings. */
        static const char *dec_fac[] = {
                "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
                "0.9375"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_read_cong_tbl(sc, incr);

        for (i = 0; i < NCCTRL_WIN; ++i) {
                /* Increments for MTUs 0-7 on one line, 8-15 on the next. */
                sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
                    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
                    incr[5][i], incr[6][i], incr[7][i]);
                sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
                    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
                    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
                    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5004
/*
 * Names of the CIM queues, indexed the same way as arg2 of
 * sysctl_cim_ibq_obq(): the CIM_NUM_IBQ inbound queues first, then the
 * outbound queues (the last two exist on T5 onwards only).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
        "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",   /* ibq's */
        "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
        "SGE0-RX", "SGE1-RX"    /* additional obq's (T5 onwards) */
};
5010
/*
 * Sysctl handler that dumps the contents of one CIM queue.  arg2 encodes
 * which queue: values 0 .. CIM_NUM_IBQ-1 are inbound queues and the rest
 * are outbound queues (T5 has two more OBQs than T4).
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n, qid = arg2;
        uint32_t *buf, *p;
        char *qtype;
        u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

        KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
            ("%s: bad qid %d\n", __func__, qid));

        if (qid < CIM_NUM_IBQ) {
                /* inbound queue */
                qtype = "IBQ";
                n = 4 * CIM_IBQ_SIZE;
                buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
                rc = t4_read_cim_ibq(sc, qid, buf, n);
        } else {
                /* outbound queue; rebase qid to be OBQ-relative */
                qtype = "OBQ";
                qid -= CIM_NUM_IBQ;
                n = 4 * cim_num_obq * CIM_OBQ_SIZE;
                buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
                rc = t4_read_cim_obq(sc, qid, buf, n);
        }

        /* A negative return from the CIM read is an error; flip the sign. */
        if (rc < 0) {
                rc = -rc;
                goto done;
        }
        n = rc * sizeof(uint32_t);      /* rc has # of words actually read */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }

        /* qname is indexed by the original arg2 (qid was rebased above). */
        sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
        /* 16 bytes (4 words) per output line. */
        for (i = 0, p = buf; i < n; i += 16, p += 4)
                sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
                    p[2], p[3]);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
5066
5067 static int
5068 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5069 {
5070         struct adapter *sc = arg1;
5071         u_int cfg;
5072         struct sbuf *sb;
5073         uint32_t *buf, *p;
5074         int rc;
5075
5076         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5077         if (rc != 0)
5078                 return (rc);
5079
5080         rc = sysctl_wire_old_buffer(req, 0);
5081         if (rc != 0)
5082                 return (rc);
5083
5084         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5085         if (sb == NULL)
5086                 return (ENOMEM);
5087
5088         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5089             M_ZERO | M_WAITOK);
5090
5091         rc = -t4_cim_read_la(sc, buf, NULL);
5092         if (rc != 0)
5093                 goto done;
5094
5095         sbuf_printf(sb, "Status   Data      PC%s",
5096             cfg & F_UPDBGLACAPTPCONLY ? "" :
5097             "     LS0Stat  LS0Addr             LS0Data");
5098
5099         KASSERT((sc->params.cim_la_size & 7) == 0,
5100             ("%s: p will walk off the end of buf", __func__));
5101
5102         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5103                 if (cfg & F_UPDBGLACAPTPCONLY) {
5104                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5105                             p[6], p[7]);
5106                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5107                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5108                             p[4] & 0xff, p[5] >> 8);
5109                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5110                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5111                             p[1] & 0xf, p[2] >> 4);
5112                 } else {
5113                         sbuf_printf(sb,
5114                             "\n  %02x   %x%07x %x%07x %08x %08x "
5115                             "%08x%08x%08x%08x",
5116                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5117                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5118                             p[6], p[7]);
5119                 }
5120         }
5121
5122         rc = sbuf_finish(sb);
5123         sbuf_delete(sb);
5124 done:
5125         free(buf, M_CXGBE);
5126         return (rc);
5127 }
5128
/*
 * Sysctl handler that dumps the CIM MA logic analyzer.  t4_cim_read_ma_la()
 * fills two tables of CIM_MALA_SIZE entries (5 words each) into the single
 * buffer; p walks straight through both halves.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        u_int i;
        struct sbuf *sb;
        uint32_t *buf, *p;
        int rc;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        /* 2 tables x CIM_MALA_SIZE entries x 5 words per entry. */
        buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
        p = buf;

        /* First table: each entry dumped as one 160-bit hex value. */
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
                    p[1], p[0]);
        }

        /* Second table: decoded into the columns below. */
        sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
                    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
                    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
                    (p[1] >> 2) | ((p[2] & 3) << 30),
                    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
                    p[0] & 1);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
5172
5173 static int
5174 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5175 {
5176         struct adapter *sc = arg1;
5177         u_int i;
5178         struct sbuf *sb;
5179         uint32_t *buf, *p;
5180         int rc;
5181
5182         rc = sysctl_wire_old_buffer(req, 0);
5183         if (rc != 0)
5184                 return (rc);
5185
5186         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5187         if (sb == NULL)
5188                 return (ENOMEM);
5189
5190         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5191             M_ZERO | M_WAITOK);
5192
5193         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5194         p = buf;
5195
5196         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5197         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5198                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5199                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5200                     p[4], p[3], p[2], p[1], p[0]);
5201         }
5202
5203         sbuf_printf(sb, "\n\nCntl ID               Data");
5204         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5205                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5206                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5207         }
5208
5209         rc = sbuf_finish(sb);
5210         sbuf_delete(sb);
5211         free(buf, M_CXGBE);
5212         return (rc);
5213 }
5214
/*
 * Sysctl handler that displays the configuration and current state of the
 * CIM queues: base, size, threshold (IBQs only), read/write pointers, and
 * SOP/EOP/remaining-flit counters.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t thres[CIM_NUM_IBQ];
        /* 2 write-pointer words per OBQ, 4 state words per queue. */
        uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
        uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
        u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

        /* T5 has more OBQs and exposes the state at shadow addresses. */
        if (is_t4(sc)) {
                cim_num_obq = CIM_NUM_OBQ;
                ibq_rdaddr = A_UP_IBQ_0_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_REALADDR;
        } else {
                cim_num_obq = CIM_NUM_OBQ_T5;
                ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
        }
        nq = CIM_NUM_IBQ + cim_num_obq;

        rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
        if (rc == 0)
                rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
        if (rc != 0)
                return (rc);

        t4_read_cimq_cfg(sc, base, size, thres);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

        /* IBQs first (they have a threshold) ... */
        for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
                sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
                    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
                    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);
        /* ... then the OBQs, whose write pointers come from obq_wr. */
        for ( ; i < nq; i++, p += 4, wr += 2)
                sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
                    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
                    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5273
5274 static int
5275 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5276 {
5277         struct adapter *sc = arg1;
5278         struct sbuf *sb;
5279         int rc;
5280         struct tp_cpl_stats stats;
5281
5282         rc = sysctl_wire_old_buffer(req, 0);
5283         if (rc != 0)
5284                 return (rc);
5285
5286         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5287         if (sb == NULL)
5288                 return (ENOMEM);
5289
5290         t4_tp_get_cpl_stats(sc, &stats);
5291
5292         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5293             "channel 3\n");
5294         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5295                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5296         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5297                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5298
5299         rc = sbuf_finish(sb);
5300         sbuf_delete(sb);
5301
5302         return (rc);
5303 }
5304
5305 static int
5306 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5307 {
5308         struct adapter *sc = arg1;
5309         struct sbuf *sb;
5310         int rc;
5311         struct tp_usm_stats stats;
5312
5313         rc = sysctl_wire_old_buffer(req, 0);
5314         if (rc != 0)
5315                 return(rc);
5316
5317         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5318         if (sb == NULL)
5319                 return (ENOMEM);
5320
5321         t4_get_usm_stats(sc, &stats);
5322
5323         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5324         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5325         sbuf_printf(sb, "Drops:  %u", stats.drops);
5326
5327         rc = sbuf_finish(sb);
5328         sbuf_delete(sb);
5329
5330         return (rc);
5331 }
5332
/*
 * Textual names for the firmware device-log severity levels, indexed by
 * the FW_DEVLOG_LEVEL_* constants.
 */
const char *devlog_level_strings[] = {
        [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
        [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
        [FW_DEVLOG_LEVEL_ERR]           = "ERR",
        [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
        [FW_DEVLOG_LEVEL_INFO]          = "INFO",
        [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
};
5341
/*
 * Textual names for the firmware device-log facilities, indexed by the
 * FW_DEVLOG_FACILITY_* constants.
 */
const char *devlog_facility_strings[] = {
        [FW_DEVLOG_FACILITY_CORE]       = "CORE",
        [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
        [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
        [FW_DEVLOG_FACILITY_RES]        = "RES",
        [FW_DEVLOG_FACILITY_HW]         = "HW",
        [FW_DEVLOG_FACILITY_FLR]        = "FLR",
        [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
        [FW_DEVLOG_FACILITY_PHY]        = "PHY",
        [FW_DEVLOG_FACILITY_MAC]        = "MAC",
        [FW_DEVLOG_FACILITY_PORT]       = "PORT",
        [FW_DEVLOG_FACILITY_VI]         = "VI",
        [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
        [FW_DEVLOG_FACILITY_ACL]        = "ACL",
        [FW_DEVLOG_FACILITY_TM]         = "TM",
        [FW_DEVLOG_FACILITY_QFC]        = "QFC",
        [FW_DEVLOG_FACILITY_DCB]        = "DCB",
        [FW_DEVLOG_FACILITY_ETH]        = "ETH",
        [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
        [FW_DEVLOG_FACILITY_RI]         = "RI",
        [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
        [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
        [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
        [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
};
5367
/*
 * Sysctl handler that dumps the firmware's device log.  The log lives in
 * adapter memory as a ring of fw_devlog_e entries; the oldest entry (the
 * one with the smallest timestamp) is located first and the ring is then
 * printed in order starting from it.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct devlog_params *dparams = &sc->params.devlog;
        struct fw_devlog_e *buf, *e;
        int i, j, rc, nentries, first = 0, m;
        struct sbuf *sb;
        uint64_t ftstamp = UINT64_MAX;

        /* Fall back to the well-known default location if none was found. */
        if (dparams->start == 0) {
                dparams->memtype = FW_MEMTYPE_EDC0;
                dparams->start = 0x84000;
                dparams->size = 32768;
        }

        nentries = dparams->size / sizeof(struct fw_devlog_e);

        buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
        if (buf == NULL)
                return (ENOMEM);

        m = fwmtype_to_hwmtype(dparams->memtype);
        rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
        if (rc != 0)
                goto done;

        /*
         * Byte-swap every populated entry and remember the index of the one
         * with the earliest timestamp (the start of the ring).
         */
        for (i = 0; i < nentries; i++) {
                e = &buf[i];

                if (e->timestamp == 0)
                        break;  /* end */

                e->timestamp = be64toh(e->timestamp);
                e->seqno = be32toh(e->seqno);
                for (j = 0; j < 8; j++)
                        e->params[j] = be32toh(e->params[j]);

                if (e->timestamp < ftstamp) {
                        ftstamp = e->timestamp;
                        first = i;
                }
        }

        if (buf[first].timestamp == 0)
                goto done;      /* nothing in the log */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }
        sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
            "Seq#", "Tstamp", "Level", "Facility", "Message");

        /* Walk the ring once, starting at the oldest entry. */
        i = first;
        do {
                e = &buf[i];
                if (e->timestamp == 0)
                        break;  /* end */

                sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
                    e->seqno, e->timestamp,
                    (e->level < nitems(devlog_level_strings) ?
                        devlog_level_strings[e->level] : "UNKNOWN"),
                    (e->facility < nitems(devlog_facility_strings) ?
                        devlog_facility_strings[e->facility] : "UNKNOWN"));
                /* e->fmt is a printf format supplied by the firmware. */
                sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
                    e->params[2], e->params[3], e->params[4],
                    e->params[5], e->params[6], e->params[7]);

                if (++i == nentries)
                        i = 0;
        } while (i != first);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
5453
5454 static int
5455 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5456 {
5457         struct adapter *sc = arg1;
5458         struct sbuf *sb;
5459         int rc;
5460         struct tp_fcoe_stats stats[4];
5461
5462         rc = sysctl_wire_old_buffer(req, 0);
5463         if (rc != 0)
5464                 return (rc);
5465
5466         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5467         if (sb == NULL)
5468                 return (ENOMEM);
5469
5470         t4_get_fcoe_stats(sc, 0, &stats[0]);
5471         t4_get_fcoe_stats(sc, 1, &stats[1]);
5472         t4_get_fcoe_stats(sc, 2, &stats[2]);
5473         t4_get_fcoe_stats(sc, 3, &stats[3]);
5474
5475         sbuf_printf(sb, "                   channel 0        channel 1        "
5476             "channel 2        channel 3\n");
5477         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5478             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5479             stats[3].octetsDDP);
5480         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5481             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5482         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5483             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5484             stats[3].framesDrop);
5485
5486         rc = sbuf_finish(sb);
5487         sbuf_delete(sb);
5488
5489         return (rc);
5490 }
5491
/*
 * Sysctl handler that displays the tx scheduler configuration: per-scheduler
 * mode, channel map, rate, and inter-packet gaps, plus the pace table.
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        unsigned int map, kbps, ipg, mode;
        unsigned int pace_tab[NTX_SCHED];

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
        mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
        t4_read_pace_tbl(sc, pace_tab);

        sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
            "Class IPG (0.1 ns)   Flow IPG (us)");

        /* The channel map holds two bits per scheduler. */
        for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
                t4_get_tx_sched(sc, i, &kbps, &ipg);
                sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
                    (mode & (1 << i)) ? "flow" : "class", map & 3);
                /* A zero reading means the corresponding limit is off. */
                if (kbps)
                        sbuf_printf(sb, "%9u     ", kbps);
                else
                        sbuf_printf(sb, " disabled     ");

                if (ipg)
                        sbuf_printf(sb, "%13u        ", ipg);
                else
                        sbuf_printf(sb, "     disabled        ");

                if (pace_tab[i])
                        sbuf_printf(sb, "%10u", pace_tab[i]);
                else
                        sbuf_printf(sb, "  disabled");
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5541
/*
 * Sysctl handler that dumps the loopback port statistics, two loopback
 * ports side by side per table.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, j;
        uint64_t *p0, *p1;
        struct lb_port_stats s[2];
        /*
         * Row labels.  NOTE(review): the print loop below walks the stats
         * struct as an array of uint64_t starting at .octets — this table
         * must stay in the same order as the struct's members; confirm
         * against the definition of struct lb_port_stats.
         */
        static const char *stat_name[] = {
                "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
                "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
                "Frames128To255:", "Frames256To511:", "Frames512To1023:",
                "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
                "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
                "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
                "BG2FramesTrunc:", "BG3FramesTrunc:"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        memset(s, 0, sizeof(s));

        /* Two loopback ports at a time. */
        for (i = 0; i < 4; i += 2) {
                t4_get_lb_stats(sc, i, &s[0]);
                t4_get_lb_stats(sc, i + 1, &s[1]);

                p0 = &s[0].octets;
                p1 = &s[1].octets;
                sbuf_printf(sb, "%s                       Loopback %u"
                    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

                for (j = 0; j < nitems(stat_name); j++)
                        sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
                                   *p0++, *p1++);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5589
5590 static int
5591 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5592 {
5593         int rc = 0;
5594         struct port_info *pi = arg1;
5595         struct sbuf *sb;
5596         static const char *linkdnreasons[] = {
5597                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5598                 "PHY overheated", "unknown", "rx los", "reserved7"
5599         };
5600
5601         rc = sysctl_wire_old_buffer(req, 0);
5602         if (rc != 0)
5603                 return(rc);
5604         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5605         if (sb == NULL)
5606                 return (ENOMEM);
5607
5608         if (pi->linkdnrc < 0)
5609                 sbuf_printf(sb, "n/a");
5610         else if (pi->linkdnrc < nitems(linkdnreasons))
5611                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5612         else
5613                 sbuf_printf(sb, "%d", pi->linkdnrc);
5614
5615         rc = sbuf_finish(sb);
5616         sbuf_delete(sb);
5617
5618         return (rc);
5619 }
5620
/*
 * One contiguous range inside an adapter memory, used by sysctl_meminfo()
 * to sort and display the memory map.
 */
struct mem_desc {
        unsigned int base;      /* start address of the range */
        unsigned int limit;     /* end of the range (inclusive, per mem_region_show) */
        unsigned int idx;       /* index into the caller's region-name table */
};

/*
 * qsort-style comparator that orders mem_desc entries by base address.
 *
 * Fix: the original returned a->base - b->base.  base is unsigned, so the
 * subtraction wraps and the int conversion reports the wrong sign whenever
 * the true difference doesn't fit in an int (e.g. bases on opposite sides
 * of 2GB).  Explicit comparisons are always correct.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
        const unsigned int v1 = ((const struct mem_desc *)a)->base;
        const unsigned int v2 = ((const struct mem_desc *)b)->base;

        if (v1 < v2)
                return (-1);
        if (v1 > v2)
                return (1);
        return (0);
}
5633
/*
 * Append one "name  from-to [size]" line to the sbuf.  Empty ranges
 * (to == from - 1) produce no output.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        if (to - from + 1 == 0)
                return;

        /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
        sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to,
            to - from + 1);
}
5647
5648 static int
5649 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5650 {
5651         struct adapter *sc = arg1;
5652         struct sbuf *sb;
5653         int rc, i, n;
5654         uint32_t lo, hi, used, alloc;
5655         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5656         static const char *region[] = {
5657                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5658                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5659                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5660                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5661                 "RQUDP region:", "PBL region:", "TXPBL region:",
5662                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5663                 "On-chip queues:"
5664         };
5665         struct mem_desc avail[4];
5666         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5667         struct mem_desc *md = mem;
5668
5669         rc = sysctl_wire_old_buffer(req, 0);
5670         if (rc != 0)
5671                 return (rc);
5672
5673         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5674         if (sb == NULL)
5675                 return (ENOMEM);
5676
5677         for (i = 0; i < nitems(mem); i++) {
5678                 mem[i].limit = 0;
5679                 mem[i].idx = i;
5680         }
5681
5682         /* Find and sort the populated memory ranges */
5683         i = 0;
5684         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5685         if (lo & F_EDRAM0_ENABLE) {
5686                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5687                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5688                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5689                 avail[i].idx = 0;
5690                 i++;
5691         }
5692         if (lo & F_EDRAM1_ENABLE) {
5693                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5694                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5695                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5696                 avail[i].idx = 1;
5697                 i++;
5698         }
5699         if (lo & F_EXT_MEM_ENABLE) {
5700                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5701                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5702                 avail[i].limit = avail[i].base +
5703                     (G_EXT_MEM_SIZE(hi) << 20);
5704                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5705                 i++;
5706         }
5707         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5708                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5709                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5710                 avail[i].limit = avail[i].base +
5711                     (G_EXT_MEM1_SIZE(hi) << 20);
5712                 avail[i].idx = 4;
5713                 i++;
5714         }
5715         if (!i)                                    /* no memory available */
5716                 return 0;
5717         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5718
5719         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5720         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5721         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5722         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5723         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5724         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5725         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5726         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5727         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5728
5729         /* the next few have explicit upper bounds */
5730         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5731         md->limit = md->base - 1 +
5732                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5733                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5734         md++;
5735
5736         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5737         md->limit = md->base - 1 +
5738                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5739                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5740         md++;
5741
5742         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5743                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5744                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5745                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5746         } else {
5747                 md->base = 0;
5748                 md->idx = nitems(region);  /* hide it */
5749         }
5750         md++;
5751
5752 #define ulp_region(reg) \
5753         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5754         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5755
5756         ulp_region(RX_ISCSI);
5757         ulp_region(RX_TDDP);
5758         ulp_region(TX_TPT);
5759         ulp_region(RX_STAG);
5760         ulp_region(RX_RQ);
5761         ulp_region(RX_RQUDP);
5762         ulp_region(RX_PBL);
5763         ulp_region(TX_PBL);
5764 #undef ulp_region
5765
5766         md->base = 0;
5767         md->idx = nitems(region);
5768         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5769                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5770                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5771                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5772         }
5773         md++;
5774
5775         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5776         md->limit = md->base + sc->tids.ntids - 1;
5777         md++;
5778         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5779         md->limit = md->base + sc->tids.ntids - 1;
5780         md++;
5781
5782         md->base = sc->vres.ocq.start;
5783         if (sc->vres.ocq.size)
5784                 md->limit = md->base + sc->vres.ocq.size - 1;
5785         else
5786                 md->idx = nitems(region);  /* hide it */
5787         md++;
5788
5789         /* add any address-space holes, there can be up to 3 */
5790         for (n = 0; n < i - 1; n++)
5791                 if (avail[n].limit < avail[n + 1].base)
5792                         (md++)->base = avail[n].limit;
5793         if (avail[n].limit)
5794                 (md++)->base = avail[n].limit;
5795
5796         n = md - mem;
5797         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5798
5799         for (lo = 0; lo < i; lo++)
5800                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5801                                 avail[lo].limit - 1);
5802
5803         sbuf_printf(sb, "\n");
5804         for (i = 0; i < n; i++) {
5805                 if (mem[i].idx >= nitems(region))
5806                         continue;                        /* skip holes */
5807                 if (!mem[i].limit)
5808                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5809                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5810                                 mem[i].limit);
5811         }
5812
5813         sbuf_printf(sb, "\n");
5814         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5815         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5816         mem_region_show(sb, "uP RAM:", lo, hi);
5817
5818         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5819         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5820         mem_region_show(sb, "uP Extmem2:", lo, hi);
5821
5822         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5823         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5824                    G_PMRXMAXPAGE(lo),
5825                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5826                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5827
5828         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5829         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5830         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5831                    G_PMTXMAXPAGE(lo),
5832                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5833                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5834         sbuf_printf(sb, "%u p-structs\n",
5835                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5836
5837         for (i = 0; i < 4; i++) {
5838                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5839                 if (is_t4(sc)) {
5840                         used = G_USED(lo);
5841                         alloc = G_ALLOC(lo);
5842                 } else {
5843                         used = G_T5_USED(lo);
5844                         alloc = G_T5_ALLOC(lo);
5845                 }
5846                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5847                            i, used, alloc);
5848         }
5849         for (i = 0; i < 4; i++) {
5850                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5851                 if (is_t4(sc)) {
5852                         used = G_USED(lo);
5853                         alloc = G_ALLOC(lo);
5854                 } else {
5855                         used = G_T5_USED(lo);
5856                         alloc = G_T5_ALLOC(lo);
5857                 }
5858                 sbuf_printf(sb,
5859                            "\nLoopback %d using %u pages out of %u allocated",
5860                            i, used, alloc);
5861         }
5862
5863         rc = sbuf_finish(sb);
5864         sbuf_delete(sb);
5865
5866         return (rc);
5867 }
5868
/*
 * Derive a value/mask pair from a TCAM entry's (x, y) encoding.
 * The mask returned is simply x | y; the Ethernet address is taken
 * from the low 48 bits of y, laid out in network (big-endian) byte
 * order in addr[].
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	*mask = x | y;
	y = htobe64(y);
	/* Skip the top 2 bytes of the big-endian 64-bit value. */
	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
5876
/*
 * sysctl handler: dump the MPS TCAM (Ethernet address classifier).
 * One line per in-use entry: address/mask from the TCAM, validity,
 * port map, PF/VF, optional replication bitmap fetched from firmware,
 * per-port priorities and multi-listen bits from the classifier SRAM.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	/* T4 and T5 have different TCAM sizes. */
	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	for (i = 0; i < n; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		/* Overlapping x/y bits: entry skipped (presumably unused). */
		if (tcamx & tcamy)
			continue;

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
			   addr[3], addr[4], addr[5], (uintmax_t)mask,
			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
			   G_PORTMAP(cls_hi), G_PF(cls_lo),
			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/*
			 * The replication bitmap is not directly readable;
			 * ask the firmware for it with an LDST command.
			 */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_CTL(i));

			/* Mailbox access must be serialized; may sleep. */
			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Note the error inline but keep dumping. */
				sbuf_printf(sb,
				    " ------------ error %3u ------------", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* rc != 0 here only if begin_synchronized_op failed; preserve it. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5966
5967 static int
5968 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5969 {
5970         struct adapter *sc = arg1;
5971         struct sbuf *sb;
5972         int rc;
5973         uint16_t mtus[NMTUS];
5974
5975         rc = sysctl_wire_old_buffer(req, 0);
5976         if (rc != 0)
5977                 return (rc);
5978
5979         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5980         if (sb == NULL)
5981                 return (ENOMEM);
5982
5983         t4_read_mtu_tbl(sc, mtus, NULL);
5984
5985         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5986             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5987             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5988             mtus[14], mtus[15]);
5989
5990         rc = sbuf_finish(sb);
5991         sbuf_delete(sb);
5992
5993         return (rc);
5994 }
5995
5996 static int
5997 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5998 {
5999         struct adapter *sc = arg1;
6000         struct sbuf *sb;
6001         int rc, i;
6002         uint32_t cnt[PM_NSTATS];
6003         uint64_t cyc[PM_NSTATS];
6004         static const char *rx_stats[] = {
6005                 "Read:", "Write bypass:", "Write mem:", "Flush:"
6006         };
6007         static const char *tx_stats[] = {
6008                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6009         };
6010
6011         rc = sysctl_wire_old_buffer(req, 0);
6012         if (rc != 0)
6013                 return (rc);
6014
6015         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6016         if (sb == NULL)
6017                 return (ENOMEM);
6018
6019         t4_pmtx_get_stats(sc, cnt, cyc);
6020         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6021         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6022                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6023                     cyc[i]);
6024
6025         t4_pmrx_get_stats(sc, cnt, cyc);
6026         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6027         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6028                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6029                     cyc[i]);
6030
6031         rc = sbuf_finish(sb);
6032         sbuf_delete(sb);
6033
6034         return (rc);
6035 }
6036
6037 static int
6038 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6039 {
6040         struct adapter *sc = arg1;
6041         struct sbuf *sb;
6042         int rc;
6043         struct tp_rdma_stats stats;
6044
6045         rc = sysctl_wire_old_buffer(req, 0);
6046         if (rc != 0)
6047                 return (rc);
6048
6049         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6050         if (sb == NULL)
6051                 return (ENOMEM);
6052
6053         t4_tp_get_rdma_stats(sc, &stats);
6054         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6055         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6056
6057         rc = sbuf_finish(sb);
6058         sbuf_delete(sb);
6059
6060         return (rc);
6061 }
6062
6063 static int
6064 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6065 {
6066         struct adapter *sc = arg1;
6067         struct sbuf *sb;
6068         int rc;
6069         struct tp_tcp_stats v4, v6;
6070
6071         rc = sysctl_wire_old_buffer(req, 0);
6072         if (rc != 0)
6073                 return (rc);
6074
6075         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6076         if (sb == NULL)
6077                 return (ENOMEM);
6078
6079         t4_tp_get_tcp_stats(sc, &v4, &v6);
6080         sbuf_printf(sb,
6081             "                                IP                 IPv6\n");
6082         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6083             v4.tcpOutRsts, v6.tcpOutRsts);
6084         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6085             v4.tcpInSegs, v6.tcpInSegs);
6086         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6087             v4.tcpOutSegs, v6.tcpOutSegs);
6088         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6089             v4.tcpRetransSegs, v6.tcpRetransSegs);
6090
6091         rc = sbuf_finish(sb);
6092         sbuf_delete(sb);
6093
6094         return (rc);
6095 }
6096
/*
 * sysctl handler: summary of the TID (connection-id) space — ATIDs,
 * TIDs (with the hash region carved out when the LE hash is enabled),
 * server TIDs, filter TIDs, and the hardware's active-TID counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Active-open TIDs. */
	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/*
			 * Hash filtering is on: the TID space is split at
			 * the hash base (register values are in units of 4
			 * — TODO confirm against the LE documentation).
			 */
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	/* Server (passive-open) TIDs. */
	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	/* Filter TIDs. */
	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	/* Hardware's own per-family active-connection counts. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6156
/*
 * sysctl handler: TP error statistics.  Per-channel counters are shown
 * four across (channels 0-3); the last two counters are totals.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
		      "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6209
/*
 * Describes one bit-field inside a 64-bit captured value, for decoding
 * by field_desc_show().  Tables of these are terminated by an entry
 * with a NULL name.
 */
struct field_desc {
	const char *name;	/* display name; NULL terminates the table */
	u_int start;		/* lowest bit position of the field */
	u_int width;		/* field width in bits */
};
6215
6216 static void
6217 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6218 {
6219         char buf[32];
6220         int line_size = 0;
6221
6222         while (f->name) {
6223                 uint64_t mask = (1ULL << f->width) - 1;
6224                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6225                     ((uintmax_t)v >> f->start) & mask);
6226
6227                 if (line_size + len >= 79) {
6228                         line_size = 8;
6229                         sbuf_printf(sb, "\n        ");
6230                 }
6231                 sbuf_printf(sb, "%s ", buf);
6232                 line_size += len + 1;
6233                 f++;
6234         }
6235         sbuf_printf(sb, "\n");
6236 }
6237
6238 static struct field_desc tp_la0[] = {
6239         { "RcfOpCodeOut", 60, 4 },
6240         { "State", 56, 4 },
6241         { "WcfState", 52, 4 },
6242         { "RcfOpcSrcOut", 50, 2 },
6243         { "CRxError", 49, 1 },
6244         { "ERxError", 48, 1 },
6245         { "SanityFailed", 47, 1 },
6246         { "SpuriousMsg", 46, 1 },
6247         { "FlushInputMsg", 45, 1 },
6248         { "FlushInputCpl", 44, 1 },
6249         { "RssUpBit", 43, 1 },
6250         { "RssFilterHit", 42, 1 },
6251         { "Tid", 32, 10 },
6252         { "InitTcb", 31, 1 },
6253         { "LineNumber", 24, 7 },
6254         { "Emsg", 23, 1 },
6255         { "EdataOut", 22, 1 },
6256         { "Cmsg", 21, 1 },
6257         { "CdataOut", 20, 1 },
6258         { "EreadPdu", 19, 1 },
6259         { "CreadPdu", 18, 1 },
6260         { "TunnelPkt", 17, 1 },
6261         { "RcfPeerFin", 16, 1 },
6262         { "RcfReasonOut", 12, 4 },
6263         { "TxCchannel", 10, 2 },
6264         { "RcfTxChannel", 8, 2 },
6265         { "RxEchannel", 6, 2 },
6266         { "RcfRxChannel", 5, 1 },
6267         { "RcfDataOutSrdy", 4, 1 },
6268         { "RxDvld", 3, 1 },
6269         { "RxOoDvld", 2, 1 },
6270         { "RxCongestion", 1, 1 },
6271         { "TxCongestion", 0, 1 },
6272         { NULL }
6273 };
6274
6275 static struct field_desc tp_la1[] = {
6276         { "CplCmdIn", 56, 8 },
6277         { "CplCmdOut", 48, 8 },
6278         { "ESynOut", 47, 1 },
6279         { "EAckOut", 46, 1 },
6280         { "EFinOut", 45, 1 },
6281         { "ERstOut", 44, 1 },
6282         { "SynIn", 43, 1 },
6283         { "AckIn", 42, 1 },
6284         { "FinIn", 41, 1 },
6285         { "RstIn", 40, 1 },
6286         { "DataIn", 39, 1 },
6287         { "DataInVld", 38, 1 },
6288         { "PadIn", 37, 1 },
6289         { "RxBufEmpty", 36, 1 },
6290         { "RxDdp", 35, 1 },
6291         { "RxFbCongestion", 34, 1 },
6292         { "TxFbCongestion", 33, 1 },
6293         { "TxPktSumSrdy", 32, 1 },
6294         { "RcfUlpType", 28, 4 },
6295         { "Eread", 27, 1 },
6296         { "Ebypass", 26, 1 },
6297         { "Esave", 25, 1 },
6298         { "Static0", 24, 1 },
6299         { "Cread", 23, 1 },
6300         { "Cbypass", 22, 1 },
6301         { "Csave", 21, 1 },
6302         { "CPktOut", 20, 1 },
6303         { "RxPagePoolFull", 18, 2 },
6304         { "RxLpbkPkt", 17, 1 },
6305         { "TxLpbkPkt", 16, 1 },
6306         { "RxVfValid", 15, 1 },
6307         { "SynLearned", 14, 1 },
6308         { "SetDelEntry", 13, 1 },
6309         { "SetInvEntry", 12, 1 },
6310         { "CpcmdDvld", 11, 1 },
6311         { "CpcmdSave", 10, 1 },
6312         { "RxPstructsFull", 8, 2 },
6313         { "EpcmdDvld", 7, 1 },
6314         { "EpcmdFlush", 6, 1 },
6315         { "EpcmdTrimPrefix", 5, 1 },
6316         { "EpcmdTrimPostfix", 4, 1 },
6317         { "ERssIp4Pkt", 3, 1 },
6318         { "ERssIp6Pkt", 2, 1 },
6319         { "ERssTcpUdpPkt", 1, 1 },
6320         { "ERssFceFipPkt", 0, 1 },
6321         { NULL }
6322 };
6323
6324 static struct field_desc tp_la2[] = {
6325         { "CplCmdIn", 56, 8 },
6326         { "MpsVfVld", 55, 1 },
6327         { "MpsPf", 52, 3 },
6328         { "MpsVf", 44, 8 },
6329         { "SynIn", 43, 1 },
6330         { "AckIn", 42, 1 },
6331         { "FinIn", 41, 1 },
6332         { "RstIn", 40, 1 },
6333         { "DataIn", 39, 1 },
6334         { "DataInVld", 38, 1 },
6335         { "PadIn", 37, 1 },
6336         { "RxBufEmpty", 36, 1 },
6337         { "RxDdp", 35, 1 },
6338         { "RxFbCongestion", 34, 1 },
6339         { "TxFbCongestion", 33, 1 },
6340         { "TxPktSumSrdy", 32, 1 },
6341         { "RcfUlpType", 28, 4 },
6342         { "Eread", 27, 1 },
6343         { "Ebypass", 26, 1 },
6344         { "Esave", 25, 1 },
6345         { "Static0", 24, 1 },
6346         { "Cread", 23, 1 },
6347         { "Cbypass", 22, 1 },
6348         { "Csave", 21, 1 },
6349         { "CPktOut", 20, 1 },
6350         { "RxPagePoolFull", 18, 2 },
6351         { "RxLpbkPkt", 17, 1 },
6352         { "TxLpbkPkt", 16, 1 },
6353         { "RxVfValid", 15, 1 },
6354         { "SynLearned", 14, 1 },
6355         { "SetDelEntry", 13, 1 },
6356         { "SetInvEntry", 12, 1 },
6357         { "CpcmdDvld", 11, 1 },
6358         { "CpcmdSave", 10, 1 },
6359         { "RxPstructsFull", 8, 2 },
6360         { "EpcmdDvld", 7, 1 },
6361         { "EpcmdFlush", 6, 1 },
6362         { "EpcmdTrimPrefix", 5, 1 },
6363         { "EpcmdTrimPostfix", 4, 1 },
6364         { "ERssIp4Pkt", 3, 1 },
6365         { "ERssIp6Pkt", 2, 1 },
6366         { "ERssTcpUdpPkt", 1, 1 },
6367         { "ERssFceFipPkt", 0, 1 },
6368         { NULL }
6369 };
6370
/*
 * Display one single-word TP LA entry using the tp_la0 layout.  idx is
 * unused here; it exists only to match the show_func pointer signature
 * used by sysctl_tp_la().
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

	field_desc_show(sb, *p, tp_la0);
}
6377
/*
 * Display one two-word TP LA entry (capture mode 2); both words use the
 * tp_la0 layout.  The second word of the final entry is suppressed when
 * it reads as all-ones (presumably never written — TODO confirm).
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], tp_la0);
}
6388
/*
 * Display one two-word TP LA entry (capture mode 3).  The first word
 * uses the tp_la0 layout; bit 17 (TunnelPkt) of that word selects the
 * tp_la2 or tp_la1 layout for the second word.  As in tp_la_show2(),
 * the final entry's second word is skipped when it is all-ones.
 */
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}
6399
6400 static int
6401 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6402 {
6403         struct adapter *sc = arg1;
6404         struct sbuf *sb;
6405         uint64_t *buf, *p;
6406         int rc;
6407         u_int i, inc;
6408         void (*show_func)(struct sbuf *, uint64_t *, int);
6409
6410         rc = sysctl_wire_old_buffer(req, 0);
6411         if (rc != 0)
6412                 return (rc);
6413
6414         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6415         if (sb == NULL)
6416                 return (ENOMEM);
6417
6418         buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6419
6420         t4_tp_read_la(sc, buf, NULL);
6421         p = buf;
6422
6423         switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6424         case 2:
6425                 inc = 2;
6426                 show_func = tp_la_show2;
6427                 break;
6428         case 3:
6429                 inc = 2;
6430                 show_func = tp_la_show3;
6431                 break;
6432         default:
6433                 inc = 1;
6434                 show_func = tp_la_show;
6435         }
6436
6437         for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6438                 (*show_func)(sb, p, i);
6439
6440         rc = sbuf_finish(sb);
6441         sbuf_delete(sb);
6442         free(buf, M_CXGBE);
6443         return (rc);
6444 }
6445
6446 static int
6447 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6448 {
6449         struct adapter *sc = arg1;
6450         struct sbuf *sb;
6451         int rc;
6452         u64 nrate[NCHAN], orate[NCHAN];
6453
6454         rc = sysctl_wire_old_buffer(req, 0);
6455         if (rc != 0)
6456                 return (rc);
6457
6458         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6459         if (sb == NULL)
6460                 return (ENOMEM);
6461
6462         t4_get_chan_txrate(sc, nrate, orate);
6463         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6464                  "channel 3\n");
6465         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6466             nrate[0], nrate[1], nrate[2], nrate[3]);
6467         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6468             orate[0], orate[1], orate[2], orate[3]);
6469
6470         rc = sbuf_finish(sb);
6471         sbuf_delete(sb);
6472
6473         return (rc);
6474 }
6475
6476 static int
6477 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6478 {
6479         struct adapter *sc = arg1;
6480         struct sbuf *sb;
6481         uint32_t *buf, *p;
6482         int rc, i;
6483
6484         rc = sysctl_wire_old_buffer(req, 0);
6485         if (rc != 0)
6486                 return (rc);
6487
6488         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6489         if (sb == NULL)
6490                 return (ENOMEM);
6491
6492         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6493             M_ZERO | M_WAITOK);
6494
6495         t4_ulprx_read_la(sc, buf);
6496         p = buf;
6497
6498         sbuf_printf(sb, "      Pcmd        Type   Message"
6499             "                Data");
6500         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6501                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6502                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6503         }
6504
6505         rc = sbuf_finish(sb);
6506         sbuf_delete(sb);
6507         free(buf, M_CXGBE);
6508         return (rc);
6509 }
6510
6511 static int
6512 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6513 {
6514         struct adapter *sc = arg1;
6515         struct sbuf *sb;
6516         int rc, v;
6517
6518         rc = sysctl_wire_old_buffer(req, 0);
6519         if (rc != 0)
6520                 return (rc);
6521
6522         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6523         if (sb == NULL)
6524                 return (ENOMEM);
6525
6526         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6527         if (G_STATSOURCE_T5(v) == 7) {
6528                 if (G_STATMODE(v) == 0) {
6529                         sbuf_printf(sb, "total %d, incomplete %d",
6530                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6531                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6532                 } else if (G_STATMODE(v) == 1) {
6533                         sbuf_printf(sb, "total %d, data overflow %d",
6534                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6535                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6536                 }
6537         }
6538         rc = sbuf_finish(sb);
6539         sbuf_delete(sb);
6540
6541         return (rc);
6542 }
6543 #endif
6544
6545 static inline void
6546 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6547 {
6548         struct buf_ring *br;
6549         struct mbuf *m;
6550
6551         TXQ_LOCK_ASSERT_OWNED(txq);
6552
6553         br = txq->br;
6554         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6555         if (m)
6556                 t4_eth_tx(ifp, txq, m);
6557 }
6558
6559 void
6560 t4_tx_callout(void *arg)
6561 {
6562         struct sge_eq *eq = arg;
6563         struct adapter *sc;
6564
6565         if (EQ_TRYLOCK(eq) == 0)
6566                 goto reschedule;
6567
6568         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6569                 EQ_UNLOCK(eq);
6570 reschedule:
6571                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6572                         callout_schedule(&eq->tx_callout, 1);
6573                 return;
6574         }
6575
6576         EQ_LOCK_ASSERT_OWNED(eq);
6577
6578         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6579
6580                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6581                         struct sge_txq *txq = arg;
6582                         struct port_info *pi = txq->ifp->if_softc;
6583
6584                         sc = pi->adapter;
6585                 } else {
6586                         struct sge_wrq *wrq = arg;
6587
6588                         sc = wrq->adapter;
6589                 }
6590
6591                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6592         }
6593
6594         EQ_UNLOCK(eq);
6595 }
6596
6597 void
6598 t4_tx_task(void *arg, int count)
6599 {
6600         struct sge_eq *eq = arg;
6601
6602         EQ_LOCK(eq);
6603         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6604                 struct sge_txq *txq = arg;
6605                 txq_start(txq->ifp, txq);
6606         } else {
6607                 struct sge_wrq *wrq = arg;
6608                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6609         }
6610         EQ_UNLOCK(eq);
6611 }
6612
6613 static uint32_t
6614 fconf_to_mode(uint32_t fconf)
6615 {
6616         uint32_t mode;
6617
6618         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6619             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6620
6621         if (fconf & F_FRAGMENTATION)
6622                 mode |= T4_FILTER_IP_FRAGMENT;
6623
6624         if (fconf & F_MPSHITTYPE)
6625                 mode |= T4_FILTER_MPS_HIT_TYPE;
6626
6627         if (fconf & F_MACMATCH)
6628                 mode |= T4_FILTER_MAC_IDX;
6629
6630         if (fconf & F_ETHERTYPE)
6631                 mode |= T4_FILTER_ETH_TYPE;
6632
6633         if (fconf & F_PROTOCOL)
6634                 mode |= T4_FILTER_IP_PROTO;
6635
6636         if (fconf & F_TOS)
6637                 mode |= T4_FILTER_IP_TOS;
6638
6639         if (fconf & F_VLAN)
6640                 mode |= T4_FILTER_VLAN;
6641
6642         if (fconf & F_VNIC_ID)
6643                 mode |= T4_FILTER_VNIC;
6644
6645         if (fconf & F_PORT)
6646                 mode |= T4_FILTER_PORT;
6647
6648         if (fconf & F_FCOE)
6649                 mode |= T4_FILTER_FCoE;
6650
6651         return (mode);
6652 }
6653
6654 static uint32_t
6655 mode_to_fconf(uint32_t mode)
6656 {
6657         uint32_t fconf = 0;
6658
6659         if (mode & T4_FILTER_IP_FRAGMENT)
6660                 fconf |= F_FRAGMENTATION;
6661
6662         if (mode & T4_FILTER_MPS_HIT_TYPE)
6663                 fconf |= F_MPSHITTYPE;
6664
6665         if (mode & T4_FILTER_MAC_IDX)
6666                 fconf |= F_MACMATCH;
6667
6668         if (mode & T4_FILTER_ETH_TYPE)
6669                 fconf |= F_ETHERTYPE;
6670
6671         if (mode & T4_FILTER_IP_PROTO)
6672                 fconf |= F_PROTOCOL;
6673
6674         if (mode & T4_FILTER_IP_TOS)
6675                 fconf |= F_TOS;
6676
6677         if (mode & T4_FILTER_VLAN)
6678                 fconf |= F_VLAN;
6679
6680         if (mode & T4_FILTER_VNIC)
6681                 fconf |= F_VNIC_ID;
6682
6683         if (mode & T4_FILTER_PORT)
6684                 fconf |= F_PORT;
6685
6686         if (mode & T4_FILTER_FCoE)
6687                 fconf |= F_FCOE;
6688
6689         return (fconf);
6690 }
6691
6692 static uint32_t
6693 fspec_to_fconf(struct t4_filter_specification *fs)
6694 {
6695         uint32_t fconf = 0;
6696
6697         if (fs->val.frag || fs->mask.frag)
6698                 fconf |= F_FRAGMENTATION;
6699
6700         if (fs->val.matchtype || fs->mask.matchtype)
6701                 fconf |= F_MPSHITTYPE;
6702
6703         if (fs->val.macidx || fs->mask.macidx)
6704                 fconf |= F_MACMATCH;
6705
6706         if (fs->val.ethtype || fs->mask.ethtype)
6707                 fconf |= F_ETHERTYPE;
6708
6709         if (fs->val.proto || fs->mask.proto)
6710                 fconf |= F_PROTOCOL;
6711
6712         if (fs->val.tos || fs->mask.tos)
6713                 fconf |= F_TOS;
6714
6715         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6716                 fconf |= F_VLAN;
6717
6718         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6719                 fconf |= F_VNIC_ID;
6720
6721         if (fs->val.iport || fs->mask.iport)
6722                 fconf |= F_PORT;
6723
6724         if (fs->val.fcoe || fs->mask.fcoe)
6725                 fconf |= F_FCOE;
6726
6727         return (fconf);
6728 }
6729
/*
 * Report the global filter mode to the ioctl caller as a T4_FILTER_* bitmap.
 * Reads the live TP_VLAN_PRI_MAP register and resyncs the driver's cached
 * copy if the two have drifted apart.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        int rc;
        uint32_t fconf;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getfm");
        if (rc)
                return (rc);

        /* Read the current filter mode through the TP indirect registers. */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
            A_TP_VLAN_PRI_MAP);

        /* Hardware is authoritative; warn and refresh the cached copy. */
        if (sc->params.tp.vlan_pri_map != fconf) {
                log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
                    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
                    fconf);
                sc->params.tp.vlan_pri_map = fconf;
        }

        *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6756
/*
 * Change the global filter mode.  Refused while any filter (or, with
 * TCP_OFFLOAD, any offloaded port) is active, since the mode determines how
 * every installed filter is interpreted.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        uint32_t fconf;
        int rc;

        fconf = mode_to_fconf(mode);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4setfm");
        if (rc)
                return (rc);

        /* The mode can't change while filters are in use. */
        if (sc->tids.ftids_in_use > 0) {
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        /* Nor while any port has offload enabled. */
        if (sc->offload_map) {
                rc = EBUSY;
                goto done;
        }
#endif

#ifdef notyet
        rc = -t4_set_filter_mode(sc, fconf);
        if (rc == 0)
                sc->filter_mode = fconf;
#else
        /* Actually writing the new mode isn't implemented yet. */
        rc = ENOTSUP;
#endif

done:
        end_synchronized_op(sc, LOCK_HELD);
        return (rc);
}
6794
/*
 * Read the hit count of filter 'fid' straight out of its TCB in adapter
 * memory, through memory window 0.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
        uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
        uint64_t hits;

        /* Position window 0 over this filter's TCB. */
        memwin_info(sc, 0, &mw_base, NULL);
        off = position_memwin(sc, 0,
            tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
        if (is_t4(sc)) {
                /* T4: 64-bit big-endian counter at TCB offset 16. */
                hits = t4_read_reg64(sc, mw_base + off + 16);
                hits = be64toh(hits);
        } else {
                /* Later chips: 32-bit big-endian counter at TCB offset 24. */
                hits = t4_read_reg(sc, mw_base + off + 24);
                hits = be32toh(hits);
        }

        return (hits);
}
6814
/*
 * Return (via *t) the first valid filter at index >= t->idx.  "Not found"
 * is reported by setting t->idx to 0xffffffff; the function itself only
 * fails if the synchronized op can't be entered.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        int i, rc, nfilters = sc->tids.nftids;
        struct filter_entry *f;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getf");
        if (rc)
                return (rc);

        /* Nothing to report if no filters exist or the index is bogus. */
        if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                goto done;
        }

        /* Scan forward from t->idx for the first valid entry. */
        f = &sc->tids.ftid_tab[t->idx];
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        t->idx = i;
                        t->l2tidx = f->l2t ? f->l2t->idx : 0;
                        t->smtidx = f->smtidx;
                        /* Hit counts are only kept if the filter asked. */
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, t->idx);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }

        t->idx = 0xffffffff;
done:
        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6853
/*
 * Install a hardware filter at index t->idx.  Validates the request against
 * the global filter mode and adapter limits, allocates the driver's filter
 * table on first use, sends the filter work request, and then sleeps until
 * the firmware reply (see t4_filter_rpl) reports the outcome.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters, nports;
        struct filter_entry *f;
        int i, rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;
        nports = sc->params.nports;

        /* No filter region was carved out of the TID space. */
        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        /* The adapter must be fully initialized first. */
        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        if (t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        /* Validate against the global filter mode */
        if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
            sc->params.tp.vlan_pri_map) {
                rc = E2BIG;
                goto done;
        }

        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
                rc = EINVAL;
                goto done;
        }

        if (t->fs.val.iport >= nports) {
                rc = EINVAL;
                goto done;
        }

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq) {
                rc = EINVAL;
                goto done;
        }

        /* IPv6 filter idx must be 4 aligned */
        if (t->fs.type == 1 &&
            ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
                rc = EINVAL;
                goto done;
        }

        /* First filter ever: allocate the table (and its lock) on demand. */
        if (sc->tids.ftid_tab == NULL) {
                KASSERT(sc->tids.ftids_in_use == 0,
                    ("%s: no memory allocated but filters_in_use > 0",
                    __func__));

                sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
                    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
                if (sc->tids.ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
        }

        /*
         * An IPv4 filter occupies one slot; an IPv6 filter (type == 1)
         * occupies four, all of which must be free and unlocked.
         */
        for (i = 0; i < 4; i++) {
                f = &sc->tids.ftid_tab[t->idx + i];

                if (f->pending || f->valid) {
                        rc = EBUSY;
                        goto done;
                }
                if (f->locked) {
                        rc = EPERM;
                        goto done;
                }

                if (t->fs.type == 0)
                        break;
        }

        f = &sc->tids.ftid_tab[t->idx];
        f->fs = t->fs;

        rc = set_filter_wr(sc, t->idx);
done:
        end_synchronized_op(sc, 0);

        /*
         * The work request was sent; wait for the reply.  An interrupted
         * sleep leaves the operation in flight and returns EINPROGRESS.
         */
        if (rc == 0) {
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? 0 : EIO;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4setfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }
        return (rc);
}
6968
/*
 * Remove the hardware filter at index t->idx.  Sends a delete work request
 * and sleeps until the firmware reply (see t4_filter_rpl) confirms it.
 * On success t->fs is filled with the deleted filter's specification.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters;
        struct filter_entry *f;
        int rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;

        /* No filter region was carved out of the TID space. */
        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
            t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        f = &sc->tids.ftid_tab[t->idx];

        /* Another operation on this filter is already in flight. */
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        if (f->locked) {
                rc = EPERM;
                goto done;
        }

        if (f->valid) {
                t->fs = f->fs;  /* extra info for the caller */
                rc = del_filter_wr(sc, t->idx);
        }

done:
        end_synchronized_op(sc, 0);

        /*
         * Wait for the firmware's reply.  Note the inverted test compared
         * to set_filter: a still-valid entry after the wakeup means the
         * delete failed.  An interrupted sleep returns EINPROGRESS.
         */
        if (rc == 0) {
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? EIO : 0;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4delfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (rc);
}
7036
7037 static void
7038 clear_filter(struct filter_entry *f)
7039 {
7040         if (f->l2t)
7041                 t4_l2t_release(f->l2t);
7042
7043         bzero(f, sizeof (*f));
7044 }
7045
/*
 * Build and send the firmware work request that programs filter 'fidx'.
 * Allocates a switching L2T entry first if the filter rewrites the dmac or
 * vlan.  The request is asynchronous: the entry is marked pending here and
 * resolved when the reply arrives in t4_filter_rpl.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
        struct filter_entry *f = &sc->tids.ftid_tab[fidx];
        struct wrqe *wr;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (f->fs.newdmac || f->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                f->l2t = t4_l2t_alloc_switching(sc->l2t);
                if (f->l2t == NULL)
                        return (EAGAIN);
                if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
                    f->fs.dmac)) {
                        t4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return (ENOMEM);
                }
        }

        /* The firmware identifies filters by absolute TID. */
        ftid = sc->tids.ftid_base + fidx;

        wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
        if (wr == NULL)
                return (ENOMEM);

        fwr = wrtod(wr);
        bzero(fwr, sizeof (*fwr));

        /* Multi-byte fields in the work request are big-endian. */
        fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(ftid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
        fwr->smac_sel = 0;
        /* Replies are delivered to the firmware event queue. */
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        /* l/f = local/foreign, i.e. destination/source of the packet. */
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        if (f->fs.newsmac)
                bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

        /* Count the filter as in use from this point; see t4_filter_rpl. */
        f->pending = 1;
        sc->tids.ftids_in_use++;

        t4_wrq_tx(sc, wr);
        return (0);
}
7148
7149 static int
7150 del_filter_wr(struct adapter *sc, int fidx)
7151 {
7152         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7153         struct wrqe *wr;
7154         struct fw_filter_wr *fwr;
7155         unsigned int ftid;
7156
7157         ftid = sc->tids.ftid_base + fidx;
7158
7159         wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7160         if (wr == NULL)
7161                 return (ENOMEM);
7162         fwr = wrtod(wr);
7163         bzero(fwr, sizeof (*fwr));
7164
7165         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7166
7167         f->pending = 1;
7168         t4_wrq_tx(sc, wr);
7169         return (0);
7170 }
7171
/*
 * Handler for the firmware's reply to a filter work request (both set and
 * delete).  Resolves the pending state of the affected filter entry and
 * wakes up any thread sleeping in set_filter/del_filter.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        unsigned int idx = GET_TID(rpl);

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));

        /* Only act on TIDs that fall inside the filter region. */
        if (idx >= sc->tids.ftid_base &&
            (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
                unsigned int rc = G_COOKIE(rpl->cookie);
                struct filter_entry *f = &sc->tids.ftid_tab[idx];

                mtx_lock(&sc->tids.ftid_lock);
                if (rc == FW_FILTER_WR_FLT_ADDED) {
                        KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
                            __func__, idx));
                        f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        if (rc != FW_FILTER_WR_FLT_DELETED) {
                                /* Add or delete failed, display an error */
                                log(LOG_ERR,
                                    "filter %u setup failed with error %u\n",
                                    idx, rc);
                        }

                        /* Deleted, or the add failed: release the entry. */
                        clear_filter(f);
                        sc->tids.ftids_in_use--;
                }
                wakeup(&sc->tids.ftid_tab);
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (0);
}
7211
7212 static int
7213 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7214 {
7215         int rc;
7216
7217         if (cntxt->cid > M_CTXTQID)
7218                 return (EINVAL);
7219
7220         if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7221             cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7222                 return (EINVAL);
7223
7224         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7225         if (rc)
7226                 return (rc);
7227
7228         if (sc->flags & FW_OK) {
7229                 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7230                     &cntxt->data[0]);
7231                 if (rc == 0)
7232                         goto done;
7233         }
7234
7235         /*
7236          * Read via firmware failed or wasn't even attempted.  Read directly via
7237          * the backdoor.
7238          */
7239         rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7240 done:
7241         end_synchronized_op(sc, 0);
7242         return (rc);
7243 }
7244
7245 static int
7246 load_fw(struct adapter *sc, struct t4_data *fw)
7247 {
7248         int rc;
7249         uint8_t *fw_data;
7250
7251         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7252         if (rc)
7253                 return (rc);
7254
7255         if (sc->flags & FULL_INIT_DONE) {
7256                 rc = EBUSY;
7257                 goto done;
7258         }
7259
7260         fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7261         if (fw_data == NULL) {
7262                 rc = ENOMEM;
7263                 goto done;
7264         }
7265
7266         rc = copyin(fw->data, fw_data, fw->len);
7267         if (rc == 0)
7268                 rc = -t4_load_fw(sc, fw_data, fw->len);
7269
7270         free(fw_data, M_CXGBE);
7271 done:
7272         end_synchronized_op(sc, 0);
7273         return (rc);
7274 }
7275
/*
 * Copy mr->len bytes of adapter memory, starting at mr->addr, out to the
 * user buffer mr->data by sliding PCIe memory window 'win' across the range.
 *
 * NOTE(review): the 4-byte register reads assume mr->addr and mr->len are
 * 4-byte aligned — presumably enforced by validate_mem_range; confirm.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
        uint32_t addr, off, remaining, i, n;
        uint32_t *buf, *b;
        uint32_t mw_base, mw_aperture;
        int rc;
        uint8_t *dst;

        rc = validate_mem_range(sc, mr->addr, mr->len);
        if (rc != 0)
                return (rc);

        /* Bounce buffer, at most one window aperture in size. */
        memwin_info(sc, win, &mw_base, &mw_aperture);
        buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
        addr = mr->addr;
        remaining = mr->len;
        dst = (void *)mr->data;

        while (remaining) {
                /* Slide the window over the current address. */
                off = position_memwin(sc, win, addr);

                /* number of bytes that we'll copy in the inner loop */
                n = min(remaining, mw_aperture - off);
                for (i = 0; i < n; i += 4)
                        *b++ = t4_read_reg(sc, mw_base + off + i);

                rc = copyout(buf, dst, n);
                if (rc != 0)
                        break;

                b = buf;
                dst += n;
                remaining -= n;
                addr += n;
        }

        free(buf, M_CXGBE);
        return (rc);
}
7316
7317 static int
7318 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7319 {
7320         int rc;
7321
7322         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7323                 return (EINVAL);
7324
7325         if (i2cd->len > 1) {
7326                 /* XXX: need fw support for longer reads in one go */
7327                 return (ENOTSUP);
7328         }
7329
7330         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7331         if (rc)
7332                 return (rc);
7333         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7334             i2cd->offset, &i2cd->data[0]);
7335         end_synchronized_op(sc, 0);
7336
7337         return (rc);
7338 }
7339
/*
 * Range check for scheduler-class parameters.  A negative value means the
 * parameter was left unspecified and is always acceptable; otherwise the
 * value must lie within [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{

        if (val < 0)
                return (1);
        return (val >= lo && val <= hi);
}
7346
7347 static int
7348 set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7349 {
7350         int fw_subcmd, fw_type, rc;
7351
7352         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7353         if (rc)
7354                 return (rc);
7355
7356         if (!(sc->flags & FULL_INIT_DONE)) {
7357                 rc = EAGAIN;
7358                 goto done;
7359         }
7360
7361         /*
7362          * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7363          * sub-command and type are in common locations.)
7364          */
7365         if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7366                 fw_subcmd = FW_SCHED_SC_CONFIG;
7367         else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7368                 fw_subcmd = FW_SCHED_SC_PARAMS;
7369         else {
7370                 rc = EINVAL;
7371                 goto done;
7372         }
7373         if (p->type == SCHED_CLASS_TYPE_PACKET)
7374                 fw_type = FW_SCHED_TYPE_PKTSCHED;
7375         else {
7376                 rc = EINVAL;
7377                 goto done;
7378         }
7379
7380         if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7381                 /* Vet our parameters ..*/
7382                 if (p->u.config.minmax < 0) {
7383                         rc = EINVAL;
7384                         goto done;
7385                 }
7386
7387                 /* And pass the request to the firmware ...*/
7388                 rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
7389                 goto done;
7390         }
7391
7392         if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7393                 int fw_level;
7394                 int fw_mode;
7395                 int fw_rateunit;
7396                 int fw_ratemode;
7397
7398                 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7399                         fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7400                 else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7401                         fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7402                 else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7403                         fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7404                 else {
7405                         rc = EINVAL;
7406                         goto done;
7407                 }
7408
7409                 if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7410                         fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7411                 else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7412                         fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7413                 else {
7414                         rc = EINVAL;
7415                         goto done;
7416                 }
7417
7418                 if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7419                         fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7420                 else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7421                         fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7422                 else {
7423                         rc = EINVAL;
7424                         goto done;
7425                 }
7426
7427                 if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7428                         fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7429                 else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7430                         fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7431                 else {
7432                         rc = EINVAL;
7433                         goto done;
7434                 }
7435
7436                 /* Vet our parameters ... */
7437                 if (!in_range(p->u.params.channel, 0, 3) ||
7438                     !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7439                     !in_range(p->u.params.minrate, 0, 10000000) ||
7440                     !in_range(p->u.params.maxrate, 0, 10000000) ||
7441                     !in_range(p->u.params.weight, 0, 100)) {
7442                         rc = ERANGE;
7443                         goto done;
7444                 }
7445
7446                 /*
7447                  * Translate any unset parameters into the firmware's
7448                  * nomenclature and/or fail the call if the parameters
7449                  * are required ...
7450                  */
7451                 if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7452                     p->u.params.channel < 0 || p->u.params.cl < 0) {
7453                         rc = EINVAL;
7454                         goto done;
7455                 }
7456                 if (p->u.params.minrate < 0)
7457                         p->u.params.minrate = 0;
7458                 if (p->u.params.maxrate < 0) {
7459                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7460                             p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7461                                 rc = EINVAL;
7462                                 goto done;
7463                         } else
7464                                 p->u.params.maxrate = 0;
7465                 }
7466                 if (p->u.params.weight < 0) {
7467                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7468                                 rc = EINVAL;
7469                                 goto done;
7470                         } else
7471                                 p->u.params.weight = 0;
7472                 }
7473                 if (p->u.params.pktsize < 0) {
7474                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7475                             p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7476                                 rc = EINVAL;
7477                                 goto done;
7478                         } else
7479                                 p->u.params.pktsize = 0;
7480                 }
7481
7482                 /* See what the firmware thinks of the request ... */
7483                 rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7484                     fw_rateunit, fw_ratemode, p->u.params.channel,
7485                     p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7486                     p->u.params.weight, p->u.params.pktsize);
7487                 goto done;
7488         }
7489
7490         rc = EINVAL;
7491 done:
7492         end_synchronized_op(sc, 0);
7493         return (rc);
7494 }
7495
/*
 * Bind the TX queue(s) of a port to a firmware TX scheduling class.
 * Backend for the CHELSIO_T4_SCHED_QUEUE ioctl.
 */
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
        struct port_info *pi = NULL;
        struct sge_txq *txq;
        uint32_t fw_mnem, fw_queue, fw_class;
        int i, rc;

        /* Serialize with other synchronized operations on this adapter. */
        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
        if (rc)
                return (rc);

        /* TX queues don't exist until the adapter is fully initialized. */
        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        if (p->port >= sc->params.nports) {
                rc = EINVAL;
                goto done;
        }

        /*
         * NOTE(review): these range checks reject negative p->queue and
         * p->cl, which appears to make the p->queue < 0 ("all queues")
         * path and the p->cl < 0 ("unbind" -> 0xffffffff) case further
         * down unreachable -- confirm the intended ioctl semantics.
         */
        pi = sc->port[p->port];
        if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
                rc = EINVAL;
                goto done;
        }

        /*
         * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
         * Scheduling Class in this case).
         */
        fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
        fw_class = p->cl < 0 ? 0xffffffff : p->cl;

        /*
         * If op.queue is non-negative, then we're only changing the scheduling
         * on a single specified TX queue.
         */
        if (p->queue >= 0) {
                txq = &sc->sge.txq[pi->first_txq + p->queue];
                fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
                    &fw_class);
                goto done;
        }

        /*
         * Change the scheduling on all the TX queues for the
         * interface.
         */
        for_each_txq(pi, i, txq) {
                fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
                rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
                    &fw_class);
                if (rc)
                        goto done;
        }

        rc = 0;
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7561
7562 int
7563 t4_os_find_pci_capability(struct adapter *sc, int cap)
7564 {
7565         int i;
7566
7567         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7568 }
7569
7570 int
7571 t4_os_pci_save_state(struct adapter *sc)
7572 {
7573         device_t dev;
7574         struct pci_devinfo *dinfo;
7575
7576         dev = sc->dev;
7577         dinfo = device_get_ivars(dev);
7578
7579         pci_cfg_save(dev, dinfo, 0);
7580         return (0);
7581 }
7582
7583 int
7584 t4_os_pci_restore_state(struct adapter *sc)
7585 {
7586         device_t dev;
7587         struct pci_devinfo *dinfo;
7588
7589         dev = sc->dev;
7590         dinfo = device_get_ivars(dev);
7591
7592         pci_cfg_restore(dev, dinfo);
7593         return (0);
7594 }
7595
7596 void
7597 t4_os_portmod_changed(const struct adapter *sc, int idx)
7598 {
7599         struct port_info *pi = sc->port[idx];
7600         static const char *mod_str[] = {
7601                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7602         };
7603
7604         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7605                 if_printf(pi->ifp, "transceiver unplugged.\n");
7606         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7607                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7608         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7609                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7610         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7611                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7612                     mod_str[pi->mod_type]);
7613         } else {
7614                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7615                     pi->mod_type);
7616         }
7617 }
7618
7619 void
7620 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7621 {
7622         struct port_info *pi = sc->port[idx];
7623         struct ifnet *ifp = pi->ifp;
7624
7625         if (link_stat) {
7626                 pi->linkdnrc = -1;
7627                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7628                 if_link_state_change(ifp, LINK_STATE_UP);
7629         } else {
7630                 if (reason >= 0)
7631                         pi->linkdnrc = reason;
7632                 if_link_state_change(ifp, LINK_STATE_DOWN);
7633         }
7634 }
7635
7636 void
7637 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7638 {
7639         struct adapter *sc;
7640
7641         sx_slock(&t4_list_lock);
7642         SLIST_FOREACH(sc, &t4_list, link) {
7643                 /*
7644                  * func should not make any assumptions about what state sc is
7645                  * in - the only guarantee is that sc->sc_lock is a valid lock.
7646                  */
7647                 func(sc, arg);
7648         }
7649         sx_sunlock(&t4_list_lock);
7650 }
7651
/* cdev open: no per-open state to set up. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

        return (0);
}
7657
/* cdev close: nothing to tear down. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

        return (0);
}
7663
/*
 * ioctl handler for the nexus character device.  Every command requires
 * PRIV_DRIVER; 'data' points at the command-specific argument structure
 * already copied in by the cdev framework.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        int rc;
        struct adapter *sc = dev->si_drv1;

        rc = priv_check(td, PRIV_DRIVER);
        if (rc != 0)
                return (rc);

        switch (cmd) {
        case CHELSIO_T4_GETREG: {
                struct t4_reg *edata = (struct t4_reg *)data;

                /* Reads must be 4-byte aligned and inside the mmio window. */
                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4)
                        edata->val = t4_read_reg(sc, edata->addr);
                else if (edata->size == 8)
                        edata->val = t4_read_reg64(sc, edata->addr);
                else
                        return (EINVAL);

                break;
        }
        case CHELSIO_T4_SETREG: {
                struct t4_reg *edata = (struct t4_reg *)data;

                /* Same alignment/range rules as CHELSIO_T4_GETREG. */
                if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
                        return (EFAULT);

                if (edata->size == 4) {
                        /* A 4-byte write must not carry high-order bits. */
                        if (edata->val & 0xffffffff00000000)
                                return (EINVAL);
                        t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
                } else if (edata->size == 8)
                        t4_write_reg64(sc, edata->addr, edata->val);
                else
                        return (EINVAL);
                break;
        }
        case CHELSIO_T4_REGDUMP: {
                struct t4_regdump *regs = (struct t4_regdump *)data;
                int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
                uint8_t *buf;

                /* Caller's buffer too small: tell it the size we need. */
                if (regs->len < reglen) {
                        regs->len = reglen; /* hint to the caller */
                        return (ENOBUFS);
                }

                regs->len = reglen;
                buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
                t4_get_regs(sc, regs, buf);
                rc = copyout(buf, regs->data, reglen);
                free(buf, M_CXGBE);
                break;
        }
        /* Hardware filter management. */
        case CHELSIO_T4_GET_FILTER_MODE:
                rc = get_filter_mode(sc, (uint32_t *)data);
                break;
        case CHELSIO_T4_SET_FILTER_MODE:
                rc = set_filter_mode(sc, *(uint32_t *)data);
                break;
        case CHELSIO_T4_GET_FILTER:
                rc = get_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_SET_FILTER:
                rc = set_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_DEL_FILTER:
                rc = del_filter(sc, (struct t4_filter *)data);
                break;
        case CHELSIO_T4_GET_SGE_CONTEXT:
                rc = get_sge_context(sc, (struct t4_sge_context *)data);
                break;
        case CHELSIO_T4_LOAD_FW:
                rc = load_fw(sc, (struct t4_data *)data);
                break;
        case CHELSIO_T4_GET_MEM:
                rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
                break;
        case CHELSIO_T4_GET_I2C:
                rc = read_i2c(sc, (struct t4_i2c_data *)data);
                break;
        case CHELSIO_T4_CLEAR_STATS: {
                int i;
                u_int port_id = *(uint32_t *)data;
                struct port_info *pi;

                if (port_id >= sc->params.nports)
                        return (EINVAL);
                pi = sc->port[port_id];

                /* MAC stats */
                t4_clr_port_stats(sc, pi->tx_chan);

                /* Per-queue software stats exist only once the port is up. */
                if (pi->flags & PORT_INIT_DONE) {
                        struct sge_rxq *rxq;
                        struct sge_txq *txq;
                        struct sge_wrq *wrq;

                        for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
                                rxq->lro.lro_queued = 0;
                                rxq->lro.lro_flushed = 0;
#endif
                                rxq->rxcsum = 0;
                                rxq->vlan_extraction = 0;
                        }

                        for_each_txq(pi, i, txq) {
                                txq->txcsum = 0;
                                txq->tso_wrs = 0;
                                txq->vlan_insertion = 0;
                                txq->imm_wrs = 0;
                                txq->sgl_wrs = 0;
                                txq->txpkt_wrs = 0;
                                txq->txpkts_wrs = 0;
                                txq->txpkts_pkts = 0;
                                txq->br->br_drops = 0;
                                txq->no_dmamap = 0;
                                txq->no_desc = 0;
                        }

#ifdef TCP_OFFLOAD
                        /* nothing to clear for each ofld_rxq */

                        for_each_ofld_txq(pi, i, wrq) {
                                wrq->tx_wrs = 0;
                                wrq->no_desc = 0;
                        }
#endif
                        /* Control queue stats for this port. */
                        wrq = &sc->sge.ctrlq[pi->port_id];
                        wrq->tx_wrs = 0;
                        wrq->no_desc = 0;
                }
                break;
        }
        /* TX scheduling class configuration. */
        case CHELSIO_T4_SCHED_CLASS:
                rc = set_sched_class(sc, (struct t4_sched_params *)data);
                break;
        case CHELSIO_T4_SCHED_QUEUE:
                rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
                break;
        /* Packet tracing. */
        case CHELSIO_T4_GET_TRACER:
                rc = t4_get_tracer(sc, (struct t4_tracer *)data);
                break;
        case CHELSIO_T4_SET_TRACER:
                rc = t4_set_tracer(sc, (struct t4_tracer *)data);
                break;
        default:
                rc = EINVAL;
        }

        return (rc);
}
7823
7824 #ifdef TCP_OFFLOAD
7825 static int
7826 toe_capability(struct port_info *pi, int enable)
7827 {
7828         int rc;
7829         struct adapter *sc = pi->adapter;
7830
7831         ASSERT_SYNCHRONIZED_OP(sc);
7832
7833         if (!is_offload(sc))
7834                 return (ENODEV);
7835
7836         if (enable) {
7837                 if (!(sc->flags & FULL_INIT_DONE)) {
7838                         rc = cxgbe_init_synchronized(pi);
7839                         if (rc)
7840                                 return (rc);
7841                 }
7842
7843                 if (isset(&sc->offload_map, pi->port_id))
7844                         return (0);
7845
7846                 if (!(sc->flags & TOM_INIT_DONE)) {
7847                         rc = t4_activate_uld(sc, ULD_TOM);
7848                         if (rc == EAGAIN) {
7849                                 log(LOG_WARNING,
7850                                     "You must kldload t4_tom.ko before trying "
7851                                     "to enable TOE on a cxgbe interface.\n");
7852                         }
7853                         if (rc != 0)
7854                                 return (rc);
7855                         KASSERT(sc->tom_softc != NULL,
7856                             ("%s: TOM activated but softc NULL", __func__));
7857                         KASSERT(sc->flags & TOM_INIT_DONE,
7858                             ("%s: TOM activated but flag not set", __func__));
7859                 }
7860
7861                 setbit(&sc->offload_map, pi->port_id);
7862         } else {
7863                 if (!isset(&sc->offload_map, pi->port_id))
7864                         return (0);
7865
7866                 KASSERT(sc->flags & TOM_INIT_DONE,
7867                     ("%s: TOM never initialized?", __func__));
7868                 clrbit(&sc->offload_map, pi->port_id);
7869         }
7870
7871         return (0);
7872 }
7873
7874 /*
7875  * Add an upper layer driver to the global list.
7876  */
7877 int
7878 t4_register_uld(struct uld_info *ui)
7879 {
7880         int rc = 0;
7881         struct uld_info *u;
7882
7883         sx_xlock(&t4_uld_list_lock);
7884         SLIST_FOREACH(u, &t4_uld_list, link) {
7885             if (u->uld_id == ui->uld_id) {
7886                     rc = EEXIST;
7887                     goto done;
7888             }
7889         }
7890
7891         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7892         ui->refcount = 0;
7893 done:
7894         sx_xunlock(&t4_uld_list_lock);
7895         return (rc);
7896 }
7897
7898 int
7899 t4_unregister_uld(struct uld_info *ui)
7900 {
7901         int rc = EINVAL;
7902         struct uld_info *u;
7903
7904         sx_xlock(&t4_uld_list_lock);
7905
7906         SLIST_FOREACH(u, &t4_uld_list, link) {
7907             if (u == ui) {
7908                     if (ui->refcount > 0) {
7909                             rc = EBUSY;
7910                             goto done;
7911                     }
7912
7913                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7914                     rc = 0;
7915                     goto done;
7916             }
7917         }
7918 done:
7919         sx_xunlock(&t4_uld_list_lock);
7920         return (rc);
7921 }
7922
7923 int
7924 t4_activate_uld(struct adapter *sc, int id)
7925 {
7926         int rc = EAGAIN;
7927         struct uld_info *ui;
7928
7929         ASSERT_SYNCHRONIZED_OP(sc);
7930
7931         sx_slock(&t4_uld_list_lock);
7932
7933         SLIST_FOREACH(ui, &t4_uld_list, link) {
7934                 if (ui->uld_id == id) {
7935                         rc = ui->activate(sc);
7936                         if (rc == 0)
7937                                 ui->refcount++;
7938                         goto done;
7939                 }
7940         }
7941 done:
7942         sx_sunlock(&t4_uld_list_lock);
7943
7944         return (rc);
7945 }
7946
7947 int
7948 t4_deactivate_uld(struct adapter *sc, int id)
7949 {
7950         int rc = EINVAL;
7951         struct uld_info *ui;
7952
7953         ASSERT_SYNCHRONIZED_OP(sc);
7954
7955         sx_slock(&t4_uld_list_lock);
7956
7957         SLIST_FOREACH(ui, &t4_uld_list, link) {
7958                 if (ui->uld_id == id) {
7959                         rc = ui->deactivate(sc);
7960                         if (rc == 0)
7961                                 ui->refcount--;
7962                         goto done;
7963                 }
7964         }
7965 done:
7966         sx_sunlock(&t4_uld_list_lock);
7967
7968         return (rc);
7969 }
7970 #endif
7971
7972 /*
7973  * Come up with reasonable defaults for some of the tunables, provided they're
7974  * not set by the user (in which case we'll use the values as is).
7975  */
7976 static void
7977 tweak_tunables(void)
7978 {
7979         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
7980
7981         if (t4_ntxq10g < 1)
7982                 t4_ntxq10g = min(nc, NTXQ_10G);
7983
7984         if (t4_ntxq1g < 1)
7985                 t4_ntxq1g = min(nc, NTXQ_1G);
7986
7987         if (t4_nrxq10g < 1)
7988                 t4_nrxq10g = min(nc, NRXQ_10G);
7989
7990         if (t4_nrxq1g < 1)
7991                 t4_nrxq1g = min(nc, NRXQ_1G);
7992
7993 #ifdef TCP_OFFLOAD
7994         if (t4_nofldtxq10g < 1)
7995                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7996
7997         if (t4_nofldtxq1g < 1)
7998                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7999
8000         if (t4_nofldrxq10g < 1)
8001                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
8002
8003         if (t4_nofldrxq1g < 1)
8004                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
8005
8006         if (t4_toecaps_allowed == -1)
8007                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
8008 #else
8009         if (t4_toecaps_allowed == -1)
8010                 t4_toecaps_allowed = 0;
8011 #endif
8012
8013         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
8014                 t4_tmr_idx_10g = TMR_IDX_10G;
8015
8016         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
8017                 t4_pktc_idx_10g = PKTC_IDX_10G;
8018
8019         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
8020                 t4_tmr_idx_1g = TMR_IDX_1G;
8021
8022         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
8023                 t4_pktc_idx_1g = PKTC_IDX_1G;
8024
8025         if (t4_qsize_txq < 128)
8026                 t4_qsize_txq = 128;
8027
8028         if (t4_qsize_rxq < 128)
8029                 t4_qsize_rxq = 128;
8030         while (t4_qsize_rxq & 7)
8031                 t4_qsize_rxq++;
8032
8033         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
8034 }
8035
/*
 * Module event handler shared by the t4nex and t5nex drivers.  The static
 * 'loaded' counter ensures the global init runs only on the first load and
 * the teardown only on the last unload.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
        int rc = 0;
        static int loaded = 0;

        switch (cmd) {
        case MOD_LOAD:
                /* Only the first load performs global initialization. */
                if (atomic_fetchadd_int(&loaded, 1))
                        break;
                t4_sge_modload();
                sx_init(&t4_list_lock, "T4/T5 adapters");
                SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
                sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
                SLIST_INIT(&t4_uld_list);
#endif
                t4_tracer_modload();
                tweak_tunables();
                break;

        case MOD_UNLOAD:
                /* Only the last unload tears the global state down. */
                if (atomic_fetchadd_int(&loaded, -1) > 1)
                        break;
                t4_tracer_modunload();
#ifdef TCP_OFFLOAD
                /* Refuse to unload while any ULD is still registered. */
                sx_slock(&t4_uld_list_lock);
                if (!SLIST_EMPTY(&t4_uld_list)) {
                        rc = EBUSY;
                        sx_sunlock(&t4_uld_list_lock);
                        break;
                }
                sx_sunlock(&t4_uld_list_lock);
                sx_destroy(&t4_uld_list_lock);
#endif
                /* Refuse to unload while any adapter is still on the list. */
                sx_slock(&t4_list_lock);
                if (!SLIST_EMPTY(&t4_list)) {
                        rc = EBUSY;
                        sx_sunlock(&t4_list_lock);
                        break;
                }
                sx_sunlock(&t4_list_lock);
                sx_destroy(&t4_list_lock);
                break;
        }

        return (rc);
}
8084
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus driver: attaches to PCI, shares mod_event with t5nex. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus driver: same shape as t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port interface drivers, children of the nexus devices. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);