]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/cxgbe/t4_main.c
Update LLDB to upstream r196259 snapshot
[FreeBSD/FreeBSD.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
70 /* T4 bus driver interface */
71 static int t4_probe(device_t);
72 static int t4_attach(device_t);
73 static int t4_detach(device_t);
74 static device_method_t t4_methods[] = {
75         DEVMETHOD(device_probe,         t4_probe),
76         DEVMETHOD(device_attach,        t4_attach),
77         DEVMETHOD(device_detach,        t4_detach),
78
79         DEVMETHOD_END
80 };
81 static driver_t t4_driver = {
82         "t4nex",
83         t4_methods,
84         sizeof(struct adapter)
85 };
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
        DEVMETHOD(device_probe,         cxgbe_probe),
        DEVMETHOD(device_attach,        cxgbe_attach),
        DEVMETHOD(device_detach,        cxgbe_detach),
        { 0, 0 }
};
/* newbus driver for a T4 port (cxgbeN); softc is the port_info. */
static driver_t cxgbe_driver = {
        "cxgbe",
        cxgbe_methods,
        sizeof(struct port_info)
};
103
104 static d_ioctl_t t4_ioctl;
105 static d_open_t t4_open;
106 static d_close_t t4_close;
107
/*
 * Character device switch for the T4 nexus node created in t4_attach().
 * Userland management tools talk to the driver through the ioctl entry.
 */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
117 /* T5 bus driver interface */
118 static int t5_probe(device_t);
/* T5 reuses the T4 attach/detach methods; only the probe differs. */
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
/* newbus driver for the T5 nexus device; softc is the whole adapter. */
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};
131
132
133 /* T5 port (cxl) interface */
/* T5 port driver (cxlN); shares the cxgbe method table. */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};
139
/* Same entry points as t4_cdevsw; only the device name differs. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158
159 /*
160  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock.
162  */
163 static struct sx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct sx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
169
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200 #ifdef TCP_OFFLOAD
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216 #endif
217
218 /*
219  * Holdoff parameters for 10G and 1G ports.
220  */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228
229 #define TMR_IDX_1G 1
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236
237 /*
238  * Size (# of entries) of each tx and rx queue.
239  */
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245
246 /*
247  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248  */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
251
252 /*
253  * Configuration file.
254  */
255 #define DEFAULT_CF      "default"
256 #define FLASH_CF        "flash"
257 #define UWIRE_CF        "uwire"
258 #define FPGA_CF         "fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261
262 /*
263  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264  * encouraged respectively).
265  */
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268
269 /*
270  * ASIC features that will be used.  Disable the ones you don't want so that the
271  * chip resources aren't wasted on features that will not be used.
272  */
273 static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293
/*
 * Interrupt and queue configuration computed by cfg_itype_and_nqueues()
 * and consumed by t4_attach() when sizing the SGE.
 */
struct intrs_and_queues {
        int intr_type;          /* INTx, MSI, or MSI-X */
        int nirq;               /* Number of vectors */
        int intr_flags;         /* flags OR'ed into adapter->flags */
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};
309
/*
 * Software state for one hardware filter (see the get/set/del_filter
 * routines and t4_filter_rpl).
 */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;      /* the filter itself */
};
319
/*
 * Bits selecting which MAC properties to (re)apply; presumably the flags
 * argument of update_mac_settings() — verify against its implementation.
 */
enum {
        XGMAC_MTU       = (1 << 0),
        XGMAC_PROMISC   = (1 << 1),
        XGMAC_ALLMULTI  = (1 << 2),
        XGMAC_VLANEX    = (1 << 3),
        XGMAC_UCADDR    = (1 << 4),
        XGMAC_MCADDRS   = (1 << 5),

        XGMAC_ALL       = 0xffff        /* every property above */
};
330
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int fwmtype_to_hwmtype(int);
336 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
337     uint32_t *);
338 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
339 static uint32_t position_memwin(struct adapter *, int, uint32_t);
340 static int cfg_itype_and_nqueues(struct adapter *, int, int,
341     struct intrs_and_queues *);
342 static int prep_firmware(struct adapter *);
343 static int partition_resources(struct adapter *, const struct firmware *,
344     const char *);
345 static int get_params__pre_init(struct adapter *);
346 static int get_params__post_init(struct adapter *);
347 static int set_params__post_init(struct adapter *);
348 static void t4_set_desc(struct adapter *);
349 static void build_medialist(struct port_info *);
350 static int update_mac_settings(struct port_info *, int);
351 static int cxgbe_init_synchronized(struct port_info *);
352 static int cxgbe_uninit_synchronized(struct port_info *);
353 static int setup_intr_handlers(struct adapter *);
354 static int adapter_full_init(struct adapter *);
355 static int adapter_full_uninit(struct adapter *);
356 static int port_full_init(struct port_info *);
357 static int port_full_uninit(struct port_info *);
358 static void quiesce_eq(struct adapter *, struct sge_eq *);
359 static void quiesce_iq(struct adapter *, struct sge_iq *);
360 static void quiesce_fl(struct adapter *, struct sge_fl *);
361 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
362     driver_intr_t *, void *, char *);
363 static int t4_free_irq(struct adapter *, struct irq *);
364 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
365     unsigned int);
366 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
367 static void cxgbe_tick(void *);
368 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
369 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
370     struct mbuf *);
371 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
372 static int fw_msg_not_handled(struct adapter *, const __be64 *);
373 static int t4_sysctls(struct adapter *);
374 static int cxgbe_sysctls(struct port_info *);
375 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
376 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
377 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
378 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
379 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
380 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
381 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
382 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
383 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
384 #ifdef SBUF_DRAIN
385 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
387 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
388 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
389 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
390 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
391 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
393 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
394 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
395 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
396 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
398 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
399 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
400 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
401 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
403 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
404 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
405 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
406 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
407 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
408 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
409 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
410 #endif
411 static inline void txq_start(struct ifnet *, struct sge_txq *);
412 static uint32_t fconf_to_mode(uint32_t);
413 static uint32_t mode_to_fconf(uint32_t);
414 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
415 static int get_filter_mode(struct adapter *, uint32_t *);
416 static int set_filter_mode(struct adapter *, uint32_t);
417 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
418 static int get_filter(struct adapter *, struct t4_filter *);
419 static int set_filter(struct adapter *, struct t4_filter *);
420 static int del_filter(struct adapter *, struct t4_filter *);
421 static void clear_filter(struct filter_entry *);
422 static int set_filter_wr(struct adapter *, int);
423 static int del_filter_wr(struct adapter *, int);
424 static int get_sge_context(struct adapter *, struct t4_sge_context *);
425 static int load_fw(struct adapter *, struct t4_data *);
426 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
427 static int read_i2c(struct adapter *, struct t4_i2c_data *);
428 static int set_sched_class(struct adapter *, struct t4_sched_params *);
429 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
430 #ifdef TCP_OFFLOAD
431 static int toe_capability(struct port_info *, int);
432 #endif
433 static int mod_event(module_t, int, void *);
434
/*
 * PCI device ID -> description tables consulted by t4_probe() and
 * t5_probe().  The 0xa000/0xb000 entries are the FPGA development boards.
 */
struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401,  "Chelsio T520-CR"},           /* 2 x 10G */
        {0x5402,  "Chelsio T522-CR"},           /* 2 x 10G, 2 X 1G */
        {0x5403,  "Chelsio T540-CR"},           /* 4 x 10G */
        {0x5407,  "Chelsio T520-SO"},           /* 2 x 10G, nomem */
        {0x5409,  "Chelsio T520-BT"},           /* 2 x 10GBaseT */
        {0x540a,  "Chelsio T504-BT"},           /* 4 x 1G */
        {0x540d,  "Chelsio T580-CR"},           /* 2 x 40G */
        {0x540e,  "Chelsio T540-LP-CR"},        /* 4 x 10G */
        {0x5410,  "Chelsio T580-LP-CR"},        /* 2 x 40G */
        {0x5411,  "Chelsio T520-LL-CR"},        /* 2 x 10G */
        {0x5412,  "Chelsio T560-CR"},           /* 1 x 40G, 2 x 10G */
        {0x5414,  "Chelsio T580-LP-SO-CR"},     /* 2 x 40G, nomem */
#ifdef notyet
        {0x5404,  "Chelsio T520-BCH"},
        {0x5405,  "Chelsio T540-BCH"},
        {0x5406,  "Chelsio T540-CH"},
        {0x5408,  "Chelsio T520-CX"},
        {0x540b,  "Chelsio B520-SR"},
        {0x540c,  "Chelsio B504-BT"},
        {0x540f,  "Chelsio Amsterdam"},
        {0x5413,  "Chelsio T580-CHR"},
#endif
};
478
479 #ifdef TCP_OFFLOAD
480 /*
481  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
482  * exactly the same for both rxq and ofld_rxq.
483  */
484 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
485 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
486 #endif
487
488 /* No easy way to include t4_msg.h before adapter.h so we check this way */
489 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
490 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
491
492 static int
493 t4_probe(device_t dev)
494 {
495         int i;
496         uint16_t v = pci_get_vendor(dev);
497         uint16_t d = pci_get_device(dev);
498         uint8_t f = pci_get_function(dev);
499
500         if (v != PCI_VENDOR_ID_CHELSIO)
501                 return (ENXIO);
502
503         /* Attach only to PF0 of the FPGA */
504         if (d == 0xa000 && f != 0)
505                 return (ENXIO);
506
507         for (i = 0; i < nitems(t4_pciids); i++) {
508                 if (d == t4_pciids[i].device) {
509                         device_set_desc(dev, t4_pciids[i].desc);
510                         return (BUS_PROBE_DEFAULT);
511                 }
512         }
513
514         return (ENXIO);
515 }
516
517 static int
518 t5_probe(device_t dev)
519 {
520         int i;
521         uint16_t v = pci_get_vendor(dev);
522         uint16_t d = pci_get_device(dev);
523         uint8_t f = pci_get_function(dev);
524
525         if (v != PCI_VENDOR_ID_CHELSIO)
526                 return (ENXIO);
527
528         /* Attach only to PF0 of the FPGA */
529         if (d == 0xb000 && f != 0)
530                 return (ENXIO);
531
532         for (i = 0; i < nitems(t5_pciids); i++) {
533                 if (d == t5_pciids[i].device) {
534                         device_set_desc(dev, t5_pciids[i].desc);
535                         return (BUS_PROBE_DEFAULT);
536                 }
537         }
538
539         return (ENXIO);
540 }
541
542 static int
543 t4_attach(device_t dev)
544 {
545         struct adapter *sc;
546         int rc = 0, i, n10g, n1g, rqidx, tqidx;
547         struct intrs_and_queues iaq;
548         struct sge *s;
549 #ifdef TCP_OFFLOAD
550         int ofld_rqidx, ofld_tqidx;
551 #endif
552
553         sc = device_get_softc(dev);
554         sc->dev = dev;
555
556         pci_enable_busmaster(dev);
557         if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
558                 uint32_t v;
559
560                 pci_set_max_read_req(dev, 4096);
561                 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
562                 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
563                 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
564         }
565
566         sc->traceq = -1;
567         mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
568         snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
569             device_get_nameunit(dev));
570
571         snprintf(sc->lockname, sizeof(sc->lockname), "%s",
572             device_get_nameunit(dev));
573         mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
574         sx_xlock(&t4_list_lock);
575         SLIST_INSERT_HEAD(&t4_list, sc, link);
576         sx_xunlock(&t4_list_lock);
577
578         mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
579         TAILQ_INIT(&sc->sfl);
580         callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
581
582         rc = map_bars_0_and_4(sc);
583         if (rc != 0)
584                 goto done; /* error message displayed already */
585
586         /*
587          * This is the real PF# to which we're attaching.  Works from within PCI
588          * passthrough environments too, where pci_get_function() could return a
589          * different PF# depending on the passthrough configuration.  We need to
590          * use the real PF# in all our communication with the firmware.
591          */
592         sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
593         sc->mbox = sc->pf;
594
595         memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
596         sc->an_handler = an_not_handled;
597         for (i = 0; i < nitems(sc->cpl_handler); i++)
598                 sc->cpl_handler[i] = cpl_not_handled;
599         for (i = 0; i < nitems(sc->fw_msg_handler); i++)
600                 sc->fw_msg_handler[i] = fw_msg_not_handled;
601         t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
602         t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
603         t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
604         t4_init_sge_cpl_handlers(sc);
605
606         /* Prepare the adapter for operation */
607         rc = -t4_prep_adapter(sc);
608         if (rc != 0) {
609                 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
610                 goto done;
611         }
612
613         /*
614          * Do this really early, with the memory windows set up even before the
615          * character device.  The userland tool's register i/o and mem read
616          * will work even in "recovery mode".
617          */
618         setup_memwin(sc);
619         sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
620             device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
621             device_get_nameunit(dev));
622         if (sc->cdev == NULL)
623                 device_printf(dev, "failed to create nexus char device.\n");
624         else
625                 sc->cdev->si_drv1 = sc;
626
627         /* Go no further if recovery mode has been requested. */
628         if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
629                 device_printf(dev, "recovery mode.\n");
630                 goto done;
631         }
632
633         /* Prepare the firmware for operation */
634         rc = prep_firmware(sc);
635         if (rc != 0)
636                 goto done; /* error message displayed already */
637
638         rc = get_params__post_init(sc);
639         if (rc != 0)
640                 goto done; /* error message displayed already */
641
642         rc = set_params__post_init(sc);
643         if (rc != 0)
644                 goto done; /* error message displayed already */
645
646         rc = map_bar_2(sc);
647         if (rc != 0)
648                 goto done; /* error message displayed already */
649
650         rc = t4_create_dma_tag(sc);
651         if (rc != 0)
652                 goto done; /* error message displayed already */
653
654         /*
655          * First pass over all the ports - allocate VIs and initialize some
656          * basic parameters like mac address, port type, etc.  We also figure
657          * out whether a port is 10G or 1G and use that information when
658          * calculating how many interrupts to attempt to allocate.
659          */
660         n10g = n1g = 0;
661         for_each_port(sc, i) {
662                 struct port_info *pi;
663
664                 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
665                 sc->port[i] = pi;
666
667                 /* These must be set before t4_port_init */
668                 pi->adapter = sc;
669                 pi->port_id = i;
670
671                 /* Allocate the vi and initialize parameters like mac addr */
672                 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
673                 if (rc != 0) {
674                         device_printf(dev, "unable to initialize port %d: %d\n",
675                             i, rc);
676                         free(pi, M_CXGBE);
677                         sc->port[i] = NULL;
678                         goto done;
679                 }
680
681                 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
682                     device_get_nameunit(dev), i);
683                 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
684                 sc->chan_map[pi->tx_chan] = i;
685
686                 if (is_10G_port(pi) || is_40G_port(pi)) {
687                         n10g++;
688                         pi->tmr_idx = t4_tmr_idx_10g;
689                         pi->pktc_idx = t4_pktc_idx_10g;
690                 } else {
691                         n1g++;
692                         pi->tmr_idx = t4_tmr_idx_1g;
693                         pi->pktc_idx = t4_pktc_idx_1g;
694                 }
695
696                 pi->xact_addr_filt = -1;
697                 pi->linkdnrc = -1;
698
699                 pi->qsize_rxq = t4_qsize_rxq;
700                 pi->qsize_txq = t4_qsize_txq;
701
702                 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
703                 if (pi->dev == NULL) {
704                         device_printf(dev,
705                             "failed to add device for port %d.\n", i);
706                         rc = ENXIO;
707                         goto done;
708                 }
709                 device_set_softc(pi->dev, pi);
710         }
711
712         /*
713          * Interrupt type, # of interrupts, # of rx/tx queues, etc.
714          */
715         rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
716         if (rc != 0)
717                 goto done; /* error message displayed already */
718
719         sc->intr_type = iaq.intr_type;
720         sc->intr_count = iaq.nirq;
721         sc->flags |= iaq.intr_flags;
722
723         s = &sc->sge;
724         s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
725         s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
726         s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
727         s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
728         s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
729
730 #ifdef TCP_OFFLOAD
731         if (is_offload(sc)) {
732
733                 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
734                 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
735                 s->neq += s->nofldtxq + s->nofldrxq;
736                 s->niq += s->nofldrxq;
737
738                 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
739                     M_CXGBE, M_ZERO | M_WAITOK);
740                 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
741                     M_CXGBE, M_ZERO | M_WAITOK);
742         }
743 #endif
744
745         s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
746             M_ZERO | M_WAITOK);
747         s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
748             M_ZERO | M_WAITOK);
749         s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
750             M_ZERO | M_WAITOK);
751         s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
752             M_ZERO | M_WAITOK);
753         s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
754             M_ZERO | M_WAITOK);
755
756         sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
757             M_ZERO | M_WAITOK);
758
759         t4_init_l2t(sc, M_WAITOK);
760
761         /*
762          * Second pass over the ports.  This time we know the number of rx and
763          * tx queues that each port should get.
764          */
765         rqidx = tqidx = 0;
766 #ifdef TCP_OFFLOAD
767         ofld_rqidx = ofld_tqidx = 0;
768 #endif
769         for_each_port(sc, i) {
770                 struct port_info *pi = sc->port[i];
771
772                 if (pi == NULL)
773                         continue;
774
775                 pi->first_rxq = rqidx;
776                 pi->first_txq = tqidx;
777                 if (is_10G_port(pi) || is_40G_port(pi)) {
778                         pi->nrxq = iaq.nrxq10g;
779                         pi->ntxq = iaq.ntxq10g;
780                 } else {
781                         pi->nrxq = iaq.nrxq1g;
782                         pi->ntxq = iaq.ntxq1g;
783                 }
784
785                 rqidx += pi->nrxq;
786                 tqidx += pi->ntxq;
787
788 #ifdef TCP_OFFLOAD
789                 if (is_offload(sc)) {
790                         pi->first_ofld_rxq = ofld_rqidx;
791                         pi->first_ofld_txq = ofld_tqidx;
792                         if (is_10G_port(pi) || is_40G_port(pi)) {
793                                 pi->nofldrxq = iaq.nofldrxq10g;
794                                 pi->nofldtxq = iaq.nofldtxq10g;
795                         } else {
796                                 pi->nofldrxq = iaq.nofldrxq1g;
797                                 pi->nofldtxq = iaq.nofldtxq1g;
798                         }
799                         ofld_rqidx += pi->nofldrxq;
800                         ofld_tqidx += pi->nofldtxq;
801                 }
802 #endif
803         }
804
805         rc = setup_intr_handlers(sc);
806         if (rc != 0) {
807                 device_printf(dev,
808                     "failed to setup interrupt handlers: %d\n", rc);
809                 goto done;
810         }
811
812         rc = bus_generic_attach(dev);
813         if (rc != 0) {
814                 device_printf(dev,
815                     "failed to attach all child ports: %d\n", rc);
816                 goto done;
817         }
818
819         device_printf(dev,
820             "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
821             sc->params.pci.width, sc->params.nports, sc->intr_count,
822             sc->intr_type == INTR_MSIX ? "MSI-X" :
823             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
824             sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
825
826         t4_set_desc(sc);
827
828 done:
829         if (rc != 0 && sc->cdev) {
830                 /* cdev was created and so cxgbetool works; recover that way. */
831                 device_printf(dev,
832                     "error during attach, adapter is now in recovery mode.\n");
833                 rc = 0;
834         }
835
836         if (rc != 0)
837                 t4_detach(dev);
838         else
839                 t4_sysctls(sc);
840
841         return (rc);
842 }
843
/*
 * Idempotent: every step below checks that the resource it releases was
 * actually set up, so this is safe to call from a partially failed
 * t4_attach as well as from a normal detach.
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	/* Quiesce the hardware first so no new interrupts arrive. */
	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	/* Remove the nexus character device so no new ioctls come in. */
	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	/*
	 * Detach the child port devices before freeing the queues and
	 * interrupts they may still be using.
	 */
	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	/* Free each port's virtual interface, device node, and softc. */
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	/* Release this PF's hold on the firmware. */
	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	/* Release the PCI BARs: registers, user doorbells, MSI-X table. */
	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	/* Take the adapter off the global list before destroying its lock. */
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	/* Wipe the softc so a repeat detach finds nothing left to free. */
	bzero(sc, sizeof(*sc));

	return (0);
}
940
941
942 static int
943 cxgbe_probe(device_t dev)
944 {
945         char buf[128];
946         struct port_info *pi = device_get_softc(dev);
947
948         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
949         device_set_desc_copy(dev, buf);
950
951         return (BUS_PROBE_DEFAULT);
952 }
953
/*
 * Interface capabilities a cxgbe port advertises, and the set that is
 * enabled by default (currently all of T4_CAP; IFCAP_TOE is added to
 * if_capabilities separately at attach time and starts disabled).
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
958
/*
 * Attach routine for one port: allocates and configures the ifnet,
 * sets up ifmedia and the vlan event hook, attaches to the ethernet
 * layer, and creates the port sysctls.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	/* Driver entry points used by the network stack. */
	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	/* TOE is advertised but not enabled by default. */
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	/* Get notified when a vlan(4) is configured on this ifnet. */
	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
1016
/*
 * Detach routine for one port.  Marks the port doomed and takes the
 * adapter BUSY so no synchronized operation can race with the teardown,
 * then unwinds everything cxgbe_attach set up.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait for any in-progress synchronized operation to finish. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout under the lock, drain it outside. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release BUSY and wake up anyone waiting for it. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1066
1067 static void
1068 cxgbe_init(void *arg)
1069 {
1070         struct port_info *pi = arg;
1071         struct adapter *sc = pi->adapter;
1072
1073         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1074                 return;
1075         cxgbe_init_synchronized(pi);
1076         end_synchronized_op(sc, 0);
1077 }
1078
1079 static int
1080 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1081 {
1082         int rc = 0, mtu, flags;
1083         struct port_info *pi = ifp->if_softc;
1084         struct adapter *sc = pi->adapter;
1085         struct ifreq *ifr = (struct ifreq *)data;
1086         uint32_t mask;
1087
1088         switch (cmd) {
1089         case SIOCSIFMTU:
1090                 mtu = ifr->ifr_mtu;
1091                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1092                         return (EINVAL);
1093
1094                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1095                 if (rc)
1096                         return (rc);
1097                 ifp->if_mtu = mtu;
1098                 if (pi->flags & PORT_INIT_DONE) {
1099                         t4_update_fl_bufsize(ifp);
1100                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1101                                 rc = update_mac_settings(pi, XGMAC_MTU);
1102                 }
1103                 end_synchronized_op(sc, 0);
1104                 break;
1105
1106         case SIOCSIFFLAGS:
1107                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1108                 if (rc)
1109                         return (rc);
1110
1111                 if (ifp->if_flags & IFF_UP) {
1112                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1113                                 flags = pi->if_flags;
1114                                 if ((ifp->if_flags ^ flags) &
1115                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1116                                         rc = update_mac_settings(pi,
1117                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1118                                 }
1119                         } else
1120                                 rc = cxgbe_init_synchronized(pi);
1121                         pi->if_flags = ifp->if_flags;
1122                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1123                         rc = cxgbe_uninit_synchronized(pi);
1124                 end_synchronized_op(sc, 0);
1125                 break;
1126
1127         case SIOCADDMULTI:      
1128         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1129                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1130                 if (rc)
1131                         return (rc);
1132                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1133                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
1134                 end_synchronized_op(sc, LOCK_HELD);
1135                 break;
1136
1137         case SIOCSIFCAP:
1138                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1139                 if (rc)
1140                         return (rc);
1141
1142                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1143                 if (mask & IFCAP_TXCSUM) {
1144                         ifp->if_capenable ^= IFCAP_TXCSUM;
1145                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1146
1147                         if (IFCAP_TSO4 & ifp->if_capenable &&
1148                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1149                                 ifp->if_capenable &= ~IFCAP_TSO4;
1150                                 if_printf(ifp,
1151                                     "tso4 disabled due to -txcsum.\n");
1152                         }
1153                 }
1154                 if (mask & IFCAP_TXCSUM_IPV6) {
1155                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1156                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1157
1158                         if (IFCAP_TSO6 & ifp->if_capenable &&
1159                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1160                                 ifp->if_capenable &= ~IFCAP_TSO6;
1161                                 if_printf(ifp,
1162                                     "tso6 disabled due to -txcsum6.\n");
1163                         }
1164                 }
1165                 if (mask & IFCAP_RXCSUM)
1166                         ifp->if_capenable ^= IFCAP_RXCSUM;
1167                 if (mask & IFCAP_RXCSUM_IPV6)
1168                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1169
1170                 /*
1171                  * Note that we leave CSUM_TSO alone (it is always set).  The
1172                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1173                  * sending a TSO request our way, so it's sufficient to toggle
1174                  * IFCAP_TSOx only.
1175                  */
1176                 if (mask & IFCAP_TSO4) {
1177                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1178                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1179                                 if_printf(ifp, "enable txcsum first.\n");
1180                                 rc = EAGAIN;
1181                                 goto fail;
1182                         }
1183                         ifp->if_capenable ^= IFCAP_TSO4;
1184                 }
1185                 if (mask & IFCAP_TSO6) {
1186                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1187                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1188                                 if_printf(ifp, "enable txcsum6 first.\n");
1189                                 rc = EAGAIN;
1190                                 goto fail;
1191                         }
1192                         ifp->if_capenable ^= IFCAP_TSO6;
1193                 }
1194                 if (mask & IFCAP_LRO) {
1195 #if defined(INET) || defined(INET6)
1196                         int i;
1197                         struct sge_rxq *rxq;
1198
1199                         ifp->if_capenable ^= IFCAP_LRO;
1200                         for_each_rxq(pi, i, rxq) {
1201                                 if (ifp->if_capenable & IFCAP_LRO)
1202                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1203                                 else
1204                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1205                         }
1206 #endif
1207                 }
1208 #ifdef TCP_OFFLOAD
1209                 if (mask & IFCAP_TOE) {
1210                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1211
1212                         rc = toe_capability(pi, enable);
1213                         if (rc != 0)
1214                                 goto fail;
1215
1216                         ifp->if_capenable ^= mask;
1217                 }
1218 #endif
1219                 if (mask & IFCAP_VLAN_HWTAGGING) {
1220                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1221                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1222                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
1223                 }
1224                 if (mask & IFCAP_VLAN_MTU) {
1225                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1226
1227                         /* Need to find out how to disable auto-mtu-inflation */
1228                 }
1229                 if (mask & IFCAP_VLAN_HWTSO)
1230                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1231                 if (mask & IFCAP_VLAN_HWCSUM)
1232                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1233
1234 #ifdef VLAN_CAPABILITIES
1235                 VLAN_CAPABILITIES(ifp);
1236 #endif
1237 fail:
1238                 end_synchronized_op(sc, 0);
1239                 break;
1240
1241         case SIOCSIFMEDIA:
1242         case SIOCGIFMEDIA:
1243                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1244                 break;
1245
1246         default:
1247                 rc = ether_ioctl(ifp, cmd, data);
1248         }
1249
1250         return (rc);
1251 }
1252
/*
 * if_transmit handler.  Selects one of the port's tx queues (by flowid
 * when the mbuf carries one), then either transmits directly if the
 * queue lock is available, or leaves the mbuf on the queue's buf_ring
 * for the current lock holder / tx callout to pick up.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Spread flows across this port's tx queues. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		/* Service the held-up mbuf (if any) ahead of the ring. */
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1319
1320 static void
1321 cxgbe_qflush(struct ifnet *ifp)
1322 {
1323         struct port_info *pi = ifp->if_softc;
1324         struct sge_txq *txq;
1325         int i;
1326         struct mbuf *m;
1327
1328         /* queues do not exist if !PORT_INIT_DONE. */
1329         if (pi->flags & PORT_INIT_DONE) {
1330                 for_each_txq(pi, i, txq) {
1331                         TXQ_LOCK(txq);
1332                         m_freem(txq->m);
1333                         txq->m = NULL;
1334                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1335                                 m_freem(m);
1336                         TXQ_UNLOCK(txq);
1337                 }
1338         }
1339         if_qflush(ifp);
1340 }
1341
1342 static int
1343 cxgbe_media_change(struct ifnet *ifp)
1344 {
1345         struct port_info *pi = ifp->if_softc;
1346
1347         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1348
1349         return (EOPNOTSUPP);
1350 }
1351
1352 static void
1353 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1354 {
1355         struct port_info *pi = ifp->if_softc;
1356         struct ifmedia_entry *cur = pi->media.ifm_cur;
1357         int speed = pi->link_cfg.speed;
1358         int data = (pi->port_type << 8) | pi->mod_type;
1359
1360         if (cur->ifm_data != data) {
1361                 build_medialist(pi);
1362                 cur = pi->media.ifm_cur;
1363         }
1364
1365         ifmr->ifm_status = IFM_AVALID;
1366         if (!pi->link_cfg.link_ok)
1367                 return;
1368
1369         ifmr->ifm_status |= IFM_ACTIVE;
1370
1371         /* active and current will differ iff current media is autoselect. */
1372         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1373                 return;
1374
1375         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1376         if (speed == SPEED_10000)
1377                 ifmr->ifm_active |= IFM_10G_T;
1378         else if (speed == SPEED_1000)
1379                 ifmr->ifm_active |= IFM_1000_T;
1380         else if (speed == SPEED_100)
1381                 ifmr->ifm_active |= IFM_100_TX;
1382         else if (speed == SPEED_10)
1383                 ifmr->ifm_active |= IFM_10_T;
1384         else
1385                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1386                             speed));
1387 }
1388
/*
 * Handle a fatal hardware error: stop the SGE, mask the adapter's
 * interrupts, and log an emergency message.  The adapter is left
 * stopped; no recovery is attempted here.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1397
/*
 * Map BAR0 (device registers) and BAR4 (MSI-X) of the adapter.  The
 * resources allocated here are released in t4_detach, including on the
 * error paths below (t4_detach is idempotent).
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* Kernel doorbells are usable once the register BAR is mapped. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1423
/*
 * Map BAR2 (userspace doorbells).  On T5 this also tries to enable
 * write combining on the BAR and records which doorbell mechanisms are
 * available in sc->doorbells.
 */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/*
				 * Advertise the write-combined doorbell
				 * variants instead of plain UDB.
				 */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1479
/*
 * PCIe memory window base/aperture pairs.  Windows 0 and 1 are common;
 * window 2 differs between T4 and T5 (see setup_memwin/memwin_info).
 */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1491
/*
 * Program the chip's PCIe memory window decoders.  T4 windows must be
 * programmed with absolute bus addresses (BAR0 base + offset); T5
 * windows take offsets relative to the BAR.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1530
1531 /*
1532  * Verify that the memory range specified by the addr/len pair is valid and lies
1533  * entirely within a single region (EDCx or MCx).
1534  */
1535 static int
1536 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1537 {
1538         uint32_t em, addr_len, maddr, mlen;
1539
1540         /* Memory can only be accessed in naturally aligned 4 byte units */
1541         if (addr & 3 || len & 3 || len == 0)
1542                 return (EINVAL);
1543
1544         /* Enabled memories */
1545         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1546         if (em & F_EDRAM0_ENABLE) {
1547                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1548                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1549                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1550                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1551                     addr + len <= maddr + mlen)
1552                         return (0);
1553         }
1554         if (em & F_EDRAM1_ENABLE) {
1555                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1556                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1557                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1558                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1559                     addr + len <= maddr + mlen)
1560                         return (0);
1561         }
1562         if (em & F_EXT_MEM_ENABLE) {
1563                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1564                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1565                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1566                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1567                     addr + len <= maddr + mlen)
1568                         return (0);
1569         }
1570         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1571                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1572                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1573                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1574                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1575                     addr + len <= maddr + mlen)
1576                         return (0);
1577         }
1578
1579         return (EFAULT);
1580 }
1581
/*
 * Translate a firmware memory type (FW_MEMTYPE_*) to the driver's MEM_*
 * constant.  Panics on an unrecognized type, so callers must pass only
 * firmware-provided values.
 */
static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}
1599
1600 /*
1601  * Verify that the memory range specified by the memtype/offset/len pair is
1602  * valid and lies entirely within the memtype specified.  The global address of
1603  * the start of the range is returned in addr.
1604  */
1605 static int
1606 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1607     uint32_t *addr)
1608 {
1609         uint32_t em, addr_len, maddr, mlen;
1610
1611         /* Memory can only be accessed in naturally aligned 4 byte units */
1612         if (off & 3 || len & 3 || len == 0)
1613                 return (EINVAL);
1614
1615         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1616         switch (fwmtype_to_hwmtype(mtype)) {
1617         case MEM_EDC0:
1618                 if (!(em & F_EDRAM0_ENABLE))
1619                         return (EINVAL);
1620                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1621                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1622                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1623                 break;
1624         case MEM_EDC1:
1625                 if (!(em & F_EDRAM1_ENABLE))
1626                         return (EINVAL);
1627                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1628                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1629                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1630                 break;
1631         case MEM_MC:
1632                 if (!(em & F_EXT_MEM_ENABLE))
1633                         return (EINVAL);
1634                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1635                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1636                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1637                 break;
1638         case MEM_MC1:
1639                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1640                         return (EINVAL);
1641                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1642                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1643                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1644                 break;
1645         default:
1646                 return (EINVAL);
1647         }
1648
1649         if (mlen > 0 && off < mlen && off + len <= mlen) {
1650                 *addr = maddr + off;    /* global address */
1651                 return (0);
1652         }
1653
1654         return (EFAULT);
1655 }
1656
1657 static void
1658 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1659 {
1660         const struct memwin *mw;
1661
1662         if (is_t4(sc)) {
1663                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1664                     ("%s: incorrect memwin# (%d)", __func__, win));
1665                 mw = &t4_memwin[win];
1666         } else {
1667                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1668                     ("%s: incorrect memwin# (%d)", __func__, win));
1669                 mw = &t5_memwin[win];
1670         }
1671
1672         if (base != NULL)
1673                 *base = mw->base;
1674         if (aperture != NULL)
1675                 *aperture = mw->aperture;
1676 }
1677
1678 /*
1679  * Positions the memory window such that it can be used to access the specified
1680  * address in the chip's address space.  The return value is the offset of addr
1681  * from the start of the window.
1682  */
1683 static uint32_t
1684 position_memwin(struct adapter *sc, int n, uint32_t addr)
1685 {
1686         uint32_t start, pf;
1687         uint32_t reg;
1688
1689         KASSERT(n >= 0 && n <= 3,
1690             ("%s: invalid window %d.", __func__, n));
1691         KASSERT((addr & 3) == 0,
1692             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1693
1694         if (is_t4(sc)) {
1695                 pf = 0;
1696                 start = addr & ~0xf;    /* start must be 16B aligned */
1697         } else {
1698                 pf = V_PFNUM(sc->pf);
1699                 start = addr & ~0x7f;   /* start must be 128B aligned */
1700         }
1701         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1702
1703         t4_write_reg(sc, reg, start | pf);
1704         t4_read_reg(sc, reg);
1705
1706         return (addr - start);
1707 }
1708
1709 static int
1710 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1711     struct intrs_and_queues *iaq)
1712 {
1713         int rc, itype, navail, nrxq10g, nrxq1g, n;
1714         int nofldrxq10g = 0, nofldrxq1g = 0;
1715
1716         bzero(iaq, sizeof(*iaq));
1717
1718         iaq->ntxq10g = t4_ntxq10g;
1719         iaq->ntxq1g = t4_ntxq1g;
1720         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1721         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1722 #ifdef TCP_OFFLOAD
1723         if (is_offload(sc)) {
1724                 iaq->nofldtxq10g = t4_nofldtxq10g;
1725                 iaq->nofldtxq1g = t4_nofldtxq1g;
1726                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1727                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1728         }
1729 #endif
1730
1731         for (itype = INTR_MSIX; itype; itype >>= 1) {
1732
1733                 if ((itype & t4_intr_types) == 0)
1734                         continue;       /* not allowed */
1735
1736                 if (itype == INTR_MSIX)
1737                         navail = pci_msix_count(sc->dev);
1738                 else if (itype == INTR_MSI)
1739                         navail = pci_msi_count(sc->dev);
1740                 else
1741                         navail = 1;
1742 restart:
1743                 if (navail == 0)
1744                         continue;
1745
1746                 iaq->intr_type = itype;
1747                 iaq->intr_flags = 0;
1748
1749                 /*
1750                  * Best option: an interrupt vector for errors, one for the
1751                  * firmware event queue, and one each for each rxq (NIC as well
1752                  * as offload).
1753                  */
1754                 iaq->nirq = T4_EXTRA_INTR;
1755                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1756                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1757                 if (iaq->nirq <= navail &&
1758                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1759                         iaq->intr_flags |= INTR_DIRECT;
1760                         goto allocate;
1761                 }
1762
1763                 /*
1764                  * Second best option: an interrupt vector for errors, one for
1765                  * the firmware event queue, and one each for either NIC or
1766                  * offload rxq's.
1767                  */
1768                 iaq->nirq = T4_EXTRA_INTR;
1769                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1770                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1771                 if (iaq->nirq <= navail &&
1772                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1773                         goto allocate;
1774
1775                 /*
1776                  * Next best option: an interrupt vector for errors, one for the
1777                  * firmware event queue, and at least one per port.  At this
1778                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1779                  * what's available to us.
1780                  */
1781                 iaq->nirq = T4_EXTRA_INTR;
1782                 iaq->nirq += n10g + n1g;
1783                 if (iaq->nirq <= navail) {
1784                         int leftover = navail - iaq->nirq;
1785
1786                         if (n10g > 0) {
1787                                 int target = max(nrxq10g, nofldrxq10g);
1788
1789                                 n = 1;
1790                                 while (n < target && leftover >= n10g) {
1791                                         leftover -= n10g;
1792                                         iaq->nirq += n10g;
1793                                         n++;
1794                                 }
1795                                 iaq->nrxq10g = min(n, nrxq10g);
1796 #ifdef TCP_OFFLOAD
1797                                 if (is_offload(sc))
1798                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1799 #endif
1800                         }
1801
1802                         if (n1g > 0) {
1803                                 int target = max(nrxq1g, nofldrxq1g);
1804
1805                                 n = 1;
1806                                 while (n < target && leftover >= n1g) {
1807                                         leftover -= n1g;
1808                                         iaq->nirq += n1g;
1809                                         n++;
1810                                 }
1811                                 iaq->nrxq1g = min(n, nrxq1g);
1812 #ifdef TCP_OFFLOAD
1813                                 if (is_offload(sc))
1814                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1815 #endif
1816                         }
1817
1818                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1819                                 goto allocate;
1820                 }
1821
1822                 /*
1823                  * Least desirable option: one interrupt vector for everything.
1824                  */
1825                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1826 #ifdef TCP_OFFLOAD
1827                 if (is_offload(sc))
1828                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1829 #endif
1830
1831 allocate:
1832                 navail = iaq->nirq;
1833                 rc = 0;
1834                 if (itype == INTR_MSIX)
1835                         rc = pci_alloc_msix(sc->dev, &navail);
1836                 else if (itype == INTR_MSI)
1837                         rc = pci_alloc_msi(sc->dev, &navail);
1838
1839                 if (rc == 0) {
1840                         if (navail == iaq->nirq)
1841                                 return (0);
1842
1843                         /*
1844                          * Didn't get the number requested.  Use whatever number
1845                          * the kernel is willing to allocate (it's in navail).
1846                          */
1847                         device_printf(sc->dev, "fewer vectors than requested, "
1848                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1849                             itype, iaq->nirq, navail);
1850                         pci_release_msi(sc->dev);
1851                         goto restart;
1852                 }
1853
1854                 device_printf(sc->dev,
1855                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1856                     itype, rc, iaq->nirq, navail);
1857         }
1858
1859         device_printf(sc->dev,
1860             "failed to find a usable interrupt type.  "
1861             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1862             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1863
1864         return (ENXIO);
1865 }
1866
/*
 * Build the packed 32-bit firmware version word (major/minor/micro/build)
 * and the per-interface version constants for a given chip prefix (T4/T5),
 * using the constants from that chip's firmware header.
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1873
/*
 * Per-chip firmware information: the KLD that carries the default config
 * file, the module that carries the firmware image itself, and the firmware
 * header this driver was compiled against (used for compatibility checks).
 */
struct fw_info {
	uint8_t chip;		/* CHELSIO_T4 / CHELSIO_T5 */
	char *kld_name;		/* module with the default config file */
	char *fw_mod_name;	/* module with the firmware image */
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1914
1915 static struct fw_info *
1916 find_fw_info(int chip)
1917 {
1918         int i;
1919
1920         for (i = 0; i < nitems(fw_info); i++) {
1921                 if (fw_info[i].chip == chip)
1922                         return (&fw_info[i]);
1923         }
1924         return (NULL);
1925 }
1926
1927 /*
1928  * Is the given firmware API compatible with the one the driver was compiled
1929  * with?
1930  */
1931 static int
1932 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1933 {
1934
1935         /* short circuit if it's the exact same firmware version */
1936         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1937                 return (1);
1938
1939         /*
1940          * XXX: Is this too conservative?  Perhaps I should limit this to the
1941          * features that are supported in the driver.
1942          */
1943 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1944         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1945             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1946             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1947                 return (1);
1948 #undef SAME_INTF
1949
1950         return (0);
1951 }
1952
1953 /*
1954  * The firmware in the KLD is usable, but should it be installed?  This routine
1955  * explains itself in detail if it indicates the KLD firmware should be
1956  * installed.
1957  */
1958 static int
1959 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1960 {
1961         const char *reason;
1962
1963         if (!card_fw_usable) {
1964                 reason = "incompatible or unusable";
1965                 goto install;
1966         }
1967
1968         if (k > c) {
1969                 reason = "older than the version bundled with this driver";
1970                 goto install;
1971         }
1972
1973         if (t4_fw_install == 2 && k != c) {
1974                 reason = "different than the version bundled with this driver";
1975                 goto install;
1976         }
1977
1978         return (0);
1979
1980 install:
1981         if (t4_fw_install == 0) {
1982                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1983                     "but the driver is prohibited from installing a different "
1984                     "firmware on the card.\n",
1985                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1986                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1987
1988                 return (0);
1989         }
1990
1991         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1992             "installing firmware %u.%u.%u.%u on card.\n",
1993             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1994             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1995             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1996             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1997
1998         return (1);
1999 }
2000 /*
2001  * Establish contact with the firmware and determine if we are the master driver
2002  * or not, and whether we are responsible for chip initialization.
2003  */
2004 static int
2005 prep_firmware(struct adapter *sc)
2006 {
2007         const struct firmware *fw = NULL, *default_cfg;
2008         int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2009         enum dev_state state;
2010         struct fw_info *fw_info;
2011         struct fw_hdr *card_fw;         /* fw on the card */
2012         const struct fw_hdr *kld_fw;    /* fw in the KLD */
2013         const struct fw_hdr *drv_fw;    /* fw header the driver was compiled
2014                                            against */
2015
2016         /* Contact firmware. */
2017         rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2018         if (rc < 0 || state == DEV_STATE_ERR) {
2019                 rc = -rc;
2020                 device_printf(sc->dev,
2021                     "failed to connect to the firmware: %d, %d.\n", rc, state);
2022                 return (rc);
2023         }
2024         pf = rc;
2025         if (pf == sc->mbox)
2026                 sc->flags |= MASTER_PF;
2027         else if (state == DEV_STATE_UNINIT) {
2028                 /*
2029                  * We didn't get to be the master so we definitely won't be
2030                  * configuring the chip.  It's a bug if someone else hasn't
2031                  * configured it already.
2032                  */
2033                 device_printf(sc->dev, "couldn't be master(%d), "
2034                     "device not already initialized either(%d).\n", rc, state);
2035                 return (EDOOFUS);
2036         }
2037
2038         /* This is the firmware whose headers the driver was compiled against */
2039         fw_info = find_fw_info(chip_id(sc));
2040         if (fw_info == NULL) {
2041                 device_printf(sc->dev,
2042                     "unable to look up firmware information for chip %d.\n",
2043                     chip_id(sc));
2044                 return (EINVAL);
2045         }
2046         drv_fw = &fw_info->fw_hdr;
2047
2048         /*
2049          * The firmware KLD contains many modules.  The KLD name is also the
2050          * name of the module that contains the default config file.
2051          */
2052         default_cfg = firmware_get(fw_info->kld_name);
2053
2054         /* Read the header of the firmware on the card */
2055         card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2056         rc = -t4_read_flash(sc, FLASH_FW_START,
2057             sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2058         if (rc == 0)
2059                 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2060         else {
2061                 device_printf(sc->dev,
2062                     "Unable to read card's firmware header: %d\n", rc);
2063                 card_fw_usable = 0;
2064         }
2065
2066         /* This is the firmware in the KLD */
2067         fw = firmware_get(fw_info->fw_mod_name);
2068         if (fw != NULL) {
2069                 kld_fw = (const void *)fw->data;
2070                 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2071         } else {
2072                 kld_fw = NULL;
2073                 kld_fw_usable = 0;
2074         }
2075
2076         if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2077             (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2078                 /*
2079                  * Common case: the firmware on the card is an exact match and
2080                  * the KLD is an exact match too, or the KLD is
2081                  * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2082                  * here -- use cxgbetool loadfw if you want to reinstall the
2083                  * same firmware as the one on the card.
2084                  */
2085         } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2086             should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2087             be32toh(card_fw->fw_ver))) {
2088
2089                 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2090                 if (rc != 0) {
2091                         device_printf(sc->dev,
2092                             "failed to install firmware: %d\n", rc);
2093                         goto done;
2094                 }
2095
2096                 /* Installed successfully, update the cached header too. */
2097                 memcpy(card_fw, kld_fw, sizeof(*card_fw));
2098                 card_fw_usable = 1;
2099                 need_fw_reset = 0;      /* already reset as part of load_fw */
2100         }
2101
2102         if (!card_fw_usable) {
2103                 uint32_t d, c, k;
2104
2105                 d = ntohl(drv_fw->fw_ver);
2106                 c = ntohl(card_fw->fw_ver);
2107                 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2108
2109                 device_printf(sc->dev, "Cannot find a usable firmware: "
2110                     "fw_install %d, chip state %d, "
2111                     "driver compiled with %d.%d.%d.%d, "
2112                     "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2113                     t4_fw_install, state,
2114                     G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2115                     G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2116                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2117                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2118                     G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2119                     G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2120                 rc = EINVAL;
2121                 goto done;
2122         }
2123
2124         /* We're using whatever's on the card and it's known to be good. */
2125         sc->params.fw_vers = ntohl(card_fw->fw_ver);
2126         snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2127             G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2128             G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2129             G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2130             G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2131         t4_get_tp_version(sc, &sc->params.tp_vers);
2132
2133         /* Reset device */
2134         if (need_fw_reset &&
2135             (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2136                 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2137                 if (rc != ETIMEDOUT && rc != EIO)
2138                         t4_fw_bye(sc, sc->mbox);
2139                 goto done;
2140         }
2141         sc->flags |= FW_OK;
2142
2143         rc = get_params__pre_init(sc);
2144         if (rc != 0)
2145                 goto done; /* error message displayed already */
2146
2147         /* Partition adapter resources as specified in the config file. */
2148         if (state == DEV_STATE_UNINIT) {
2149
2150                 KASSERT(sc->flags & MASTER_PF,
2151                     ("%s: trying to change chip settings when not master.",
2152                     __func__));
2153
2154                 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2155                 if (rc != 0)
2156                         goto done;      /* error message displayed already */
2157
2158                 t4_tweak_chip_settings(sc);
2159
2160                 /* get basic stuff going */
2161                 rc = -t4_fw_initialize(sc, sc->mbox);
2162                 if (rc != 0) {
2163                         device_printf(sc->dev, "fw init failed: %d.\n", rc);
2164                         goto done;
2165                 }
2166         } else {
2167                 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2168                 sc->cfcsum = 0;
2169         }
2170
2171 done:
2172         free(card_fw, M_CXGBE);
2173         if (fw != NULL)
2174                 firmware_put(fw, FIRMWARE_UNLOAD);
2175         if (default_cfg != NULL)
2176                 firmware_put(default_cfg, FIRMWARE_UNLOAD);
2177
2178         return (rc);
2179 }
2180
/*
 * Shorthand builders for the FW_PARAMS mnemonic/index words passed to
 * t4_query_params/t4_set_params (device-wide vs. per-PF/VF parameters).
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2187
2188 /*
2189  * Partition chip resources for use between various PFs, VFs, etc.
2190  */
2191 static int
2192 partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2193     const char *name_prefix)
2194 {
2195         const struct firmware *cfg = NULL;
2196         int rc = 0;
2197         struct fw_caps_config_cmd caps;
2198         uint32_t mtype, moff, finicsum, cfcsum;
2199
2200         /*
2201          * Figure out what configuration file to use.  Pick the default config
2202          * file for the card if the user hasn't specified one explicitly.
2203          */
2204         snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2205         if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2206                 /* Card specific overrides go here. */
2207                 if (pci_get_device(sc->dev) == 0x440a)
2208                         snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2209                 if (is_fpga(sc))
2210                         snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2211         }
2212
2213         /*
2214          * We need to load another module if the profile is anything except
2215          * "default" or "flash".
2216          */
2217         if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2218             strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2219                 char s[32];
2220
2221                 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2222                 cfg = firmware_get(s);
2223                 if (cfg == NULL) {
2224                         if (default_cfg != NULL) {
2225                                 device_printf(sc->dev,
2226                                     "unable to load module \"%s\" for "
2227                                     "configuration profile \"%s\", will use "
2228                                     "the default config file instead.\n",
2229                                     s, sc->cfg_file);
2230                                 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2231                                     "%s", DEFAULT_CF);
2232                         } else {
2233                                 device_printf(sc->dev,
2234                                     "unable to load module \"%s\" for "
2235                                     "configuration profile \"%s\", will use "
2236                                     "the config file on the card's flash "
2237                                     "instead.\n", s, sc->cfg_file);
2238                                 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2239                                     "%s", FLASH_CF);
2240                         }
2241                 }
2242         }
2243
2244         if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2245             default_cfg == NULL) {
2246                 device_printf(sc->dev,
2247                     "default config file not available, will use the config "
2248                     "file on the card's flash instead.\n");
2249                 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2250         }
2251
2252         if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2253                 u_int cflen, i, n;
2254                 const uint32_t *cfdata;
2255                 uint32_t param, val, addr, off, mw_base, mw_aperture;
2256
2257                 KASSERT(cfg != NULL || default_cfg != NULL,
2258                     ("%s: no config to upload", __func__));
2259
2260                 /*
2261                  * Ask the firmware where it wants us to upload the config file.
2262                  */
2263                 param = FW_PARAM_DEV(CF);
2264                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2265                 if (rc != 0) {
2266                         /* No support for config file?  Shouldn't happen. */
2267                         device_printf(sc->dev,
2268                             "failed to query config file location: %d.\n", rc);
2269                         goto done;
2270                 }
2271                 mtype = G_FW_PARAMS_PARAM_Y(val);
2272                 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2273
2274                 /*
2275                  * XXX: sheer laziness.  We deliberately added 4 bytes of
2276                  * useless stuffing/comments at the end of the config file so
2277                  * it's ok to simply throw away the last remaining bytes when
2278                  * the config file is not an exact multiple of 4.  This also
2279                  * helps with the validate_mt_off_len check.
2280                  */
2281                 if (cfg != NULL) {
2282                         cflen = cfg->datasize & ~3;
2283                         cfdata = cfg->data;
2284                 } else {
2285                         cflen = default_cfg->datasize & ~3;
2286                         cfdata = default_cfg->data;
2287                 }
2288
2289                 if (cflen > FLASH_CFG_MAX_SIZE) {
2290                         device_printf(sc->dev,
2291                             "config file too long (%d, max allowed is %d).  "
2292                             "Will try to use the config on the card, if any.\n",
2293                             cflen, FLASH_CFG_MAX_SIZE);
2294                         goto use_config_on_flash;
2295                 }
2296
2297                 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2298                 if (rc != 0) {
2299                         device_printf(sc->dev,
2300                             "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2301                             "Will try to use the config on the card, if any.\n",
2302                             __func__, mtype, moff, cflen, rc);
2303                         goto use_config_on_flash;
2304                 }
2305
2306                 memwin_info(sc, 2, &mw_base, &mw_aperture);
2307                 while (cflen) {
2308                         off = position_memwin(sc, 2, addr);
2309                         n = min(cflen, mw_aperture - off);
2310                         for (i = 0; i < n; i += 4)
2311                                 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2312                         cflen -= n;
2313                         addr += n;
2314                 }
2315         } else {
2316 use_config_on_flash:
2317                 mtype = FW_MEMTYPE_FLASH;
2318                 moff = t4_flash_cfg_addr(sc);
2319         }
2320
2321         bzero(&caps, sizeof(caps));
2322         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2323             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2324         caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2325             V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2326             V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2327         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2328         if (rc != 0) {
2329                 device_printf(sc->dev,
2330                     "failed to pre-process config file: %d "
2331                     "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2332                 goto done;
2333         }
2334
2335         finicsum = be32toh(caps.finicsum);
2336         cfcsum = be32toh(caps.cfcsum);
2337         if (finicsum != cfcsum) {
2338                 device_printf(sc->dev,
2339                     "WARNING: config file checksum mismatch: %08x %08x\n",
2340                     finicsum, cfcsum);
2341         }
2342         sc->cfcsum = cfcsum;
2343
2344 #define LIMIT_CAPS(x) do { \
2345         caps.x &= htobe16(t4_##x##_allowed); \
2346         sc->x = htobe16(caps.x); \
2347 } while (0)
2348
2349         /*
2350          * Let the firmware know what features will (not) be used so it can tune
2351          * things accordingly.
2352          */
2353         LIMIT_CAPS(linkcaps);
2354         LIMIT_CAPS(niccaps);
2355         LIMIT_CAPS(toecaps);
2356         LIMIT_CAPS(rdmacaps);
2357         LIMIT_CAPS(iscsicaps);
2358         LIMIT_CAPS(fcoecaps);
2359 #undef LIMIT_CAPS
2360
2361         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2362             F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2363         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2364         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2365         if (rc != 0) {
2366                 device_printf(sc->dev,
2367                     "failed to process config file: %d.\n", rc);
2368         }
2369 done:
2370         if (cfg != NULL)
2371                 firmware_put(cfg, FIRMWARE_UNLOAD);
2372         return (rc);
2373 }
2374
2375 /*
2376  * Retrieve parameters that are needed (or nice to have) very early.
2377  */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	/*
	 * Ask the firmware for the physical port bitmap and the core clock
	 * frequency in a single query.
	 */
	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = bitcount32(val[0]);	/* one bit per port */
	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		/* MEMADDR16 field is in 16-byte units, hence the << 4. */
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}
2419
2420 /*
2421  * Retrieve various parameters that are of interest to the driver.  The device
2422  * has been initialized by the firmware at this point.
2423  */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	/*
	 * Query the resource ranges the firmware assigned to this PF:
	 * ingress/egress queue id bases, the hardware filter region, and
	 * the L2 table slice.
	 */
	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;	/* END is inclusive */
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));

	/* get capabilities */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (caps.rdmacaps) {
		/* RDMA resource ranges come in two batches of six. */
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps.iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}

	/*
	 * We've got the params we wanted to query via the firmware.  Now grab
	 * some others directly from the chip.
	 */
	rc = t4_read_chip_settings(sc);

	return (rc);
}
2549
2550 static int
2551 set_params__post_init(struct adapter *sc)
2552 {
2553         uint32_t param, val;
2554
2555         /* ask for encapsulated CPLs */
2556         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2557         val = 1;
2558         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2559
2560         return (0);
2561 }
2562
2563 #undef FW_PARAM_PFVF
2564 #undef FW_PARAM_DEV
2565
2566 static void
2567 t4_set_desc(struct adapter *sc)
2568 {
2569         char buf[128];
2570         struct adapter_params *p = &sc->params;
2571
2572         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2573             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2574             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2575
2576         device_set_desc_copy(sc->dev, buf);
2577 }
2578
/*
 * (Re)build the ifmedia list for a port based on its transceiver/module
 * type.  Called with the media list empty or stale; any existing entries
 * are removed first.  The (port_type << 8 | mod_type) value is stored as
 * the media entry's data so a stale list can be detected later.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		/* BaseT ports autonegotiate; default to auto. */
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* Pluggable 10G: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Empty cage: no duplex, media "none". */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_QSFP:
		/* Pluggable 40G: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2703
2704 #define FW_MAC_EXACT_CHUNK      7
2705
2706 /*
2707  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2708  * indicates which parameters should be programmed (the rest are left alone).
2709  */
2710 static int
2711 update_mac_settings(struct port_info *pi, int flags)
2712 {
2713         int rc;
2714         struct ifnet *ifp = pi->ifp;
2715         struct adapter *sc = pi->adapter;
2716         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2717
2718         ASSERT_SYNCHRONIZED_OP(sc);
2719         KASSERT(flags, ("%s: not told what to update.", __func__));
2720
2721         if (flags & XGMAC_MTU)
2722                 mtu = ifp->if_mtu;
2723
2724         if (flags & XGMAC_PROMISC)
2725                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2726
2727         if (flags & XGMAC_ALLMULTI)
2728                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2729
2730         if (flags & XGMAC_VLANEX)
2731                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2732
2733         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2734             vlanex, false);
2735         if (rc) {
2736                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2737                 return (rc);
2738         }
2739
2740         if (flags & XGMAC_UCADDR) {
2741                 uint8_t ucaddr[ETHER_ADDR_LEN];
2742
2743                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2744                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2745                     ucaddr, true, true);
2746                 if (rc < 0) {
2747                         rc = -rc;
2748                         if_printf(ifp, "change_mac failed: %d\n", rc);
2749                         return (rc);
2750                 } else {
2751                         pi->xact_addr_filt = rc;
2752                         rc = 0;
2753                 }
2754         }
2755
2756         if (flags & XGMAC_MCADDRS) {
2757                 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2758                 int del = 1;
2759                 uint64_t hash = 0;
2760                 struct ifmultiaddr *ifma;
2761                 int i = 0, j;
2762
2763                 if_maddr_rlock(ifp);
2764                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2765                         if (ifma->ifma_addr->sa_family != AF_LINK)
2766                                 continue;
2767                         mcaddr[i++] =
2768                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2769
2770                         if (i == FW_MAC_EXACT_CHUNK) {
2771                                 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2772                                     del, i, mcaddr, NULL, &hash, 0);
2773                                 if (rc < 0) {
2774                                         rc = -rc;
2775                                         for (j = 0; j < i; j++) {
2776                                                 if_printf(ifp,
2777                                                     "failed to add mc address"
2778                                                     " %02x:%02x:%02x:"
2779                                                     "%02x:%02x:%02x rc=%d\n",
2780                                                     mcaddr[j][0], mcaddr[j][1],
2781                                                     mcaddr[j][2], mcaddr[j][3],
2782                                                     mcaddr[j][4], mcaddr[j][5],
2783                                                     rc);
2784                                         }
2785                                         goto mcfail;
2786                                 }
2787                                 del = 0;
2788                                 i = 0;
2789                         }
2790                 }
2791                 if (i > 0) {
2792                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2793                             del, i, mcaddr, NULL, &hash, 0);
2794                         if (rc < 0) {
2795                                 rc = -rc;
2796                                 for (j = 0; j < i; j++) {
2797                                         if_printf(ifp,
2798                                             "failed to add mc address"
2799                                             " %02x:%02x:%02x:"
2800                                             "%02x:%02x:%02x rc=%d\n",
2801                                             mcaddr[j][0], mcaddr[j][1],
2802                                             mcaddr[j][2], mcaddr[j][3],
2803                                             mcaddr[j][4], mcaddr[j][5],
2804                                             rc);
2805                                 }
2806                                 goto mcfail;
2807                         }
2808                 }
2809
2810                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2811                 if (rc != 0)
2812                         if_printf(ifp, "failed to set mc address hash: %d", rc);
2813 mcfail:
2814                 if_maddr_runlock(ifp);
2815         }
2816
2817         return (rc);
2818 }
2819
2820 int
2821 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2822     char *wmesg)
2823 {
2824         int rc, pri;
2825
2826 #ifdef WITNESS
2827         /* the caller thinks it's ok to sleep, but is it really? */
2828         if (flags & SLEEP_OK)
2829                 pause("t4slptst", 1);
2830 #endif
2831
2832         if (INTR_OK)
2833                 pri = PCATCH;
2834         else
2835                 pri = 0;
2836
2837         ADAPTER_LOCK(sc);
2838         for (;;) {
2839
2840                 if (pi && IS_DOOMED(pi)) {
2841                         rc = ENXIO;
2842                         goto done;
2843                 }
2844
2845                 if (!IS_BUSY(sc)) {
2846                         rc = 0;
2847                         break;
2848                 }
2849
2850                 if (!(flags & SLEEP_OK)) {
2851                         rc = EBUSY;
2852                         goto done;
2853                 }
2854
2855                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2856                         rc = EINTR;
2857                         goto done;
2858                 }
2859         }
2860
2861         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2862         SET_BUSY(sc);
2863 #ifdef INVARIANTS
2864         sc->last_op = wmesg;
2865         sc->last_op_thr = curthread;
2866 #endif
2867
2868 done:
2869         if (!(flags & HOLD_LOCK) || rc)
2870                 ADAPTER_UNLOCK(sc);
2871
2872         return (rc);
2873 }
2874
2875 void
2876 end_synchronized_op(struct adapter *sc, int flags)
2877 {
2878
2879         if (flags & LOCK_HELD)
2880                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2881         else
2882                 ADAPTER_LOCK(sc);
2883
2884         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2885         CLR_BUSY(sc);
2886         wakeup(&sc->flags);
2887         ADAPTER_UNLOCK(sc);
2888 }
2889
/*
 * Bring a port's interface up: finish adapter/port initialization if
 * needed, program the MAC, start the link, and enable the virtual
 * interface.  Idempotent for an already-running port.  Must be called
 * within a synchronized operation.  On any failure the port is torn
 * back down via cxgbe_uninit_synchronized.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* One-time adapter-wide initialization (queues, taskqueues, intr). */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	/* One-time per-port initialization (tx/rx queues, RSS). */
	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the 1Hz port tick. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}
2953
2954 /*
2955  * Idempotent.
2956  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Report link down; -1 means no link-down reason recorded. */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->linkdnrc = -1;
	t4_os_link_changed(sc, pi->port_id, 0, -1);

	return (0);
}
2991
2992 /*
2993  * It is ok for this function to fail midway and return right away.  t4_detach
2994  * will walk the entire sc->irq list and clean up whatever is valid.
2995  */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	/* INTx uses rid 0; MSI/MSI-X resource ids start at 1. */
	rid = sc->intr_type == INTR_INTX ? 0 : 1;
	if (sc->intr_count == 1) {
		/* Single vector: one handler services everything. */
		KASSERT(!(sc->flags & INTR_DIRECT),
		    ("%s: single interrupt && INTR_DIRECT?", __func__));

		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
		if (rc != 0)
			return (rc);
	} else {
		/* Multiple interrupts. */
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

		/* The first one is always error intr */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/* The second one is always the firmware event queue */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
		    "evt");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/*
		 * Note that if INTR_DIRECT is not set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queueus will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, p) {
			pi = sc->port[p];

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			/* NIC rx queues; names use a '.' ("port.queue"). */
			rxq = &sc->sge.rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT))
				continue;
ofld_queues:
			/* Offload rx queues; names use a ',' to tell apart. */
			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
#endif
		}
	}

	return (0);
}
3095
/*
 * Adapter-wide (not per-port) initialization: set up the adapter queues,
 * create the driver taskqueues, and enable interrupts.  Runs once; sets
 * FULL_INIT_DONE on success and unwinds itself on failure.
 */
static int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	/* One single-threaded taskqueue per sc->tq slot. */
	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);	/* partial init is unwound */

	return (rc);
}
3133
3134 static int
3135 adapter_full_uninit(struct adapter *sc)
3136 {
3137         int i;
3138
3139         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3140
3141         t4_teardown_adapter_queues(sc);
3142
3143         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3144                 taskqueue_free(sc->tq[i]);
3145                 sc->tq[i] = NULL;
3146         }
3147
3148         sc->flags &= ~FULL_INIT_DONE;
3149
3150         return (0);
3151 }
3152
/*
 * Per-port initialization: allocate the port's tx/rx/fl queues and
 * program the RSS indirection table with the port's rx queue ids.  Sets
 * PORT_INIT_DONE on success and unwinds itself on failure.  Must be
 * called within a synchronized operation.
 */
static int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;	/* remember to free the ctx later */

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.
	 */
	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
	    M_ZERO | M_WAITOK);
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
	    pi->rss_size, rss, pi->nrxq);
	free(rss, M_CXGBE);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);	/* partial init is unwound */

	return (rc);
}
3199
/*
 * Idempotent.
 *
 * Undo port_full_init: quiesce every queue that belongs to the port (only
 * if PORT_INIT_DONE is set) and then free them all.  The teardown itself
 * is safe to repeat.
 */
static int
port_full_uninit(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        int i;
        struct sge_rxq *rxq;
        struct sge_txq *txq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
        struct sge_wrq *ofld_txq;
#endif

        if (pi->flags & PORT_INIT_DONE) {

                /* Need to quiesce queues.  XXX: ctrl queues? */

                /* Egress (tx) queues first, then ingress (rx) queues. */
                for_each_txq(pi, i, txq) {
                        quiesce_eq(sc, &txq->eq);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_txq(pi, i, ofld_txq) {
                        quiesce_eq(sc, &ofld_txq->eq);
                }
#endif

                /* Each rx queue: quiesce the iq and its freelist. */
                for_each_rxq(pi, i, rxq) {
                        quiesce_iq(sc, &rxq->iq);
                        quiesce_fl(sc, &rxq->fl);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_rxq(pi, i, ofld_rxq) {
                        quiesce_iq(sc, &ofld_rxq->iq);
                        quiesce_fl(sc, &ofld_rxq->fl);
                }
#endif
        }

        t4_teardown_port_queues(pi);
        pi->flags &= ~PORT_INIT_DONE;

        return (0);
}
3247
/*
 * Wait until an egress queue is idle enough to be freed: mark it DOOMED
 * (so no new work is accepted), wait out any pending credit flush, then
 * drain the tx callout and tx task.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
        EQ_LOCK(eq);
        eq->flags |= EQ_DOOMED;

        /*
         * Wait for the response to a credit flush if one's
         * pending.
         */
        while (eq->flags & EQ_CRFLUSHED)
                mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
        EQ_UNLOCK(eq);

        callout_drain(&eq->tx_callout); /* XXX: iffy */
        pause("callout", 10);           /* Still iffy */

        taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3267
3268 static void
3269 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3270 {
3271         (void) sc;      /* unused */
3272
3273         /* Synchronize with the interrupt handler */
3274         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3275                 pause("iqfree", 1);
3276 }
3277
/*
 * Quiesce a freelist prior to freeing it.  Marking the fl DOOMED (under
 * both sc->sfl_lock and the fl lock) stops it from being (re)queued for
 * starvation handling; draining sfl_callout then ensures any in-flight
 * invocation has finished.  The KASSERT checks it is no longer starving.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        mtx_unlock(&sc->sfl_lock);

        callout_drain(&sc->sfl_callout);
        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
3291
3292 static int
3293 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3294     driver_intr_t *handler, void *arg, char *name)
3295 {
3296         int rc;
3297
3298         irq->rid = rid;
3299         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3300             RF_SHAREABLE | RF_ACTIVE);
3301         if (irq->res == NULL) {
3302                 device_printf(sc->dev,
3303                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3304                 return (ENOMEM);
3305         }
3306
3307         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3308             NULL, handler, arg, &irq->tag);
3309         if (rc != 0) {
3310                 device_printf(sc->dev,
3311                     "failed to setup interrupt for rid %d, name %s: %d\n",
3312                     rid, name, rc);
3313         } else if (name)
3314                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3315
3316         return (rc);
3317 }
3318
3319 static int
3320 t4_free_irq(struct adapter *sc, struct irq *irq)
3321 {
3322         if (irq->tag)
3323                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3324         if (irq->res)
3325                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3326
3327         bzero(irq, sizeof(*irq));
3328
3329         return (0);
3330 }
3331
3332 static void
3333 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3334     unsigned int end)
3335 {
3336         uint32_t *p = (uint32_t *)(buf + start);
3337
3338         for ( ; start <= end; start += sizeof(uint32_t))
3339                 *p++ = t4_read_reg(sc, start);
3340 }
3341
/*
 * Fill buf with a dump of the chip's registers (for the T4_GET_REGS
 * ioctl).  The readable address ranges differ between T4 and T5, so each
 * chip family has its own table of [start, end] pairs; every pair is
 * handed to reg_block_dump(), which copies those registers into buf at
 * their native offsets.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i, n;
        const unsigned int *reg_ranges;
        /* Inclusive [start, end] register address pairs for T4 chips. */
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11110,
                0x11118, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };
        /* Inclusive [start, end] register address pairs for T5 chips. */
        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b94, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x11110,
                0x11118, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e408, 0x1e40c,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e690,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e808, 0x1e80c,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea90,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec08, 0x1ec0c,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee90,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f008, 0x1f00c,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f290,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f408, 0x1f40c,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f690,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f808, 0x1f80c,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa90,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc08, 0x1fc0c,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe90,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30030,
                0x30100, 0x30144,
                0x30190, 0x301d0,
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
                0x30a00, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
                0x30d00, 0x30d00,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
                0x31600, 0x3160c,
                0x31a00, 0x31a1c,
                0x31e00, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
                0x32200, 0x32200,
                0x32208, 0x32240,
                0x32248, 0x32280,
                0x32288, 0x322c0,
                0x322c8, 0x322fc,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
                0x33000, 0x33048,
                0x33060, 0x3309c,
                0x330f0, 0x33148,
                0x33160, 0x3319c,
                0x331f0, 0x332e4,
                0x332f8, 0x333e4,
                0x333f8, 0x33448,
                0x33460, 0x3349c,
                0x334f0, 0x33548,
                0x33560, 0x3359c,
                0x335f0, 0x336e4,
                0x336f8, 0x337e4,
                0x337f8, 0x337fc,
                0x33814, 0x33814,
                0x3382c, 0x3382c,
                0x33880, 0x3388c,
                0x338e8, 0x338ec,
                0x33900, 0x33948,
                0x33960, 0x3399c,
                0x339f0, 0x33ae4,
                0x33af8, 0x33b10,
                0x33b28, 0x33b28,
                0x33b3c, 0x33b50,
                0x33bf0, 0x33c10,
                0x33c28, 0x33c28,
                0x33c3c, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34030,
                0x34100, 0x34144,
                0x34190, 0x341d0,
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
                0x34a00, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
                0x34d00, 0x34d00,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
                0x35600, 0x3560c,
                0x35a00, 0x35a1c,
                0x35e00, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
                0x36200, 0x36200,
                0x36208, 0x36240,
                0x36248, 0x36280,
                0x36288, 0x362c0,
                0x362c8, 0x362fc,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
                0x36b00, 0x36b70,
                0x37000, 0x37048,
                0x37060, 0x3709c,
                0x370f0, 0x37148,
                0x37160, 0x3719c,
                0x371f0, 0x372e4,
                0x372f8, 0x373e4,
                0x373f8, 0x37448,
                0x37460, 0x3749c,
                0x374f0, 0x37548,
                0x37560, 0x3759c,
                0x375f0, 0x376e4,
                0x376f8, 0x377e4,
                0x377f8, 0x377fc,
                0x37814, 0x37814,
                0x3782c, 0x3782c,
                0x37880, 0x3788c,
                0x378e8, 0x378ec,
                0x37900, 0x37948,
                0x37960, 0x3799c,
                0x379f0, 0x37ae4,
                0x37af8, 0x37b10,
                0x37b28, 0x37b28,
                0x37b3c, 0x37b50,
                0x37bf0, 0x37c10,
                0x37c28, 0x37c28,
                0x37c3c, 0x37c50,
                0x37cf0, 0x37cfc,
                0x38000, 0x38030,
                0x38100, 0x38144,
                0x38190, 0x381d0,
                0x38200, 0x38318,
                0x38400, 0x3852c,
                0x38540, 0x3861c,
                0x38800, 0x38834,
                0x388c0, 0x38908,
                0x38910, 0x389ac,
                0x38a00, 0x38a2c,
                0x38a44, 0x38a50,
                0x38a74, 0x38c24,
                0x38d00, 0x38d00,
                0x38d08, 0x38d14,
                0x38d1c, 0x38d20,
                0x38d3c, 0x38d50,
                0x39200, 0x3920c,
                0x39220, 0x39220,
                0x39240, 0x39240,
                0x39600, 0x3960c,
                0x39a00, 0x39a1c,
                0x39e00, 0x39e20,
                0x39e38, 0x39e3c,
                0x39e80, 0x39e80,
                0x39e88, 0x39ea8,
                0x39eb0, 0x39eb4,
                0x39ec8, 0x39ed4,
                0x39fb8, 0x3a004,
                0x3a200, 0x3a200,
                0x3a208, 0x3a240,
                0x3a248, 0x3a280,
                0x3a288, 0x3a2c0,
                0x3a2c8, 0x3a2fc,
                0x3a600, 0x3a630,
                0x3aa00, 0x3aabc,
                0x3ab00, 0x3ab70,
                0x3b000, 0x3b048,
                0x3b060, 0x3b09c,
                0x3b0f0, 0x3b148,
                0x3b160, 0x3b19c,
                0x3b1f0, 0x3b2e4,
                0x3b2f8, 0x3b3e4,
                0x3b3f8, 0x3b448,
                0x3b460, 0x3b49c,
                0x3b4f0, 0x3b548,
                0x3b560, 0x3b59c,
                0x3b5f0, 0x3b6e4,
                0x3b6f8, 0x3b7e4,
                0x3b7f8, 0x3b7fc,
                0x3b814, 0x3b814,
                0x3b82c, 0x3b82c,
                0x3b880, 0x3b88c,
                0x3b8e8, 0x3b8ec,
                0x3b900, 0x3b948,
                0x3b960, 0x3b99c,
                0x3b9f0, 0x3bae4,
                0x3baf8, 0x3bb10,
                0x3bb28, 0x3bb28,
                0x3bb3c, 0x3bb50,
                0x3bbf0, 0x3bc10,
                0x3bc28, 0x3bc28,
                0x3bc3c, 0x3bc50,
                0x3bcf0, 0x3bcfc,
                0x3c000, 0x3c030,
                0x3c100, 0x3c144,
                0x3c190, 0x3c1d0,
                0x3c200, 0x3c318,
                0x3c400, 0x3c52c,
                0x3c540, 0x3c61c,
                0x3c800, 0x3c834,
                0x3c8c0, 0x3c908,
                0x3c910, 0x3c9ac,
                0x3ca00, 0x3ca2c,
                0x3ca44, 0x3ca50,
                0x3ca74, 0x3cc24,
                0x3cd00, 0x3cd00,
                0x3cd08, 0x3cd14,
                0x3cd1c, 0x3cd20,
                0x3cd3c, 0x3cd50,
                0x3d200, 0x3d20c,
                0x3d220, 0x3d220,
                0x3d240, 0x3d240,
                0x3d600, 0x3d60c,
                0x3da00, 0x3da1c,
                0x3de00, 0x3de20,
                0x3de38, 0x3de3c,
                0x3de80, 0x3de80,
                0x3de88, 0x3dea8,
                0x3deb0, 0x3deb4,
                0x3dec8, 0x3ded4,
                0x3dfb8, 0x3e004,
                0x3e200, 0x3e200,
                0x3e208, 0x3e240,
                0x3e248, 0x3e280,
                0x3e288, 0x3e2c0,
                0x3e2c8, 0x3e2fc,
                0x3e600, 0x3e630,
                0x3ea00, 0x3eabc,
                0x3eb00, 0x3eb70,
                0x3f000, 0x3f048,
                0x3f060, 0x3f09c,
                0x3f0f0, 0x3f148,
                0x3f160, 0x3f19c,
                0x3f1f0, 0x3f2e4,
                0x3f2f8, 0x3f3e4,
                0x3f3f8, 0x3f448,
                0x3f460, 0x3f49c,
                0x3f4f0, 0x3f548,
                0x3f560, 0x3f59c,
                0x3f5f0, 0x3f6e4,
                0x3f6f8, 0x3f7e4,
                0x3f7f8, 0x3f7fc,
                0x3f814, 0x3f814,
                0x3f82c, 0x3f82c,
                0x3f880, 0x3f88c,
                0x3f8e8, 0x3f8ec,
                0x3f900, 0x3f948,
                0x3f960, 0x3f99c,
                0x3f9f0, 0x3fae4,
                0x3faf8, 0x3fb10,
                0x3fb28, 0x3fb28,
                0x3fb3c, 0x3fb50,
                0x3fbf0, 0x3fc10,
                0x3fc28, 0x3fc28,
                0x3fc3c, 0x3fc50,
                0x3fcf0, 0x3fcfc,
                0x40000, 0x4000c,
                0x40040, 0x40068,
                0x4007c, 0x40144,
                0x40180, 0x4018c,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
                0x403f8, 0x403fc,
                0x41304, 0x413c4,
                0x41400, 0x4141c,
                0x41480, 0x414d0,
                0x44000, 0x44078,
                0x440c0, 0x44278,
                0x442c0, 0x44478,
                0x444c0, 0x44678,
                0x446c0, 0x44878,
                0x448c0, 0x449fc,
                0x45000, 0x45068,
                0x45080, 0x45084,
                0x450a0, 0x450b0,
                0x45200, 0x45268,
                0x45280, 0x45284,
                0x452a0, 0x452b0,
                0x460c0, 0x460e4,
                0x47000, 0x4708c,
                0x47200, 0x47250,
                0x47400, 0x47420,
                0x47600, 0x47618,
                0x47800, 0x47814,
                0x48000, 0x4800c,
                0x48040, 0x48068,
                0x4807c, 0x48144,
                0x48180, 0x4818c,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
                0x483f8, 0x483fc,
                0x49304, 0x493c4,
                0x49400, 0x4941c,
                0x49480, 0x494d0,
                0x4c000, 0x4c078,
                0x4c0c0, 0x4c278,
                0x4c2c0, 0x4c478,
                0x4c4c0, 0x4c678,
                0x4c6c0, 0x4c878,
                0x4c8c0, 0x4c9fc,
                0x4d000, 0x4d068,
                0x4d080, 0x4d084,
                0x4d0a0, 0x4d0b0,
                0x4d200, 0x4d268,
                0x4d280, 0x4d284,
                0x4d2a0, 0x4d2b0,
                0x4e0c0, 0x4e0e4,
                0x4f000, 0x4f08c,
                0x4f200, 0x4f250,
                0x4f400, 0x4f420,
                0x4f600, 0x4f618,
                0x4f800, 0x4f814,
                0x50000, 0x500cc,
                0x50400, 0x50400,
                0x50800, 0x508cc,
                0x50c00, 0x50c00,
                0x51000, 0x5101c,
                0x51300, 0x51308,
        };

        /* Pick the range table that matches the chip generation. */
        if (is_t4(sc)) {
                reg_ranges = &t4_reg_ranges[0];
                n = nitems(t4_reg_ranges);
        } else {
                reg_ranges = &t5_reg_ranges[0];
                n = nitems(t5_reg_ranges);
        }

        /* Version identifies the layout of the dump: chip id + revision. */
        regs->version = chip_id(sc) | chip_rev(sc) << 10;
        /* Entries are [start, end] pairs, hence the stride of 2. */
        for (i = 0; i < n; i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4019
/*
 * Per-port periodic callout: refresh the port's MAC statistics from the
 * hardware and mirror them into the ifnet counters.  Reschedules itself
 * every hz ticks while the interface is up.
 */
static void
cxgbe_tick(void *arg)
{
        struct port_info *pi = arg;
        struct ifnet *ifp = pi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;

        PORT_LOCK(pi);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return; /* without scheduling another callout */
        }

        /* Pull the port's MAC statistics from the chip into pi->stats. */
        t4_get_port_stats(pi->adapter, pi->tx_chan, s);

        /*
         * Pause frames are subtracted from the frame/octet counts so the
         * ifnet counters reflect data traffic only.  NOTE(review): the
         * octet adjustment assumes 64 bytes per pause frame -- confirm
         * against the MAC counter semantics.
         */
        ifp->if_opackets = s->tx_frames - s->tx_pause;
        ifp->if_ipackets = s->rx_frames - s->rx_pause;
        ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
        ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
        ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
        ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
        /* Input drops: per-channel overflow and truncation counters. */
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
            s->rx_trunc3;

        /* Output drops: MAC tx drops plus buf_ring drops on each txq. */
        drops = s->tx_drop;
        for_each_txq(pi, i, txq)
                drops += txq->br->br_drops;
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        /* Re-arm for the next second. */
        callout_schedule(&pi->tick, hz);
        PORT_UNLOCK(pi);
}
4059
4060 static void
4061 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4062 {
4063         struct ifnet *vlan;
4064
4065         if (arg != ifp || ifp->if_type != IFT_ETHER)
4066                 return;
4067
4068         vlan = VLAN_DEVAT(ifp, vid);
4069         VLAN_SETCOOKIE(vlan, ifp);
4070 }
4071
/*
 * Default CPL handler, installed for any opcode without a registered handler.
 * A stray opcode is a bug: panic under INVARIANTS, otherwise log and drop the
 * payload.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);	/* nobody else will free the payload */
#endif
	return (EDOOFUS);
}
4086
4087 int
4088 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4089 {
4090         uintptr_t *loc, new;
4091
4092         if (opcode >= nitems(sc->cpl_handler))
4093                 return (EINVAL);
4094
4095         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4096         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4097         atomic_store_rel_ptr(loc, new);
4098
4099         return (0);
4100 }
4101
/*
 * Default async-notification handler.  An unexpected notification is a bug:
 * panic under INVARIANTS, otherwise just log it.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4114
4115 int
4116 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4117 {
4118         uintptr_t *loc, new;
4119
4120         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4121         loc = (uintptr_t *) &sc->an_handler;
4122         atomic_store_rel_ptr(loc, new);
4123
4124         return (0);
4125 }
4126
/*
 * Default firmware-message handler.  Recovers the enclosing cpl_fw6_msg from
 * its payload pointer to report the message type.  An unexpected fw_msg is a
 * bug: panic under INVARIANTS, otherwise just log it.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4140
4141 int
4142 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4143 {
4144         uintptr_t *loc, new;
4145
4146         if (type >= nitems(sc->fw_msg_handler))
4147                 return (EINVAL);
4148
4149         /*
4150          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4151          * handler dispatch table.  Reject any attempt to install a handler for
4152          * this subtype.
4153          */
4154         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4155                 return (EINVAL);
4156
4157         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4158         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4159         atomic_store_rel_ptr(loc, new);
4160
4161         return (0);
4162 }
4163
4164 static int
4165 t4_sysctls(struct adapter *sc)
4166 {
4167         struct sysctl_ctx_list *ctx;
4168         struct sysctl_oid *oid;
4169         struct sysctl_oid_list *children, *c0;
4170         static char *caps[] = {
4171                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4172                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4173                     "\6HASHFILTER\7ETHOFLD",
4174                 "\20\1TOE",                             /* caps[2] toecaps */
4175                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4176                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4177                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4178                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4179                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4180                     "\4PO_INITIAOR\5PO_TARGET"
4181         };
4182         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4183
4184         ctx = device_get_sysctl_ctx(sc->dev);
4185
4186         /*
4187          * dev.t4nex.X.
4188          */
4189         oid = device_get_sysctl_tree(sc->dev);
4190         c0 = children = SYSCTL_CHILDREN(oid);
4191
4192         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4193             sc->params.nports, "# of ports");
4194
4195         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4196             NULL, chip_rev(sc), "chip hardware revision");
4197
4198         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4199             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4200
4201         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4202             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4203
4204         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4205             sc->cfcsum, "config file checksum");
4206
4207         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4208             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4209             sysctl_bitfield, "A", "available doorbells");
4210
4211         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4212             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4213             sysctl_bitfield, "A", "available link capabilities");
4214
4215         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4216             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4217             sysctl_bitfield, "A", "available NIC capabilities");
4218
4219         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4220             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4221             sysctl_bitfield, "A", "available TCP offload capabilities");
4222
4223         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4224             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4225             sysctl_bitfield, "A", "available RDMA capabilities");
4226
4227         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4228             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4229             sysctl_bitfield, "A", "available iSCSI capabilities");
4230
4231         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4232             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4233             sysctl_bitfield, "A", "available FCoE capabilities");
4234
4235         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4236             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4237
4238         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4239             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4240             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4241             "interrupt holdoff timer values (us)");
4242
4243         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4244             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4245             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4246             "interrupt holdoff packet counter values");
4247
4248         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4249             NULL, sc->tids.nftids, "number of filters");
4250
4251         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4252             CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4253             "chip temperature (in Celsius)");
4254
4255         t4_sge_sysctls(sc, ctx, children);
4256
4257         sc->lro_timeout = 100;
4258         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4259             &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4260
4261 #ifdef SBUF_DRAIN
4262         /*
4263          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4264          */
4265         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4266             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4267             "logs and miscellaneous information");
4268         children = SYSCTL_CHILDREN(oid);
4269
4270         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4271             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4272             sysctl_cctrl, "A", "congestion control");
4273
4274         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4275             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4276             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4277
4278         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4279             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4280             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4281
4282         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4283             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4284             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4285
4286         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4287             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4288             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4289
4290         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4291             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4292             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4293
4294         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4295             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4296             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4297
4298         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4299             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4300             sysctl_cim_la, "A", "CIM logic analyzer");
4301
4302         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4303             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4304             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4305
4306         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4307             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4308             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4309
4310         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4311             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4312             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4313
4314         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4315             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4316             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4317
4318         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4319             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4320             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4321
4322         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4323             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4324             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4325
4326         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4327             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4328             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4329
4330         if (is_t5(sc)) {
4331                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4332                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4333                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4334
4335                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4336                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4337                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4338         }
4339
4340         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4341             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4342             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4343
4344         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4345             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4346             sysctl_cim_qcfg, "A", "CIM queue configuration");
4347
4348         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4349             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4350             sysctl_cpl_stats, "A", "CPL statistics");
4351
4352         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4353             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4354             sysctl_ddp_stats, "A", "DDP statistics");
4355
4356         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4357             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4358             sysctl_devlog, "A", "firmware's device log");
4359
4360         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4361             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4362             sysctl_fcoe_stats, "A", "FCoE statistics");
4363
4364         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4365             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4366             sysctl_hw_sched, "A", "hardware scheduler ");
4367
4368         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4369             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4370             sysctl_l2t, "A", "hardware L2 table");
4371
4372         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4373             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4374             sysctl_lb_stats, "A", "loopback statistics");
4375
4376         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4377             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4378             sysctl_meminfo, "A", "memory regions");
4379
4380         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4381             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4382             sysctl_mps_tcam, "A", "MPS TCAM entries");
4383
4384         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4385             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4386             sysctl_path_mtus, "A", "path MTUs");
4387
4388         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4389             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4390             sysctl_pm_stats, "A", "PM statistics");
4391
4392         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4393             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4394             sysctl_rdma_stats, "A", "RDMA statistics");
4395
4396         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4397             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4398             sysctl_tcp_stats, "A", "TCP statistics");
4399
4400         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4401             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4402             sysctl_tids, "A", "TID information");
4403
4404         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4405             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4406             sysctl_tp_err_stats, "A", "TP error statistics");
4407
4408         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4409             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4410             sysctl_tp_la, "A", "TP logic analyzer");
4411
4412         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4413             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4414             sysctl_tx_rate, "A", "Tx rate");
4415
4416         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4417             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4418             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4419
4420         if (is_t5(sc)) {
4421                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4422                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4423                     sysctl_wcwr_stats, "A", "write combined work requests");
4424         }
4425 #endif
4426
4427 #ifdef TCP_OFFLOAD
4428         if (is_offload(sc)) {
4429                 /*
4430                  * dev.t4nex.X.toe.
4431                  */
4432                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4433                     NULL, "TOE parameters");
4434                 children = SYSCTL_CHILDREN(oid);
4435
4436                 sc->tt.sndbuf = 256 * 1024;
4437                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4438                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4439
4440                 sc->tt.ddp = 0;
4441                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4442                     &sc->tt.ddp, 0, "DDP allowed");
4443
4444                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4445                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4446                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4447
4448                 sc->tt.ddp_thres =
4449                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4450                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4451                     &sc->tt.ddp_thres, 0, "DDP threshold");
4452
4453                 sc->tt.rx_coalesce = 1;
4454                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4455                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4456         }
4457 #endif
4458
4459
4460         return (0);
4461 }
4462
/*
 * Set up the per-port sysctl tree (dev.cxgbe.X.*): queue configuration knobs
 * and the dev.cxgbe.X.stats node with the MPS port statistics registers.
 * Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
           CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
        /* 10GBASE-T ports expose extra PHY information. */
        if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
                    "PHY temperature (in Celsius)");
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
                    "PHY firmware version");
        }
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &pi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &pi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &pi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Register a 64-bit stat read directly from a hardware register; arg2 is the
 * register address, read by sysctl_handle_t4_reg64 at query time.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Register a 64-bit stat served from the software copy in pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /* We get these from port_stats and they may be stale by upto 1s */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
4686
4687 static int
4688 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4689 {
4690         int rc, *i;
4691         struct sbuf sb;
4692
4693         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4694         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4695                 sbuf_printf(&sb, "%d ", *i);
4696         sbuf_trim(&sb);
4697         sbuf_finish(&sb);
4698         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4699         sbuf_delete(&sb);
4700         return (rc);
4701 }
4702
4703 static int
4704 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4705 {
4706         int rc;
4707         struct sbuf *sb;
4708
4709         rc = sysctl_wire_old_buffer(req, 0);
4710         if (rc != 0)
4711                 return(rc);
4712
4713         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4714         if (sb == NULL)
4715                 return (ENOMEM);
4716
4717         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4718         rc = sbuf_finish(sb);
4719         sbuf_delete(sb);
4720
4721         return (rc);
4722 }
4723
/*
 * Report a value read over MDIO from the 10GBASE-T PHY.  arg2 selects the
 * quantity: 0 = temperature (raw value scaled down by 256), 1 = PHY firmware
 * version.  Serializes the MDIO access with a synchronized op.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;	/* scale raw temperature reading */

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
4748
/*
 * Sysctl handler for the port's interrupt holdoff timer index.  When a
 * new value (0 .. SGE_NTIMERS - 1) is written, the combined
 * timer/packet-count intr_params byte is pushed to every NIC rx queue
 * (and every offload rx queue when TCP_OFFLOAD is configured).
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        struct adapter *sc = pi->adapter;
        int idx, rc, i;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif
        uint8_t v;

        idx = pi->tmr_idx;

        rc = sysctl_handle_int(oidp, &idx, 0, req);
        if (rc != 0 || req->newptr == NULL)
                return (rc);    /* read-only access, or an error */

        if (idx < 0 || idx >= SGE_NTIMERS)
                return (EINVAL);

        rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4tmr");
        if (rc)
                return (rc);

        /* Keep the packet-count enable bit in sync with pktc_idx. */
        v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
        for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
                /* Release store where the platform provides one ... */
                atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
                /* ... plain byte write otherwise. */
                rxq->iq.intr_params = v;
#endif
        }
#ifdef TCP_OFFLOAD
        for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
                atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
                ofld_rxq->iq.intr_params = v;
#endif
        }
#endif
        pi->tmr_idx = idx;

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
4797
4798 static int
4799 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4800 {
4801         struct port_info *pi = arg1;
4802         struct adapter *sc = pi->adapter;
4803         int idx, rc;
4804
4805         idx = pi->pktc_idx;
4806
4807         rc = sysctl_handle_int(oidp, &idx, 0, req);
4808         if (rc != 0 || req->newptr == NULL)
4809                 return (rc);
4810
4811         if (idx < -1 || idx >= SGE_NCOUNTERS)
4812                 return (EINVAL);
4813
4814         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4815             "t4pktc");
4816         if (rc)
4817                 return (rc);
4818
4819         if (pi->flags & PORT_INIT_DONE)
4820                 rc = EBUSY; /* cannot be changed once the queues are created */
4821         else
4822                 pi->pktc_idx = idx;
4823
4824         end_synchronized_op(sc, LOCK_HELD);
4825         return (rc);
4826 }
4827
4828 static int
4829 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4830 {
4831         struct port_info *pi = arg1;
4832         struct adapter *sc = pi->adapter;
4833         int qsize, rc;
4834
4835         qsize = pi->qsize_rxq;
4836
4837         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4838         if (rc != 0 || req->newptr == NULL)
4839                 return (rc);
4840
4841         if (qsize < 128 || (qsize & 7))
4842                 return (EINVAL);
4843
4844         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4845             "t4rxqs");
4846         if (rc)
4847                 return (rc);
4848
4849         if (pi->flags & PORT_INIT_DONE)
4850                 rc = EBUSY; /* cannot be changed once the queues are created */
4851         else
4852                 pi->qsize_rxq = qsize;
4853
4854         end_synchronized_op(sc, LOCK_HELD);
4855         return (rc);
4856 }
4857
4858 static int
4859 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4860 {
4861         struct port_info *pi = arg1;
4862         struct adapter *sc = pi->adapter;
4863         int qsize, rc;
4864
4865         qsize = pi->qsize_txq;
4866
4867         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4868         if (rc != 0 || req->newptr == NULL)
4869                 return (rc);
4870
4871         /* bufring size must be powerof2 */
4872         if (qsize < 128 || !powerof2(qsize))
4873                 return (EINVAL);
4874
4875         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4876             "t4txqs");
4877         if (rc)
4878                 return (rc);
4879
4880         if (pi->flags & PORT_INIT_DONE)
4881                 rc = EBUSY; /* cannot be changed once the queues are created */
4882         else
4883                 pi->qsize_txq = qsize;
4884
4885         end_synchronized_op(sc, LOCK_HELD);
4886         return (rc);
4887 }
4888
4889 static int
4890 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4891 {
4892         struct adapter *sc = arg1;
4893         int reg = arg2;
4894         uint64_t val;
4895
4896         val = t4_read_reg64(sc, reg);
4897
4898         return (sysctl_handle_64(oidp, &val, 0, req));
4899 }
4900
/*
 * Read the chip temperature via the FW_PARAM_DEV_DIAG_TMP firmware
 * query and export it as an int.  The firmware returns 0 when the
 * temperature is unknown; that is reported as -1.  Units are whatever
 * the firmware uses (presumably degrees Celsius — TODO confirm).
 */
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        int rc, t;
        uint32_t param, val;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
        if (rc)
                return (rc);
        param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
            V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);

        /* unknown is returned as 0 but we display -1 in that case */
        t = val == 0 ? -1 : val;

        rc = sysctl_handle_int(oidp, &t, 0, req);
        return (rc);
}
4925
4926 #ifdef SBUF_DRAIN
/*
 * Dump the congestion control table.  For each of the NCCTRL_WIN
 * windows two lines are printed: the additive increments for all
 * NMTUS MTUs, followed by the window's a_wnd parameter and its
 * decrease factor (b_wnd indexes the dec_fac[] table below).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t incr[NMTUS][NCCTRL_WIN];
        /* Human-readable forms of the 8 possible decrease factors. */
        static const char *dec_fac[] = {
                "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
                "0.9375"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_read_cong_tbl(sc, incr);

        for (i = 0; i < NCCTRL_WIN; ++i) {
                sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
                    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
                    incr[5][i], incr[6][i], incr[7][i]);
                sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
                    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
                    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
                    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
4964
/*
 * Names of the CIM queues; the index matches the arg2 value passed to
 * sysctl_cim_ibq_obq() (IBQs first, then OBQs).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
        "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",   /* ibq's */
        "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
        "SGE0-RX", "SGE1-RX"    /* additional obq's (T5 onwards) */
};
4970
/*
 * Dump the contents of one CIM inbound or outbound queue.  arg2 is an
 * index into qname[]: values below CIM_NUM_IBQ select an IBQ, the rest
 * select an OBQ (T5 has two more OBQs than T4).
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n, qid = arg2;
        uint32_t *buf, *p;
        char *qtype;
        u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

        KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
            ("%s: bad qid %d\n", __func__, qid));

        if (qid < CIM_NUM_IBQ) {
                /* inbound queue */
                qtype = "IBQ";
                n = 4 * CIM_IBQ_SIZE;
                buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
                rc = t4_read_cim_ibq(sc, qid, buf, n);
        } else {
                /* outbound queue */
                qtype = "OBQ";
                qid -= CIM_NUM_IBQ;     /* re-base to the OBQ numbering */
                n = 4 * cim_num_obq * CIM_OBQ_SIZE;
                buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
                rc = t4_read_cim_obq(sc, qid, buf, n);
        }

        /* the read routines return -errno on failure */
        if (rc < 0) {
                rc = -rc;
                goto done;
        }
        n = rc * sizeof(uint32_t);      /* rc has # of words actually read */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }

        /* note: arg2 still holds the original (un-re-based) index */
        sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
        /* 16 bytes (four 32-bit words) per output line */
        for (i = 0, p = buf; i < n; i += 16, p += 4)
                sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
                    p[2], p[3]);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
5026
5027 static int
5028 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5029 {
5030         struct adapter *sc = arg1;
5031         u_int cfg;
5032         struct sbuf *sb;
5033         uint32_t *buf, *p;
5034         int rc;
5035
5036         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5037         if (rc != 0)
5038                 return (rc);
5039
5040         rc = sysctl_wire_old_buffer(req, 0);
5041         if (rc != 0)
5042                 return (rc);
5043
5044         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5045         if (sb == NULL)
5046                 return (ENOMEM);
5047
5048         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5049             M_ZERO | M_WAITOK);
5050
5051         rc = -t4_cim_read_la(sc, buf, NULL);
5052         if (rc != 0)
5053                 goto done;
5054
5055         sbuf_printf(sb, "Status   Data      PC%s",
5056             cfg & F_UPDBGLACAPTPCONLY ? "" :
5057             "     LS0Stat  LS0Addr             LS0Data");
5058
5059         KASSERT((sc->params.cim_la_size & 7) == 0,
5060             ("%s: p will walk off the end of buf", __func__));
5061
5062         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5063                 if (cfg & F_UPDBGLACAPTPCONLY) {
5064                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5065                             p[6], p[7]);
5066                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5067                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5068                             p[4] & 0xff, p[5] >> 8);
5069                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5070                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5071                             p[1] & 0xf, p[2] >> 4);
5072                 } else {
5073                         sbuf_printf(sb,
5074                             "\n  %02x   %x%07x %x%07x %08x %08x "
5075                             "%08x%08x%08x%08x",
5076                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5077                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5078                             p[6], p[7]);
5079                 }
5080         }
5081
5082         rc = sbuf_finish(sb);
5083         sbuf_delete(sb);
5084 done:
5085         free(buf, M_CXGBE);
5086         return (rc);
5087 }
5088
/*
 * Dump the CIM MA logic analyzer.  t4_cim_read_ma_la() fills two
 * regions of CIM_MALA_SIZE five-word entries; the first region is
 * printed as raw hex, the second decoded field by field.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        u_int i;
        struct sbuf *sb;
        uint32_t *buf, *p;
        int rc;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        /* Room for both halves: 2 regions x CIM_MALA_SIZE x 5 words. */
        buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
        p = buf;

        /* First half: raw dump, words printed most-significant first. */
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
                    p[1], p[0]);
        }

        /* Second half: decoded fields (bit positions per the header). */
        sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
        for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
                sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
                    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
                    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
                    (p[1] >> 2) | ((p[2] & 3) << 30),
                    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
                    p[0] & 1);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
5132
5133 static int
5134 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5135 {
5136         struct adapter *sc = arg1;
5137         u_int i;
5138         struct sbuf *sb;
5139         uint32_t *buf, *p;
5140         int rc;
5141
5142         rc = sysctl_wire_old_buffer(req, 0);
5143         if (rc != 0)
5144                 return (rc);
5145
5146         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5147         if (sb == NULL)
5148                 return (ENOMEM);
5149
5150         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5151             M_ZERO | M_WAITOK);
5152
5153         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5154         p = buf;
5155
5156         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5157         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5158                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5159                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5160                     p[4], p[3], p[2], p[1], p[0]);
5161         }
5162
5163         sbuf_printf(sb, "\n\nCntl ID               Data");
5164         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5165                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5166                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5167         }
5168
5169         rc = sbuf_finish(sb);
5170         sbuf_delete(sb);
5171         free(buf, M_CXGBE);
5172         return (rc);
5173 }
5174
/*
 * Dump the CIM queue configuration: base, size, threshold (IBQs only)
 * and the read/write pointers plus SOP/EOP counts for every IBQ and
 * OBQ.  The pointer registers live at different ("shadow") addresses
 * on T5+, and T5 has CIM_NUM_OBQ_T5 OBQs instead of CIM_NUM_OBQ.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
        uint16_t thres[CIM_NUM_IBQ];
        uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
        uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
        u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

        if (is_t4(sc)) {
                cim_num_obq = CIM_NUM_OBQ;
                ibq_rdaddr = A_UP_IBQ_0_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_REALADDR;
        } else {
                cim_num_obq = CIM_NUM_OBQ_T5;
                ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
                obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
        }
        nq = CIM_NUM_IBQ + cim_num_obq;

        /* 4 status words per queue, then 2 write-pointer words per OBQ. */
        rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
        if (rc == 0)
                rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
        if (rc != 0)
                return (rc);

        t4_read_cimq_cfg(sc, base, size, thres);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

        /* IBQs: have a threshold and separate rd/wr pointers. */
        for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
                sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
                    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
                    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);
        /* OBQs: wr pointer comes from obq_wr[], relative to base. */
        for ( ; i < nq; i++, p += 4, wr += 2)
                sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
                    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
                    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
                    G_QUEREMFLITS(p[2]) * 16);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5233
5234 static int
5235 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5236 {
5237         struct adapter *sc = arg1;
5238         struct sbuf *sb;
5239         int rc;
5240         struct tp_cpl_stats stats;
5241
5242         rc = sysctl_wire_old_buffer(req, 0);
5243         if (rc != 0)
5244                 return (rc);
5245
5246         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5247         if (sb == NULL)
5248                 return (ENOMEM);
5249
5250         t4_tp_get_cpl_stats(sc, &stats);
5251
5252         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5253             "channel 3\n");
5254         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5255                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5256         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5257                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5258
5259         rc = sbuf_finish(sb);
5260         sbuf_delete(sb);
5261
5262         return (rc);
5263 }
5264
5265 static int
5266 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5267 {
5268         struct adapter *sc = arg1;
5269         struct sbuf *sb;
5270         int rc;
5271         struct tp_usm_stats stats;
5272
5273         rc = sysctl_wire_old_buffer(req, 0);
5274         if (rc != 0)
5275                 return(rc);
5276
5277         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5278         if (sb == NULL)
5279                 return (ENOMEM);
5280
5281         t4_get_usm_stats(sc, &stats);
5282
5283         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5284         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5285         sbuf_printf(sb, "Drops:  %u", stats.drops);
5286
5287         rc = sbuf_finish(sb);
5288         sbuf_delete(sb);
5289
5290         return (rc);
5291 }
5292
/* Firmware device-log severity names, indexed by FW_DEVLOG_LEVEL_*. */
const char *devlog_level_strings[] = {
        [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
        [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
        [FW_DEVLOG_LEVEL_ERR]           = "ERR",
        [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
        [FW_DEVLOG_LEVEL_INFO]          = "INFO",
        [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
};
5301
/* Firmware device-log facility names, indexed by FW_DEVLOG_FACILITY_*. */
const char *devlog_facility_strings[] = {
        [FW_DEVLOG_FACILITY_CORE]       = "CORE",
        [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
        [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
        [FW_DEVLOG_FACILITY_RES]        = "RES",
        [FW_DEVLOG_FACILITY_HW]         = "HW",
        [FW_DEVLOG_FACILITY_FLR]        = "FLR",
        [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
        [FW_DEVLOG_FACILITY_PHY]        = "PHY",
        [FW_DEVLOG_FACILITY_MAC]        = "MAC",
        [FW_DEVLOG_FACILITY_PORT]       = "PORT",
        [FW_DEVLOG_FACILITY_VI]         = "VI",
        [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
        [FW_DEVLOG_FACILITY_ACL]        = "ACL",
        [FW_DEVLOG_FACILITY_TM]         = "TM",
        [FW_DEVLOG_FACILITY_QFC]        = "QFC",
        [FW_DEVLOG_FACILITY_DCB]        = "DCB",
        [FW_DEVLOG_FACILITY_ETH]        = "ETH",
        [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
        [FW_DEVLOG_FACILITY_RI]         = "RI",
        [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
        [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
        [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
        [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
};
5327
/*
 * Dump the firmware's device log.  The log lives in adapter memory and
 * is treated as a ring of fw_devlog_e entries: the entry with the
 * smallest timestamp is the oldest, so output starts there and wraps
 * around to it.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct devlog_params *dparams = &sc->params.devlog;
        struct fw_devlog_e *buf, *e;
        int i, j, rc, nentries, first = 0, m;
        struct sbuf *sb;
        uint64_t ftstamp = UINT64_MAX;

        /* Fall back to hardcoded defaults if the params weren't filled in. */
        if (dparams->start == 0) {
                dparams->memtype = FW_MEMTYPE_EDC0;
                dparams->start = 0x84000;
                dparams->size = 32768;
        }

        nentries = dparams->size / sizeof(struct fw_devlog_e);

        buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
        if (buf == NULL)
                return (ENOMEM);

        m = fwmtype_to_hwmtype(dparams->memtype);
        rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
        if (rc != 0)
                goto done;

        /*
         * First pass: byte-swap every valid entry (the log is stored
         * big-endian) and locate the oldest one.
         */
        for (i = 0; i < nentries; i++) {
                e = &buf[i];

                if (e->timestamp == 0)
                        break;  /* end */

                e->timestamp = be64toh(e->timestamp);
                e->seqno = be32toh(e->seqno);
                for (j = 0; j < 8; j++)
                        e->params[j] = be32toh(e->params[j]);

                if (e->timestamp < ftstamp) {
                        ftstamp = e->timestamp;
                        first = i;
                }
        }

        if (buf[first].timestamp == 0)
                goto done;      /* nothing in the log */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL) {
                rc = ENOMEM;
                goto done;
        }
        sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
            "Seq#", "Tstamp", "Level", "Facility", "Message");

        /* Second pass: print from the oldest entry, wrapping around. */
        i = first;
        do {
                e = &buf[i];
                if (e->timestamp == 0)
                        break;  /* end */

                sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
                    e->seqno, e->timestamp,
                    (e->level < nitems(devlog_level_strings) ?
                        devlog_level_strings[e->level] : "UNKNOWN"),
                    (e->facility < nitems(devlog_facility_strings) ?
                        devlog_facility_strings[e->facility] : "UNKNOWN"));
                /*
                 * NOTE(review): e->fmt is a non-literal format string read
                 * from the device; this trusts the firmware's log contents.
                 */
                sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
                    e->params[2], e->params[3], e->params[4],
                    e->params[5], e->params[6], e->params[7]);

                if (++i == nentries)
                        i = 0;
        } while (i != first);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
5413
5414 static int
5415 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5416 {
5417         struct adapter *sc = arg1;
5418         struct sbuf *sb;
5419         int rc;
5420         struct tp_fcoe_stats stats[4];
5421
5422         rc = sysctl_wire_old_buffer(req, 0);
5423         if (rc != 0)
5424                 return (rc);
5425
5426         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5427         if (sb == NULL)
5428                 return (ENOMEM);
5429
5430         t4_get_fcoe_stats(sc, 0, &stats[0]);
5431         t4_get_fcoe_stats(sc, 1, &stats[1]);
5432         t4_get_fcoe_stats(sc, 2, &stats[2]);
5433         t4_get_fcoe_stats(sc, 3, &stats[3]);
5434
5435         sbuf_printf(sb, "                   channel 0        channel 1        "
5436             "channel 2        channel 3\n");
5437         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5438             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5439             stats[3].octetsDDP);
5440         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5441             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5442         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5443             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5444             stats[3].framesDrop);
5445
5446         rc = sbuf_finish(sb);
5447         sbuf_delete(sb);
5448
5449         return (rc);
5450 }
5451
/*
 * Dump the hardware tx schedulers: mode (class/flow), channel map,
 * rate, class IPG and flow IPG (from the pace table) for each of the
 * NTX_SCHED schedulers.  A zero value for any of these is shown as
 * "disabled".
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i;
        unsigned int map, kbps, ipg, mode;
        unsigned int pace_tab[NTX_SCHED];

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
        mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
        t4_read_pace_tbl(sc, pace_tab);

        sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
            "Class IPG (0.1 ns)   Flow IPG (us)");

        /* Two map bits per scheduler, hence the map >>= 2 each round. */
        for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
                t4_get_tx_sched(sc, i, &kbps, &ipg);
                sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
                    (mode & (1 << i)) ? "flow" : "class", map & 3);
                if (kbps)
                        sbuf_printf(sb, "%9u     ", kbps);
                else
                        sbuf_printf(sb, " disabled     ");

                if (ipg)
                        sbuf_printf(sb, "%13u        ", ipg);
                else
                        sbuf_printf(sb, "     disabled        ");

                if (pace_tab[i])
                        sbuf_printf(sb, "%10u", pace_tab[i]);
                else
                        sbuf_printf(sb, "  disabled");
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5501
/*
 * Dump the loopback port statistics, printed side by side two loopback
 * channels at a time (0/1 then 2/3).
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, j;
        uint64_t *p0, *p1;
        struct lb_port_stats s[2];
        /* Labels in the same order as the counters walked below. */
        static const char *stat_name[] = {
                "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
                "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
                "Frames128To255:", "Frames256To511:", "Frames512To1023:",
                "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
                "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
                "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
                "BG2FramesTrunc:", "BG3FramesTrunc:"
        };

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        memset(s, 0, sizeof(s));

        for (i = 0; i < 4; i += 2) {
                t4_get_lb_stats(sc, i, &s[0]);
                t4_get_lb_stats(sc, i + 1, &s[1]);

                /*
                 * NOTE(review): the walk below assumes lb_port_stats is a
                 * flat run of uint64_t counters starting at 'octets', in
                 * stat_name[] order — verify against the struct definition.
                 */
                p0 = &s[0].octets;
                p1 = &s[1].octets;
                sbuf_printf(sb, "%s                       Loopback %u"
                    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

                for (j = 0; j < nitems(stat_name); j++)
                        sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
                                   *p0++, *p1++);
        }

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5549
5550 static int
5551 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5552 {
5553         int rc = 0;
5554         struct port_info *pi = arg1;
5555         struct sbuf *sb;
5556         static const char *linkdnreasons[] = {
5557                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5558                 "PHY overheated", "unknown", "rx los", "reserved7"
5559         };
5560
5561         rc = sysctl_wire_old_buffer(req, 0);
5562         if (rc != 0)
5563                 return(rc);
5564         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5565         if (sb == NULL)
5566                 return (ENOMEM);
5567
5568         if (pi->linkdnrc < 0)
5569                 sbuf_printf(sb, "n/a");
5570         else if (pi->linkdnrc < nitems(linkdnreasons))
5571                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5572         else
5573                 sbuf_printf(sb, "%d", pi->linkdnrc);
5574
5575         rc = sbuf_finish(sb);
5576         sbuf_delete(sb);
5577
5578         return (rc);
5579 }
5580
/* One memory region: [base, limit] plus an index into a name table. */
struct mem_desc {
        unsigned int base;
        unsigned int limit;
        unsigned int idx;
};

/*
 * qsort comparator ordering mem_desc entries by base address.
 *
 * Fix: the old "a->base - b->base" subtraction was performed on
 * unsigned ints and then converted to int, so bases more than 2^31
 * apart produced a wrong sign (e.g. 0x90000000 vs 0x10000000 compared
 * "less than").  Use the overflow-free (a > b) - (a < b) idiom.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
        const struct mem_desc *d1 = a;
        const struct mem_desc *d2 = b;

        return ((d1->base > d2->base) - (d1->base < d2->base));
}
5593
/*
 * Print one memory region line: name, inclusive [from, to] range and
 * byte count.  An empty region (to == from - 1, making size 0) is
 * skipped.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        unsigned int size;

        size = to - from + 1;
        if (size == 0)
                return;

        /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
        sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
5607
5608 static int
5609 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5610 {
5611         struct adapter *sc = arg1;
5612         struct sbuf *sb;
5613         int rc, i, n;
5614         uint32_t lo, hi, used, alloc;
5615         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5616         static const char *region[] = {
5617                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5618                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5619                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5620                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5621                 "RQUDP region:", "PBL region:", "TXPBL region:",
5622                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5623                 "On-chip queues:"
5624         };
5625         struct mem_desc avail[4];
5626         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5627         struct mem_desc *md = mem;
5628
5629         rc = sysctl_wire_old_buffer(req, 0);
5630         if (rc != 0)
5631                 return (rc);
5632
5633         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5634         if (sb == NULL)
5635                 return (ENOMEM);
5636
5637         for (i = 0; i < nitems(mem); i++) {
5638                 mem[i].limit = 0;
5639                 mem[i].idx = i;
5640         }
5641
5642         /* Find and sort the populated memory ranges */
5643         i = 0;
5644         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5645         if (lo & F_EDRAM0_ENABLE) {
5646                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5647                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5648                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5649                 avail[i].idx = 0;
5650                 i++;
5651         }
5652         if (lo & F_EDRAM1_ENABLE) {
5653                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5654                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5655                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5656                 avail[i].idx = 1;
5657                 i++;
5658         }
5659         if (lo & F_EXT_MEM_ENABLE) {
5660                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5661                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5662                 avail[i].limit = avail[i].base +
5663                     (G_EXT_MEM_SIZE(hi) << 20);
5664                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5665                 i++;
5666         }
5667         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5668                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5669                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5670                 avail[i].limit = avail[i].base +
5671                     (G_EXT_MEM1_SIZE(hi) << 20);
5672                 avail[i].idx = 4;
5673                 i++;
5674         }
5675         if (!i)                                    /* no memory available */
5676                 return 0;
5677         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5678
5679         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5680         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5681         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5682         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5683         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5684         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5685         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5686         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5687         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5688
5689         /* the next few have explicit upper bounds */
5690         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5691         md->limit = md->base - 1 +
5692                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5693                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5694         md++;
5695
5696         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5697         md->limit = md->base - 1 +
5698                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5699                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5700         md++;
5701
5702         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5703                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5704                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5705                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5706         } else {
5707                 md->base = 0;
5708                 md->idx = nitems(region);  /* hide it */
5709         }
5710         md++;
5711
5712 #define ulp_region(reg) \
5713         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5714         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5715
5716         ulp_region(RX_ISCSI);
5717         ulp_region(RX_TDDP);
5718         ulp_region(TX_TPT);
5719         ulp_region(RX_STAG);
5720         ulp_region(RX_RQ);
5721         ulp_region(RX_RQUDP);
5722         ulp_region(RX_PBL);
5723         ulp_region(TX_PBL);
5724 #undef ulp_region
5725
5726         md->base = 0;
5727         md->idx = nitems(region);
5728         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5729                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5730                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5731                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5732         }
5733         md++;
5734
5735         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5736         md->limit = md->base + sc->tids.ntids - 1;
5737         md++;
5738         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5739         md->limit = md->base + sc->tids.ntids - 1;
5740         md++;
5741
5742         md->base = sc->vres.ocq.start;
5743         if (sc->vres.ocq.size)
5744                 md->limit = md->base + sc->vres.ocq.size - 1;
5745         else
5746                 md->idx = nitems(region);  /* hide it */
5747         md++;
5748
5749         /* add any address-space holes, there can be up to 3 */
5750         for (n = 0; n < i - 1; n++)
5751                 if (avail[n].limit < avail[n + 1].base)
5752                         (md++)->base = avail[n].limit;
5753         if (avail[n].limit)
5754                 (md++)->base = avail[n].limit;
5755
5756         n = md - mem;
5757         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5758
5759         for (lo = 0; lo < i; lo++)
5760                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5761                                 avail[lo].limit - 1);
5762
5763         sbuf_printf(sb, "\n");
5764         for (i = 0; i < n; i++) {
5765                 if (mem[i].idx >= nitems(region))
5766                         continue;                        /* skip holes */
5767                 if (!mem[i].limit)
5768                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5769                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5770                                 mem[i].limit);
5771         }
5772
5773         sbuf_printf(sb, "\n");
5774         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5775         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5776         mem_region_show(sb, "uP RAM:", lo, hi);
5777
5778         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5779         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5780         mem_region_show(sb, "uP Extmem2:", lo, hi);
5781
5782         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5783         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5784                    G_PMRXMAXPAGE(lo),
5785                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5786                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5787
5788         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5789         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5790         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5791                    G_PMTXMAXPAGE(lo),
5792                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5793                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5794         sbuf_printf(sb, "%u p-structs\n",
5795                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5796
5797         for (i = 0; i < 4; i++) {
5798                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5799                 if (is_t4(sc)) {
5800                         used = G_USED(lo);
5801                         alloc = G_ALLOC(lo);
5802                 } else {
5803                         used = G_T5_USED(lo);
5804                         alloc = G_T5_ALLOC(lo);
5805                 }
5806                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5807                            i, used, alloc);
5808         }
5809         for (i = 0; i < 4; i++) {
5810                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5811                 if (is_t4(sc)) {
5812                         used = G_USED(lo);
5813                         alloc = G_ALLOC(lo);
5814                 } else {
5815                         used = G_T5_USED(lo);
5816                         alloc = G_T5_ALLOC(lo);
5817                 }
5818                 sbuf_printf(sb,
5819                            "\nLoopback %d using %u pages out of %u allocated",
5820                            i, used, alloc);
5821         }
5822
5823         rc = sbuf_finish(sb);
5824         sbuf_delete(sb);
5825
5826         return (rc);
5827 }
5828
/*
 * Convert a TCAM (x, y) pair into a value/mask: the mask is the union of
 * the two words and the low 48 bits of y are written out MSB-first as an
 * Ethernet address.
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
        int i;

        *mask = x | y;
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                addr[i] = (y >> (8 * (ETHER_ADDR_LEN - 1 - i))) & 0xff;
}
5836
/*
 * Sysctl handler that dumps the MPS TCAM together with the matching entries
 * of the MPS classification SRAM, one line per valid entry.  Entries with
 * the replicate bit set also have their replication map read from the
 * firmware via a FW_LDST command.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* T4 and T5 have different numbers of TCAM/SRAM entries. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* Skip entries that have a bit set in both the X and Y words. */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /* Ask the firmware for this entry's replication map. */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        /* Serialize access to the mailbox; may sleep. */
                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        if (rc != 0) {
                                /* Note the error inline and keep going. */
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /* rc != 0 here only if begin_synchronized_op failed mid-loop. */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5926
5927 static int
5928 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5929 {
5930         struct adapter *sc = arg1;
5931         struct sbuf *sb;
5932         int rc;
5933         uint16_t mtus[NMTUS];
5934
5935         rc = sysctl_wire_old_buffer(req, 0);
5936         if (rc != 0)
5937                 return (rc);
5938
5939         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5940         if (sb == NULL)
5941                 return (ENOMEM);
5942
5943         t4_read_mtu_tbl(sc, mtus, NULL);
5944
5945         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5946             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5947             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5948             mtus[14], mtus[15]);
5949
5950         rc = sbuf_finish(sb);
5951         sbuf_delete(sb);
5952
5953         return (rc);
5954 }
5955
5956 static int
5957 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5958 {
5959         struct adapter *sc = arg1;
5960         struct sbuf *sb;
5961         int rc, i;
5962         uint32_t cnt[PM_NSTATS];
5963         uint64_t cyc[PM_NSTATS];
5964         static const char *rx_stats[] = {
5965                 "Read:", "Write bypass:", "Write mem:", "Flush:"
5966         };
5967         static const char *tx_stats[] = {
5968                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
5969         };
5970
5971         rc = sysctl_wire_old_buffer(req, 0);
5972         if (rc != 0)
5973                 return (rc);
5974
5975         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5976         if (sb == NULL)
5977                 return (ENOMEM);
5978
5979         t4_pmtx_get_stats(sc, cnt, cyc);
5980         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
5981         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
5982                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
5983                     cyc[i]);
5984
5985         t4_pmrx_get_stats(sc, cnt, cyc);
5986         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
5987         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
5988                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
5989                     cyc[i]);
5990
5991         rc = sbuf_finish(sb);
5992         sbuf_delete(sb);
5993
5994         return (rc);
5995 }
5996
5997 static int
5998 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5999 {
6000         struct adapter *sc = arg1;
6001         struct sbuf *sb;
6002         int rc;
6003         struct tp_rdma_stats stats;
6004
6005         rc = sysctl_wire_old_buffer(req, 0);
6006         if (rc != 0)
6007                 return (rc);
6008
6009         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6010         if (sb == NULL)
6011                 return (ENOMEM);
6012
6013         t4_tp_get_rdma_stats(sc, &stats);
6014         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6015         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6016
6017         rc = sbuf_finish(sb);
6018         sbuf_delete(sb);
6019
6020         return (rc);
6021 }
6022
6023 static int
6024 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6025 {
6026         struct adapter *sc = arg1;
6027         struct sbuf *sb;
6028         int rc;
6029         struct tp_tcp_stats v4, v6;
6030
6031         rc = sysctl_wire_old_buffer(req, 0);
6032         if (rc != 0)
6033                 return (rc);
6034
6035         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6036         if (sb == NULL)
6037                 return (ENOMEM);
6038
6039         t4_tp_get_tcp_stats(sc, &v4, &v6);
6040         sbuf_printf(sb,
6041             "                                IP                 IPv6\n");
6042         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6043             v4.tcpOutRsts, v6.tcpOutRsts);
6044         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6045             v4.tcpInSegs, v6.tcpInSegs);
6046         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6047             v4.tcpOutSegs, v6.tcpOutSegs);
6048         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6049             v4.tcpRetransSegs, v6.tcpRetransSegs);
6050
6051         rc = sbuf_finish(sb);
6052         sbuf_delete(sb);
6053
6054         return (rc);
6055 }
6056
/*
 * Sysctl handler that summarizes TID usage: ATIDs, the connection TID
 * range (split into two parts when the LE hash is enabled), STIDs, FTIDs,
 * and the LE's IPv4/IPv6 active-TID counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        if (t->ntids) {
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        /*
                         * Hash enabled: the TID space is reported as two
                         * ranges; 'b' marks where the first range ends.
                         */
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        /* Hardware's own view of active IPv4/IPv6 TID users. */
        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6116
/*
 * Sysctl handler that dumps the TP error statistics: per-channel MAC,
 * header, and TCP input error counters plus tunnel/offload drop counters,
 * followed by the two global offload counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_err_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_err_stats(sc, &stats);

        /* One row per counter, one column per channel (4 channels). */
        sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
                      "channel 3\n");
        sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
            stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
            stats.macInErrs[3]);
        sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
            stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
            stats.hdrInErrs[3]);
        sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
            stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
            stats.tcpInErrs[3]);
        sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
            stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
            stats.tcp6InErrs[3]);
        sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
            stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
            stats.tnlCongDrops[3]);
        sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
            stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
            stats.tnlTxDrops[3]);
        sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
            stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
            stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
        sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
            stats.ofldChanDrops[0], stats.ofldChanDrops[1],
            stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
        sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
            stats.ofldNoNeigh, stats.ofldCongDefer);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6169
/*
 * Describes one bit-field within a 64-bit logic-analyzer record; tables of
 * these are decoded by field_desc_show().
 */
struct field_desc {
        const char *name;       /* display name; NULL terminates a table */
        u_int start;            /* lowest bit position of the field */
        u_int width;            /* field width in bits */
};
6175
6176 static void
6177 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6178 {
6179         char buf[32];
6180         int line_size = 0;
6181
6182         while (f->name) {
6183                 uint64_t mask = (1ULL << f->width) - 1;
6184                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6185                     ((uintmax_t)v >> f->start) & mask);
6186
6187                 if (line_size + len >= 79) {
6188                         line_size = 8;
6189                         sbuf_printf(sb, "\n        ");
6190                 }
6191                 sbuf_printf(sb, "%s ", buf);
6192                 line_size += len + 1;
6193                 f++;
6194         }
6195         sbuf_printf(sb, "\n");
6196 }
6197
6198 static struct field_desc tp_la0[] = {
6199         { "RcfOpCodeOut", 60, 4 },
6200         { "State", 56, 4 },
6201         { "WcfState", 52, 4 },
6202         { "RcfOpcSrcOut", 50, 2 },
6203         { "CRxError", 49, 1 },
6204         { "ERxError", 48, 1 },
6205         { "SanityFailed", 47, 1 },
6206         { "SpuriousMsg", 46, 1 },
6207         { "FlushInputMsg", 45, 1 },
6208         { "FlushInputCpl", 44, 1 },
6209         { "RssUpBit", 43, 1 },
6210         { "RssFilterHit", 42, 1 },
6211         { "Tid", 32, 10 },
6212         { "InitTcb", 31, 1 },
6213         { "LineNumber", 24, 7 },
6214         { "Emsg", 23, 1 },
6215         { "EdataOut", 22, 1 },
6216         { "Cmsg", 21, 1 },
6217         { "CdataOut", 20, 1 },
6218         { "EreadPdu", 19, 1 },
6219         { "CreadPdu", 18, 1 },
6220         { "TunnelPkt", 17, 1 },
6221         { "RcfPeerFin", 16, 1 },
6222         { "RcfReasonOut", 12, 4 },
6223         { "TxCchannel", 10, 2 },
6224         { "RcfTxChannel", 8, 2 },
6225         { "RxEchannel", 6, 2 },
6226         { "RcfRxChannel", 5, 1 },
6227         { "RcfDataOutSrdy", 4, 1 },
6228         { "RxDvld", 3, 1 },
6229         { "RxOoDvld", 2, 1 },
6230         { "RxCongestion", 1, 1 },
6231         { "TxCongestion", 0, 1 },
6232         { NULL }
6233 };
6234
6235 static struct field_desc tp_la1[] = {
6236         { "CplCmdIn", 56, 8 },
6237         { "CplCmdOut", 48, 8 },
6238         { "ESynOut", 47, 1 },
6239         { "EAckOut", 46, 1 },
6240         { "EFinOut", 45, 1 },
6241         { "ERstOut", 44, 1 },
6242         { "SynIn", 43, 1 },
6243         { "AckIn", 42, 1 },
6244         { "FinIn", 41, 1 },
6245         { "RstIn", 40, 1 },
6246         { "DataIn", 39, 1 },
6247         { "DataInVld", 38, 1 },
6248         { "PadIn", 37, 1 },
6249         { "RxBufEmpty", 36, 1 },
6250         { "RxDdp", 35, 1 },
6251         { "RxFbCongestion", 34, 1 },
6252         { "TxFbCongestion", 33, 1 },
6253         { "TxPktSumSrdy", 32, 1 },
6254         { "RcfUlpType", 28, 4 },
6255         { "Eread", 27, 1 },
6256         { "Ebypass", 26, 1 },
6257         { "Esave", 25, 1 },
6258         { "Static0", 24, 1 },
6259         { "Cread", 23, 1 },
6260         { "Cbypass", 22, 1 },
6261         { "Csave", 21, 1 },
6262         { "CPktOut", 20, 1 },
6263         { "RxPagePoolFull", 18, 2 },
6264         { "RxLpbkPkt", 17, 1 },
6265         { "TxLpbkPkt", 16, 1 },
6266         { "RxVfValid", 15, 1 },
6267         { "SynLearned", 14, 1 },
6268         { "SetDelEntry", 13, 1 },
6269         { "SetInvEntry", 12, 1 },
6270         { "CpcmdDvld", 11, 1 },
6271         { "CpcmdSave", 10, 1 },
6272         { "RxPstructsFull", 8, 2 },
6273         { "EpcmdDvld", 7, 1 },
6274         { "EpcmdFlush", 6, 1 },
6275         { "EpcmdTrimPrefix", 5, 1 },
6276         { "EpcmdTrimPostfix", 4, 1 },
6277         { "ERssIp4Pkt", 3, 1 },
6278         { "ERssIp6Pkt", 2, 1 },
6279         { "ERssTcpUdpPkt", 1, 1 },
6280         { "ERssFceFipPkt", 0, 1 },
6281         { NULL }
6282 };
6283
6284 static struct field_desc tp_la2[] = {
6285         { "CplCmdIn", 56, 8 },
6286         { "MpsVfVld", 55, 1 },
6287         { "MpsPf", 52, 3 },
6288         { "MpsVf", 44, 8 },
6289         { "SynIn", 43, 1 },
6290         { "AckIn", 42, 1 },
6291         { "FinIn", 41, 1 },
6292         { "RstIn", 40, 1 },
6293         { "DataIn", 39, 1 },
6294         { "DataInVld", 38, 1 },
6295         { "PadIn", 37, 1 },
6296         { "RxBufEmpty", 36, 1 },
6297         { "RxDdp", 35, 1 },
6298         { "RxFbCongestion", 34, 1 },
6299         { "TxFbCongestion", 33, 1 },
6300         { "TxPktSumSrdy", 32, 1 },
6301         { "RcfUlpType", 28, 4 },
6302         { "Eread", 27, 1 },
6303         { "Ebypass", 26, 1 },
6304         { "Esave", 25, 1 },
6305         { "Static0", 24, 1 },
6306         { "Cread", 23, 1 },
6307         { "Cbypass", 22, 1 },
6308         { "Csave", 21, 1 },
6309         { "CPktOut", 20, 1 },
6310         { "RxPagePoolFull", 18, 2 },
6311         { "RxLpbkPkt", 17, 1 },
6312         { "TxLpbkPkt", 16, 1 },
6313         { "RxVfValid", 15, 1 },
6314         { "SynLearned", 14, 1 },
6315         { "SetDelEntry", 13, 1 },
6316         { "SetInvEntry", 12, 1 },
6317         { "CpcmdDvld", 11, 1 },
6318         { "CpcmdSave", 10, 1 },
6319         { "RxPstructsFull", 8, 2 },
6320         { "EpcmdDvld", 7, 1 },
6321         { "EpcmdFlush", 6, 1 },
6322         { "EpcmdTrimPrefix", 5, 1 },
6323         { "EpcmdTrimPostfix", 4, 1 },
6324         { "ERssIp4Pkt", 3, 1 },
6325         { "ERssIp6Pkt", 2, 1 },
6326         { "ERssTcpUdpPkt", 1, 1 },
6327         { "ERssFceFipPkt", 0, 1 },
6328         { NULL }
6329 };
6330
/*
 * Decoder for the single-word TP LA modes: one tp_la0 record per entry.
 * 'idx' is unused; it exists to match the show_func callback signature
 * used by sysctl_tp_la().
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

        field_desc_show(sb, *p, tp_la0);
}
6337
/*
 * Decoder for DBGLAMODE 2: each entry is two 64-bit words, both in the
 * tp_la0 layout.  The second word of the very last entry is suppressed
 * when it reads as all-ones.
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

        if (idx)
                sbuf_printf(sb, "\n");
        field_desc_show(sb, p[0], tp_la0);
        if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
                field_desc_show(sb, p[1], tp_la0);
}
6348
/*
 * Decoder for DBGLAMODE 3: like mode 2, but the layout of the second word
 * depends on bit 17 of the first word (TunnelPkt in the tp_la0 layout):
 * tp_la2 when set, tp_la1 otherwise.
 */
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{

        if (idx)
                sbuf_printf(sb, "\n");
        field_desc_show(sb, p[0], tp_la0);
        if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
                field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}
6359
6360 static int
6361 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6362 {
6363         struct adapter *sc = arg1;
6364         struct sbuf *sb;
6365         uint64_t *buf, *p;
6366         int rc;
6367         u_int i, inc;
6368         void (*show_func)(struct sbuf *, uint64_t *, int);
6369
6370         rc = sysctl_wire_old_buffer(req, 0);
6371         if (rc != 0)
6372                 return (rc);
6373
6374         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6375         if (sb == NULL)
6376                 return (ENOMEM);
6377
6378         buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6379
6380         t4_tp_read_la(sc, buf, NULL);
6381         p = buf;
6382
6383         switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6384         case 2:
6385                 inc = 2;
6386                 show_func = tp_la_show2;
6387                 break;
6388         case 3:
6389                 inc = 2;
6390                 show_func = tp_la_show3;
6391                 break;
6392         default:
6393                 inc = 1;
6394                 show_func = tp_la_show;
6395         }
6396
6397         for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6398                 (*show_func)(sb, p, i);
6399
6400         rc = sbuf_finish(sb);
6401         sbuf_delete(sb);
6402         free(buf, M_CXGBE);
6403         return (rc);
6404 }
6405
6406 static int
6407 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6408 {
6409         struct adapter *sc = arg1;
6410         struct sbuf *sb;
6411         int rc;
6412         u64 nrate[NCHAN], orate[NCHAN];
6413
6414         rc = sysctl_wire_old_buffer(req, 0);
6415         if (rc != 0)
6416                 return (rc);
6417
6418         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6419         if (sb == NULL)
6420                 return (ENOMEM);
6421
6422         t4_get_chan_txrate(sc, nrate, orate);
6423         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6424                  "channel 3\n");
6425         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6426             nrate[0], nrate[1], nrate[2], nrate[3]);
6427         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6428             orate[0], orate[1], orate[2], orate[3]);
6429
6430         rc = sbuf_finish(sb);
6431         sbuf_delete(sb);
6432
6433         return (rc);
6434 }
6435
6436 static int
6437 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6438 {
6439         struct adapter *sc = arg1;
6440         struct sbuf *sb;
6441         uint32_t *buf, *p;
6442         int rc, i;
6443
6444         rc = sysctl_wire_old_buffer(req, 0);
6445         if (rc != 0)
6446                 return (rc);
6447
6448         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6449         if (sb == NULL)
6450                 return (ENOMEM);
6451
6452         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6453             M_ZERO | M_WAITOK);
6454
6455         t4_ulprx_read_la(sc, buf);
6456         p = buf;
6457
6458         sbuf_printf(sb, "      Pcmd        Type   Message"
6459             "                Data");
6460         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6461                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6462                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6463         }
6464
6465         rc = sbuf_finish(sb);
6466         sbuf_delete(sb);
6467         free(buf, M_CXGBE);
6468         return (rc);
6469 }
6470
6471 static int
6472 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6473 {
6474         struct adapter *sc = arg1;
6475         struct sbuf *sb;
6476         int rc, v;
6477
6478         rc = sysctl_wire_old_buffer(req, 0);
6479         if (rc != 0)
6480                 return (rc);
6481
6482         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6483         if (sb == NULL)
6484                 return (ENOMEM);
6485
6486         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6487         if (G_STATSOURCE_T5(v) == 7) {
6488                 if (G_STATMODE(v) == 0) {
6489                         sbuf_printf(sb, "total %d, incomplete %d",
6490                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6491                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6492                 } else if (G_STATMODE(v) == 1) {
6493                         sbuf_printf(sb, "total %d, data overflow %d",
6494                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6495                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6496                 }
6497         }
6498         rc = sbuf_finish(sb);
6499         sbuf_delete(sb);
6500
6501         return (rc);
6502 }
6503 #endif
6504
6505 static inline void
6506 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6507 {
6508         struct buf_ring *br;
6509         struct mbuf *m;
6510
6511         TXQ_LOCK_ASSERT_OWNED(txq);
6512
6513         br = txq->br;
6514         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6515         if (m)
6516                 t4_eth_tx(ifp, txq, m);
6517 }
6518
6519 void
6520 t4_tx_callout(void *arg)
6521 {
6522         struct sge_eq *eq = arg;
6523         struct adapter *sc;
6524
6525         if (EQ_TRYLOCK(eq) == 0)
6526                 goto reschedule;
6527
6528         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6529                 EQ_UNLOCK(eq);
6530 reschedule:
6531                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6532                         callout_schedule(&eq->tx_callout, 1);
6533                 return;
6534         }
6535
6536         EQ_LOCK_ASSERT_OWNED(eq);
6537
6538         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6539
6540                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6541                         struct sge_txq *txq = arg;
6542                         struct port_info *pi = txq->ifp->if_softc;
6543
6544                         sc = pi->adapter;
6545                 } else {
6546                         struct sge_wrq *wrq = arg;
6547
6548                         sc = wrq->adapter;
6549                 }
6550
6551                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6552         }
6553
6554         EQ_UNLOCK(eq);
6555 }
6556
6557 void
6558 t4_tx_task(void *arg, int count)
6559 {
6560         struct sge_eq *eq = arg;
6561
6562         EQ_LOCK(eq);
6563         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6564                 struct sge_txq *txq = arg;
6565                 txq_start(txq->ifp, txq);
6566         } else {
6567                 struct sge_wrq *wrq = arg;
6568                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6569         }
6570         EQ_UNLOCK(eq);
6571 }
6572
6573 static uint32_t
6574 fconf_to_mode(uint32_t fconf)
6575 {
6576         uint32_t mode;
6577
6578         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6579             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6580
6581         if (fconf & F_FRAGMENTATION)
6582                 mode |= T4_FILTER_IP_FRAGMENT;
6583
6584         if (fconf & F_MPSHITTYPE)
6585                 mode |= T4_FILTER_MPS_HIT_TYPE;
6586
6587         if (fconf & F_MACMATCH)
6588                 mode |= T4_FILTER_MAC_IDX;
6589
6590         if (fconf & F_ETHERTYPE)
6591                 mode |= T4_FILTER_ETH_TYPE;
6592
6593         if (fconf & F_PROTOCOL)
6594                 mode |= T4_FILTER_IP_PROTO;
6595
6596         if (fconf & F_TOS)
6597                 mode |= T4_FILTER_IP_TOS;
6598
6599         if (fconf & F_VLAN)
6600                 mode |= T4_FILTER_VLAN;
6601
6602         if (fconf & F_VNIC_ID)
6603                 mode |= T4_FILTER_VNIC;
6604
6605         if (fconf & F_PORT)
6606                 mode |= T4_FILTER_PORT;
6607
6608         if (fconf & F_FCOE)
6609                 mode |= T4_FILTER_FCoE;
6610
6611         return (mode);
6612 }
6613
6614 static uint32_t
6615 mode_to_fconf(uint32_t mode)
6616 {
6617         uint32_t fconf = 0;
6618
6619         if (mode & T4_FILTER_IP_FRAGMENT)
6620                 fconf |= F_FRAGMENTATION;
6621
6622         if (mode & T4_FILTER_MPS_HIT_TYPE)
6623                 fconf |= F_MPSHITTYPE;
6624
6625         if (mode & T4_FILTER_MAC_IDX)
6626                 fconf |= F_MACMATCH;
6627
6628         if (mode & T4_FILTER_ETH_TYPE)
6629                 fconf |= F_ETHERTYPE;
6630
6631         if (mode & T4_FILTER_IP_PROTO)
6632                 fconf |= F_PROTOCOL;
6633
6634         if (mode & T4_FILTER_IP_TOS)
6635                 fconf |= F_TOS;
6636
6637         if (mode & T4_FILTER_VLAN)
6638                 fconf |= F_VLAN;
6639
6640         if (mode & T4_FILTER_VNIC)
6641                 fconf |= F_VNIC_ID;
6642
6643         if (mode & T4_FILTER_PORT)
6644                 fconf |= F_PORT;
6645
6646         if (mode & T4_FILTER_FCoE)
6647                 fconf |= F_FCOE;
6648
6649         return (fconf);
6650 }
6651
6652 static uint32_t
6653 fspec_to_fconf(struct t4_filter_specification *fs)
6654 {
6655         uint32_t fconf = 0;
6656
6657         if (fs->val.frag || fs->mask.frag)
6658                 fconf |= F_FRAGMENTATION;
6659
6660         if (fs->val.matchtype || fs->mask.matchtype)
6661                 fconf |= F_MPSHITTYPE;
6662
6663         if (fs->val.macidx || fs->mask.macidx)
6664                 fconf |= F_MACMATCH;
6665
6666         if (fs->val.ethtype || fs->mask.ethtype)
6667                 fconf |= F_ETHERTYPE;
6668
6669         if (fs->val.proto || fs->mask.proto)
6670                 fconf |= F_PROTOCOL;
6671
6672         if (fs->val.tos || fs->mask.tos)
6673                 fconf |= F_TOS;
6674
6675         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6676                 fconf |= F_VLAN;
6677
6678         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6679                 fconf |= F_VNIC_ID;
6680
6681         if (fs->val.iport || fs->mask.iport)
6682                 fconf |= F_PORT;
6683
6684         if (fs->val.fcoe || fs->mask.fcoe)
6685                 fconf |= F_FCOE;
6686
6687         return (fconf);
6688 }
6689
/*
 * Report the adapter's global filter mode via *mode.  The mode is derived
 * from the hardware TP_VLAN_PRI_MAP register; a cached copy lives in
 * sc->params.tp.vlan_pri_map and is resynchronized here if it has gone
 * stale.  Returns 0 on success or an errno from begin_synchronized_op().
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        int rc;
        uint32_t fconf;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getfm");
        if (rc)
                return (rc);

        /* Read the live TP_VLAN_PRI_MAP value through the TP PIO window. */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
            A_TP_VLAN_PRI_MAP);

        if (sc->params.tp.vlan_pri_map != fconf) {
                /* Cached copy disagrees with hardware: warn and refresh. */
                log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
                    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
                    fconf);
                sc->params.tp.vlan_pri_map = fconf;
        }

        *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6716
/*
 * Set the global filter mode (TP_VLAN_PRI_MAP).  Refused with EBUSY while
 * any filter is installed or (with TCP_OFFLOAD) while offload is active,
 * because a mode change would invalidate existing filters.  The actual
 * register update is compiled out ("notyet"), so this currently always
 * ends in ENOTSUP once the checks pass.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        uint32_t fconf;
        int rc;

        fconf = mode_to_fconf(mode);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4setfm");
        if (rc)
                return (rc);

        /* Can't change the mode while any filters are in use. */
        if (sc->tids.ftids_in_use > 0) {
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        /* Active offload also pins the current mode. */
        if (sc->offload_map) {
                rc = EBUSY;
                goto done;
        }
#endif

#ifdef notyet
        rc = -t4_set_filter_mode(sc, fconf);
        if (rc == 0)
                sc->filter_mode = fconf;
#else
        rc = ENOTSUP;
#endif

done:
        end_synchronized_op(sc, LOCK_HELD);
        return (rc);
}
6754
6755 static inline uint64_t
6756 get_filter_hits(struct adapter *sc, uint32_t fid)
6757 {
6758         uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6759         uint64_t hits;
6760
6761         memwin_info(sc, 0, &mw_base, NULL);
6762         off = position_memwin(sc, 0,
6763             tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6764         if (is_t4(sc)) {
6765                 hits = t4_read_reg64(sc, mw_base + off + 16);
6766                 hits = be64toh(hits);
6767         } else {
6768                 hits = t4_read_reg(sc, mw_base + off + 24);
6769                 hits = be32toh(hits);
6770         }
6771
6772         return (hits);
6773 }
6774
/*
 * Retrieve the first valid filter at index >= t->idx.  On a hit, t is
 * filled in with the filter's index, L2T/SMT indices, hit count, and
 * specification.  If no valid filter is found, t->idx is set to
 * 0xffffffff.  Always returns 0 once the synchronized op is acquired;
 * "not found" is reported in-band via t->idx.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        int i, rc, nfilters = sc->tids.nftids;
        struct filter_entry *f;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getf");
        if (rc)
                return (rc);

        /* Nothing installed, table never allocated, or start out of range. */
        if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                goto done;
        }

        f = &sc->tids.ftid_tab[t->idx];
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        t->idx = i;
                        t->l2tidx = f->l2t ? f->l2t->idx : 0;
                        t->smtidx = f->smtidx;
                        /* UINT64_MAX signals "hit counting disabled". */
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, t->idx);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }

        t->idx = 0xffffffff;
done:
        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6813
/*
 * Install the filter described by t->fs at index t->idx.  Validates the
 * request against the global filter mode and adapter limits, lazily
 * allocates the ftid table on first use, sends a FW_FILTER_WR to the
 * firmware via set_filter_wr(), and then sleeps until t4_filter_rpl()
 * reports the outcome.  Returns 0 on success, EINPROGRESS if the wait
 * was interrupted by a signal, or another errno on failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters, nports;
        struct filter_entry *f;
        int i, rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;
        nports = sc->params.nports;

        /* No filter region configured on this adapter. */
        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        if (t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        /* Validate against the global filter mode */
        if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
            sc->params.tp.vlan_pri_map) {
                rc = E2BIG;
                goto done;
        }

        /* A switch filter must point at a valid egress port. */
        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
                rc = EINVAL;
                goto done;
        }

        if (t->fs.val.iport >= nports) {
                rc = EINVAL;
                goto done;
        }

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq) {
                rc = EINVAL;
                goto done;
        }

        /* IPv6 filter idx must be 4 aligned */
        if (t->fs.type == 1 &&
            ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
                rc = EINVAL;
                goto done;
        }

        /* Lazily allocate the filter table (and its lock) on first use. */
        if (sc->tids.ftid_tab == NULL) {
                KASSERT(sc->tids.ftids_in_use == 0,
                    ("%s: no memory allocated but filters_in_use > 0",
                    __func__));

                sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
                    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
                if (sc->tids.ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
        }

        /*
         * An IPv6 filter (type == 1) occupies 4 consecutive slots, so
         * check all of them; for IPv4 only the first slot is checked.
         */
        for (i = 0; i < 4; i++) {
                f = &sc->tids.ftid_tab[t->idx + i];

                if (f->pending || f->valid) {
                        rc = EBUSY;
                        goto done;
                }
                if (f->locked) {
                        rc = EPERM;
                        goto done;
                }

                if (t->fs.type == 0)
                        break;
        }

        f = &sc->tids.ftid_tab[t->idx];
        f->fs = t->fs;

        rc = set_filter_wr(sc, t->idx);
done:
        end_synchronized_op(sc, 0);

        if (rc == 0) {
                /*
                 * Wait for the firmware reply: t4_filter_rpl() clears
                 * f->pending and sets f->valid (or clears the entry on
                 * failure), then wakes us via the ftid_tab channel.
                 */
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? 0 : EIO;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4setfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }
        return (rc);
}
6928
/*
 * Remove the filter at index t->idx.  Sends a delete work request to the
 * firmware via del_filter_wr() and then sleeps until t4_filter_rpl()
 * confirms the removal.  t->fs is filled in with the deleted filter's
 * specification for the caller's benefit.  Returns 0 on success,
 * EINPROGRESS if interrupted while waiting, or another errno.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters;
        struct filter_entry *f;
        int rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;

        /* No filter region configured on this adapter. */
        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
            t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        f = &sc->tids.ftid_tab[t->idx];

        /* An add/delete is already in flight for this slot. */
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        if (f->locked) {
                rc = EPERM;
                goto done;
        }

        if (f->valid) {
                t->fs = f->fs;  /* extra info for the caller */
                rc = del_filter_wr(sc, t->idx);
        }

done:
        end_synchronized_op(sc, 0);

        if (rc == 0) {
                /*
                 * Wait for the firmware reply: on a successful delete
                 * t4_filter_rpl() clears the entry (pending and valid
                 * both become 0) and wakes us up.
                 */
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? EIO : 0;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4delfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (rc);
}
6996
6997 static void
6998 clear_filter(struct filter_entry *f)
6999 {
7000         if (f->l2t)
7001                 t4_l2t_release(f->l2t);
7002
7003         bzero(f, sizeof (*f));
7004 }
7005
/*
 * Build and send the FW_FILTER_WR work request that installs filter
 * 'fidx' in hardware.  Allocates an L2T switching entry first if the
 * filter rewrites the destination MAC or VLAN.  Marks the entry pending;
 * completion is reported asynchronously via t4_filter_rpl().  Returns 0
 * if the WR was queued, EAGAIN/ENOMEM on resource shortage.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
        struct filter_entry *f = &sc->tids.ftid_tab[fidx];
        struct wrqe *wr;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (f->fs.newdmac || f->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                f->l2t = t4_l2t_alloc_switching(sc->l2t);
                if (f->l2t == NULL)
                        return (EAGAIN);
                if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
                    f->fs.dmac)) {
                        t4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return (ENOMEM);
                }
        }

        /* Hardware tid for this filter = base of filter region + index. */
        ftid = sc->tids.ftid_base + fidx;

        wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
        if (wr == NULL)
                return (ENOMEM);

        fwr = wrtod(wr);
        bzero(fwr, sizeof (*fwr));

        /*
         * Pack the filter specification into the work request.  Multi-byte
         * fields are converted to big-endian (the firmware's byte order).
         */
        fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(ftid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        /*
         * NOTE(review): no byte swap here, presumably because
         * frag_to_ovlan_vldm is an 8-bit field -- confirm against the
         * fw_filter_wr layout in the firmware interface header.
         */
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
        fwr->smac_sel = 0;
        /* Replies for this WR are delivered to the firmware event queue. */
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        if (f->fs.newsmac)
                bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

        /* Mark in-flight; t4_filter_rpl() clears pending on completion. */
        f->pending = 1;
        sc->tids.ftids_in_use++;

        t4_wrq_tx(sc, wr);
        return (0);
}
7108
7109 static int
7110 del_filter_wr(struct adapter *sc, int fidx)
7111 {
7112         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7113         struct wrqe *wr;
7114         struct fw_filter_wr *fwr;
7115         unsigned int ftid;
7116
7117         ftid = sc->tids.ftid_base + fidx;
7118
7119         wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7120         if (wr == NULL)
7121                 return (ENOMEM);
7122         fwr = wrtod(wr);
7123         bzero(fwr, sizeof (*fwr));
7124
7125         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7126
7127         f->pending = 1;
7128         t4_wrq_tx(sc, wr);
7129         return (0);
7130 }
7131
/*
 * Reply handler for filter work requests (CPL_SET_TCB_RPL carrying a
 * FW_FILTER_WR cookie).  Completes the pending add or delete recorded in
 * the ftid table and wakes any thread sleeping in set_filter() or
 * del_filter().  Replies for tids outside the filter region are ignored.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        unsigned int idx = GET_TID(rpl);

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));

        /* Convert the tid to a filter-region index and range-check it. */
        if (idx >= sc->tids.ftid_base &&
            (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
                unsigned int rc = G_COOKIE(rpl->cookie);
                struct filter_entry *f = &sc->tids.ftid_tab[idx];

                mtx_lock(&sc->tids.ftid_lock);
                if (rc == FW_FILTER_WR_FLT_ADDED) {
                        KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
                            __func__, idx));
                        f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        if (rc != FW_FILTER_WR_FLT_DELETED) {
                                /* Add or delete failed, display an error */
                                log(LOG_ERR,
                                    "filter %u setup failed with error %u\n",
                                    idx, rc);
                        }

                        /* Deleted or failed either way: release the entry. */
                        clear_filter(f);
                        sc->tids.ftids_in_use--;
                }
                wakeup(&sc->tids.ftid_tab);
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (0);
}
7171
7172 static int
7173 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7174 {
7175         int rc;
7176
7177         if (cntxt->cid > M_CTXTQID)
7178                 return (EINVAL);
7179
7180         if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7181             cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7182                 return (EINVAL);
7183
7184         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7185         if (rc)
7186                 return (rc);
7187
7188         if (sc->flags & FW_OK) {
7189                 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7190                     &cntxt->data[0]);
7191                 if (rc == 0)
7192                         goto done;
7193         }
7194
7195         /*
7196          * Read via firmware failed or wasn't even attempted.  Read directly via
7197          * the backdoor.
7198          */
7199         rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7200 done:
7201         end_synchronized_op(sc, 0);
7202         return (rc);
7203 }
7204
/*
 * Flash a firmware image supplied by userland (fw->data, fw->len) onto
 * the card.  Only allowed before the adapter is fully initialized.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
        int rc;
        uint8_t *fw_data;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
        if (rc)
                return (rc);

        /* Too late once the adapter is up and running. */
        if (sc->flags & FULL_INIT_DONE) {
                rc = EBUSY;
                goto done;
        }

        /*
         * NOTE(review): fw->len comes from userland and is not bounded
         * here before the allocation -- confirm the ioctl path enforces a
         * sane upper limit.  (With M_WAITOK, malloc(9) does not return
         * NULL, so the check below is defensive.)
         */
        fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
        if (fw_data == NULL) {
                rc = ENOMEM;
                goto done;
        }

        rc = copyin(fw->data, fw_data, fw->len);
        if (rc == 0)
                rc = -t4_load_fw(sc, fw_data, fw->len);

        free(fw_data, M_CXGBE);
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7235
/*
 * Copy a range of adapter memory out to userland through PCIe memory
 * window 'win'.  The range is validated first; data is read 32 bits at a
 * time through the window and staged in a kernel buffer (at most one
 * window aperture per iteration) before each copyout.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
        uint32_t addr, off, remaining, i, n;
        uint32_t *buf, *b;
        uint32_t mw_base, mw_aperture;
        int rc;
        uint8_t *dst;

        rc = validate_mem_range(sc, mr->addr, mr->len);
        if (rc != 0)
                return (rc);

        memwin_info(sc, win, &mw_base, &mw_aperture);
        buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
        addr = mr->addr;
        remaining = mr->len;
        dst = (void *)mr->data;

        while (remaining) {
                /* Aim the window at addr; off = addr's offset within it. */
                off = position_memwin(sc, win, addr);

                /* number of bytes that we'll copy in the inner loop */
                n = min(remaining, mw_aperture - off);
                /*
                 * NOTE(review): the 4-byte register reads assume addr and
                 * len are 4-byte aligned -- presumably guaranteed by
                 * validate_mem_range(); confirm.
                 */
                for (i = 0; i < n; i += 4)
                        *b++ = t4_read_reg(sc, mw_base + off + i);

                rc = copyout(buf, dst, n);
                if (rc != 0)
                        break;

                b = buf;
                dst += n;
                remaining -= n;
                addr += n;
        }

        free(buf, M_CXGBE);
        return (rc);
}
7276
7277 static int
7278 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7279 {
7280         int rc;
7281
7282         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7283                 return (EINVAL);
7284
7285         if (i2cd->len > 1) {
7286                 /* XXX: need fw support for longer reads in one go */
7287                 return (ENOTSUP);
7288         }
7289
7290         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7291         if (rc)
7292                 return (rc);
7293         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7294             i2cd->offset, &i2cd->data[0]);
7295         end_synchronized_op(sc, 0);
7296
7297         return (rc);
7298 }
7299
/*
 * Range check with "unset" semantics: a negative value means the caller
 * left the parameter unset and it is accepted unconditionally; otherwise
 * the value must lie within [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{

        if (val < 0)
                return (1);
        return (lo <= val && val <= hi);
}
7306
7307 static int
7308 set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7309 {
7310         int fw_subcmd, fw_type, rc;
7311
7312         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7313         if (rc)
7314                 return (rc);
7315
7316         if (!(sc->flags & FULL_INIT_DONE)) {
7317                 rc = EAGAIN;
7318                 goto done;
7319         }
7320
7321         /*
7322          * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7323          * sub-command and type are in common locations.)
7324          */
7325         if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7326                 fw_subcmd = FW_SCHED_SC_CONFIG;
7327         else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7328                 fw_subcmd = FW_SCHED_SC_PARAMS;
7329         else {
7330                 rc = EINVAL;
7331                 goto done;
7332         }
7333         if (p->type == SCHED_CLASS_TYPE_PACKET)
7334                 fw_type = FW_SCHED_TYPE_PKTSCHED;
7335         else {
7336                 rc = EINVAL;
7337                 goto done;
7338         }
7339
7340         if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7341                 /* Vet our parameters ..*/
7342                 if (p->u.config.minmax < 0) {
7343                         rc = EINVAL;
7344                         goto done;
7345                 }
7346
7347                 /* And pass the request to the firmware ...*/
7348                 rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
7349                 goto done;
7350         }
7351
7352         if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7353                 int fw_level;
7354                 int fw_mode;
7355                 int fw_rateunit;
7356                 int fw_ratemode;
7357
7358                 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7359                         fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7360                 else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7361                         fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7362                 else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7363                         fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7364                 else {
7365                         rc = EINVAL;
7366                         goto done;
7367                 }
7368
7369                 if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7370                         fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7371                 else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7372                         fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7373                 else {
7374                         rc = EINVAL;
7375                         goto done;
7376                 }
7377
7378                 if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7379                         fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7380                 else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7381                         fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7382                 else {
7383                         rc = EINVAL;
7384                         goto done;
7385                 }
7386
7387                 if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7388                         fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7389                 else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7390                         fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7391                 else {
7392                         rc = EINVAL;
7393                         goto done;
7394                 }
7395
7396                 /* Vet our parameters ... */
7397                 if (!in_range(p->u.params.channel, 0, 3) ||
7398                     !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7399                     !in_range(p->u.params.minrate, 0, 10000000) ||
7400                     !in_range(p->u.params.maxrate, 0, 10000000) ||
7401                     !in_range(p->u.params.weight, 0, 100)) {
7402                         rc = ERANGE;
7403                         goto done;
7404                 }
7405
7406                 /*
7407                  * Translate any unset parameters into the firmware's
7408                  * nomenclature and/or fail the call if the parameters
7409                  * are required ...
7410                  */
7411                 if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7412                     p->u.params.channel < 0 || p->u.params.cl < 0) {
7413                         rc = EINVAL;
7414                         goto done;
7415                 }
7416                 if (p->u.params.minrate < 0)
7417                         p->u.params.minrate = 0;
7418                 if (p->u.params.maxrate < 0) {
7419                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7420                             p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7421                                 rc = EINVAL;
7422                                 goto done;
7423                         } else
7424                                 p->u.params.maxrate = 0;
7425                 }
7426                 if (p->u.params.weight < 0) {
7427                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7428                                 rc = EINVAL;
7429                                 goto done;
7430                         } else
7431                                 p->u.params.weight = 0;
7432                 }
7433                 if (p->u.params.pktsize < 0) {
7434                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7435                             p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7436                                 rc = EINVAL;
7437                                 goto done;
7438                         } else
7439                                 p->u.params.pktsize = 0;
7440                 }
7441
7442                 /* See what the firmware thinks of the request ... */
7443                 rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7444                     fw_rateunit, fw_ratemode, p->u.params.channel,
7445                     p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7446                     p->u.params.weight, p->u.params.pktsize);
7447                 goto done;
7448         }
7449
7450         rc = EINVAL;
7451 done:
7452         end_synchronized_op(sc, 0);
7453         return (rc);
7454 }
7455
7456 static int
7457 set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
7458 {
7459         struct port_info *pi = NULL;
7460         struct sge_txq *txq;
7461         uint32_t fw_mnem, fw_queue, fw_class;
7462         int i, rc;
7463
7464         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
7465         if (rc)
7466                 return (rc);
7467
7468         if (!(sc->flags & FULL_INIT_DONE)) {
7469                 rc = EAGAIN;
7470                 goto done;
7471         }
7472
7473         if (p->port >= sc->params.nports) {
7474                 rc = EINVAL;
7475                 goto done;
7476         }
7477
7478         pi = sc->port[p->port];
7479         if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
7480                 rc = EINVAL;
7481                 goto done;
7482         }
7483
7484         /*
7485          * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
7486          * Scheduling Class in this case).
7487          */
7488         fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
7489             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
7490         fw_class = p->cl < 0 ? 0xffffffff : p->cl;
7491
7492         /*
7493          * If op.queue is non-negative, then we're only changing the scheduling
7494          * on a single specified TX queue.
7495          */
7496         if (p->queue >= 0) {
7497                 txq = &sc->sge.txq[pi->first_txq + p->queue];
7498                 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7499                 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7500                     &fw_class);
7501                 goto done;
7502         }
7503
7504         /*
7505          * Change the scheduling on all the TX queues for the
7506          * interface.
7507          */
7508         for_each_txq(pi, i, txq) {
7509                 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7510                 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7511                     &fw_class);
7512                 if (rc)
7513                         goto done;
7514         }
7515
7516         rc = 0;
7517 done:
7518         end_synchronized_op(sc, 0);
7519         return (rc);
7520 }
7521
7522 int
7523 t4_os_find_pci_capability(struct adapter *sc, int cap)
7524 {
7525         int i;
7526
7527         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7528 }
7529
7530 int
7531 t4_os_pci_save_state(struct adapter *sc)
7532 {
7533         device_t dev;
7534         struct pci_devinfo *dinfo;
7535
7536         dev = sc->dev;
7537         dinfo = device_get_ivars(dev);
7538
7539         pci_cfg_save(dev, dinfo, 0);
7540         return (0);
7541 }
7542
7543 int
7544 t4_os_pci_restore_state(struct adapter *sc)
7545 {
7546         device_t dev;
7547         struct pci_devinfo *dinfo;
7548
7549         dev = sc->dev;
7550         dinfo = device_get_ivars(dev);
7551
7552         pci_cfg_restore(dev, dinfo);
7553         return (0);
7554 }
7555
7556 void
7557 t4_os_portmod_changed(const struct adapter *sc, int idx)
7558 {
7559         struct port_info *pi = sc->port[idx];
7560         static const char *mod_str[] = {
7561                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7562         };
7563
7564         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7565                 if_printf(pi->ifp, "transceiver unplugged.\n");
7566         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7567                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7568         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7569                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7570         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7571                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7572                     mod_str[pi->mod_type]);
7573         } else {
7574                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7575                     pi->mod_type);
7576         }
7577 }
7578
7579 void
7580 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7581 {
7582         struct port_info *pi = sc->port[idx];
7583         struct ifnet *ifp = pi->ifp;
7584
7585         if (link_stat) {
7586                 pi->linkdnrc = -1;
7587                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7588                 if_link_state_change(ifp, LINK_STATE_UP);
7589         } else {
7590                 if (reason >= 0)
7591                         pi->linkdnrc = reason;
7592                 if_link_state_change(ifp, LINK_STATE_DOWN);
7593         }
7594 }
7595
7596 void
7597 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7598 {
7599         struct adapter *sc;
7600
7601         sx_slock(&t4_list_lock);
7602         SLIST_FOREACH(sc, &t4_list, link) {
7603                 /*
7604                  * func should not make any assumptions about what state sc is
7605                  * in - the only guarantee is that sc->sc_lock is a valid lock.
7606                  */
7607                 func(sc, arg);
7608         }
7609         sx_sunlock(&t4_list_lock);
7610 }
7611
/* Nexus device open: no per-open state to set up. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
7617
/* Nexus device close: nothing to tear down. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}
7623
/*
 * ioctl handler for the T4/T5 nexus character device.  Every request
 * requires PRIV_DRIVER; each CHELSIO_T4_* command is either handled
 * inline or dispatched to the matching helper with the user-supplied
 * argument structure (already copied in by the ioctl machinery).
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register access must be 4-byte aligned and within BAR0. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 4-byte write must not carry bits above bit 31. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		/* Caller's buffer must fit the full chip-specific dump. */
		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	/* Hardware filter management. */
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);

		/* MAC stats */
		t4_clr_port_stats(sc, port_id);

		/* Software queue counters, only if the queues exist. */
		pi = sc->port[port_id];
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* The port's control queue. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	/* TX scheduler configuration. */
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	/* Hardware packet tracing. */
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
7783
7784 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE (TCP offload) on a port.  Called with the
 * adapter's synchronized op held (asserted below), which serializes all
 * configuration changes.  Port membership in TOE is tracked in the
 * adapter's offload_map bitmap, keyed by port_id.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* The adapter must be fully initialized before enabling TOE. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port: nothing more to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/*
		 * Activate the TOM upper-layer driver the first time any
		 * port turns TOE on.  EAGAIN from t4_activate_uld means the
		 * TOM module isn't registered (i.e. not loaded).
		 */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Already disabled on this port: nothing to do. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
7833
7834 /*
7835  * Add an upper layer driver to the global list.
7836  */
7837 int
7838 t4_register_uld(struct uld_info *ui)
7839 {
7840         int rc = 0;
7841         struct uld_info *u;
7842
7843         sx_xlock(&t4_uld_list_lock);
7844         SLIST_FOREACH(u, &t4_uld_list, link) {
7845             if (u->uld_id == ui->uld_id) {
7846                     rc = EEXIST;
7847                     goto done;
7848             }
7849         }
7850
7851         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7852         ui->refcount = 0;
7853 done:
7854         sx_xunlock(&t4_uld_list_lock);
7855         return (rc);
7856 }
7857
7858 int
7859 t4_unregister_uld(struct uld_info *ui)
7860 {
7861         int rc = EINVAL;
7862         struct uld_info *u;
7863
7864         sx_xlock(&t4_uld_list_lock);
7865
7866         SLIST_FOREACH(u, &t4_uld_list, link) {
7867             if (u == ui) {
7868                     if (ui->refcount > 0) {
7869                             rc = EBUSY;
7870                             goto done;
7871                     }
7872
7873                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7874                     rc = 0;
7875                     goto done;
7876             }
7877         }
7878 done:
7879         sx_xunlock(&t4_uld_list_lock);
7880         return (rc);
7881 }
7882
7883 int
7884 t4_activate_uld(struct adapter *sc, int id)
7885 {
7886         int rc = EAGAIN;
7887         struct uld_info *ui;
7888
7889         ASSERT_SYNCHRONIZED_OP(sc);
7890
7891         sx_slock(&t4_uld_list_lock);
7892
7893         SLIST_FOREACH(ui, &t4_uld_list, link) {
7894                 if (ui->uld_id == id) {
7895                         rc = ui->activate(sc);
7896                         if (rc == 0)
7897                                 ui->refcount++;
7898                         goto done;
7899                 }
7900         }
7901 done:
7902         sx_sunlock(&t4_uld_list_lock);
7903
7904         return (rc);
7905 }
7906
7907 int
7908 t4_deactivate_uld(struct adapter *sc, int id)
7909 {
7910         int rc = EINVAL;
7911         struct uld_info *ui;
7912
7913         ASSERT_SYNCHRONIZED_OP(sc);
7914
7915         sx_slock(&t4_uld_list_lock);
7916
7917         SLIST_FOREACH(ui, &t4_uld_list, link) {
7918                 if (ui->uld_id == id) {
7919                         rc = ui->deactivate(sc);
7920                         if (rc == 0)
7921                                 ui->refcount--;
7922                         goto done;
7923                 }
7924         }
7925 done:
7926         sx_sunlock(&t4_uld_list_lock);
7927
7928         return (rc);
7929 }
7930 #endif
7931
7932 /*
7933  * Come up with reasonable defaults for some of the tunables, provided they're
7934  * not set by the user (in which case we'll use the values as is).
7935  */
7936 static void
7937 tweak_tunables(void)
7938 {
7939         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
7940
7941         if (t4_ntxq10g < 1)
7942                 t4_ntxq10g = min(nc, NTXQ_10G);
7943
7944         if (t4_ntxq1g < 1)
7945                 t4_ntxq1g = min(nc, NTXQ_1G);
7946
7947         if (t4_nrxq10g < 1)
7948                 t4_nrxq10g = min(nc, NRXQ_10G);
7949
7950         if (t4_nrxq1g < 1)
7951                 t4_nrxq1g = min(nc, NRXQ_1G);
7952
7953 #ifdef TCP_OFFLOAD
7954         if (t4_nofldtxq10g < 1)
7955                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7956
7957         if (t4_nofldtxq1g < 1)
7958                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7959
7960         if (t4_nofldrxq10g < 1)
7961                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7962
7963         if (t4_nofldrxq1g < 1)
7964                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7965
7966         if (t4_toecaps_allowed == -1)
7967                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7968 #else
7969         if (t4_toecaps_allowed == -1)
7970                 t4_toecaps_allowed = 0;
7971 #endif
7972
7973         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7974                 t4_tmr_idx_10g = TMR_IDX_10G;
7975
7976         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7977                 t4_pktc_idx_10g = PKTC_IDX_10G;
7978
7979         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7980                 t4_tmr_idx_1g = TMR_IDX_1G;
7981
7982         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7983                 t4_pktc_idx_1g = PKTC_IDX_1G;
7984
7985         if (t4_qsize_txq < 128)
7986                 t4_qsize_txq = 128;
7987
7988         if (t4_qsize_rxq < 128)
7989                 t4_qsize_rxq = 128;
7990         while (t4_qsize_rxq & 7)
7991                 t4_qsize_rxq++;
7992
7993         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7994 }
7995
/*
 * Module load/unload handler shared by the t4nex and t5nex drivers.
 * The static 'loaded' counter ensures the global state (SGE module
 * state, adapter list, ULD list, tracer) is set up only on the first
 * load and torn down only on the last unload, no matter how many of the
 * nexus drivers reference this handler.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first load performs the one-time setup. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		sx_init(&t4_list_lock, "T4/T5 adapters");
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload performs the teardown. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		sx_slock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_uld_list_lock);
			break;
		}
		sx_sunlock(&t4_uld_list_lock);
		sx_destroy(&t4_uld_list_lock);
#endif
		/* Likewise while any adapter is still on the list. */
		sx_slock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_list_lock);
			break;
		}
		sx_sunlock(&t4_list_lock);
		sx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
8044
/* Devclasses for the two nexus (PCI) drivers and their port drivers. */
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* t4nex: nexus driver for T4 adapters; mod_event does global (un)setup. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* t5nex: nexus driver for T5 adapters; shares mod_event with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* cxgbe: per-port network interface driver, child of t4nex. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

/* cxl: per-port network interface driver, child of t5nex. */
DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);