]> CyberLeo.Net >> Repos - FreeBSD/releng/10.0.git/blob - sys/dev/cxgbe/t4_main.c
- Copy stable/10 (r259064) to releng/10.0 as part of the
[FreeBSD/releng/10.0.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/*
 * T4 bus driver interface (newbus glue for the "t4nex" nexus device).
 * probe/attach/detach are the only device methods implemented; everything
 * else falls through to the bus defaults.
 */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)  /* softc size; newbus allocates one per device */
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
98 static driver_t cxgbe_driver = {
99         "cxgbe",
100         cxgbe_methods,
101         sizeof(struct port_info)
102 };
103
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/*
 * Character device (/dev/t4nexN) used by the userland tool (cxgbetool) for
 * register access, memory reads, filter management, etc. via t4_ioctl.
 */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
/*
 * T5 bus driver interface.  Only probe differs from T4; attach/detach are
 * shared with the T4 driver (t4_attach/t4_detach handle both chips).
 */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};


/* T5 port (cxl) interface; reuses the cxgbe device methods verbatim. */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* Same file ops as t4_cdevsw; only the device name differs. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158
159 /*
160  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock.
162  */
163 static struct sx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct sx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
169
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200 #ifdef TCP_OFFLOAD
201 #define NOFLDTXQ_10G 8
202 static int t4_nofldtxq10g = -1;
203 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204
205 #define NOFLDRXQ_10G 2
206 static int t4_nofldrxq10g = -1;
207 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208
209 #define NOFLDTXQ_1G 2
210 static int t4_nofldtxq1g = -1;
211 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212
213 #define NOFLDRXQ_1G 1
214 static int t4_nofldrxq1g = -1;
215 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216 #endif
217
218 /*
219  * Holdoff parameters for 10G and 1G ports.
220  */
221 #define TMR_IDX_10G 1
222 static int t4_tmr_idx_10g = TMR_IDX_10G;
223 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224
225 #define PKTC_IDX_10G (-1)
226 static int t4_pktc_idx_10g = PKTC_IDX_10G;
227 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228
229 #define TMR_IDX_1G 1
230 static int t4_tmr_idx_1g = TMR_IDX_1G;
231 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232
233 #define PKTC_IDX_1G (-1)
234 static int t4_pktc_idx_1g = PKTC_IDX_1G;
235 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236
237 /*
238  * Size (# of entries) of each tx and rx queue.
239  */
240 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242
243 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245
246 /*
247  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248  */
249 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
251
252 /*
253  * Configuration file.
254  */
255 #define DEFAULT_CF      "default"
256 #define FLASH_CF        "flash"
257 #define UWIRE_CF        "uwire"
258 #define FPGA_CF         "fpga"
259 static char t4_cfg_file[32] = DEFAULT_CF;
260 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261
262 /*
263  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264  * encouraged respectively).
265  */
266 static unsigned int t4_fw_install = 1;
267 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268
269 /*
270  * ASIC features that will be used.  Disable the ones you don't want so that the
271  * chip resources aren't wasted on features that will not be used.
272  */
273 static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
274 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275
276 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278
279 static int t4_toecaps_allowed = -1;
280 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281
282 static int t4_rdmacaps_allowed = 0;
283 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284
285 static int t4_iscsicaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287
288 static int t4_fcoecaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290
291 static int t5_write_combine = 0;
292 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293
/*
 * Interrupt vector and queue counts negotiated at attach time.  Filled in by
 * cfg_itype_and_nqueues() and consumed by t4_attach() when sizing the SGE.
 */
struct intrs_and_queues {
        int intr_type;          /* INTx, MSI, or MSI-X */
        int nirq;               /* Number of vectors */
        int intr_flags;
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};
309
/*
 * Driver-side state for one hardware filter (see get/set/del_filter and
 * set_filter_wr/del_filter_wr below).
 */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;      /* user-supplied filter spec */
};
319
/*
 * Bitmask selecting which MAC settings to apply; presumably the flag
 * argument of update_mac_settings() — confirm against its definition.
 */
enum {
        XGMAC_MTU       = (1 << 0),
        XGMAC_PROMISC   = (1 << 1),
        XGMAC_ALLMULTI  = (1 << 2),
        XGMAC_VLANEX    = (1 << 3),
        XGMAC_UCADDR    = (1 << 4),
        XGMAC_MCADDRS   = (1 << 5),

        XGMAC_ALL       = 0xffff        /* apply everything */
};
330
331 static int map_bars_0_and_4(struct adapter *);
332 static int map_bar_2(struct adapter *);
333 static void setup_memwin(struct adapter *);
334 static int validate_mem_range(struct adapter *, uint32_t, int);
335 static int fwmtype_to_hwmtype(int);
336 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
337     uint32_t *);
338 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
339 static uint32_t position_memwin(struct adapter *, int, uint32_t);
340 static int cfg_itype_and_nqueues(struct adapter *, int, int,
341     struct intrs_and_queues *);
342 static int prep_firmware(struct adapter *);
343 static int partition_resources(struct adapter *, const struct firmware *,
344     const char *);
345 static int get_params__pre_init(struct adapter *);
346 static int get_params__post_init(struct adapter *);
347 static int set_params__post_init(struct adapter *);
348 static void t4_set_desc(struct adapter *);
349 static void build_medialist(struct port_info *);
350 static int update_mac_settings(struct port_info *, int);
351 static int cxgbe_init_synchronized(struct port_info *);
352 static int cxgbe_uninit_synchronized(struct port_info *);
353 static int setup_intr_handlers(struct adapter *);
354 static int adapter_full_init(struct adapter *);
355 static int adapter_full_uninit(struct adapter *);
356 static int port_full_init(struct port_info *);
357 static int port_full_uninit(struct port_info *);
358 static void quiesce_eq(struct adapter *, struct sge_eq *);
359 static void quiesce_iq(struct adapter *, struct sge_iq *);
360 static void quiesce_fl(struct adapter *, struct sge_fl *);
361 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
362     driver_intr_t *, void *, char *);
363 static int t4_free_irq(struct adapter *, struct irq *);
364 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
365     unsigned int);
366 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
367 static void cxgbe_tick(void *);
368 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
369 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
370     struct mbuf *);
371 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
372 static int fw_msg_not_handled(struct adapter *, const __be64 *);
373 static int t4_sysctls(struct adapter *);
374 static int cxgbe_sysctls(struct port_info *);
375 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
376 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
377 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
378 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
379 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
380 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
381 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
382 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
383 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
384 #ifdef SBUF_DRAIN
385 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
386 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
387 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
388 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
389 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
390 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
391 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
392 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
393 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
394 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
395 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
396 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
398 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
399 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
400 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
401 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
402 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
403 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
404 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
405 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
406 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
407 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
408 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
409 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
410 #endif
411 static inline void txq_start(struct ifnet *, struct sge_txq *);
412 static uint32_t fconf_to_mode(uint32_t);
413 static uint32_t mode_to_fconf(uint32_t);
414 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
415 static int get_filter_mode(struct adapter *, uint32_t *);
416 static int set_filter_mode(struct adapter *, uint32_t);
417 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
418 static int get_filter(struct adapter *, struct t4_filter *);
419 static int set_filter(struct adapter *, struct t4_filter *);
420 static int del_filter(struct adapter *, struct t4_filter *);
421 static void clear_filter(struct filter_entry *);
422 static int set_filter_wr(struct adapter *, int);
423 static int del_filter_wr(struct adapter *, int);
424 static int get_sge_context(struct adapter *, struct t4_sge_context *);
425 static int load_fw(struct adapter *, struct t4_data *);
426 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
427 static int read_i2c(struct adapter *, struct t4_i2c_data *);
428 #ifdef TCP_OFFLOAD
429 static int toe_capability(struct port_info *, int);
430 #endif
431 static int mod_event(module_t, int, void *);
432
/*
 * PCI device-ID -> product-name tables used by t4_probe/t5_probe.  The
 * vendor ID is checked separately (PCI_VENDOR_ID_CHELSIO).
 */
struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401,  "Chelsio T520-CR"},           /* 2 x 10G */
        {0x5402,  "Chelsio T522-CR"},           /* 2 x 10G, 2 X 1G */
        {0x5403,  "Chelsio T540-CR"},           /* 4 x 10G */
        {0x5407,  "Chelsio T520-SO"},           /* 2 x 10G, nomem */
        {0x5409,  "Chelsio T520-BT"},           /* 2 x 10GBaseT */
        {0x540a,  "Chelsio T504-BT"},           /* 4 x 1G */
        {0x540d,  "Chelsio T580-CR"},           /* 2 x 40G */
        {0x540e,  "Chelsio T540-LP-CR"},        /* 4 x 10G */
        {0x5410,  "Chelsio T580-LP-CR"},        /* 2 x 40G */
        {0x5411,  "Chelsio T520-LL-CR"},        /* 2 x 10G */
        {0x5412,  "Chelsio T560-CR"},           /* 1 x 40G, 2 x 10G */
        {0x5414,  "Chelsio T580-LP-SO-CR"},     /* 2 x 40G, nomem */
#ifdef notyet
        /* IDs known but not yet supported by this driver version. */
        {0x5404,  "Chelsio T520-BCH"},
        {0x5405,  "Chelsio T540-BCH"},
        {0x5406,  "Chelsio T540-CH"},
        {0x5408,  "Chelsio T520-CX"},
        {0x540b,  "Chelsio B520-SR"},
        {0x540c,  "Chelsio B504-BT"},
        {0x540f,  "Chelsio Amsterdam"},
        {0x5413,  "Chelsio T580-CHR"},
#endif
};
476
477 #ifdef TCP_OFFLOAD
478 /*
479  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
480  * exactly the same for both rxq and ofld_rxq.
481  */
482 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
483 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
484 #endif
485
486 /* No easy way to include t4_msg.h before adapter.h so we check this way */
487 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
488 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
489
490 static int
491 t4_probe(device_t dev)
492 {
493         int i;
494         uint16_t v = pci_get_vendor(dev);
495         uint16_t d = pci_get_device(dev);
496         uint8_t f = pci_get_function(dev);
497
498         if (v != PCI_VENDOR_ID_CHELSIO)
499                 return (ENXIO);
500
501         /* Attach only to PF0 of the FPGA */
502         if (d == 0xa000 && f != 0)
503                 return (ENXIO);
504
505         for (i = 0; i < nitems(t4_pciids); i++) {
506                 if (d == t4_pciids[i].device) {
507                         device_set_desc(dev, t4_pciids[i].desc);
508                         return (BUS_PROBE_DEFAULT);
509                 }
510         }
511
512         return (ENXIO);
513 }
514
515 static int
516 t5_probe(device_t dev)
517 {
518         int i;
519         uint16_t v = pci_get_vendor(dev);
520         uint16_t d = pci_get_device(dev);
521         uint8_t f = pci_get_function(dev);
522
523         if (v != PCI_VENDOR_ID_CHELSIO)
524                 return (ENXIO);
525
526         /* Attach only to PF0 of the FPGA */
527         if (d == 0xb000 && f != 0)
528                 return (ENXIO);
529
530         for (i = 0; i < nitems(t5_pciids); i++) {
531                 if (d == t5_pciids[i].device) {
532                         device_set_desc(dev, t5_pciids[i].desc);
533                         return (BUS_PROBE_DEFAULT);
534                 }
535         }
536
537         return (ENXIO);
538 }
539
/*
 * Attach method for the T4/T5 nexus device.  Brings the adapter up in a
 * fixed order: PCI config, locks and global list membership, BAR mapping,
 * CPL/firmware-message handler tables, adapter prep, memory windows and the
 * nexus character device (these come first so the userland tool works even
 * in recovery mode), firmware prep and parameter exchange, per-port VI
 * allocation, interrupt/queue sizing, SGE array allocation, and finally
 * child (port) device attach.  On any failure after the cdev exists the
 * adapter is left in "recovery mode" (rc forced to 0) so it can still be
 * examined and reflashed from userland.
 */
static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, n10g, n1g, rqidx, tqidx;
        struct intrs_and_queues iaq;
        struct sge *s;
#ifdef TCP_OFFLOAD
        int ofld_rqidx, ofld_tqidx;
#endif

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Enable bus mastering and tune PCIe: 4KB MRRS + relaxed ordering. */
        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                uint32_t v;

                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
                v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
                pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
        }

        sc->traceq = -1;
        /*
         * NOTE(review): ifp_lockname is filled in *after* mtx_init; this is
         * only correct if mtx_init stores the name pointer rather than
         * copying the string — confirm against mutex(9).
         */
        mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
        snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
            device_get_nameunit(dev));

        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
        /* Publish this adapter on the global list (lock order: see above). */
        sx_xlock(&t4_list_lock);
        SLIST_INSERT_HEAD(&t4_list, sc, link);
        sx_xunlock(&t4_list_lock);

        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

        rc = map_bars_0_and_4(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * This is the real PF# to which we're attaching.  Works from within PCI
         * passthrough environments too, where pci_get_function() could return a
         * different PF# depending on the passthrough configuration.  We need to
         * use the real PF# in all our communication with the firmware.
         */
        sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
        sc->mbox = sc->pf;

        /* Default every handler slot to a "not handled" stub. */
        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
        sc->an_handler = an_not_handled;
        for (i = 0; i < nitems(sc->cpl_handler); i++)
                sc->cpl_handler[i] = cpl_not_handled;
        for (i = 0; i < nitems(sc->fw_msg_handler); i++)
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
        t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
        t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
        t4_init_sge_cpl_handlers(sc);

        /* Prepare the adapter for operation */
        rc = -t4_prep_adapter(sc);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /*
         * Do this really early, with the memory windows set up even before the
         * character device.  The userland tool's register i/o and mem read
         * will work even in "recovery mode".
         */
        setup_memwin(sc);
        sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
            device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
            device_get_nameunit(dev));
        if (sc->cdev == NULL)
                device_printf(dev, "failed to create nexus char device.\n");
        else
                sc->cdev->si_drv1 = sc;

        /* Go no further if recovery mode has been requested. */
        if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
                device_printf(dev, "recovery mode.\n");
                goto done;
        }

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = map_bar_2(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;

                /* Allocate the vi and initialize parameters like mac addr */
                rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        /* Undo this port's allocation; earlier ports are
                         * cleaned up by t4_detach from the done: path. */
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
                sc->chan_map[pi->tx_chan] = i;

                /* 40G ports use the 10G holdoff settings. */
                if (is_10G_port(pi) || is_40G_port(pi)) {
                        n10g++;
                        pi->tmr_idx = t4_tmr_idx_10g;
                        pi->pktc_idx = t4_pktc_idx_10g;
                } else {
                        n1g++;
                        pi->tmr_idx = t4_tmr_idx_1g;
                        pi->pktc_idx = t4_pktc_idx_1g;
                }

                pi->xact_addr_filt = -1;
                pi->linkdnrc = -1;

                pi->qsize_rxq = t4_qsize_rxq;
                pi->qsize_txq = t4_qsize_txq;

                /* Child device name depends on chip generation: cxgbe/cxl. */
                pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                device_set_softc(pi->dev, pi);
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
        rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;
        sc->flags |= iaq.intr_flags;

        /* Size the SGE queue arrays from the per-class queue counts. */
        s = &sc->sge;
        s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {

                s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
                    M_CXGBE, M_ZERO | M_WAITOK);
                s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif

        /* M_WAITOK allocations below cannot fail; no NULL checks needed. */
        s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_init_l2t(sc, M_WAITOK);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
        ofld_rqidx = ofld_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                if (pi == NULL)
                        continue;

                pi->first_rxq = rqidx;
                pi->first_txq = tqidx;
                if (is_10G_port(pi) || is_40G_port(pi)) {
                        pi->nrxq = iaq.nrxq10g;
                        pi->ntxq = iaq.ntxq10g;
                } else {
                        pi->nrxq = iaq.nrxq1g;
                        pi->ntxq = iaq.ntxq1g;
                }

                rqidx += pi->nrxq;
                tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
                if (is_offload(sc)) {
                        pi->first_ofld_rxq = ofld_rqidx;
                        pi->first_ofld_txq = ofld_tqidx;
                        if (is_10G_port(pi) || is_40G_port(pi)) {
                                pi->nofldrxq = iaq.nofldrxq10g;
                                pi->nofldtxq = iaq.nofldtxq10g;
                        } else {
                                pi->nofldrxq = iaq.nofldrxq1g;
                                pi->nofldtxq = iaq.nofldtxq1g;
                        }
                        ofld_rqidx += pi->nofldrxq;
                        ofld_tqidx += pi->nofldtxq;
                }
#endif
        }

        rc = setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        /* Attach the cxgbe/cxl children added in the first pass. */
        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.pci.width, sc->params.nports, sc->intr_count,
            sc->intr_type == INTR_MSIX ? "MSI-X" :
            (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
            sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

        t4_set_desc(sc);

done:
        if (rc != 0 && sc->cdev) {
                /* cdev was created and so cxgbetool works; recover that way. */
                device_printf(dev,
                    "error during attach, adapter is now in recovery mode.\n");
                rc = 0;
        }

        /* t4_detach is idempotent and tears down whatever was set up above. */
        if (rc != 0)
                t4_detach(dev);
        else
                t4_sysctls(sc);

        return (rc);
}
841
842 /*
843  * Idempotent
844  */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	/* Quiesce interrupts before any state is torn down. */
	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	/* Remove the nexus cdev so cxgbetool can no longer reach us. */
	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	/* Detach the cxgbe (port) children before freeing their softc. */
	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	/* Free each port's virtual interface and its port_info. */
	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	/* Say goodbye to the firmware while the mailbox still works. */
	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	/* Release the BARs mapped in map_bars_0_and_4/map_bar_2. */
	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	/* An initialized sc_lock implies the adapter is on t4_list. */
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	/* Zero the softc so a repeat detach finds nothing left to undo. */
	bzero(sc, sizeof(*sc));

	return (0);
}
938
939
940 static int
941 cxgbe_probe(device_t dev)
942 {
943         char buf[128];
944         struct port_info *pi = device_get_softc(dev);
945
946         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
947         device_set_desc_copy(dev, buf);
948
949         return (BUS_PROBE_DEFAULT);
950 }
951
952 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
953     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
954     IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
955 #define T4_CAP_ENABLE (T4_CAP)
956
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	/* Driver entry points for this ifnet. */
	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	/* Advertise (and enable by default) all hardware offloads. */
	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	/* Register for vlan_config events; deregistered in cxgbe_detach. */
	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	/* Report the queue configuration chosen during adapter attach. */
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
1014
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait out any in-flight synchronized operation, then claim it. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout under the port lock, drain it outside. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release the busy marker taken above and wake any waiters. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1064
1065 static void
1066 cxgbe_init(void *arg)
1067 {
1068         struct port_info *pi = arg;
1069         struct adapter *sc = pi->adapter;
1070
1071         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1072                 return;
1073         cxgbe_init_synchronized(pi);
1074         end_synchronized_op(sc, 0);
1075 }
1076
1077 static int
1078 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1079 {
1080         int rc = 0, mtu, flags;
1081         struct port_info *pi = ifp->if_softc;
1082         struct adapter *sc = pi->adapter;
1083         struct ifreq *ifr = (struct ifreq *)data;
1084         uint32_t mask;
1085
1086         switch (cmd) {
1087         case SIOCSIFMTU:
1088                 mtu = ifr->ifr_mtu;
1089                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1090                         return (EINVAL);
1091
1092                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1093                 if (rc)
1094                         return (rc);
1095                 ifp->if_mtu = mtu;
1096                 if (pi->flags & PORT_INIT_DONE) {
1097                         t4_update_fl_bufsize(ifp);
1098                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1099                                 rc = update_mac_settings(pi, XGMAC_MTU);
1100                 }
1101                 end_synchronized_op(sc, 0);
1102                 break;
1103
1104         case SIOCSIFFLAGS:
1105                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1106                 if (rc)
1107                         return (rc);
1108
1109                 if (ifp->if_flags & IFF_UP) {
1110                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1111                                 flags = pi->if_flags;
1112                                 if ((ifp->if_flags ^ flags) &
1113                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1114                                         rc = update_mac_settings(pi,
1115                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1116                                 }
1117                         } else
1118                                 rc = cxgbe_init_synchronized(pi);
1119                         pi->if_flags = ifp->if_flags;
1120                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1121                         rc = cxgbe_uninit_synchronized(pi);
1122                 end_synchronized_op(sc, 0);
1123                 break;
1124
1125         case SIOCADDMULTI:      
1126         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1127                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1128                 if (rc)
1129                         return (rc);
1130                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1131                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
1132                 end_synchronized_op(sc, LOCK_HELD);
1133                 break;
1134
1135         case SIOCSIFCAP:
1136                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1137                 if (rc)
1138                         return (rc);
1139
1140                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1141                 if (mask & IFCAP_TXCSUM) {
1142                         ifp->if_capenable ^= IFCAP_TXCSUM;
1143                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1144
1145                         if (IFCAP_TSO4 & ifp->if_capenable &&
1146                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1147                                 ifp->if_capenable &= ~IFCAP_TSO4;
1148                                 if_printf(ifp,
1149                                     "tso4 disabled due to -txcsum.\n");
1150                         }
1151                 }
1152                 if (mask & IFCAP_TXCSUM_IPV6) {
1153                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1154                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1155
1156                         if (IFCAP_TSO6 & ifp->if_capenable &&
1157                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1158                                 ifp->if_capenable &= ~IFCAP_TSO6;
1159                                 if_printf(ifp,
1160                                     "tso6 disabled due to -txcsum6.\n");
1161                         }
1162                 }
1163                 if (mask & IFCAP_RXCSUM)
1164                         ifp->if_capenable ^= IFCAP_RXCSUM;
1165                 if (mask & IFCAP_RXCSUM_IPV6)
1166                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1167
1168                 /*
1169                  * Note that we leave CSUM_TSO alone (it is always set).  The
1170                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1171                  * sending a TSO request our way, so it's sufficient to toggle
1172                  * IFCAP_TSOx only.
1173                  */
1174                 if (mask & IFCAP_TSO4) {
1175                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1176                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1177                                 if_printf(ifp, "enable txcsum first.\n");
1178                                 rc = EAGAIN;
1179                                 goto fail;
1180                         }
1181                         ifp->if_capenable ^= IFCAP_TSO4;
1182                 }
1183                 if (mask & IFCAP_TSO6) {
1184                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1185                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1186                                 if_printf(ifp, "enable txcsum6 first.\n");
1187                                 rc = EAGAIN;
1188                                 goto fail;
1189                         }
1190                         ifp->if_capenable ^= IFCAP_TSO6;
1191                 }
1192                 if (mask & IFCAP_LRO) {
1193 #if defined(INET) || defined(INET6)
1194                         int i;
1195                         struct sge_rxq *rxq;
1196
1197                         ifp->if_capenable ^= IFCAP_LRO;
1198                         for_each_rxq(pi, i, rxq) {
1199                                 if (ifp->if_capenable & IFCAP_LRO)
1200                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1201                                 else
1202                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1203                         }
1204 #endif
1205                 }
1206 #ifdef TCP_OFFLOAD
1207                 if (mask & IFCAP_TOE) {
1208                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1209
1210                         rc = toe_capability(pi, enable);
1211                         if (rc != 0)
1212                                 goto fail;
1213
1214                         ifp->if_capenable ^= mask;
1215                 }
1216 #endif
1217                 if (mask & IFCAP_VLAN_HWTAGGING) {
1218                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1219                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1220                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
1221                 }
1222                 if (mask & IFCAP_VLAN_MTU) {
1223                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1224
1225                         /* Need to find out how to disable auto-mtu-inflation */
1226                 }
1227                 if (mask & IFCAP_VLAN_HWTSO)
1228                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1229                 if (mask & IFCAP_VLAN_HWCSUM)
1230                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1231
1232 #ifdef VLAN_CAPABILITIES
1233                 VLAN_CAPABILITIES(ifp);
1234 #endif
1235 fail:
1236                 end_synchronized_op(sc, 0);
1237                 break;
1238
1239         case SIOCSIFMEDIA:
1240         case SIOCGIFMEDIA:
1241                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1242                 break;
1243
1244         default:
1245                 rc = ether_ioctl(ifp, cmd, data);
1246         }
1247
1248         return (rc);
1249 }
1250
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	/* Drop immediately if the link is down. */
	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Spread flows across this port's tx queues using the flowid. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		/* Transmit the held-up mbuf (if any) ahead of the queue. */
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1317
1318 static void
1319 cxgbe_qflush(struct ifnet *ifp)
1320 {
1321         struct port_info *pi = ifp->if_softc;
1322         struct sge_txq *txq;
1323         int i;
1324         struct mbuf *m;
1325
1326         /* queues do not exist if !PORT_INIT_DONE. */
1327         if (pi->flags & PORT_INIT_DONE) {
1328                 for_each_txq(pi, i, txq) {
1329                         TXQ_LOCK(txq);
1330                         m_freem(txq->m);
1331                         txq->m = NULL;
1332                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1333                                 m_freem(m);
1334                         TXQ_UNLOCK(txq);
1335                 }
1336         }
1337         if_qflush(ifp);
1338 }
1339
1340 static int
1341 cxgbe_media_change(struct ifnet *ifp)
1342 {
1343         struct port_info *pi = ifp->if_softc;
1344
1345         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1346
1347         return (EOPNOTSUPP);
1348 }
1349
1350 static void
1351 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1352 {
1353         struct port_info *pi = ifp->if_softc;
1354         struct ifmedia_entry *cur = pi->media.ifm_cur;
1355         int speed = pi->link_cfg.speed;
1356         int data = (pi->port_type << 8) | pi->mod_type;
1357
1358         if (cur->ifm_data != data) {
1359                 build_medialist(pi);
1360                 cur = pi->media.ifm_cur;
1361         }
1362
1363         ifmr->ifm_status = IFM_AVALID;
1364         if (!pi->link_cfg.link_ok)
1365                 return;
1366
1367         ifmr->ifm_status |= IFM_ACTIVE;
1368
1369         /* active and current will differ iff current media is autoselect. */
1370         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1371                 return;
1372
1373         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1374         if (speed == SPEED_10000)
1375                 ifmr->ifm_active |= IFM_10G_T;
1376         else if (speed == SPEED_1000)
1377                 ifmr->ifm_active |= IFM_1000_T;
1378         else if (speed == SPEED_100)
1379                 ifmr->ifm_active |= IFM_100_TX;
1380         else if (speed == SPEED_10)
1381                 ifmr->ifm_active |= IFM_10_T;
1382         else
1383                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1384                             speed));
1385 }
1386
1387 void
1388 t4_fatal_err(struct adapter *sc)
1389 {
1390         t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1391         t4_intr_disable(sc);
1392         log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1393             device_get_nameunit(sc->dev));
1394 }
1395
static int
map_bars_0_and_4(struct adapter *sc)
{
	/* BAR0: chip registers. */
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* The kernel doorbell (in BAR0) is always usable. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	/* BAR4: MSI-X. */
	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	/* On failure, t4_detach() releases whatever was mapped here. */
	return (0);
}
1421
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	/* BAR2: userspace doorbells. */
	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* Prefer the write-combined doorbell modes. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				/* Not fatal: DOORBELL_UDB remains usable. */
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1477
/*
 * PCIe memory window layouts (base/aperture pairs).  Per the macro names,
 * windows 0 and 1 are common to T4 and T5; window 2 differs between chips.
 */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1489
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	/* Program each window's base address and encoded aperture size. */
	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1528
1529 /*
1530  * Verify that the memory range specified by the addr/len pair is valid and lies
1531  * entirely within a single region (EDCx or MCx).
1532  */
1533 static int
1534 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1535 {
1536         uint32_t em, addr_len, maddr, mlen;
1537
1538         /* Memory can only be accessed in naturally aligned 4 byte units */
1539         if (addr & 3 || len & 3 || len == 0)
1540                 return (EINVAL);
1541
1542         /* Enabled memories */
1543         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1544         if (em & F_EDRAM0_ENABLE) {
1545                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1546                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1547                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1548                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549                     addr + len <= maddr + mlen)
1550                         return (0);
1551         }
1552         if (em & F_EDRAM1_ENABLE) {
1553                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1554                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1555                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1556                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1557                     addr + len <= maddr + mlen)
1558                         return (0);
1559         }
1560         if (em & F_EXT_MEM_ENABLE) {
1561                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1562                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1563                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1564                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1565                     addr + len <= maddr + mlen)
1566                         return (0);
1567         }
1568         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1569                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1570                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1571                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1572                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1573                     addr + len <= maddr + mlen)
1574                         return (0);
1575         }
1576
1577         return (EFAULT);
1578 }
1579
1580 static int
1581 fwmtype_to_hwmtype(int mtype)
1582 {
1583
1584         switch (mtype) {
1585         case FW_MEMTYPE_EDC0:
1586                 return (MEM_EDC0);
1587         case FW_MEMTYPE_EDC1:
1588                 return (MEM_EDC1);
1589         case FW_MEMTYPE_EXTMEM:
1590                 return (MEM_MC0);
1591         case FW_MEMTYPE_EXTMEM1:
1592                 return (MEM_MC1);
1593         default:
1594                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1595         }
1596 }
1597
1598 /*
1599  * Verify that the memory range specified by the memtype/offset/len pair is
1600  * valid and lies entirely within the memtype specified.  The global address of
1601  * the start of the range is returned in addr.
1602  */
1603 static int
1604 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1605     uint32_t *addr)
1606 {
1607         uint32_t em, addr_len, maddr, mlen;
1608
1609         /* Memory can only be accessed in naturally aligned 4 byte units */
1610         if (off & 3 || len & 3 || len == 0)
1611                 return (EINVAL);
1612
1613         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1614         switch (fwmtype_to_hwmtype(mtype)) {
1615         case MEM_EDC0:
1616                 if (!(em & F_EDRAM0_ENABLE))
1617                         return (EINVAL);
1618                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1619                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1620                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1621                 break;
1622         case MEM_EDC1:
1623                 if (!(em & F_EDRAM1_ENABLE))
1624                         return (EINVAL);
1625                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1626                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1627                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1628                 break;
1629         case MEM_MC:
1630                 if (!(em & F_EXT_MEM_ENABLE))
1631                         return (EINVAL);
1632                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1633                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1634                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1635                 break;
1636         case MEM_MC1:
1637                 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1638                         return (EINVAL);
1639                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1640                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1641                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1642                 break;
1643         default:
1644                 return (EINVAL);
1645         }
1646
1647         if (mlen > 0 && off < mlen && off + len <= mlen) {
1648                 *addr = maddr + off;    /* global address */
1649                 return (0);
1650         }
1651
1652         return (EFAULT);
1653 }
1654
1655 static void
1656 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1657 {
1658         const struct memwin *mw;
1659
1660         if (is_t4(sc)) {
1661                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1662                     ("%s: incorrect memwin# (%d)", __func__, win));
1663                 mw = &t4_memwin[win];
1664         } else {
1665                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1666                     ("%s: incorrect memwin# (%d)", __func__, win));
1667                 mw = &t5_memwin[win];
1668         }
1669
1670         if (base != NULL)
1671                 *base = mw->base;
1672         if (aperture != NULL)
1673                 *aperture = mw->aperture;
1674 }
1675
1676 /*
1677  * Positions the memory window such that it can be used to access the specified
1678  * address in the chip's address space.  The return value is the offset of addr
1679  * from the start of the window.
1680  */
1681 static uint32_t
1682 position_memwin(struct adapter *sc, int n, uint32_t addr)
1683 {
1684         uint32_t start, pf;
1685         uint32_t reg;
1686
1687         KASSERT(n >= 0 && n <= 3,
1688             ("%s: invalid window %d.", __func__, n));
1689         KASSERT((addr & 3) == 0,
1690             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1691
1692         if (is_t4(sc)) {
1693                 pf = 0;
1694                 start = addr & ~0xf;    /* start must be 16B aligned */
1695         } else {
1696                 pf = V_PFNUM(sc->pf);
1697                 start = addr & ~0x7f;   /* start must be 128B aligned */
1698         }
1699         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1700
1701         t4_write_reg(sc, reg, start | pf);
1702         t4_read_reg(sc, reg);
1703
1704         return (addr - start);
1705 }
1706
1707 static int
1708 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1709     struct intrs_and_queues *iaq)
1710 {
1711         int rc, itype, navail, nrxq10g, nrxq1g, n;
1712         int nofldrxq10g = 0, nofldrxq1g = 0;
1713
1714         bzero(iaq, sizeof(*iaq));
1715
1716         iaq->ntxq10g = t4_ntxq10g;
1717         iaq->ntxq1g = t4_ntxq1g;
1718         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1719         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1720 #ifdef TCP_OFFLOAD
1721         if (is_offload(sc)) {
1722                 iaq->nofldtxq10g = t4_nofldtxq10g;
1723                 iaq->nofldtxq1g = t4_nofldtxq1g;
1724                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1725                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1726         }
1727 #endif
1728
1729         for (itype = INTR_MSIX; itype; itype >>= 1) {
1730
1731                 if ((itype & t4_intr_types) == 0)
1732                         continue;       /* not allowed */
1733
1734                 if (itype == INTR_MSIX)
1735                         navail = pci_msix_count(sc->dev);
1736                 else if (itype == INTR_MSI)
1737                         navail = pci_msi_count(sc->dev);
1738                 else
1739                         navail = 1;
1740 restart:
1741                 if (navail == 0)
1742                         continue;
1743
1744                 iaq->intr_type = itype;
1745                 iaq->intr_flags = 0;
1746
1747                 /*
1748                  * Best option: an interrupt vector for errors, one for the
1749                  * firmware event queue, and one each for each rxq (NIC as well
1750                  * as offload).
1751                  */
1752                 iaq->nirq = T4_EXTRA_INTR;
1753                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1754                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1755                 if (iaq->nirq <= navail &&
1756                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1757                         iaq->intr_flags |= INTR_DIRECT;
1758                         goto allocate;
1759                 }
1760
1761                 /*
1762                  * Second best option: an interrupt vector for errors, one for
1763                  * the firmware event queue, and one each for either NIC or
1764                  * offload rxq's.
1765                  */
1766                 iaq->nirq = T4_EXTRA_INTR;
1767                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1768                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1769                 if (iaq->nirq <= navail &&
1770                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1771                         goto allocate;
1772
1773                 /*
1774                  * Next best option: an interrupt vector for errors, one for the
1775                  * firmware event queue, and at least one per port.  At this
1776                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1777                  * what's available to us.
1778                  */
1779                 iaq->nirq = T4_EXTRA_INTR;
1780                 iaq->nirq += n10g + n1g;
1781                 if (iaq->nirq <= navail) {
1782                         int leftover = navail - iaq->nirq;
1783
1784                         if (n10g > 0) {
1785                                 int target = max(nrxq10g, nofldrxq10g);
1786
1787                                 n = 1;
1788                                 while (n < target && leftover >= n10g) {
1789                                         leftover -= n10g;
1790                                         iaq->nirq += n10g;
1791                                         n++;
1792                                 }
1793                                 iaq->nrxq10g = min(n, nrxq10g);
1794 #ifdef TCP_OFFLOAD
1795                                 if (is_offload(sc))
1796                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1797 #endif
1798                         }
1799
1800                         if (n1g > 0) {
1801                                 int target = max(nrxq1g, nofldrxq1g);
1802
1803                                 n = 1;
1804                                 while (n < target && leftover >= n1g) {
1805                                         leftover -= n1g;
1806                                         iaq->nirq += n1g;
1807                                         n++;
1808                                 }
1809                                 iaq->nrxq1g = min(n, nrxq1g);
1810 #ifdef TCP_OFFLOAD
1811                                 if (is_offload(sc))
1812                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1813 #endif
1814                         }
1815
1816                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1817                                 goto allocate;
1818                 }
1819
1820                 /*
1821                  * Least desirable option: one interrupt vector for everything.
1822                  */
1823                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1824 #ifdef TCP_OFFLOAD
1825                 if (is_offload(sc))
1826                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1827 #endif
1828
1829 allocate:
1830                 navail = iaq->nirq;
1831                 rc = 0;
1832                 if (itype == INTR_MSIX)
1833                         rc = pci_alloc_msix(sc->dev, &navail);
1834                 else if (itype == INTR_MSI)
1835                         rc = pci_alloc_msi(sc->dev, &navail);
1836
1837                 if (rc == 0) {
1838                         if (navail == iaq->nirq)
1839                                 return (0);
1840
1841                         /*
1842                          * Didn't get the number requested.  Use whatever number
1843                          * the kernel is willing to allocate (it's in navail).
1844                          */
1845                         device_printf(sc->dev, "fewer vectors than requested, "
1846                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1847                             itype, iaq->nirq, navail);
1848                         pci_release_msi(sc->dev);
1849                         goto restart;
1850                 }
1851
1852                 device_printf(sc->dev,
1853                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1854                     itype, rc, iaq->nirq, navail);
1855         }
1856
1857         device_printf(sc->dev,
1858             "failed to find a usable interrupt type.  "
1859             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1860             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1861
1862         return (ENXIO);
1863 }
1864
/*
 * Assemble the 32-bit firmware version word (major.minor.micro.build) for the
 * firmware bundled with the driver for the given chip generation (T4, T5).
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface (API) version the driver was compiled against, per chip/intf. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1871
/*
 * Per-chip firmware bookkeeping: the config-file KLD name, the firmware
 * module name, and the firmware header (version + interface versions) the
 * driver was compiled against.  Consulted by find_fw_info().
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		/* T4 family */
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		/* T5 family */
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1912
1913 static struct fw_info *
1914 find_fw_info(int chip)
1915 {
1916         int i;
1917
1918         for (i = 0; i < nitems(fw_info); i++) {
1919                 if (fw_info[i].chip == chip)
1920                         return (&fw_info[i]);
1921         }
1922         return (NULL);
1923 }
1924
1925 /*
1926  * Is the given firmware API compatible with the one the driver was compiled
1927  * with?
1928  */
1929 static int
1930 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1931 {
1932
1933         /* short circuit if it's the exact same firmware version */
1934         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1935                 return (1);
1936
1937         /*
1938          * XXX: Is this too conservative?  Perhaps I should limit this to the
1939          * features that are supported in the driver.
1940          */
1941 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1942         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1943             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1944             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1945                 return (1);
1946 #undef SAME_INTF
1947
1948         return (0);
1949 }
1950
1951 /*
1952  * The firmware in the KLD is usable, but should it be installed?  This routine
1953  * explains itself in detail if it indicates the KLD firmware should be
1954  * installed.
1955  */
1956 static int
1957 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1958 {
1959         const char *reason;
1960
1961         if (!card_fw_usable) {
1962                 reason = "incompatible or unusable";
1963                 goto install;
1964         }
1965
1966         if (k > c) {
1967                 reason = "older than the version bundled with this driver";
1968                 goto install;
1969         }
1970
1971         if (t4_fw_install == 2 && k != c) {
1972                 reason = "different than the version bundled with this driver";
1973                 goto install;
1974         }
1975
1976         return (0);
1977
1978 install:
1979         if (t4_fw_install == 0) {
1980                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1981                     "but the driver is prohibited from installing a different "
1982                     "firmware on the card.\n",
1983                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1984                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1985
1986                 return (0);
1987         }
1988
1989         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1990             "installing firmware %u.%u.%u.%u on card.\n",
1991             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1992             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1993             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1994             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1995
1996         return (1);
1997 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/*
	 * Contact firmware.  On success rc identifies the master PF (it is
	 * compared against our mailbox below); state is the device's state.
	 */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	pf = rc;
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		/* Flash the KLD's firmware onto the card. */
		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		/* No usable firmware anywhere; report all three versions. */
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		/* On timeout/IO error the fw may be wedged; skip the goodbye. */
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Someone else configured the chip; just record that fact. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2178
/* Build the mnemonic+index word for a device-wide firmware parameter query. */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
/* Same, for a per-PF/VF firmware parameter. */
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2185
/*
 * Partition chip resources for use between various PFs, VFs, etc.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
	const struct firmware *cfg = NULL;
	int rc = 0;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum;

	/*
	 * Figure out what configuration file to use.  Pick the default config
	 * file for the card if the user hasn't specified one explicitly.
	 */
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
		/* Card specific overrides go here. */
		if (pci_get_device(sc->dev) == 0x440a)
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
		if (is_fpga(sc))
			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
	}

	/*
	 * We need to load another module if the profile is anything except
	 * "default" or "flash".
	 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		/* Module name is e.g. "t4fw_cfg_<profile>". */
		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			/* Fall back to default config, or flash as last resort. */
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
			    "Will try to use the config on the card, if any.\n",
			    __func__, mtype, moff, cflen, rc);
			goto use_config_on_flash;
		}

		/*
		 * Copy the config file into chip memory through memory
		 * window 2, repositioning the window as needed.
		 */
		memwin_info(sc, 2, &mw_base, &mw_aperture);
		while (cflen) {
			off = position_memwin(sc, 2, addr);
			n = min(cflen, mw_aperture - off);
			for (i = 0; i < n; i += 4)
				t4_write_reg(sc, mw_base + off + i, *cfdata++);
			cflen -= n;
			addr += n;
		}
	} else {
use_config_on_flash:
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
	}

	/* Tell the firmware to parse the config file we just selected. */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

/*
 * Mask each capability field with the corresponding tunable and cache the
 * result in the softc.  NOTE(review): both steps use htobe16 -- the second
 * looks like it should be be16toh, but the two are the same byte swap on
 * all supported platforms, so behavior is unaffected.
 */
#define LIMIT_CAPS(x) do { \
	caps.x &= htobe16(t4_##x##_allowed); \
	sc->x = htobe16(caps.x); \
} while (0)

	/*
	 * Let the firmware know what features will (not) be used so it can tune
	 * things accordingly.
	 */
	LIMIT_CAPS(linkcaps);
	LIMIT_CAPS(niccaps);
	LIMIT_CAPS(toecaps);
	LIMIT_CAPS(rdmacaps);
	LIMIT_CAPS(iscsicaps);
	LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
	}
done:
	if (cfg != NULL)
		firmware_put(cfg, FIRMWARE_UNLOAD);
	return (rc);
}
2372
/*
 * Retrieve parameters that are needed (or nice to have) very early.
 */
static int
get_params__pre_init(struct adapter *sc)
{
        int rc;
        uint32_t param[2], val[2];
        struct fw_devlog_cmd cmd;
        struct devlog_params *dlog = &sc->params.devlog;

        /*
         * Ask the firmware for the port bitmap and the core clock; both are
         * needed before the rest of the adapter can be set up.
         */
        param[0] = FW_PARAM_DEV(PORTVEC);
        param[1] = FW_PARAM_DEV(CCLK);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (pre_init): %d.\n", rc);
                return (rc);
        }

        sc->params.portvec = val[0];
        sc->params.nports = bitcount32(val[0]); /* one bit per port */
        sc->params.vpd.cclk = val[1];

        /* Read device log parameters. */
        bzero(&cmd, sizeof(cmd));
        cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        cmd.retval_len16 = htobe32(FW_LEN16(cmd));
        rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get devlog parameters: %d.\n", rc);
                bzero(dlog, sizeof (*dlog));
                rc = 0; /* devlog isn't critical for device operation */
        } else {
                /* memtype and memaddr16 are packed into one 32-bit field. */
                val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
                dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
                dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
                dlog->size = be32toh(cmd.memsize_devlog);
        }

        return (rc);
}
2417
/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
        int rc;
        uint32_t param[7], val[7];
        struct fw_caps_config_cmd caps;

        /* Queue, filter, and L2T ranges assigned to this function. */
        param[0] = FW_PARAM_PFVF(IQFLINT_START);
        param[1] = FW_PARAM_PFVF(EQ_START);
        param[2] = FW_PARAM_PFVF(FILTER_START);
        param[3] = FW_PARAM_PFVF(FILTER_END);
        param[4] = FW_PARAM_PFVF(L2T_START);
        param[5] = FW_PARAM_PFVF(L2T_END);
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to query parameters (post_init): %d.\n", rc);
                return (rc);
        }

        /* The *_END values are inclusive, hence the +1 in the sizes below. */
        sc->sge.iq_start = val[0];
        sc->sge.eq_start = val[1];
        sc->tids.ftid_base = val[2];
        sc->tids.nftids = val[3] - val[2] + 1;
        sc->vres.l2t.start = val[4];
        sc->vres.l2t.size = val[5] - val[4] + 1;
        KASSERT(sc->vres.l2t.size <= L2T_SIZE,
            ("%s: L2 table size (%u) larger than expected (%u)",
            __func__, sc->vres.l2t.size, L2T_SIZE));

        /* get capabilities */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to get card capabilities: %d.\n", rc);
                return (rc);
        }

        if (caps.toecaps) {
                /* query offload-related parameters */
                param[0] = FW_PARAM_DEV(NTID);
                param[1] = FW_PARAM_PFVF(SERVER_START);
                param[2] = FW_PARAM_PFVF(SERVER_END);
                param[3] = FW_PARAM_PFVF(TDDP_START);
                param[4] = FW_PARAM_PFVF(TDDP_END);
                param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query TOE parameters: %d.\n", rc);
                        return (rc);
                }
                sc->tids.ntids = val[0];
                /* Active tids: half the tid space, capped at MAX_ATIDS. */
                sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
                sc->tids.stid_base = val[1];
                sc->tids.nstids = val[2] - val[1] + 1;
                sc->vres.ddp.start = val[3];
                sc->vres.ddp.size = val[4] - val[3] + 1;
                sc->params.ofldq_wr_cred = val[5];
                sc->params.offload = 1;
        }
        if (caps.rdmacaps) {
                /* RDMA resource ranges, queried six at a time. */
                param[0] = FW_PARAM_PFVF(STAG_START);
                param[1] = FW_PARAM_PFVF(STAG_END);
                param[2] = FW_PARAM_PFVF(RQ_START);
                param[3] = FW_PARAM_PFVF(RQ_END);
                param[4] = FW_PARAM_PFVF(PBL_START);
                param[5] = FW_PARAM_PFVF(PBL_END);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query RDMA parameters(1): %d.\n", rc);
                        return (rc);
                }
                sc->vres.stag.start = val[0];
                sc->vres.stag.size = val[1] - val[0] + 1;
                sc->vres.rq.start = val[2];
                sc->vres.rq.size = val[3] - val[2] + 1;
                sc->vres.pbl.start = val[4];
                sc->vres.pbl.size = val[5] - val[4] + 1;

                param[0] = FW_PARAM_PFVF(SQRQ_START);
                param[1] = FW_PARAM_PFVF(SQRQ_END);
                param[2] = FW_PARAM_PFVF(CQ_START);
                param[3] = FW_PARAM_PFVF(CQ_END);
                param[4] = FW_PARAM_PFVF(OCQ_START);
                param[5] = FW_PARAM_PFVF(OCQ_END);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query RDMA parameters(2): %d.\n", rc);
                        return (rc);
                }
                sc->vres.qp.start = val[0];
                sc->vres.qp.size = val[1] - val[0] + 1;
                sc->vres.cq.start = val[2];
                sc->vres.cq.size = val[3] - val[2] + 1;
                sc->vres.ocq.start = val[4];
                sc->vres.ocq.size = val[5] - val[4] + 1;
        }
        if (caps.iscsicaps) {
                param[0] = FW_PARAM_PFVF(ISCSI_START);
                param[1] = FW_PARAM_PFVF(ISCSI_END);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to query iSCSI parameters: %d.\n", rc);
                        return (rc);
                }
                sc->vres.iscsi.start = val[0];
                sc->vres.iscsi.size = val[1] - val[0] + 1;
        }

        /*
         * We've got the params we wanted to query via the firmware.  Now grab
         * some others directly from the chip.
         */
        rc = t4_read_chip_settings(sc);

        return (rc);
}
2547
2548 static int
2549 set_params__post_init(struct adapter *sc)
2550 {
2551         uint32_t param, val;
2552
2553         /* ask for encapsulated CPLs */
2554         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2555         val = 1;
2556         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2557
2558         return (0);
2559 }
2560
2561 #undef FW_PARAM_PFVF
2562 #undef FW_PARAM_DEV
2563
2564 static void
2565 t4_set_desc(struct adapter *sc)
2566 {
2567         char buf[128];
2568         struct adapter_params *p = &sc->params;
2569
2570         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2571             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2572             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2573
2574         device_set_desc_copy(sc->dev, buf);
2575 }
2576
/*
 * (Re)build the ifmedia list for the port based on its port type and, where
 * relevant, the transceiver module type.  Each entry stashes both types in
 * its data word.
 */
static void
build_medialist(struct port_info *pi)
{
        struct ifmedia *media = &pi->media;
        int data, m;

        PORT_LOCK(pi);

        /* Remove any existing entries before rebuilding the list. */
        ifmedia_removeall(media);

        m = IFM_ETHER | IFM_FDX;
        data = (pi->port_type << 8) | pi->mod_type;

        switch(pi->port_type) {
        case FW_PORT_TYPE_BT_XFI:
                /*
                 * NOTE(review): unlike the other cases, no ifmedia_set() is
                 * done here, so no current media is selected — confirm this
                 * is intentional.
                 */
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                break;

        case FW_PORT_TYPE_BT_XAUI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                /* fall through */

        case FW_PORT_TYPE_BT_SGMII:
                ifmedia_add(media, m | IFM_1000_T, data, NULL);
                ifmedia_add(media, m | IFM_100_TX, data, NULL);
                ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
                ifmedia_set(media, IFM_ETHER | IFM_AUTO);
                break;

        case FW_PORT_TYPE_CX4:
                ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
                ifmedia_set(media, m | IFM_10G_CX4);
                break;

        /* 10G ports where the media depends on the plugged-in module. */
        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_10G_LR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LR);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_10G_SR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_SR);
                        break;

                case FW_PORT_MOD_TYPE_LRM:
                        ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LRM);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
                        ifmedia_set(media, m | IFM_10G_TWINAX);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: report no media, not full duplex. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                case FW_PORT_MOD_TYPE_NA:
                case FW_PORT_MOD_TYPE_ER:
                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        /* 40G QSFP; media again depends on the module. */
        case FW_PORT_TYPE_QSFP:
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_LR4);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_SR4);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
                        ifmedia_set(media, m | IFM_40G_CR4);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: report no media, not full duplex. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                default:
                        device_printf(pi->dev,
                            "unknown port_type (%d), mod_type (%d)\n",
                            pi->port_type, pi->mod_type);
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        default:
                device_printf(pi->dev,
                    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
                    pi->mod_type);
                ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                ifmedia_set(media, m | IFM_UNKNOWN);
                break;
        }

        PORT_UNLOCK(pi);
}
2701
2702 #define FW_MAC_EXACT_CHUNK      7
2703
/*
 * Program the port's XGMAC based on parameters in ifnet.  The caller also
 * indicates which parameters should be programmed (the rest are left alone).
 * Returns 0 on success, an errno value otherwise.
 */
static int
update_mac_settings(struct port_info *pi, int flags)
{
        int rc;
        struct ifnet *ifp = pi->ifp;
        struct adapter *sc = pi->adapter;
        int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT(flags, ("%s: not told what to update.", __func__));

        /* -1 means "leave this setting unchanged" to t4_set_rxmode. */
        if (flags & XGMAC_MTU)
                mtu = ifp->if_mtu;

        if (flags & XGMAC_PROMISC)
                promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

        if (flags & XGMAC_ALLMULTI)
                allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

        if (flags & XGMAC_VLANEX)
                vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

        rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
            vlanex, false);
        if (rc) {
                if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
                return (rc);
        }

        if (flags & XGMAC_UCADDR) {
                uint8_t ucaddr[ETHER_ADDR_LEN];

                bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
                /*
                 * A non-negative return is the index of the MAC filter used;
                 * remember it so the same slot is replaced on the next call.
                 */
                rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
                    ucaddr, true, true);
                if (rc < 0) {
                        rc = -rc;
                        if_printf(ifp, "change_mac failed: %d\n", rc);
                        return (rc);
                } else {
                        pi->xact_addr_filt = rc;
                        rc = 0;
                }
        }

        if (flags & XGMAC_MCADDRS) {
                const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
                int del = 1;    /* replace existing filters on first call only */
                uint64_t hash = 0;
                struct ifmultiaddr *ifma;
                int i = 0, j;

                /*
                 * Walk the multicast list and program exact-match filters in
                 * batches of FW_MAC_EXACT_CHUNK addresses; the accumulated
                 * hash is pushed to the hardware afterwards.
                 */
                if_maddr_rlock(ifp);
                TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;
                        mcaddr[i++] =
                            LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

                        if (i == FW_MAC_EXACT_CHUNK) {
                                rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
                                    del, i, mcaddr, NULL, &hash, 0);
                                if (rc < 0) {
                                        rc = -rc;
                                        for (j = 0; j < i; j++) {
                                                if_printf(ifp,
                                                    "failed to add mc address"
                                                    " %02x:%02x:%02x:"
                                                    "%02x:%02x:%02x rc=%d\n",
                                                    mcaddr[j][0], mcaddr[j][1],
                                                    mcaddr[j][2], mcaddr[j][3],
                                                    mcaddr[j][4], mcaddr[j][5],
                                                    rc);
                                        }
                                        goto mcfail;
                                }
                                del = 0;
                                i = 0;
                        }
                }
                if (i > 0) {
                        /* Program the final, partial chunk. */
                        rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
                            del, i, mcaddr, NULL, &hash, 0);
                        if (rc < 0) {
                                rc = -rc;
                                for (j = 0; j < i; j++) {
                                        if_printf(ifp,
                                            "failed to add mc address"
                                            " %02x:%02x:%02x:"
                                            "%02x:%02x:%02x rc=%d\n",
                                            mcaddr[j][0], mcaddr[j][1],
                                            mcaddr[j][2], mcaddr[j][3],
                                            mcaddr[j][4], mcaddr[j][5],
                                            rc);
                                }
                                goto mcfail;
                        }
                }

                rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
                if (rc != 0)
                        if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
                if_maddr_runlock(ifp);
        }

        return (rc);
}
2817
2818 int
2819 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2820     char *wmesg)
2821 {
2822         int rc, pri;
2823
2824 #ifdef WITNESS
2825         /* the caller thinks it's ok to sleep, but is it really? */
2826         if (flags & SLEEP_OK)
2827                 pause("t4slptst", 1);
2828 #endif
2829
2830         if (INTR_OK)
2831                 pri = PCATCH;
2832         else
2833                 pri = 0;
2834
2835         ADAPTER_LOCK(sc);
2836         for (;;) {
2837
2838                 if (pi && IS_DOOMED(pi)) {
2839                         rc = ENXIO;
2840                         goto done;
2841                 }
2842
2843                 if (!IS_BUSY(sc)) {
2844                         rc = 0;
2845                         break;
2846                 }
2847
2848                 if (!(flags & SLEEP_OK)) {
2849                         rc = EBUSY;
2850                         goto done;
2851                 }
2852
2853                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2854                         rc = EINTR;
2855                         goto done;
2856                 }
2857         }
2858
2859         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2860         SET_BUSY(sc);
2861 #ifdef INVARIANTS
2862         sc->last_op = wmesg;
2863         sc->last_op_thr = curthread;
2864 #endif
2865
2866 done:
2867         if (!(flags & HOLD_LOCK) || rc)
2868                 ADAPTER_UNLOCK(sc);
2869
2870         return (rc);
2871 }
2872
2873 void
2874 end_synchronized_op(struct adapter *sc, int flags)
2875 {
2876
2877         if (flags & LOCK_HELD)
2878                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2879         else
2880                 ADAPTER_LOCK(sc);
2881
2882         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2883         CLR_BUSY(sc);
2884         wakeup(&sc->flags);
2885         ADAPTER_UNLOCK(sc);
2886 }
2887
/*
 * Bring the port up.  Must be called with the synchronized-op held.  A no-op
 * if the port is already running; on any failure the partial bring-up is
 * undone via cxgbe_uninit_synchronized().
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* Lazily bring up adapter-wide and then per-port resources. */
        if (!(sc->flags & FULL_INIT_DONE) &&
            ((rc = adapter_full_init(sc)) != 0))
                return (rc);    /* error message displayed already */

        if (!(pi->flags & PORT_INIT_DONE) &&
            ((rc = port_full_init(pi)) != 0))
                return (rc); /* error message displayed already */

        rc = update_mac_settings(pi, XGMAC_ALL);
        if (rc)
                goto done;      /* error message displayed already */

        rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
        if (rc != 0) {
                if_printf(ifp, "start_link failed: %d\n", rc);
                goto done;
        }

        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }

        /*
         * The first iq of the first port to come up is used for tracing.
         */
        if (sc->traceq < 0) {
                sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
                /* T4 and T5 use different register addresses for this. */
                t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
                    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
                    V_QUEUENUMBER(sc->traceq));
                pi->flags |= HAS_TRACEQ;
        }

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Start the 1Hz per-port tick. */
        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);  /* undo partial bring-up */

        return (rc);
}
2951
/*
 * Take the port down.  Must be called with the synchronized-op held.
 * Idempotent.
 */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc;

        ASSERT_SYNCHRONIZED_OP(sc);

        /*
         * Disable the VI so that all its data in either direction is discarded
         * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
         * tick) intact as the TP can deliver negative advice or data that it's
         * holding in its RAM (for an offloaded connection) even after the VI is
         * disabled.
         */
        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
        if (rc) {
                if_printf(ifp, "disable_vi failed: %d\n", rc);
                return (rc);
        }

        /* Mark the port as no longer running. */
        clrbit(&sc->open_device_map, pi->port_id);
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        PORT_UNLOCK(pi);

        /* Clear the cached link state and notify the OS of link-down. */
        pi->link_cfg.link_ok = 0;
        pi->link_cfg.speed = 0;
        pi->linkdnrc = -1;
        t4_os_link_changed(sc, pi->port_id, 0, -1);

        return (0);
}
2989
/*
 * Set up interrupt handlers for the adapter: either a single handler for
 * everything, or one per rx queue plus dedicated error/firmware-event
 * vectors.
 *
 * It is ok for this function to fail midway and return right away.  t4_detach
 * will walk the entire sc->irq list and clean up whatever is valid.
 */
static int
setup_intr_handlers(struct adapter *sc)
{
        int rc, rid, p, q;
        char s[8];      /* irq name: "<port>.<queue>" or "<port>,<queue>" */
        struct irq *irq;
        struct port_info *pi;
        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
#endif

        /*
         * Setup interrupts.
         */
        irq = &sc->irq[0];
        rid = sc->intr_type == INTR_INTX ? 0 : 1;   /* INTx uses rid 0 */
        if (sc->intr_count == 1) {
                /* A single vector services the whole adapter. */
                KASSERT(!(sc->flags & INTR_DIRECT),
                    ("%s: single interrupt && INTR_DIRECT?", __func__));

                rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
                if (rc != 0)
                        return (rc);
        } else {
                /* Multiple interrupts. */
                KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
                    ("%s: too few intr.", __func__));

                /* The first one is always error intr */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /* The second one is always the firmware event queue */
                rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
                    "evt");
                if (rc != 0)
                        return (rc);
                irq++;
                rid++;

                /*
                 * Note that if INTR_DIRECT is not set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queues will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, p) {
                        pi = sc->port[p];

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        /* One vector per NIC rx queue, named "p.q". */
                        rxq = &sc->sge.rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                snprintf(s, sizeof(s), "%d.%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
                                    s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }

#ifdef TCP_OFFLOAD
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (!(sc->flags & INTR_DIRECT))
                                continue;
ofld_queues:
                        /* One vector per offload rx queue, named "p,q". */
                        ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                snprintf(s, sizeof(s), "%d,%d", p, q);
                                rc = t4_alloc_irq(sc, irq, rid, t4_intr,
                                    ofld_rxq, s);
                                if (rc != 0)
                                        return (rc);
                                irq++;
                                rid++;
                        }
#endif
                }
        }

        return (0);
}
3093
3094 static int
3095 adapter_full_init(struct adapter *sc)
3096 {
3097         int rc, i;
3098
3099         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3100         KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3101             ("%s: FULL_INIT_DONE already", __func__));
3102
3103         /*
3104          * queues that belong to the adapter (not any particular port).
3105          */
3106         rc = t4_setup_adapter_queues(sc);
3107         if (rc != 0)
3108                 goto done;
3109
3110         for (i = 0; i < nitems(sc->tq); i++) {
3111                 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3112                     taskqueue_thread_enqueue, &sc->tq[i]);
3113                 if (sc->tq[i] == NULL) {
3114                         device_printf(sc->dev,
3115                             "failed to allocate task queue %d\n", i);
3116                         rc = ENOMEM;
3117                         goto done;
3118                 }
3119                 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3120                     device_get_nameunit(sc->dev), i);
3121         }
3122
3123         t4_intr_enable(sc);
3124         sc->flags |= FULL_INIT_DONE;
3125 done:
3126         if (rc != 0)
3127                 adapter_full_uninit(sc);
3128
3129         return (rc);
3130 }
3131
3132 static int
3133 adapter_full_uninit(struct adapter *sc)
3134 {
3135         int i;
3136
3137         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3138
3139         t4_teardown_adapter_queues(sc);
3140
3141         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3142                 taskqueue_free(sc->tq[i]);
3143                 sc->tq[i] = NULL;
3144         }
3145
3146         sc->flags &= ~FULL_INIT_DONE;
3147
3148         return (0);
3149 }
3150
/*
 * Second phase of port initialization: allocate this port's tx/rx/freelist
 * queues and program the RSS indirection table with its rx queues.  Runs
 * under the synchronized op (asserted) and must not be called while
 * PORT_INIT_DONE is set.  Returns 0 or a positive errno; on failure all
 * partial state is rolled back via port_full_uninit().
 */
static int
port_full_init(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        uint16_t *rss;          /* RSS table entries: abs ids of rx iqs */
        struct sge_rxq *rxq;
        int rc, i;

        ASSERT_SYNCHRONIZED_OP(sc);
        KASSERT((pi->flags & PORT_INIT_DONE) == 0,
            ("%s: PORT_INIT_DONE already", __func__));

        /* Sysctl context for the nodes created by the queue setup below. */
        sysctl_ctx_init(&pi->ctx);
        pi->flags |= PORT_SYSCTL_CTX;

        /*
         * Allocate tx/rx/fl queues for this port.
         */
        rc = t4_setup_port_queues(pi);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * Setup RSS for this port.
         */
        rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
            M_ZERO | M_WAITOK);
        for_each_rxq(pi, i, rxq) {
                rss[i] = rxq->iq.abs_id;
        }
        /* Negated to convert the firmware call's result to a positive errno. */
        rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
            pi->rss_size, rss, pi->nrxq);
        free(rss, M_CXGBE);
        if (rc != 0) {
                if_printf(ifp, "rss_config failed: %d\n", rc);
                goto done;
        }

        pi->flags |= PORT_INIT_DONE;
done:
        if (rc != 0)
                port_full_uninit(pi);

        return (rc);
}
3197
/*
 * Idempotent counterpart of port_full_init: quiesce and free all of the
 * port's queues and clear PORT_INIT_DONE.  Safe to call after a partially
 * completed init — the teardown below runs unconditionally.
 */
static int
port_full_uninit(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        int i;
        struct sge_rxq *rxq;
        struct sge_txq *txq;
#ifdef TCP_OFFLOAD
        struct sge_ofld_rxq *ofld_rxq;
        struct sge_wrq *ofld_txq;
#endif

        if (pi->flags & PORT_INIT_DONE) {

                /* Need to quiesce queues.  XXX: ctrl queues? */

                /* Egress (tx) queues are quiesced first ... */
                for_each_txq(pi, i, txq) {
                        quiesce_eq(sc, &txq->eq);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_txq(pi, i, ofld_txq) {
                        quiesce_eq(sc, &ofld_txq->eq);
                }
#endif

                /* ... then the ingress queues and their freelists. */
                for_each_rxq(pi, i, rxq) {
                        quiesce_iq(sc, &rxq->iq);
                        quiesce_fl(sc, &rxq->fl);
                }

#ifdef TCP_OFFLOAD
                for_each_ofld_rxq(pi, i, ofld_rxq) {
                        quiesce_iq(sc, &ofld_rxq->iq);
                        quiesce_fl(sc, &ofld_rxq->fl);
                }
#endif
        }

        /* Unconditional: also cleans up a partially completed init. */
        t4_teardown_port_queues(pi);
        pi->flags &= ~PORT_INIT_DONE;

        return (0);
}
3245
3246 static void
3247 quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3248 {
3249         EQ_LOCK(eq);
3250         eq->flags |= EQ_DOOMED;
3251
3252         /*
3253          * Wait for the response to a credit flush if one's
3254          * pending.
3255          */
3256         while (eq->flags & EQ_CRFLUSHED)
3257                 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3258         EQ_UNLOCK(eq);
3259
3260         callout_drain(&eq->tx_callout); /* XXX: iffy */
3261         pause("callout", 10);           /* Still iffy */
3262
3263         taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3264 }
3265
3266 static void
3267 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3268 {
3269         (void) sc;      /* unused */
3270
3271         /* Synchronize with the interrupt handler */
3272         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3273                 pause("iqfree", 1);
3274 }
3275
/*
 * Doom a freelist and drain the adapter's starved-freelist callout so it
 * is no longer referenced before the freelist is destroyed.
 * NOTE(review): assumes sfl_callout is what services FL_STARVING lists —
 * confirm against the sge code.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
        /* sfl_lock is taken around the flag change; see sfl_callout users. */
        mtx_lock(&sc->sfl_lock);
        FL_LOCK(fl);
        fl->flags |= FL_DOOMED;
        FL_UNLOCK(fl);
        mtx_unlock(&sc->sfl_lock);

        callout_drain(&sc->sfl_callout);
        KASSERT((fl->flags & FL_STARVING) == 0,
            ("%s: still starving", __func__));
}
3289
3290 static int
3291 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3292     driver_intr_t *handler, void *arg, char *name)
3293 {
3294         int rc;
3295
3296         irq->rid = rid;
3297         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3298             RF_SHAREABLE | RF_ACTIVE);
3299         if (irq->res == NULL) {
3300                 device_printf(sc->dev,
3301                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3302                 return (ENOMEM);
3303         }
3304
3305         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3306             NULL, handler, arg, &irq->tag);
3307         if (rc != 0) {
3308                 device_printf(sc->dev,
3309                     "failed to setup interrupt for rid %d, name %s: %d\n",
3310                     rid, name, rc);
3311         } else if (name)
3312                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3313
3314         return (rc);
3315 }
3316
3317 static int
3318 t4_free_irq(struct adapter *sc, struct irq *irq)
3319 {
3320         if (irq->tag)
3321                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3322         if (irq->res)
3323                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3324
3325         bzero(irq, sizeof(*irq));
3326
3327         return (0);
3328 }
3329
3330 static void
3331 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3332     unsigned int end)
3333 {
3334         uint32_t *p = (uint32_t *)(buf + start);
3335
3336         for ( ; start <= end; start += sizeof(uint32_t))
3337                 *p++ = t4_read_reg(sc, start);
3338 }
3339
/*
 * Fill "buf" with a register dump of the adapter and record the dump
 * format in regs->version.  The registers read are given by a
 * chip-specific table of inclusive {first, last} address range pairs.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i, n;
        const unsigned int *reg_ranges;
        /* T4 chips: inclusive {start, end} register address pairs. */
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };
        /* T5 chips: inclusive {start, end} register address pairs. */
        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b94, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e408, 0x1e40c,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e690,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e808, 0x1e80c,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea90,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec08, 0x1ec0c,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee90,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f008, 0x1f00c,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f290,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f408, 0x1f40c,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f690,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f808, 0x1f80c,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa90,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc08, 0x1fc0c,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe90,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30030,
                0x30100, 0x30144,
                0x30190, 0x301d0,
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
                0x30a00, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
                0x30d00, 0x30d00,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
                0x31600, 0x3160c,
                0x31a00, 0x31a1c,
                0x31e00, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
                0x32200, 0x32200,
                0x32208, 0x32240,
                0x32248, 0x32280,
                0x32288, 0x322c0,
                0x322c8, 0x322fc,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
                0x33000, 0x33048,
                0x33060, 0x3309c,
                0x330f0, 0x33148,
                0x33160, 0x3319c,
                0x331f0, 0x332e4,
                0x332f8, 0x333e4,
                0x333f8, 0x33448,
                0x33460, 0x3349c,
                0x334f0, 0x33548,
                0x33560, 0x3359c,
                0x335f0, 0x336e4,
                0x336f8, 0x337e4,
                0x337f8, 0x337fc,
                0x33814, 0x33814,
                0x3382c, 0x3382c,
                0x33880, 0x3388c,
                0x338e8, 0x338ec,
                0x33900, 0x33948,
                0x33960, 0x3399c,
                0x339f0, 0x33ae4,
                0x33af8, 0x33b10,
                0x33b28, 0x33b28,
                0x33b3c, 0x33b50,
                0x33bf0, 0x33c10,
                0x33c28, 0x33c28,
                0x33c3c, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34030,
                0x34100, 0x34144,
                0x34190, 0x341d0,
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
                0x34a00, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
                0x34d00, 0x34d00,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
                0x35600, 0x3560c,
                0x35a00, 0x35a1c,
                0x35e00, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
                0x36200, 0x36200,
                0x36208, 0x36240,
                0x36248, 0x36280,
                0x36288, 0x362c0,
                0x362c8, 0x362fc,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
                0x36b00, 0x36b70,
                0x37000, 0x37048,
                0x37060, 0x3709c,
                0x370f0, 0x37148,
                0x37160, 0x3719c,
                0x371f0, 0x372e4,
                0x372f8, 0x373e4,
                0x373f8, 0x37448,
                0x37460, 0x3749c,
                0x374f0, 0x37548,
                0x37560, 0x3759c,
                0x375f0, 0x376e4,
                0x376f8, 0x377e4,
                0x377f8, 0x377fc,
                0x37814, 0x37814,
                0x3782c, 0x3782c,
                0x37880, 0x3788c,
                0x378e8, 0x378ec,
                0x37900, 0x37948,
                0x37960, 0x3799c,
                0x379f0, 0x37ae4,
                0x37af8, 0x37b10,
                0x37b28, 0x37b28,
                0x37b3c, 0x37b50,
                0x37bf0, 0x37c10,
                0x37c28, 0x37c28,
                0x37c3c, 0x37c50,
                0x37cf0, 0x37cfc,
                0x38000, 0x38030,
                0x38100, 0x38144,
                0x38190, 0x381d0,
                0x38200, 0x38318,
                0x38400, 0x3852c,
                0x38540, 0x3861c,
                0x38800, 0x38834,
                0x388c0, 0x38908,
                0x38910, 0x389ac,
                0x38a00, 0x38a2c,
                0x38a44, 0x38a50,
                0x38a74, 0x38c24,
                0x38d00, 0x38d00,
                0x38d08, 0x38d14,
                0x38d1c, 0x38d20,
                0x38d3c, 0x38d50,
                0x39200, 0x3920c,
                0x39220, 0x39220,
                0x39240, 0x39240,
                0x39600, 0x3960c,
                0x39a00, 0x39a1c,
                0x39e00, 0x39e20,
                0x39e38, 0x39e3c,
                0x39e80, 0x39e80,
                0x39e88, 0x39ea8,
                0x39eb0, 0x39eb4,
                0x39ec8, 0x39ed4,
                0x39fb8, 0x3a004,
                0x3a200, 0x3a200,
                0x3a208, 0x3a240,
                0x3a248, 0x3a280,
                0x3a288, 0x3a2c0,
                0x3a2c8, 0x3a2fc,
                0x3a600, 0x3a630,
                0x3aa00, 0x3aabc,
                0x3ab00, 0x3ab70,
                0x3b000, 0x3b048,
                0x3b060, 0x3b09c,
                0x3b0f0, 0x3b148,
                0x3b160, 0x3b19c,
                0x3b1f0, 0x3b2e4,
                0x3b2f8, 0x3b3e4,
                0x3b3f8, 0x3b448,
                0x3b460, 0x3b49c,
                0x3b4f0, 0x3b548,
                0x3b560, 0x3b59c,
                0x3b5f0, 0x3b6e4,
                0x3b6f8, 0x3b7e4,
                0x3b7f8, 0x3b7fc,
                0x3b814, 0x3b814,
                0x3b82c, 0x3b82c,
                0x3b880, 0x3b88c,
                0x3b8e8, 0x3b8ec,
                0x3b900, 0x3b948,
                0x3b960, 0x3b99c,
                0x3b9f0, 0x3bae4,
                0x3baf8, 0x3bb10,
                0x3bb28, 0x3bb28,
                0x3bb3c, 0x3bb50,
                0x3bbf0, 0x3bc10,
                0x3bc28, 0x3bc28,
                0x3bc3c, 0x3bc50,
                0x3bcf0, 0x3bcfc,
                0x3c000, 0x3c030,
                0x3c100, 0x3c144,
                0x3c190, 0x3c1d0,
                0x3c200, 0x3c318,
                0x3c400, 0x3c52c,
                0x3c540, 0x3c61c,
                0x3c800, 0x3c834,
                0x3c8c0, 0x3c908,
                0x3c910, 0x3c9ac,
                0x3ca00, 0x3ca2c,
                0x3ca44, 0x3ca50,
                0x3ca74, 0x3cc24,
                0x3cd00, 0x3cd00,
                0x3cd08, 0x3cd14,
                0x3cd1c, 0x3cd20,
                0x3cd3c, 0x3cd50,
                0x3d200, 0x3d20c,
                0x3d220, 0x3d220,
                0x3d240, 0x3d240,
                0x3d600, 0x3d60c,
                0x3da00, 0x3da1c,
                0x3de00, 0x3de20,
                0x3de38, 0x3de3c,
                0x3de80, 0x3de80,
                0x3de88, 0x3dea8,
                0x3deb0, 0x3deb4,
                0x3dec8, 0x3ded4,
                0x3dfb8, 0x3e004,
                0x3e200, 0x3e200,
                0x3e208, 0x3e240,
                0x3e248, 0x3e280,
                0x3e288, 0x3e2c0,
                0x3e2c8, 0x3e2fc,
                0x3e600, 0x3e630,
                0x3ea00, 0x3eabc,
                0x3eb00, 0x3eb70,
                0x3f000, 0x3f048,
                0x3f060, 0x3f09c,
                0x3f0f0, 0x3f148,
                0x3f160, 0x3f19c,
                0x3f1f0, 0x3f2e4,
                0x3f2f8, 0x3f3e4,
                0x3f3f8, 0x3f448,
                0x3f460, 0x3f49c,
                0x3f4f0, 0x3f548,
                0x3f560, 0x3f59c,
                0x3f5f0, 0x3f6e4,
                0x3f6f8, 0x3f7e4,
                0x3f7f8, 0x3f7fc,
                0x3f814, 0x3f814,
                0x3f82c, 0x3f82c,
                0x3f880, 0x3f88c,
                0x3f8e8, 0x3f8ec,
                0x3f900, 0x3f948,
                0x3f960, 0x3f99c,
                0x3f9f0, 0x3fae4,
                0x3faf8, 0x3fb10,
                0x3fb28, 0x3fb28,
                0x3fb3c, 0x3fb50,
                0x3fbf0, 0x3fc10,
                0x3fc28, 0x3fc28,
                0x3fc3c, 0x3fc50,
                0x3fcf0, 0x3fcfc,
                0x40000, 0x4000c,
                0x40040, 0x40068,
                0x4007c, 0x40144,
                0x40180, 0x4018c,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
                0x403f8, 0x403fc,
                0x41304, 0x413c4,
                0x41400, 0x4141c,
                0x41480, 0x414d0,
                0x44000, 0x44078,
                0x440c0, 0x44278,
                0x442c0, 0x44478,
                0x444c0, 0x44678,
                0x446c0, 0x44878,
                0x448c0, 0x449fc,
                0x45000, 0x45068,
                0x45080, 0x45084,
                0x450a0, 0x450b0,
                0x45200, 0x45268,
                0x45280, 0x45284,
                0x452a0, 0x452b0,
                0x460c0, 0x460e4,
                0x47000, 0x4708c,
                0x47200, 0x47250,
                0x47400, 0x47420,
                0x47600, 0x47618,
                0x47800, 0x47814,
                0x48000, 0x4800c,
                0x48040, 0x48068,
                0x4807c, 0x48144,
                0x48180, 0x4818c,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
                0x483f8, 0x483fc,
                0x49304, 0x493c4,
                0x49400, 0x4941c,
                0x49480, 0x494d0,
                0x4c000, 0x4c078,
                0x4c0c0, 0x4c278,
                0x4c2c0, 0x4c478,
                0x4c4c0, 0x4c678,
                0x4c6c0, 0x4c878,
                0x4c8c0, 0x4c9fc,
                0x4d000, 0x4d068,
                0x4d080, 0x4d084,
                0x4d0a0, 0x4d0b0,
                0x4d200, 0x4d268,
                0x4d280, 0x4d284,
                0x4d2a0, 0x4d2b0,
                0x4e0c0, 0x4e0e4,
                0x4f000, 0x4f08c,
                0x4f200, 0x4f250,
                0x4f400, 0x4f420,
                0x4f600, 0x4f618,
                0x4f800, 0x4f814,
                0x50000, 0x500cc,
                0x50400, 0x50400,
                0x50800, 0x508cc,
                0x50c00, 0x50c00,
                0x51000, 0x5101c,
                0x51300, 0x51308,
        };

        /* Pick the range table that matches this chip generation. */
        if (is_t4(sc)) {
                reg_ranges = &t4_reg_ranges[0];
                n = nitems(t4_reg_ranges);
        } else {
                reg_ranges = &t5_reg_ranges[0];
                n = nitems(t5_reg_ranges);
        }

        /* Encode chip id and revision into the dump header. */
        regs->version = chip_id(sc) | chip_rev(sc) << 10;
        /* Entries come in pairs: dump [i] through [i + 1] inclusive. */
        for (i = 0; i < n; i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4015
/*
 * Per-port callout, rescheduled every hz ticks while the interface is
 * running: pulls the port's MAC statistics from the hardware and refreshes
 * the ifnet counters from them.  Stops rescheduling itself once the
 * interface is no longer IFF_DRV_RUNNING.
 */
static void
cxgbe_tick(void *arg)
{
        struct port_info *pi = arg;
        struct ifnet *ifp = pi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;

        PORT_LOCK(pi);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return; /* without scheduling another callout */
        }

        t4_get_port_stats(pi->adapter, pi->tx_chan, s);

        /*
         * Pause frames are excluded from the packet/octet counters.
         * NOTE(review): the octet adjustment assumes 64 bytes per pause
         * frame — confirm against the MAC stats documentation.
         */
        ifp->if_opackets = s->tx_frames - s->tx_pause;
        ifp->if_ipackets = s->rx_frames - s->rx_pause;
        ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
        ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
        ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
        ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
            s->rx_trunc3;

        /* Hardware tx drops plus software drops from every txq's buf_ring. */
        drops = s->tx_drop;
        for_each_txq(pi, i, txq)
                drops += txq->br->br_drops;
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        callout_schedule(&pi->tick, hz);
        PORT_UNLOCK(pi);
}
4055
4056 static void
4057 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4058 {
4059         struct ifnet *vlan;
4060
4061         if (arg != ifp || ifp->if_type != IFT_ETHER)
4062                 return;
4063
4064         vlan = VLAN_DEVAT(ifp, vid);
4065         VLAN_SETCOOKIE(vlan, ifp);
4066 }
4067
/*
 * Default CPL handler: occupies every cpl_handler[] slot that has no real
 * handler registered.  Reaching it means the chip delivered a CPL opcode
 * the driver did not expect.  Panics under INVARIANTS, otherwise logs and
 * discards the payload.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
        panic("%s: opcode 0x%02x on iq %p with payload %p",
            __func__, rss->opcode, iq, m);
#else
        log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
            __func__, rss->opcode, iq, m);
        m_freem(m);     /* nobody else will free the payload */
#endif
        return (EDOOFUS);
}
4082
4083 int
4084 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4085 {
4086         uintptr_t *loc, new;
4087
4088         if (opcode >= nitems(sc->cpl_handler))
4089                 return (EINVAL);
4090
4091         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4092         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4093         atomic_store_rel_ptr(loc, new);
4094
4095         return (0);
4096 }
4097
/*
 * Default handler for async notifications.  Installed in sc->an_handler
 * until t4_register_an_handler() replaces it.  Panics under INVARIANTS,
 * otherwise just logs the event.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
        panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
        log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
            __func__, iq, ctrl);
#endif
        return (EDOOFUS);
}
4110
4111 int
4112 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4113 {
4114         uintptr_t *loc, new;
4115
4116         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4117         loc = (uintptr_t *) &sc->an_handler;
4118         atomic_store_rel_ptr(loc, new);
4119
4120         return (0);
4121 }
4122
/*
 * Default handler for firmware messages whose type has no registered
 * handler.  Recovers the enclosing cpl_fw6_msg from the payload pointer
 * so it can report the offending message type.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
        const struct cpl_fw6_msg *cpl =
            __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
        panic("%s: fw_msg type %d", __func__, cpl->type);
#else
        log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
        return (EDOOFUS);
}
4136
4137 int
4138 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4139 {
4140         uintptr_t *loc, new;
4141
4142         if (type >= nitems(sc->fw_msg_handler))
4143                 return (EINVAL);
4144
4145         /*
4146          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4147          * handler dispatch table.  Reject any attempt to install a handler for
4148          * this subtype.
4149          */
4150         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4151                 return (EINVAL);
4152
4153         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4154         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4155         atomic_store_rel_ptr(loc, new);
4156
4157         return (0);
4158 }
4159
4160 static int
4161 t4_sysctls(struct adapter *sc)
4162 {
4163         struct sysctl_ctx_list *ctx;
4164         struct sysctl_oid *oid;
4165         struct sysctl_oid_list *children, *c0;
4166         static char *caps[] = {
4167                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4168                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",       /* caps[1] niccaps */
4169                 "\20\1TOE",                             /* caps[2] toecaps */
4170                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4171                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4172                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4173                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4174                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4175         };
4176         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4177
4178         ctx = device_get_sysctl_ctx(sc->dev);
4179
4180         /*
4181          * dev.t4nex.X.
4182          */
4183         oid = device_get_sysctl_tree(sc->dev);
4184         c0 = children = SYSCTL_CHILDREN(oid);
4185
4186         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4187             sc->params.nports, "# of ports");
4188
4189         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4190             NULL, chip_rev(sc), "chip hardware revision");
4191
4192         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4193             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4194
4195         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4196             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4197
4198         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4199             sc->cfcsum, "config file checksum");
4200
4201         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4202             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4203             sysctl_bitfield, "A", "available doorbells");
4204
4205         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4206             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4207             sysctl_bitfield, "A", "available link capabilities");
4208
4209         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4210             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4211             sysctl_bitfield, "A", "available NIC capabilities");
4212
4213         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4214             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4215             sysctl_bitfield, "A", "available TCP offload capabilities");
4216
4217         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4218             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4219             sysctl_bitfield, "A", "available RDMA capabilities");
4220
4221         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4222             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4223             sysctl_bitfield, "A", "available iSCSI capabilities");
4224
4225         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4226             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4227             sysctl_bitfield, "A", "available FCoE capabilities");
4228
4229         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4230             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4231
4232         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4233             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4234             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4235             "interrupt holdoff timer values (us)");
4236
4237         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4238             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4239             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4240             "interrupt holdoff packet counter values");
4241
4242         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4243             NULL, sc->tids.nftids, "number of filters");
4244
4245         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4246             CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4247             "chip temperature (in Celsius)");
4248
4249         t4_sge_sysctls(sc, ctx, children);
4250
4251         sc->lro_timeout = 100;
4252         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4253             &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4254
4255 #ifdef SBUF_DRAIN
4256         /*
4257          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4258          */
4259         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4260             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4261             "logs and miscellaneous information");
4262         children = SYSCTL_CHILDREN(oid);
4263
4264         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4265             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4266             sysctl_cctrl, "A", "congestion control");
4267
4268         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4269             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4270             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4271
4272         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4273             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4274             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4275
4276         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4277             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4278             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4279
4280         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4281             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4282             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4283
4284         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4285             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4286             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4287
4288         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4289             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4290             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4291
4292         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4293             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4294             sysctl_cim_la, "A", "CIM logic analyzer");
4295
4296         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4297             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4298             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4299
4300         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4301             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4302             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4303
4304         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4305             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4306             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4307
4308         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4309             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4310             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4311
4312         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4313             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4314             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4315
4316         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4317             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4318             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4319
4320         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4321             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4322             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4323
4324         if (is_t5(sc)) {
4325                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4326                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4327                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4328
4329                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4330                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4331                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4332         }
4333
4334         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4335             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4336             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4337
4338         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4339             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4340             sysctl_cim_qcfg, "A", "CIM queue configuration");
4341
4342         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4343             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4344             sysctl_cpl_stats, "A", "CPL statistics");
4345
4346         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4347             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4348             sysctl_ddp_stats, "A", "DDP statistics");
4349
4350         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4351             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4352             sysctl_devlog, "A", "firmware's device log");
4353
4354         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4355             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4356             sysctl_fcoe_stats, "A", "FCoE statistics");
4357
4358         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4359             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4360             sysctl_hw_sched, "A", "hardware scheduler ");
4361
4362         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4363             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4364             sysctl_l2t, "A", "hardware L2 table");
4365
4366         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4367             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4368             sysctl_lb_stats, "A", "loopback statistics");
4369
4370         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4371             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4372             sysctl_meminfo, "A", "memory regions");
4373
4374         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4375             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4376             sysctl_mps_tcam, "A", "MPS TCAM entries");
4377
4378         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4379             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4380             sysctl_path_mtus, "A", "path MTUs");
4381
4382         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4383             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4384             sysctl_pm_stats, "A", "PM statistics");
4385
4386         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4387             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4388             sysctl_rdma_stats, "A", "RDMA statistics");
4389
4390         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4391             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4392             sysctl_tcp_stats, "A", "TCP statistics");
4393
4394         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4395             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4396             sysctl_tids, "A", "TID information");
4397
4398         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4399             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4400             sysctl_tp_err_stats, "A", "TP error statistics");
4401
4402         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4403             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4404             sysctl_tp_la, "A", "TP logic analyzer");
4405
4406         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4407             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4408             sysctl_tx_rate, "A", "Tx rate");
4409
4410         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4411             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4412             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4413
4414         if (is_t5(sc)) {
4415                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4416                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4417                     sysctl_wcwr_stats, "A", "write combined work requests");
4418         }
4419 #endif
4420
4421 #ifdef TCP_OFFLOAD
4422         if (is_offload(sc)) {
4423                 /*
4424                  * dev.t4nex.X.toe.
4425                  */
4426                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4427                     NULL, "TOE parameters");
4428                 children = SYSCTL_CHILDREN(oid);
4429
4430                 sc->tt.sndbuf = 256 * 1024;
4431                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4432                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4433
4434                 sc->tt.ddp = 0;
4435                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4436                     &sc->tt.ddp, 0, "DDP allowed");
4437
4438                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4439                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4440                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4441
4442                 sc->tt.ddp_thres =
4443                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4444                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4445                     &sc->tt.ddp_thres, 0, "DDP threshold");
4446
4447                 sc->tt.rx_coalesce = 1;
4448                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4449                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4450         }
4451 #endif
4452
4453
4454         return (0);
4455 }
4456
/*
 * Create the per-port sysctl tree: dev.cxgbe.X (queue configuration and
 * tunables) and dev.cxgbe.X.stats (MAC statistics read straight from the
 * hardware registers, plus a few software-maintained counters).  Called
 * once per port during attach; always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
           CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
        /* Extra PHY sysctls only for the 10GBASE-T (BT) PHY. */
        if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
                    "PHY temperature (in Celsius)");
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
                    "PHY firmware version");
        }
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &pi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &pi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &pi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif

        /* Writable tunables go through handlers so changes reach live queues. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Each stat below is a 64-bit MPS counter read directly from the chip
 * (arg2 is the register offset, handled by sysctl_handle_t4_reg64).
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Software copy of a counter in pi->stats (refreshed by cxgbe_tick()). */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /* We get these from port_stats and they may be stale by upto 1s */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
4680
4681 static int
4682 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4683 {
4684         int rc, *i;
4685         struct sbuf sb;
4686
4687         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4688         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4689                 sbuf_printf(&sb, "%d ", *i);
4690         sbuf_trim(&sb);
4691         sbuf_finish(&sb);
4692         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4693         sbuf_delete(&sb);
4694         return (rc);
4695 }
4696
4697 static int
4698 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4699 {
4700         int rc;
4701         struct sbuf *sb;
4702
4703         rc = sysctl_wire_old_buffer(req, 0);
4704         if (rc != 0)
4705                 return(rc);
4706
4707         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4708         if (sb == NULL)
4709                 return (ENOMEM);
4710
4711         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4712         rc = sbuf_finish(sb);
4713         sbuf_delete(sb);
4714
4715         return (rc);
4716 }
4717
/*
 * Read a value from the BT PHY over MDIO and report it via sysctl.
 * arg2 selects what to read: 0 = temperature, 1 = firmware version
 * (see the registrations in cxgbe_sysctls()).
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
        struct port_info *pi = arg1;
        int op = arg2;
        struct adapter *sc = pi->adapter;
        u_int v;
        int rc;

        /* Serialize against other adapter operations. */
        rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
        if (rc)
                return (rc);
        /* XXX: magic numbers */
        /*
         * MMD 0x1e, register 0x20 (op != 0) or 0xc820 (op == 0) —
         * presumably vendor-specific PHY registers; verify against the
         * PHY datasheet.
         */
        rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
            &v);
        end_synchronized_op(sc, 0);
        if (rc)
                return (rc);
        if (op == 0)
                v /= 256;       /* scale the raw temperature reading */

        rc = sysctl_handle_int(oidp, &v, 0, req);
        return (rc);
}
4742
/*
 * Sysctl handler for a port's interrupt holdoff timer index.  Accepts a new
 * index in [0, SGE_NTIMERS) and pushes the resulting intr_params byte to
 * every NIC (and, with TCP_OFFLOAD, every offload) rx queue on the port.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);	/* plain read, or copyin/copyout error */

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Packet-count holdoff stays enabled iff a pktc index is configured. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		/* No 8-bit release store on this arch; plain store instead. */
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4791
4792 static int
4793 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4794 {
4795         struct port_info *pi = arg1;
4796         struct adapter *sc = pi->adapter;
4797         int idx, rc;
4798
4799         idx = pi->pktc_idx;
4800
4801         rc = sysctl_handle_int(oidp, &idx, 0, req);
4802         if (rc != 0 || req->newptr == NULL)
4803                 return (rc);
4804
4805         if (idx < -1 || idx >= SGE_NCOUNTERS)
4806                 return (EINVAL);
4807
4808         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4809             "t4pktc");
4810         if (rc)
4811                 return (rc);
4812
4813         if (pi->flags & PORT_INIT_DONE)
4814                 rc = EBUSY; /* cannot be changed once the queues are created */
4815         else
4816                 pi->pktc_idx = idx;
4817
4818         end_synchronized_op(sc, LOCK_HELD);
4819         return (rc);
4820 }
4821
4822 static int
4823 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4824 {
4825         struct port_info *pi = arg1;
4826         struct adapter *sc = pi->adapter;
4827         int qsize, rc;
4828
4829         qsize = pi->qsize_rxq;
4830
4831         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4832         if (rc != 0 || req->newptr == NULL)
4833                 return (rc);
4834
4835         if (qsize < 128 || (qsize & 7))
4836                 return (EINVAL);
4837
4838         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4839             "t4rxqs");
4840         if (rc)
4841                 return (rc);
4842
4843         if (pi->flags & PORT_INIT_DONE)
4844                 rc = EBUSY; /* cannot be changed once the queues are created */
4845         else
4846                 pi->qsize_rxq = qsize;
4847
4848         end_synchronized_op(sc, LOCK_HELD);
4849         return (rc);
4850 }
4851
4852 static int
4853 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4854 {
4855         struct port_info *pi = arg1;
4856         struct adapter *sc = pi->adapter;
4857         int qsize, rc;
4858
4859         qsize = pi->qsize_txq;
4860
4861         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4862         if (rc != 0 || req->newptr == NULL)
4863                 return (rc);
4864
4865         /* bufring size must be powerof2 */
4866         if (qsize < 128 || !powerof2(qsize))
4867                 return (EINVAL);
4868
4869         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4870             "t4txqs");
4871         if (rc)
4872                 return (rc);
4873
4874         if (pi->flags & PORT_INIT_DONE)
4875                 rc = EBUSY; /* cannot be changed once the queues are created */
4876         else
4877                 pi->qsize_txq = qsize;
4878
4879         end_synchronized_op(sc, LOCK_HELD);
4880         return (rc);
4881 }
4882
4883 static int
4884 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4885 {
4886         struct adapter *sc = arg1;
4887         int reg = arg2;
4888         uint64_t val;
4889
4890         val = t4_read_reg64(sc, reg);
4891
4892         return (sysctl_handle_64(oidp, &val, 0, req));
4893 }
4894
4895 static int
4896 sysctl_temperature(SYSCTL_HANDLER_ARGS)
4897 {
4898         struct adapter *sc = arg1;
4899         int rc, t;
4900         uint32_t param, val;
4901
4902         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4903         if (rc)
4904                 return (rc);
4905         param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4906             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4907             V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4908         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4909         end_synchronized_op(sc, 0);
4910         if (rc)
4911                 return (rc);
4912
4913         /* unknown is returned as 0 but we display -1 in that case */
4914         t = val == 0 ? -1 : val;
4915
4916         rc = sysctl_handle_int(oidp, &t, 0, req);
4917         return (rc);
4918 }
4919
4920 #ifdef SBUF_DRAIN
/*
 * Sysctl handler: dump the TP congestion control table.  Each of the
 * NCCTRL_WIN windows prints as two lines: the 16 per-MTU increment values
 * (incr[mtu][win]) followed by the window's a_wnd parameter and the decoded
 * b_wnd decrement factor.
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* Display strings for the 3-bit b_wnd decrement-factor encoding. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
4958
/*
 * Names of the CIM queues, indexed exactly like the arg2/qid passed to
 * sysctl_cim_ibq_obq(): ibq's first, then obq's (the last two only exist
 * on T5 and later).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
4964
/*
 * Sysctl handler: hex-dump the raw contents of one CIM queue.  arg2 is an
 * index into qname[]; values below CIM_NUM_IBQ select an inbound queue and
 * the rest select an outbound queue (T5 has more obq's than T4).
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	/* The read routines return a negative errno or a word count. */
	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* qname[] is indexed by the original arg2, before the obq rebase. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5020
5021 static int
5022 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5023 {
5024         struct adapter *sc = arg1;
5025         u_int cfg;
5026         struct sbuf *sb;
5027         uint32_t *buf, *p;
5028         int rc;
5029
5030         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5031         if (rc != 0)
5032                 return (rc);
5033
5034         rc = sysctl_wire_old_buffer(req, 0);
5035         if (rc != 0)
5036                 return (rc);
5037
5038         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5039         if (sb == NULL)
5040                 return (ENOMEM);
5041
5042         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5043             M_ZERO | M_WAITOK);
5044
5045         rc = -t4_cim_read_la(sc, buf, NULL);
5046         if (rc != 0)
5047                 goto done;
5048
5049         sbuf_printf(sb, "Status   Data      PC%s",
5050             cfg & F_UPDBGLACAPTPCONLY ? "" :
5051             "     LS0Stat  LS0Addr             LS0Data");
5052
5053         KASSERT((sc->params.cim_la_size & 7) == 0,
5054             ("%s: p will walk off the end of buf", __func__));
5055
5056         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5057                 if (cfg & F_UPDBGLACAPTPCONLY) {
5058                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5059                             p[6], p[7]);
5060                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5061                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5062                             p[4] & 0xff, p[5] >> 8);
5063                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5064                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5065                             p[1] & 0xf, p[2] >> 4);
5066                 } else {
5067                         sbuf_printf(sb,
5068                             "\n  %02x   %x%07x %x%07x %08x %08x "
5069                             "%08x%08x%08x%08x",
5070                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5071                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5072                             p[6], p[7]);
5073                 }
5074         }
5075
5076         rc = sbuf_finish(sb);
5077         sbuf_delete(sb);
5078 done:
5079         free(buf, M_CXGBE);
5080         return (rc);
5081 }
5082
/*
 * Sysctl handler: dump the CIM MA logic analyzer.  t4_cim_read_ma_la()
 * fills two groups of CIM_MALA_SIZE entries (5 words each) into buf; the
 * first group is printed raw and the second is decoded field by field.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Room for both groups: 2 * CIM_MALA_SIZE entries of 5 words. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* p now points at the second group; decode its bit fields. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5126
5127 static int
5128 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5129 {
5130         struct adapter *sc = arg1;
5131         u_int i;
5132         struct sbuf *sb;
5133         uint32_t *buf, *p;
5134         int rc;
5135
5136         rc = sysctl_wire_old_buffer(req, 0);
5137         if (rc != 0)
5138                 return (rc);
5139
5140         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5141         if (sb == NULL)
5142                 return (ENOMEM);
5143
5144         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5145             M_ZERO | M_WAITOK);
5146
5147         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5148         p = buf;
5149
5150         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5151         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5152                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5153                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5154                     p[4], p[3], p[2], p[1], p[0]);
5155         }
5156
5157         sbuf_printf(sb, "\n\nCntl ID               Data");
5158         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5159                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5160                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5161         }
5162
5163         rc = sbuf_finish(sb);
5164         sbuf_delete(sb);
5165         free(buf, M_CXGBE);
5166         return (rc);
5167 }
5168
/*
 * Sysctl handler: display the configuration and occupancy of every CIM
 * queue.  Reads 4 status words per queue (and 2 write pointers per obq)
 * from the chip, plus the base/size/threshold table, and formats one row
 * per queue using the names in qname[].
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	/* T4 and T5 expose the queue pointers at different registers. */
	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* Inbound queues first (p advances 4 status words per queue)... */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	/* ...then outbound queues (wr advances 2 pointer words per queue). */
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5227
/*
 * Sysctl handler: display per-channel CPL request and response counters
 * read from TP.
 */
static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_cpl_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_cpl_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
	    "channel 3\n");
	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5258
5259 static int
5260 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5261 {
5262         struct adapter *sc = arg1;
5263         struct sbuf *sb;
5264         int rc;
5265         struct tp_usm_stats stats;
5266
5267         rc = sysctl_wire_old_buffer(req, 0);
5268         if (rc != 0)
5269                 return(rc);
5270
5271         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5272         if (sb == NULL)
5273                 return (ENOMEM);
5274
5275         t4_get_usm_stats(sc, &stats);
5276
5277         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5278         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5279         sbuf_printf(sb, "Drops:  %u", stats.drops);
5280
5281         rc = sbuf_finish(sb);
5282         sbuf_delete(sb);
5283
5284         return (rc);
5285 }
5286
/* Display names for firmware devlog levels, indexed by FW_DEVLOG_LEVEL_*. */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5295
/*
 * Display names for firmware devlog facilities, indexed by
 * FW_DEVLOG_FACILITY_*.
 */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5321
/*
 * Sysctl handler: dump the firmware device log.  The log lives in adapter
 * memory as a circular buffer of fw_devlog_e entries; this reads the whole
 * region, locates the oldest entry by timestamp, and prints the entries in
 * chronological order.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0, m;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		/*
		 * Firmware didn't report the log location; fall back to
		 * hard-coded defaults.  NOTE(review): presumably matches the
		 * default firmware layout — confirm against the fw config.
		 */
		dparams->memtype = FW_MEMTYPE_EDC0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	m = fwmtype_to_hwmtype(dparams->memtype);
	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * First pass: byte-swap every used entry and remember the index of
	 * the one with the smallest timestamp (the oldest).
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Second pass: print from the oldest entry, wrapping around. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* e->fmt is a firmware-supplied format string; 8 params max. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5407
/*
 * Sysctl handler: display the per-channel FCoE DDP statistics (octets,
 * frames, drops) for all four channels.
 */
static int
sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_fcoe_stats stats[4];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_fcoe_stats(sc, 0, &stats[0]);
	t4_get_fcoe_stats(sc, 1, &stats[1]);
	t4_get_fcoe_stats(sc, 2, &stats[2]);
	t4_get_fcoe_stats(sc, 3, &stats[3]);

	sbuf_printf(sb, "                   channel 0        channel 1        "
	    "channel 2        channel 3\n");
	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
	    stats[3].octetsDDP);
	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
	    stats[3].framesDrop);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5445
/*
 * Sysctl handler: display the hardware tx scheduler configuration — per
 * scheduler its mode (flow/class), channel map, rate, and the class and
 * flow inter-packet gaps (pace table).
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	/* Two map bits per scheduler encode its channel. */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		/* A zero rate/ipg/pace value means that knob is disabled. */
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5495
/*
 * Sysctl handler: display loopback port statistics, two loopback ports per
 * column pair, for all four loopback ports.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	/*
	 * Labels for each counter, in the order the counters appear in
	 * struct lb_port_stats starting at the 'octets' member (the struct
	 * is walked below as a flat array of uint64_t).
	 */
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Fetch and print ports i and i+1 side by side. */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5543
5544 static int
5545 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5546 {
5547         int rc = 0;
5548         struct port_info *pi = arg1;
5549         struct sbuf *sb;
5550         static const char *linkdnreasons[] = {
5551                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5552                 "PHY overheated", "unknown", "rx los", "reserved7"
5553         };
5554
5555         rc = sysctl_wire_old_buffer(req, 0);
5556         if (rc != 0)
5557                 return(rc);
5558         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5559         if (sb == NULL)
5560                 return (ENOMEM);
5561
5562         if (pi->linkdnrc < 0)
5563                 sbuf_printf(sb, "n/a");
5564         else if (pi->linkdnrc < nitems(linkdnreasons))
5565                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5566         else
5567                 sbuf_printf(sb, "%d", pi->linkdnrc);
5568
5569         rc = sbuf_finish(sb);
5570         sbuf_delete(sb);
5571
5572         return (rc);
5573 }
5574
/*
 * One contiguous range in the adapter's memory map.
 */
struct mem_desc {
	unsigned int base;	/* start address of the range */
	unsigned int limit;	/* end of the range (see users for convention) */
	unsigned int idx;	/* tag identifying the region/memory type */
};

/*
 * qsort(3) comparator ordering mem_desc entries by base address.
 *
 * Bug fix: this used to return a->base - b->base.  With unsigned operands
 * the difference wraps, and its conversion to int is implementation-defined
 * and mis-orders bases more than INT_MAX apart.  Use an explicit three-way
 * comparison instead.
 */
static int
mem_desc_cmp(const void *a, const void *b)
{
	const struct mem_desc *d1 = a;
	const struct mem_desc *d2 = b;

	if (d1->base < d2->base)
		return (-1);
	if (d1->base > d2->base)
		return (1);
	return (0);
}
5587
/*
 * Append one "name  from-to [size]" line to sb describing a memory region.
 * An empty region (to - from + 1 == 0) produces no output.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size = to - from + 1;

	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
5601
5602 static int
5603 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5604 {
5605         struct adapter *sc = arg1;
5606         struct sbuf *sb;
5607         int rc, i, n;
5608         uint32_t lo, hi, used, alloc;
5609         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5610         static const char *region[] = {
5611                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5612                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5613                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5614                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5615                 "RQUDP region:", "PBL region:", "TXPBL region:",
5616                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5617                 "On-chip queues:"
5618         };
5619         struct mem_desc avail[4];
5620         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5621         struct mem_desc *md = mem;
5622
5623         rc = sysctl_wire_old_buffer(req, 0);
5624         if (rc != 0)
5625                 return (rc);
5626
5627         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5628         if (sb == NULL)
5629                 return (ENOMEM);
5630
5631         for (i = 0; i < nitems(mem); i++) {
5632                 mem[i].limit = 0;
5633                 mem[i].idx = i;
5634         }
5635
5636         /* Find and sort the populated memory ranges */
5637         i = 0;
5638         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5639         if (lo & F_EDRAM0_ENABLE) {
5640                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5641                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5642                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5643                 avail[i].idx = 0;
5644                 i++;
5645         }
5646         if (lo & F_EDRAM1_ENABLE) {
5647                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5648                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5649                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5650                 avail[i].idx = 1;
5651                 i++;
5652         }
5653         if (lo & F_EXT_MEM_ENABLE) {
5654                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5655                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5656                 avail[i].limit = avail[i].base +
5657                     (G_EXT_MEM_SIZE(hi) << 20);
5658                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5659                 i++;
5660         }
5661         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5662                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5663                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5664                 avail[i].limit = avail[i].base +
5665                     (G_EXT_MEM1_SIZE(hi) << 20);
5666                 avail[i].idx = 4;
5667                 i++;
5668         }
5669         if (!i)                                    /* no memory available */
5670                 return 0;
5671         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5672
5673         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5674         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5675         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5676         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5677         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5678         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5679         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5680         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5681         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5682
5683         /* the next few have explicit upper bounds */
5684         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5685         md->limit = md->base - 1 +
5686                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5687                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5688         md++;
5689
5690         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5691         md->limit = md->base - 1 +
5692                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5693                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5694         md++;
5695
5696         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5697                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5698                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5699                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5700         } else {
5701                 md->base = 0;
5702                 md->idx = nitems(region);  /* hide it */
5703         }
5704         md++;
5705
5706 #define ulp_region(reg) \
5707         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5708         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5709
5710         ulp_region(RX_ISCSI);
5711         ulp_region(RX_TDDP);
5712         ulp_region(TX_TPT);
5713         ulp_region(RX_STAG);
5714         ulp_region(RX_RQ);
5715         ulp_region(RX_RQUDP);
5716         ulp_region(RX_PBL);
5717         ulp_region(TX_PBL);
5718 #undef ulp_region
5719
5720         md->base = 0;
5721         md->idx = nitems(region);
5722         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5723                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5724                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5725                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5726         }
5727         md++;
5728
5729         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5730         md->limit = md->base + sc->tids.ntids - 1;
5731         md++;
5732         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5733         md->limit = md->base + sc->tids.ntids - 1;
5734         md++;
5735
5736         md->base = sc->vres.ocq.start;
5737         if (sc->vres.ocq.size)
5738                 md->limit = md->base + sc->vres.ocq.size - 1;
5739         else
5740                 md->idx = nitems(region);  /* hide it */
5741         md++;
5742
5743         /* add any address-space holes, there can be up to 3 */
5744         for (n = 0; n < i - 1; n++)
5745                 if (avail[n].limit < avail[n + 1].base)
5746                         (md++)->base = avail[n].limit;
5747         if (avail[n].limit)
5748                 (md++)->base = avail[n].limit;
5749
5750         n = md - mem;
5751         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5752
5753         for (lo = 0; lo < i; lo++)
5754                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5755                                 avail[lo].limit - 1);
5756
5757         sbuf_printf(sb, "\n");
5758         for (i = 0; i < n; i++) {
5759                 if (mem[i].idx >= nitems(region))
5760                         continue;                        /* skip holes */
5761                 if (!mem[i].limit)
5762                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5763                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5764                                 mem[i].limit);
5765         }
5766
5767         sbuf_printf(sb, "\n");
5768         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5769         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5770         mem_region_show(sb, "uP RAM:", lo, hi);
5771
5772         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5773         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5774         mem_region_show(sb, "uP Extmem2:", lo, hi);
5775
5776         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5777         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5778                    G_PMRXMAXPAGE(lo),
5779                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5780                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5781
5782         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5783         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5784         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5785                    G_PMTXMAXPAGE(lo),
5786                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5787                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5788         sbuf_printf(sb, "%u p-structs\n",
5789                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5790
5791         for (i = 0; i < 4; i++) {
5792                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5793                 if (is_t4(sc)) {
5794                         used = G_USED(lo);
5795                         alloc = G_ALLOC(lo);
5796                 } else {
5797                         used = G_T5_USED(lo);
5798                         alloc = G_T5_ALLOC(lo);
5799                 }
5800                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5801                            i, used, alloc);
5802         }
5803         for (i = 0; i < 4; i++) {
5804                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5805                 if (is_t4(sc)) {
5806                         used = G_USED(lo);
5807                         alloc = G_ALLOC(lo);
5808                 } else {
5809                         used = G_T5_USED(lo);
5810                         alloc = G_T5_ALLOC(lo);
5811                 }
5812                 sbuf_printf(sb,
5813                            "\nLoopback %d using %u pages out of %u allocated",
5814                            i, used, alloc);
5815         }
5816
5817         rc = sbuf_finish(sb);
5818         sbuf_delete(sb);
5819
5820         return (rc);
5821 }
5822
5823 static inline void
5824 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5825 {
5826         *mask = x | y;
5827         y = htobe64(y);
5828         memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5829 }
5830
/*
 * Dump the MPS TCAM: one line per in-use entry showing the Ethernet
 * address, match mask, valid bit, port map, PF/VF owner, the replication
 * bitmap (fetched from the firmware with an LDST command when the entry
 * has replication enabled), per-priority SRAM fields, and the
 * multi-listen bits.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	/* The TCAM has a different number of entries on T4 vs. T5. */
	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	for (i = 0; i < n; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		/* Skip entries with any bit set in both X and Y. */
		if (tcamx & tcamy)
			continue;

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
			   addr[3], addr[4], addr[5], (uintmax_t)mask,
			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
			   G_PORTMAP(cls_hi), G_PF(cls_lo),
			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/* Ask the firmware for this entry's 128-bit
			 * replication bitmap via an MPS LDST read. */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_CTL(i));

			/* A failure here (e.g. interrupted sleep) aborts the
			 * whole dump; rc is returned to the caller below. */
			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Mailbox error: report it in-line for this
				 * entry and continue with the next one. */
				sbuf_printf(sb,
				    " ------------ error %3u ------------", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* rc != 0 here means begin_synchronized_op failed and the loop was
	 * abandoned; discard the sbuf status and return that error. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5920
5921 static int
5922 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5923 {
5924         struct adapter *sc = arg1;
5925         struct sbuf *sb;
5926         int rc;
5927         uint16_t mtus[NMTUS];
5928
5929         rc = sysctl_wire_old_buffer(req, 0);
5930         if (rc != 0)
5931                 return (rc);
5932
5933         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5934         if (sb == NULL)
5935                 return (ENOMEM);
5936
5937         t4_read_mtu_tbl(sc, mtus, NULL);
5938
5939         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5940             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5941             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5942             mtus[14], mtus[15]);
5943
5944         rc = sbuf_finish(sb);
5945         sbuf_delete(sb);
5946
5947         return (rc);
5948 }
5949
5950 static int
5951 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5952 {
5953         struct adapter *sc = arg1;
5954         struct sbuf *sb;
5955         int rc, i;
5956         uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5957         uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5958         static const char *pm_stats[] = {
5959                 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5960         };
5961
5962         rc = sysctl_wire_old_buffer(req, 0);
5963         if (rc != 0)
5964                 return (rc);
5965
5966         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5967         if (sb == NULL)
5968                 return (ENOMEM);
5969
5970         t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5971         t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5972
5973         sbuf_printf(sb, "                Tx count            Tx cycles    "
5974             "Rx count            Rx cycles");
5975         for (i = 0; i < PM_NSTATS; i++)
5976                 sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
5977                     pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5978
5979         rc = sbuf_finish(sb);
5980         sbuf_delete(sb);
5981
5982         return (rc);
5983 }
5984
5985 static int
5986 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5987 {
5988         struct adapter *sc = arg1;
5989         struct sbuf *sb;
5990         int rc;
5991         struct tp_rdma_stats stats;
5992
5993         rc = sysctl_wire_old_buffer(req, 0);
5994         if (rc != 0)
5995                 return (rc);
5996
5997         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5998         if (sb == NULL)
5999                 return (ENOMEM);
6000
6001         t4_tp_get_rdma_stats(sc, &stats);
6002         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6003         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6004
6005         rc = sbuf_finish(sb);
6006         sbuf_delete(sb);
6007
6008         return (rc);
6009 }
6010
6011 static int
6012 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6013 {
6014         struct adapter *sc = arg1;
6015         struct sbuf *sb;
6016         int rc;
6017         struct tp_tcp_stats v4, v6;
6018
6019         rc = sysctl_wire_old_buffer(req, 0);
6020         if (rc != 0)
6021                 return (rc);
6022
6023         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6024         if (sb == NULL)
6025                 return (ENOMEM);
6026
6027         t4_tp_get_tcp_stats(sc, &v4, &v6);
6028         sbuf_printf(sb,
6029             "                                IP                 IPv6\n");
6030         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6031             v4.tcpOutRsts, v6.tcpOutRsts);
6032         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6033             v4.tcpInSegs, v6.tcpInSegs);
6034         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6035             v4.tcpOutSegs, v6.tcpOutSegs);
6036         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6037             v4.tcpRetransSegs, v6.tcpRetransSegs);
6038
6039         rc = sbuf_finish(sb);
6040         sbuf_delete(sb);
6041
6042         return (rc);
6043 }
6044
/*
 * Report TID allocation: the ATID, TID, STID, and FTID ranges configured
 * for this adapter, how many of each are in use, and the hardware's own
 * IPv4/IPv6 active-TID counters.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/*
			 * NOTE(review): with the LE hash enabled the TID
			 * space appears split into a low range ending at
			 * A_LE_DB_SERVER_INDEX/4 and a high range starting
			 * at A_LE_DB_TID_HASHBASE/4 — confirm against the
			 * LE documentation.
			 */
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	/* Counts maintained by the hardware itself, not the driver. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6104
/*
 * Display TP error statistics as a table: per-channel MAC/header/TCP
 * input errors and tunnel/offload drop counters, followed by the two
 * adapter-wide offload counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	/* One column per channel (4 channels), one row per counter. */
	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
		      "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	/* These two counters are not per-channel. */
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6157
/*
 * Describes one bit-field within a 64-bit logic-analyzer word, for
 * display by field_desc_show().
 */
struct field_desc {
	const char *name;	/* display name; NULL terminates a table */
	u_int start;		/* lowest bit position of the field */
	u_int width;		/* field width in bits */
};
6163
6164 static void
6165 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6166 {
6167         char buf[32];
6168         int line_size = 0;
6169
6170         while (f->name) {
6171                 uint64_t mask = (1ULL << f->width) - 1;
6172                 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6173                     ((uintmax_t)v >> f->start) & mask);
6174
6175                 if (line_size + len >= 79) {
6176                         line_size = 8;
6177                         sbuf_printf(sb, "\n        ");
6178                 }
6179                 sbuf_printf(sb, "%s ", buf);
6180                 line_size += len + 1;
6181                 f++;
6182         }
6183         sbuf_printf(sb, "\n");
6184 }
6185
6186 static struct field_desc tp_la0[] = {
6187         { "RcfOpCodeOut", 60, 4 },
6188         { "State", 56, 4 },
6189         { "WcfState", 52, 4 },
6190         { "RcfOpcSrcOut", 50, 2 },
6191         { "CRxError", 49, 1 },
6192         { "ERxError", 48, 1 },
6193         { "SanityFailed", 47, 1 },
6194         { "SpuriousMsg", 46, 1 },
6195         { "FlushInputMsg", 45, 1 },
6196         { "FlushInputCpl", 44, 1 },
6197         { "RssUpBit", 43, 1 },
6198         { "RssFilterHit", 42, 1 },
6199         { "Tid", 32, 10 },
6200         { "InitTcb", 31, 1 },
6201         { "LineNumber", 24, 7 },
6202         { "Emsg", 23, 1 },
6203         { "EdataOut", 22, 1 },
6204         { "Cmsg", 21, 1 },
6205         { "CdataOut", 20, 1 },
6206         { "EreadPdu", 19, 1 },
6207         { "CreadPdu", 18, 1 },
6208         { "TunnelPkt", 17, 1 },
6209         { "RcfPeerFin", 16, 1 },
6210         { "RcfReasonOut", 12, 4 },
6211         { "TxCchannel", 10, 2 },
6212         { "RcfTxChannel", 8, 2 },
6213         { "RxEchannel", 6, 2 },
6214         { "RcfRxChannel", 5, 1 },
6215         { "RcfDataOutSrdy", 4, 1 },
6216         { "RxDvld", 3, 1 },
6217         { "RxOoDvld", 2, 1 },
6218         { "RxCongestion", 1, 1 },
6219         { "TxCongestion", 0, 1 },
6220         { NULL }
6221 };
6222
6223 static struct field_desc tp_la1[] = {
6224         { "CplCmdIn", 56, 8 },
6225         { "CplCmdOut", 48, 8 },
6226         { "ESynOut", 47, 1 },
6227         { "EAckOut", 46, 1 },
6228         { "EFinOut", 45, 1 },
6229         { "ERstOut", 44, 1 },
6230         { "SynIn", 43, 1 },
6231         { "AckIn", 42, 1 },
6232         { "FinIn", 41, 1 },
6233         { "RstIn", 40, 1 },
6234         { "DataIn", 39, 1 },
6235         { "DataInVld", 38, 1 },
6236         { "PadIn", 37, 1 },
6237         { "RxBufEmpty", 36, 1 },
6238         { "RxDdp", 35, 1 },
6239         { "RxFbCongestion", 34, 1 },
6240         { "TxFbCongestion", 33, 1 },
6241         { "TxPktSumSrdy", 32, 1 },
6242         { "RcfUlpType", 28, 4 },
6243         { "Eread", 27, 1 },
6244         { "Ebypass", 26, 1 },
6245         { "Esave", 25, 1 },
6246         { "Static0", 24, 1 },
6247         { "Cread", 23, 1 },
6248         { "Cbypass", 22, 1 },
6249         { "Csave", 21, 1 },
6250         { "CPktOut", 20, 1 },
6251         { "RxPagePoolFull", 18, 2 },
6252         { "RxLpbkPkt", 17, 1 },
6253         { "TxLpbkPkt", 16, 1 },
6254         { "RxVfValid", 15, 1 },
6255         { "SynLearned", 14, 1 },
6256         { "SetDelEntry", 13, 1 },
6257         { "SetInvEntry", 12, 1 },
6258         { "CpcmdDvld", 11, 1 },
6259         { "CpcmdSave", 10, 1 },
6260         { "RxPstructsFull", 8, 2 },
6261         { "EpcmdDvld", 7, 1 },
6262         { "EpcmdFlush", 6, 1 },
6263         { "EpcmdTrimPrefix", 5, 1 },
6264         { "EpcmdTrimPostfix", 4, 1 },
6265         { "ERssIp4Pkt", 3, 1 },
6266         { "ERssIp6Pkt", 2, 1 },
6267         { "ERssTcpUdpPkt", 1, 1 },
6268         { "ERssFceFipPkt", 0, 1 },
6269         { NULL }
6270 };
6271
6272 static struct field_desc tp_la2[] = {
6273         { "CplCmdIn", 56, 8 },
6274         { "MpsVfVld", 55, 1 },
6275         { "MpsPf", 52, 3 },
6276         { "MpsVf", 44, 8 },
6277         { "SynIn", 43, 1 },
6278         { "AckIn", 42, 1 },
6279         { "FinIn", 41, 1 },
6280         { "RstIn", 40, 1 },
6281         { "DataIn", 39, 1 },
6282         { "DataInVld", 38, 1 },
6283         { "PadIn", 37, 1 },
6284         { "RxBufEmpty", 36, 1 },
6285         { "RxDdp", 35, 1 },
6286         { "RxFbCongestion", 34, 1 },
6287         { "TxFbCongestion", 33, 1 },
6288         { "TxPktSumSrdy", 32, 1 },
6289         { "RcfUlpType", 28, 4 },
6290         { "Eread", 27, 1 },
6291         { "Ebypass", 26, 1 },
6292         { "Esave", 25, 1 },
6293         { "Static0", 24, 1 },
6294         { "Cread", 23, 1 },
6295         { "Cbypass", 22, 1 },
6296         { "Csave", 21, 1 },
6297         { "CPktOut", 20, 1 },
6298         { "RxPagePoolFull", 18, 2 },
6299         { "RxLpbkPkt", 17, 1 },
6300         { "TxLpbkPkt", 16, 1 },
6301         { "RxVfValid", 15, 1 },
6302         { "SynLearned", 14, 1 },
6303         { "SetDelEntry", 13, 1 },
6304         { "SetInvEntry", 12, 1 },
6305         { "CpcmdDvld", 11, 1 },
6306         { "CpcmdSave", 10, 1 },
6307         { "RxPstructsFull", 8, 2 },
6308         { "EpcmdDvld", 7, 1 },
6309         { "EpcmdFlush", 6, 1 },
6310         { "EpcmdTrimPrefix", 5, 1 },
6311         { "EpcmdTrimPostfix", 4, 1 },
6312         { "ERssIp4Pkt", 3, 1 },
6313         { "ERssIp6Pkt", 2, 1 },
6314         { "ERssTcpUdpPkt", 1, 1 },
6315         { "ERssFceFipPkt", 0, 1 },
6316         { NULL }
6317 };
6318
6319 static void
6320 tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6321 {
6322
6323         field_desc_show(sb, *p, tp_la0);
6324 }
6325
6326 static void
6327 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6328 {
6329
6330         if (idx)
6331                 sbuf_printf(sb, "\n");
6332         field_desc_show(sb, p[0], tp_la0);
6333         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6334                 field_desc_show(sb, p[1], tp_la0);
6335 }
6336
6337 static void
6338 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6339 {
6340
6341         if (idx)
6342                 sbuf_printf(sb, "\n");
6343         field_desc_show(sb, p[0], tp_la0);
6344         if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6345                 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6346 }
6347
6348 static int
6349 sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6350 {
6351         struct adapter *sc = arg1;
6352         struct sbuf *sb;
6353         uint64_t *buf, *p;
6354         int rc;
6355         u_int i, inc;
6356         void (*show_func)(struct sbuf *, uint64_t *, int);
6357
6358         rc = sysctl_wire_old_buffer(req, 0);
6359         if (rc != 0)
6360                 return (rc);
6361
6362         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6363         if (sb == NULL)
6364                 return (ENOMEM);
6365
6366         buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6367
6368         t4_tp_read_la(sc, buf, NULL);
6369         p = buf;
6370
6371         switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6372         case 2:
6373                 inc = 2;
6374                 show_func = tp_la_show2;
6375                 break;
6376         case 3:
6377                 inc = 2;
6378                 show_func = tp_la_show3;
6379                 break;
6380         default:
6381                 inc = 1;
6382                 show_func = tp_la_show;
6383         }
6384
6385         for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6386                 (*show_func)(sb, p, i);
6387
6388         rc = sbuf_finish(sb);
6389         sbuf_delete(sb);
6390         free(buf, M_CXGBE);
6391         return (rc);
6392 }
6393
6394 static int
6395 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6396 {
6397         struct adapter *sc = arg1;
6398         struct sbuf *sb;
6399         int rc;
6400         u64 nrate[NCHAN], orate[NCHAN];
6401
6402         rc = sysctl_wire_old_buffer(req, 0);
6403         if (rc != 0)
6404                 return (rc);
6405
6406         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6407         if (sb == NULL)
6408                 return (ENOMEM);
6409
6410         t4_get_chan_txrate(sc, nrate, orate);
6411         sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6412                  "channel 3\n");
6413         sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6414             nrate[0], nrate[1], nrate[2], nrate[3]);
6415         sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6416             orate[0], orate[1], orate[2], orate[3]);
6417
6418         rc = sbuf_finish(sb);
6419         sbuf_delete(sb);
6420
6421         return (rc);
6422 }
6423
6424 static int
6425 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6426 {
6427         struct adapter *sc = arg1;
6428         struct sbuf *sb;
6429         uint32_t *buf, *p;
6430         int rc, i;
6431
6432         rc = sysctl_wire_old_buffer(req, 0);
6433         if (rc != 0)
6434                 return (rc);
6435
6436         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6437         if (sb == NULL)
6438                 return (ENOMEM);
6439
6440         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6441             M_ZERO | M_WAITOK);
6442
6443         t4_ulprx_read_la(sc, buf);
6444         p = buf;
6445
6446         sbuf_printf(sb, "      Pcmd        Type   Message"
6447             "                Data");
6448         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6449                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6450                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6451         }
6452
6453         rc = sbuf_finish(sb);
6454         sbuf_delete(sb);
6455         free(buf, M_CXGBE);
6456         return (rc);
6457 }
6458
6459 static int
6460 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6461 {
6462         struct adapter *sc = arg1;
6463         struct sbuf *sb;
6464         int rc, v;
6465
6466         rc = sysctl_wire_old_buffer(req, 0);
6467         if (rc != 0)
6468                 return (rc);
6469
6470         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6471         if (sb == NULL)
6472                 return (ENOMEM);
6473
6474         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6475         if (G_STATSOURCE_T5(v) == 7) {
6476                 if (G_STATMODE(v) == 0) {
6477                         sbuf_printf(sb, "total %d, incomplete %d",
6478                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6479                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6480                 } else if (G_STATMODE(v) == 1) {
6481                         sbuf_printf(sb, "total %d, data overflow %d",
6482                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6483                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6484                 }
6485         }
6486         rc = sbuf_finish(sb);
6487         sbuf_delete(sb);
6488
6489         return (rc);
6490 }
6491 #endif
6492
6493 static inline void
6494 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6495 {
6496         struct buf_ring *br;
6497         struct mbuf *m;
6498
6499         TXQ_LOCK_ASSERT_OWNED(txq);
6500
6501         br = txq->br;
6502         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6503         if (m)
6504                 t4_eth_tx(ifp, txq, m);
6505 }
6506
6507 void
6508 t4_tx_callout(void *arg)
6509 {
6510         struct sge_eq *eq = arg;
6511         struct adapter *sc;
6512
6513         if (EQ_TRYLOCK(eq) == 0)
6514                 goto reschedule;
6515
6516         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6517                 EQ_UNLOCK(eq);
6518 reschedule:
6519                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6520                         callout_schedule(&eq->tx_callout, 1);
6521                 return;
6522         }
6523
6524         EQ_LOCK_ASSERT_OWNED(eq);
6525
6526         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6527
6528                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6529                         struct sge_txq *txq = arg;
6530                         struct port_info *pi = txq->ifp->if_softc;
6531
6532                         sc = pi->adapter;
6533                 } else {
6534                         struct sge_wrq *wrq = arg;
6535
6536                         sc = wrq->adapter;
6537                 }
6538
6539                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6540         }
6541
6542         EQ_UNLOCK(eq);
6543 }
6544
6545 void
6546 t4_tx_task(void *arg, int count)
6547 {
6548         struct sge_eq *eq = arg;
6549
6550         EQ_LOCK(eq);
6551         if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6552                 struct sge_txq *txq = arg;
6553                 txq_start(txq->ifp, txq);
6554         } else {
6555                 struct sge_wrq *wrq = arg;
6556                 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6557         }
6558         EQ_UNLOCK(eq);
6559 }
6560
6561 static uint32_t
6562 fconf_to_mode(uint32_t fconf)
6563 {
6564         uint32_t mode;
6565
6566         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6567             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6568
6569         if (fconf & F_FRAGMENTATION)
6570                 mode |= T4_FILTER_IP_FRAGMENT;
6571
6572         if (fconf & F_MPSHITTYPE)
6573                 mode |= T4_FILTER_MPS_HIT_TYPE;
6574
6575         if (fconf & F_MACMATCH)
6576                 mode |= T4_FILTER_MAC_IDX;
6577
6578         if (fconf & F_ETHERTYPE)
6579                 mode |= T4_FILTER_ETH_TYPE;
6580
6581         if (fconf & F_PROTOCOL)
6582                 mode |= T4_FILTER_IP_PROTO;
6583
6584         if (fconf & F_TOS)
6585                 mode |= T4_FILTER_IP_TOS;
6586
6587         if (fconf & F_VLAN)
6588                 mode |= T4_FILTER_VLAN;
6589
6590         if (fconf & F_VNIC_ID)
6591                 mode |= T4_FILTER_VNIC;
6592
6593         if (fconf & F_PORT)
6594                 mode |= T4_FILTER_PORT;
6595
6596         if (fconf & F_FCOE)
6597                 mode |= T4_FILTER_FCoE;
6598
6599         return (mode);
6600 }
6601
6602 static uint32_t
6603 mode_to_fconf(uint32_t mode)
6604 {
6605         uint32_t fconf = 0;
6606
6607         if (mode & T4_FILTER_IP_FRAGMENT)
6608                 fconf |= F_FRAGMENTATION;
6609
6610         if (mode & T4_FILTER_MPS_HIT_TYPE)
6611                 fconf |= F_MPSHITTYPE;
6612
6613         if (mode & T4_FILTER_MAC_IDX)
6614                 fconf |= F_MACMATCH;
6615
6616         if (mode & T4_FILTER_ETH_TYPE)
6617                 fconf |= F_ETHERTYPE;
6618
6619         if (mode & T4_FILTER_IP_PROTO)
6620                 fconf |= F_PROTOCOL;
6621
6622         if (mode & T4_FILTER_IP_TOS)
6623                 fconf |= F_TOS;
6624
6625         if (mode & T4_FILTER_VLAN)
6626                 fconf |= F_VLAN;
6627
6628         if (mode & T4_FILTER_VNIC)
6629                 fconf |= F_VNIC_ID;
6630
6631         if (mode & T4_FILTER_PORT)
6632                 fconf |= F_PORT;
6633
6634         if (mode & T4_FILTER_FCoE)
6635                 fconf |= F_FCOE;
6636
6637         return (fconf);
6638 }
6639
6640 static uint32_t
6641 fspec_to_fconf(struct t4_filter_specification *fs)
6642 {
6643         uint32_t fconf = 0;
6644
6645         if (fs->val.frag || fs->mask.frag)
6646                 fconf |= F_FRAGMENTATION;
6647
6648         if (fs->val.matchtype || fs->mask.matchtype)
6649                 fconf |= F_MPSHITTYPE;
6650
6651         if (fs->val.macidx || fs->mask.macidx)
6652                 fconf |= F_MACMATCH;
6653
6654         if (fs->val.ethtype || fs->mask.ethtype)
6655                 fconf |= F_ETHERTYPE;
6656
6657         if (fs->val.proto || fs->mask.proto)
6658                 fconf |= F_PROTOCOL;
6659
6660         if (fs->val.tos || fs->mask.tos)
6661                 fconf |= F_TOS;
6662
6663         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6664                 fconf |= F_VLAN;
6665
6666         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6667                 fconf |= F_VNIC_ID;
6668
6669         if (fs->val.iport || fs->mask.iport)
6670                 fconf |= F_PORT;
6671
6672         if (fs->val.fcoe || fs->mask.fcoe)
6673                 fconf |= F_FCOE;
6674
6675         return (fconf);
6676 }
6677
/*
 * Return the current global filter mode (T4_FILTER_* flags) to the caller.
 * Re-reads the compressed filter configuration from hardware and resyncs
 * the cached copy if it has drifted.  Always returns 0 once the
 * synchronized op is acquired.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
        int rc;
        uint32_t fconf;

        /* HOLD_LOCK keeps the adapter lock across the indirect read below. */
        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getfm");
        if (rc)
                return (rc);

        /* Read the compressed filter tuple configuration from the chip. */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
            A_TP_VLAN_PRI_MAP);

        /* Warn and resync if the cached value disagrees with hardware. */
        if (sc->params.tp.vlan_pri_map != fconf) {
                log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
                    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
                    fconf);
                sc->params.tp.vlan_pri_map = fconf;
        }

        *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6704
/*
 * Set the global filter mode.  Refused while any filter is instantiated
 * (and, with TCP_OFFLOAD, while any port has offload active) because the
 * compressed tuple layout would change underneath them.  Currently the
 * actual mode change is not implemented and ENOTSUP is returned.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
        uint32_t fconf;
        int rc;

        fconf = mode_to_fconf(mode);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4setfm");
        if (rc)
                return (rc);

        /* Can't change the mode while filters are in use. */
        if (sc->tids.ftids_in_use > 0) {
                rc = EBUSY;
                goto done;
        }

#ifdef TCP_OFFLOAD
        /* Active offload also depends on the current tuple configuration. */
        if (sc->offload_map) {
                rc = EBUSY;
                goto done;
        }
#endif

#ifdef notyet
        rc = -t4_set_filter_mode(sc, fconf);
        if (rc == 0)
                sc->filter_mode = fconf;
#else
        rc = ENOTSUP;
#endif

done:
        end_synchronized_op(sc, LOCK_HELD);
        return (rc);
}
6742
/*
 * Read the hit count out of the TCB backing filter 'fid'.  Memory window 0
 * is repositioned over the filter's TCB; the counter location and width
 * differ by chip generation (T4: 64-bit at byte offset 16, T5: 32-bit at
 * byte offset 24), both stored big-endian by the hardware.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
        uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
        uint64_t hits;

        memwin_info(sc, 0, &mw_base, NULL);
        off = position_memwin(sc, 0,
            tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
        if (is_t4(sc)) {
                hits = t4_read_reg64(sc, mw_base + off + 16);
                hits = be64toh(hits);
        } else {
                hits = t4_read_reg(sc, mw_base + off + 24);
                hits = be32toh(hits);
        }

        return (hits);
}
6762
/*
 * Return the first valid filter at or after the index in t->idx, filling in
 * the filter's index, L2T/SMT indices, hit count, and specification.  If no
 * valid filter is found, t->idx is set to 0xffffffff as a "no more filters"
 * marker.  Always returns 0 once the synchronized op is acquired.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
        int i, rc, nfilters = sc->tids.nftids;
        struct filter_entry *f;

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4getf");
        if (rc)
                return (rc);

        /* No filters configured, or the starting index is out of range. */
        if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
            t->idx >= nfilters) {
                t->idx = 0xffffffff;
                goto done;
        }

        f = &sc->tids.ftid_tab[t->idx];
        for (i = t->idx; i < nfilters; i++, f++) {
                if (f->valid) {
                        t->idx = i;
                        t->l2tidx = f->l2t ? f->l2t->idx : 0;
                        t->smtidx = f->smtidx;
                        /* UINT64_MAX means hit counting is off for this one. */
                        if (f->fs.hitcnts)
                                t->hits = get_filter_hits(sc, t->idx);
                        else
                                t->hits = UINT64_MAX;
                        t->fs = f->fs;

                        goto done;
                }
        }

        t->idx = 0xffffffff;
done:
        end_synchronized_op(sc, LOCK_HELD);
        return (0);
}
6801
/*
 * Validate and install a hardware filter.  The work request is handed to
 * the firmware and this thread then sleeps until t4_filter_rpl() reports
 * the outcome.  Returns 0 on success, EINPROGRESS if the wait was
 * interrupted (the filter may still be installed asynchronously), or an
 * errno describing the validation/installation failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters, nports;
        struct filter_entry *f;
        int i, rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;
        nports = sc->params.nports;

        /* This adapter has no filter region configured. */
        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        /* The queues that process the firmware reply must be up. */
        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        if (t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        /* Validate against the global filter mode */
        if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
            sc->params.tp.vlan_pri_map) {
                rc = E2BIG;
                goto done;
        }

        /* A switching filter must egress via a valid port. */
        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
                rc = EINVAL;
                goto done;
        }

        if (t->fs.val.iport >= nports) {
                rc = EINVAL;
                goto done;
        }

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq) {
                rc = EINVAL;
                goto done;
        }

        /* IPv6 filter idx must be 4 aligned */
        if (t->fs.type == 1 &&
            ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
                rc = EINVAL;
                goto done;
        }

        /* Allocate the filter table (and its lock) on first use. */
        if (sc->tids.ftid_tab == NULL) {
                KASSERT(sc->tids.ftids_in_use == 0,
                    ("%s: no memory allocated but filters_in_use > 0",
                    __func__));

                sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
                    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
                if (sc->tids.ftid_tab == NULL) {
                        rc = ENOMEM;
                        goto done;
                }
                mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
        }

        /*
         * An IPv6 filter (fs.type == 1) occupies four consecutive slots and
         * all of them must be free; an IPv4 filter only needs its own slot,
         * so the loop breaks after the first iteration in that case.
         */
        for (i = 0; i < 4; i++) {
                f = &sc->tids.ftid_tab[t->idx + i];

                if (f->pending || f->valid) {
                        rc = EBUSY;
                        goto done;
                }
                if (f->locked) {
                        rc = EPERM;
                        goto done;
                }

                if (t->fs.type == 0)
                        break;
        }

        f = &sc->tids.ftid_tab[t->idx];
        f->fs = t->fs;

        /* Hand the filter work request to the firmware. */
        rc = set_filter_wr(sc, t->idx);
done:
        end_synchronized_op(sc, 0);

        /*
         * Wait for the asynchronous reply: t4_filter_rpl() clears f->pending,
         * sets f->valid on success, and wakes us via the ftid_tab channel.
         */
        if (rc == 0) {
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? 0 : EIO;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4setfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }
        return (rc);
}
6916
/*
 * Delete the hardware filter at t->idx.  Sends a delete work request and
 * sleeps until t4_filter_rpl() confirms the removal.  On success t->fs is
 * filled with the deleted filter's specification as extra information for
 * the caller.  Returns EINPROGRESS if the wait was interrupted.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
        unsigned int nfilters;
        struct filter_entry *f;
        int rc;

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
        if (rc)
                return (rc);

        nfilters = sc->tids.nftids;

        /* This adapter has no filter region configured. */
        if (nfilters == 0) {
                rc = ENOTSUP;
                goto done;
        }

        if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
            t->idx >= nfilters) {
                rc = EINVAL;
                goto done;
        }

        /* The queues that process the firmware reply must be up. */
        if (!(sc->flags & FULL_INIT_DONE)) {
                rc = EAGAIN;
                goto done;
        }

        f = &sc->tids.ftid_tab[t->idx];

        /* A setup/delete for this slot is already in flight. */
        if (f->pending) {
                rc = EBUSY;
                goto done;
        }
        if (f->locked) {
                rc = EPERM;
                goto done;
        }

        if (f->valid) {
                t->fs = f->fs;  /* extra info for the caller */
                rc = del_filter_wr(sc, t->idx);
        }

done:
        end_synchronized_op(sc, 0);

        /*
         * Wait for the asynchronous reply: t4_filter_rpl() clears f->pending
         * and f->valid on a successful delete and wakes us up.  Note the
         * inverted success test versus set_filter(): here a still-valid
         * filter after completion means the delete failed.
         */
        if (rc == 0) {
                mtx_lock(&sc->tids.ftid_lock);
                for (;;) {
                        if (f->pending == 0) {
                                rc = f->valid ? EIO : 0;
                                break;
                        }

                        if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
                            PCATCH, "t4delfw", 0)) {
                                rc = EINPROGRESS;
                                break;
                        }
                }
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (rc);
}
6984
6985 static void
6986 clear_filter(struct filter_entry *f)
6987 {
6988         if (f->l2t)
6989                 t4_l2t_release(f->l2t);
6990
6991         bzero(f, sizeof (*f));
6992 }
6993
/*
 * Build and send the FW_FILTER_WR work request that programs filter 'fidx'
 * into the hardware.  Allocates a switching L2T entry first if the filter
 * rewrites the destination MAC or VLAN.  The firmware's reply is handled
 * asynchronously by t4_filter_rpl().  Caller holds the synchronized op.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
        struct filter_entry *f = &sc->tids.ftid_tab[fidx];
        struct wrqe *wr;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        ASSERT_SYNCHRONIZED_OP(sc);

        if (f->fs.newdmac || f->fs.newvlan) {
                /* This filter needs an L2T entry; allocate one. */
                f->l2t = t4_l2t_alloc_switching(sc->l2t);
                if (f->l2t == NULL)
                        return (EAGAIN);
                if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
                    f->fs.dmac)) {
                        t4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return (ENOMEM);
                }
        }

        /* The hardware filter tid is the filter region base plus the index. */
        ftid = sc->tids.ftid_base + fidx;

        wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
        if (wr == NULL)
                return (ENOMEM);

        fwr = wrtod(wr);
        bzero(fwr, sizeof (*fwr));

        /* Fill in the work request; multi-byte fields are big-endian. */
        fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
        fwr->tid_to_iq =
            htobe32(V_FW_FILTER_WR_TID(ftid) |
                V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                V_FW_FILTER_WR_NOREPLY(0) |
                V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
            htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                    f->fs.newvlan == VLAN_REWRITE) |
                V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                V_FW_FILTER_WR_PRIO(f->fs.prio) |
                V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htobe16(f->fs.val.ethtype);
        fwr->ethtypem = htobe16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
            (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
                V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
                V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
        fwr->smac_sel = 0;
        /* Direct the firmware's reply to the firmware event queue. */
        fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
            V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
        fwr->maci_to_matchtypem =
            htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htobe16(f->fs.val.vlan);
        fwr->ivlanm = htobe16(f->fs.mask.vlan);
        fwr->ovlan = htobe16(f->fs.val.vnic);
        fwr->ovlanm = htobe16(f->fs.mask.vnic);
        bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
        bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
        bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
        bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
        fwr->lp = htobe16(f->fs.val.dport);
        fwr->lpm = htobe16(f->fs.mask.dport);
        fwr->fp = htobe16(f->fs.val.sport);
        fwr->fpm = htobe16(f->fs.mask.sport);
        if (f->fs.newsmac)
                bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

        /* Mark in-flight before transmit; t4_filter_rpl completes it. */
        f->pending = 1;
        sc->tids.ftids_in_use++;

        t4_wrq_tx(sc, wr);
        return (0);
}
7096
7097 static int
7098 del_filter_wr(struct adapter *sc, int fidx)
7099 {
7100         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7101         struct wrqe *wr;
7102         struct fw_filter_wr *fwr;
7103         unsigned int ftid;
7104
7105         ftid = sc->tids.ftid_base + fidx;
7106
7107         wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7108         if (wr == NULL)
7109                 return (ENOMEM);
7110         fwr = wrtod(wr);
7111         bzero(fwr, sizeof (*fwr));
7112
7113         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7114
7115         f->pending = 1;
7116         t4_wrq_tx(sc, wr);
7117         return (0);
7118 }
7119
/*
 * Handler for the firmware's reply to a filter work request.  Completes the
 * pending set/delete on the matching filter slot and wakes up the thread
 * sleeping in set_filter()/del_filter().  Replies for tids outside the
 * filter region are ignored.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
        unsigned int idx = GET_TID(rpl);

        KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
            rss->opcode));

        /* Convert the tid to a filter-region index and bounds check it. */
        if (idx >= sc->tids.ftid_base &&
            (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
                unsigned int rc = G_COOKIE(rpl->cookie);
                struct filter_entry *f = &sc->tids.ftid_tab[idx];

                mtx_lock(&sc->tids.ftid_lock);
                if (rc == FW_FILTER_WR_FLT_ADDED) {
                        KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
                            __func__, idx));
                        f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        if (rc != FW_FILTER_WR_FLT_DELETED) {
                                /* Add or delete failed, display an error */
                                log(LOG_ERR,
                                    "filter %u setup failed with error %u\n",
                                    idx, rc);
                        }

                        /* Deleted or failed: reclaim the slot either way. */
                        clear_filter(f);
                        sc->tids.ftids_in_use--;
                }
                /* Wake whoever is sleeping in set_filter()/del_filter(). */
                wakeup(&sc->tids.ftid_tab);
                mtx_unlock(&sc->tids.ftid_lock);
        }

        return (0);
}
7159
/*
 * Read an SGE context (egress/ingress/FLM/CNM) into cntxt->data.  Tries the
 * firmware mailbox first when the firmware is usable, and falls back to a
 * direct backdoor register read if that fails or wasn't attempted.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
        int rc;

        if (cntxt->cid > M_CTXTQID)
                return (EINVAL);

        if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
            cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
                return (EINVAL);

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
        if (rc)
                return (rc);

        if (sc->flags & FW_OK) {
                rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
                    &cntxt->data[0]);
                if (rc == 0)
                        goto done;
        }

        /*
         * Read via firmware failed or wasn't even attempted.  Read directly via
         * the backdoor.
         */
        rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
        end_synchronized_op(sc, 0);
        return (rc);
}
7192
7193 static int
7194 load_fw(struct adapter *sc, struct t4_data *fw)
7195 {
7196         int rc;
7197         uint8_t *fw_data;
7198
7199         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7200         if (rc)
7201                 return (rc);
7202
7203         if (sc->flags & FULL_INIT_DONE) {
7204                 rc = EBUSY;
7205                 goto done;
7206         }
7207
7208         fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7209         if (fw_data == NULL) {
7210                 rc = ENOMEM;
7211                 goto done;
7212         }
7213
7214         rc = copyin(fw->data, fw_data, fw->len);
7215         if (rc == 0)
7216                 rc = -t4_load_fw(sc, fw_data, fw->len);
7217
7218         free(fw_data, M_CXGBE);
7219 done:
7220         end_synchronized_op(sc, 0);
7221         return (rc);
7222 }
7223
/*
 * Copy a range of card memory out to userspace through PCIe memory window
 * 'win'.  The range is validated first; the window is then repositioned
 * repeatedly and read 32 bits at a time, with at most one window's worth
 * (mw_aperture) buffered and copied out per iteration.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
        uint32_t addr, off, remaining, i, n;
        uint32_t *buf, *b;
        uint32_t mw_base, mw_aperture;
        int rc;
        uint8_t *dst;

        /* Reject ranges that don't fall within the card's memory. */
        rc = validate_mem_range(sc, mr->addr, mr->len);
        if (rc != 0)
                return (rc);

        /* Bounce buffer covers one window aperture at most. */
        memwin_info(sc, win, &mw_base, &mw_aperture);
        buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
        addr = mr->addr;
        remaining = mr->len;
        dst = (void *)mr->data;

        while (remaining) {
                off = position_memwin(sc, win, addr);

                /* number of bytes that we'll copy in the inner loop */
                n = min(remaining, mw_aperture - off);
                for (i = 0; i < n; i += 4)
                        *b++ = t4_read_reg(sc, mw_base + off + i);

                rc = copyout(buf, dst, n);
                if (rc != 0)
                        break;

                b = buf;
                dst += n;
                remaining -= n;
                addr += n;
        }

        free(buf, M_CXGBE);
        return (rc);
}
7264
7265 static int
7266 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7267 {
7268         int rc;
7269
7270         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7271                 return (EINVAL);
7272
7273         if (i2cd->len > 1) {
7274                 /* XXX: need fw support for longer reads in one go */
7275                 return (ENOTSUP);
7276         }
7277
7278         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7279         if (rc)
7280                 return (rc);
7281         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7282             i2cd->offset, &i2cd->data[0]);
7283         end_synchronized_op(sc, 0);
7284
7285         return (rc);
7286 }
7287
7288 int
7289 t4_os_find_pci_capability(struct adapter *sc, int cap)
7290 {
7291         int i;
7292
7293         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7294 }
7295
7296 int
7297 t4_os_pci_save_state(struct adapter *sc)
7298 {
7299         device_t dev;
7300         struct pci_devinfo *dinfo;
7301
7302         dev = sc->dev;
7303         dinfo = device_get_ivars(dev);
7304
7305         pci_cfg_save(dev, dinfo, 0);
7306         return (0);
7307 }
7308
7309 int
7310 t4_os_pci_restore_state(struct adapter *sc)
7311 {
7312         device_t dev;
7313         struct pci_devinfo *dinfo;
7314
7315         dev = sc->dev;
7316         dinfo = device_get_ivars(dev);
7317
7318         pci_cfg_restore(dev, dinfo);
7319         return (0);
7320 }
7321
7322 void
7323 t4_os_portmod_changed(const struct adapter *sc, int idx)
7324 {
7325         struct port_info *pi = sc->port[idx];
7326         static const char *mod_str[] = {
7327                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7328         };
7329
7330         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7331                 if_printf(pi->ifp, "transceiver unplugged.\n");
7332         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7333                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7334         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7335                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7336         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7337                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7338                     mod_str[pi->mod_type]);
7339         } else {
7340                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7341                     pi->mod_type);
7342         }
7343 }
7344
7345 void
7346 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7347 {
7348         struct port_info *pi = sc->port[idx];
7349         struct ifnet *ifp = pi->ifp;
7350
7351         if (link_stat) {
7352                 pi->linkdnrc = -1;
7353                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7354                 if_link_state_change(ifp, LINK_STATE_UP);
7355         } else {
7356                 if (reason >= 0)
7357                         pi->linkdnrc = reason;
7358                 if_link_state_change(ifp, LINK_STATE_DOWN);
7359         }
7360 }
7361
/*
 * Call func(sc, arg) for every adapter on the driver's global list,
 * holding the list lock shared across the walk.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
        struct adapter *sc;

        sx_slock(&t4_list_lock);
        SLIST_FOREACH(sc, &t4_list, link) {
                /*
                 * func should not make any assumptions about what state sc is
                 * in - the only guarantee is that sc->sc_lock is a valid lock.
                 */
                func(sc, arg);
        }
        sx_sunlock(&t4_list_lock);
}
7377
/* Nothing to do on open; the ioctl handler does its own priv check. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7383
/* Nothing to do on close; no per-open state is kept. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7389
7390 static int
7391 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7392     struct thread *td)
7393 {
7394         int rc;
7395         struct adapter *sc = dev->si_drv1;
7396
7397         rc = priv_check(td, PRIV_DRIVER);
7398         if (rc != 0)
7399                 return (rc);
7400
7401         switch (cmd) {
7402         case CHELSIO_T4_GETREG: {
7403                 struct t4_reg *edata = (struct t4_reg *)data;
7404
7405                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7406                         return (EFAULT);
7407
7408                 if (edata->size == 4)
7409                         edata->val = t4_read_reg(sc, edata->addr);
7410                 else if (edata->size == 8)
7411                         edata->val = t4_read_reg64(sc, edata->addr);
7412                 else
7413                         return (EINVAL);
7414
7415                 break;
7416         }
7417         case CHELSIO_T4_SETREG: {
7418                 struct t4_reg *edata = (struct t4_reg *)data;
7419
7420                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7421                         return (EFAULT);
7422
7423                 if (edata->size == 4) {
7424                         if (edata->val & 0xffffffff00000000)
7425                                 return (EINVAL);
7426                         t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7427                 } else if (edata->size == 8)
7428                         t4_write_reg64(sc, edata->addr, edata->val);
7429                 else
7430                         return (EINVAL);
7431                 break;
7432         }
7433         case CHELSIO_T4_REGDUMP: {
7434                 struct t4_regdump *regs = (struct t4_regdump *)data;
7435                 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7436                 uint8_t *buf;
7437
7438                 if (regs->len < reglen) {
7439                         regs->len = reglen; /* hint to the caller */
7440                         return (ENOBUFS);
7441                 }
7442
7443                 regs->len = reglen;
7444                 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7445                 t4_get_regs(sc, regs, buf);
7446                 rc = copyout(buf, regs->data, reglen);
7447                 free(buf, M_CXGBE);
7448                 break;
7449         }
7450         case CHELSIO_T4_GET_FILTER_MODE:
7451                 rc = get_filter_mode(sc, (uint32_t *)data);
7452                 break;
7453         case CHELSIO_T4_SET_FILTER_MODE:
7454                 rc = set_filter_mode(sc, *(uint32_t *)data);
7455                 break;
7456         case CHELSIO_T4_GET_FILTER:
7457                 rc = get_filter(sc, (struct t4_filter *)data);
7458                 break;
7459         case CHELSIO_T4_SET_FILTER:
7460                 rc = set_filter(sc, (struct t4_filter *)data);
7461                 break;
7462         case CHELSIO_T4_DEL_FILTER:
7463                 rc = del_filter(sc, (struct t4_filter *)data);
7464                 break;
7465         case CHELSIO_T4_GET_SGE_CONTEXT:
7466                 rc = get_sge_context(sc, (struct t4_sge_context *)data);
7467                 break;
7468         case CHELSIO_T4_LOAD_FW:
7469                 rc = load_fw(sc, (struct t4_data *)data);
7470                 break;
7471         case CHELSIO_T4_GET_MEM:
7472                 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7473                 break;
7474         case CHELSIO_T4_GET_I2C:
7475                 rc = read_i2c(sc, (struct t4_i2c_data *)data);
7476                 break;
7477         case CHELSIO_T4_CLEAR_STATS: {
7478                 int i;
7479                 u_int port_id = *(uint32_t *)data;
7480                 struct port_info *pi;
7481
7482                 if (port_id >= sc->params.nports)
7483                         return (EINVAL);
7484
7485                 /* MAC stats */
7486                 t4_clr_port_stats(sc, port_id);
7487
7488                 pi = sc->port[port_id];
7489                 if (pi->flags & PORT_INIT_DONE) {
7490                         struct sge_rxq *rxq;
7491                         struct sge_txq *txq;
7492                         struct sge_wrq *wrq;
7493
7494                         for_each_rxq(pi, i, rxq) {
7495 #if defined(INET) || defined(INET6)
7496                                 rxq->lro.lro_queued = 0;
7497                                 rxq->lro.lro_flushed = 0;
7498 #endif
7499                                 rxq->rxcsum = 0;
7500                                 rxq->vlan_extraction = 0;
7501                         }
7502
7503                         for_each_txq(pi, i, txq) {
7504                                 txq->txcsum = 0;
7505                                 txq->tso_wrs = 0;
7506                                 txq->vlan_insertion = 0;
7507                                 txq->imm_wrs = 0;
7508                                 txq->sgl_wrs = 0;
7509                                 txq->txpkt_wrs = 0;
7510                                 txq->txpkts_wrs = 0;
7511                                 txq->txpkts_pkts = 0;
7512                                 txq->br->br_drops = 0;
7513                                 txq->no_dmamap = 0;
7514                                 txq->no_desc = 0;
7515                         }
7516
7517 #ifdef TCP_OFFLOAD
7518                         /* nothing to clear for each ofld_rxq */
7519
7520                         for_each_ofld_txq(pi, i, wrq) {
7521                                 wrq->tx_wrs = 0;
7522                                 wrq->no_desc = 0;
7523                         }
7524 #endif
7525                         wrq = &sc->sge.ctrlq[pi->port_id];
7526                         wrq->tx_wrs = 0;
7527                         wrq->no_desc = 0;
7528                 }
7529                 break;
7530         }
7531         case CHELSIO_T4_GET_TRACER:
7532                 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7533                 break;
7534         case CHELSIO_T4_SET_TRACER:
7535                 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7536                 break;
7537         default:
7538                 rc = EINVAL;
7539         }
7540
7541         return (rc);
7542 }
7543
7544 #ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE on a port.  Must be called with the adapter's
 * synchronized-op held.  Per-port enablement is tracked in the adapter's
 * offload_map bitmap.  Returns 0 or a positive errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);	/* adapter is not offload capable */

	if (enable) {
		/* TOE requires the adapter to be fully initialized first. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port: nothing more to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* Activate the TOM upper layer driver on first use. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				/* EAGAIN here means no TOM ULD registered. */
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Already disabled on this port: nothing to do. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
7593
7594 /*
7595  * Add an upper layer driver to the global list.
7596  */
7597 int
7598 t4_register_uld(struct uld_info *ui)
7599 {
7600         int rc = 0;
7601         struct uld_info *u;
7602
7603         sx_xlock(&t4_uld_list_lock);
7604         SLIST_FOREACH(u, &t4_uld_list, link) {
7605             if (u->uld_id == ui->uld_id) {
7606                     rc = EEXIST;
7607                     goto done;
7608             }
7609         }
7610
7611         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7612         ui->refcount = 0;
7613 done:
7614         sx_xunlock(&t4_uld_list_lock);
7615         return (rc);
7616 }
7617
7618 int
7619 t4_unregister_uld(struct uld_info *ui)
7620 {
7621         int rc = EINVAL;
7622         struct uld_info *u;
7623
7624         sx_xlock(&t4_uld_list_lock);
7625
7626         SLIST_FOREACH(u, &t4_uld_list, link) {
7627             if (u == ui) {
7628                     if (ui->refcount > 0) {
7629                             rc = EBUSY;
7630                             goto done;
7631                     }
7632
7633                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7634                     rc = 0;
7635                     goto done;
7636             }
7637         }
7638 done:
7639         sx_xunlock(&t4_uld_list_lock);
7640         return (rc);
7641 }
7642
7643 int
7644 t4_activate_uld(struct adapter *sc, int id)
7645 {
7646         int rc = EAGAIN;
7647         struct uld_info *ui;
7648
7649         ASSERT_SYNCHRONIZED_OP(sc);
7650
7651         sx_slock(&t4_uld_list_lock);
7652
7653         SLIST_FOREACH(ui, &t4_uld_list, link) {
7654                 if (ui->uld_id == id) {
7655                         rc = ui->activate(sc);
7656                         if (rc == 0)
7657                                 ui->refcount++;
7658                         goto done;
7659                 }
7660         }
7661 done:
7662         sx_sunlock(&t4_uld_list_lock);
7663
7664         return (rc);
7665 }
7666
7667 int
7668 t4_deactivate_uld(struct adapter *sc, int id)
7669 {
7670         int rc = EINVAL;
7671         struct uld_info *ui;
7672
7673         ASSERT_SYNCHRONIZED_OP(sc);
7674
7675         sx_slock(&t4_uld_list_lock);
7676
7677         SLIST_FOREACH(ui, &t4_uld_list, link) {
7678                 if (ui->uld_id == id) {
7679                         rc = ui->deactivate(sc);
7680                         if (rc == 0)
7681                                 ui->refcount--;
7682                         goto done;
7683                 }
7684         }
7685 done:
7686         sx_sunlock(&t4_uld_list_lock);
7687
7688         return (rc);
7689 }
7690 #endif
7691
7692 /*
7693  * Come up with reasonable defaults for some of the tunables, provided they're
7694  * not set by the user (in which case we'll use the values as is).
7695  */
7696 static void
7697 tweak_tunables(void)
7698 {
7699         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
7700
7701         if (t4_ntxq10g < 1)
7702                 t4_ntxq10g = min(nc, NTXQ_10G);
7703
7704         if (t4_ntxq1g < 1)
7705                 t4_ntxq1g = min(nc, NTXQ_1G);
7706
7707         if (t4_nrxq10g < 1)
7708                 t4_nrxq10g = min(nc, NRXQ_10G);
7709
7710         if (t4_nrxq1g < 1)
7711                 t4_nrxq1g = min(nc, NRXQ_1G);
7712
7713 #ifdef TCP_OFFLOAD
7714         if (t4_nofldtxq10g < 1)
7715                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7716
7717         if (t4_nofldtxq1g < 1)
7718                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7719
7720         if (t4_nofldrxq10g < 1)
7721                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7722
7723         if (t4_nofldrxq1g < 1)
7724                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7725
7726         if (t4_toecaps_allowed == -1)
7727                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7728 #else
7729         if (t4_toecaps_allowed == -1)
7730                 t4_toecaps_allowed = 0;
7731 #endif
7732
7733         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7734                 t4_tmr_idx_10g = TMR_IDX_10G;
7735
7736         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7737                 t4_pktc_idx_10g = PKTC_IDX_10G;
7738
7739         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7740                 t4_tmr_idx_1g = TMR_IDX_1G;
7741
7742         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7743                 t4_pktc_idx_1g = PKTC_IDX_1G;
7744
7745         if (t4_qsize_txq < 128)
7746                 t4_qsize_txq = 128;
7747
7748         if (t4_qsize_rxq < 128)
7749                 t4_qsize_rxq = 128;
7750         while (t4_qsize_rxq & 7)
7751                 t4_qsize_rxq++;
7752
7753         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7754 }
7755
/*
 * Module event handler shared by the t4nex and t5nex drivers.  The static
 * "loaded" counter makes the global initialization/teardown run exactly once
 * no matter how many drivers register this handler.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;	/* net count of MOD_LOAD events seen */

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first load performs global initialization. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		sx_init(&t4_list_lock, "T4/T5 adapters");
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload tears the global state down. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		/*
		 * NOTE(review): "loaded" is decremented even when the unload
		 * fails below with EBUSY; confirm the count cannot drift if
		 * an unload is refused and later retried.
		 */
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		sx_slock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_uld_list_lock);
			break;
		}
		sx_sunlock(&t4_uld_list_lock);
		sx_destroy(&t4_uld_list_lock);
#endif
		/* Likewise while any adapter is still in the list. */
		sx_slock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_list_lock);
			break;
		}
		sx_sunlock(&t4_list_lock);
		sx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
7804
/* Device classes for the two nexus drivers and their port drivers. */
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/*
 * mod_event is registered for both nexus drivers; its internal "loaded"
 * counter ensures the shared global state is set up and torn down once.
 */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers attach beneath their nexus device. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);