/* FreeBSD stable/8: sys/dev/cxgbe/t4_main.c (MFC r297884) */
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32 #include "opt_inet6.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/priv.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/module.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/taskqueue.h>
43 #include <sys/pciio.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pci_private.h>
47 #include <sys/firmware.h>
48 #include <sys/sbuf.h>
49 #include <sys/smp.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_types.h>
56 #include <net/if_dl.h>
57 #include <net/if_vlan_var.h>
58 #if defined(__i386__) || defined(__amd64__)
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #endif
62
63 #include "common/common.h"
64 #include "common/t4_msg.h"
65 #include "common/t4_regs.h"
66 #include "common/t4_regs_values.h"
67 #include "t4_ioctl.h"
68 #include "t4_l2t.h"
69
/* T4 bus driver (t4nex) interface. */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
/* The nexus softc is the entire adapter state. */
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
86
87
88 /* T4 port (cxgbe) interface */
89 static int cxgbe_probe(device_t);
90 static int cxgbe_attach(device_t);
91 static int cxgbe_detach(device_t);
92 static device_method_t cxgbe_methods[] = {
93         DEVMETHOD(device_probe,         cxgbe_probe),
94         DEVMETHOD(device_attach,        cxgbe_attach),
95         DEVMETHOD(device_detach,        cxgbe_detach),
96         { 0, 0 }
97 };
/* T4 port devices attach with a per-port softc (struct port_info). */
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};
103
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/*
 * Character device for the T4 nexus.  Gives userland (e.g. cxgbetool)
 * ioctl access to the adapter, even in recovery mode.
 */
static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
116
/* T5 bus driver (t5nex) interface. */
static int t5_probe(device_t);
/* T5 reuses the T4 attach/detach entry points; only probe differs. */
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};
131
132
/* T5 port (cxl) interface -- shares the cxgbe method table. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};
139
/* T5 nexus character device: same entry points as T4, different name. */
static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};
148
149 /* ifnet + media interface */
150 static void cxgbe_init(void *);
151 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152 static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153 static void cxgbe_qflush(struct ifnet *);
154 static int cxgbe_media_change(struct ifnet *);
155 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
157 MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158
159 /*
160  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161  * then ADAPTER_LOCK, then t4_uld_list_lock.
162  */
163 static struct mtx t4_list_lock;
164 static SLIST_HEAD(, adapter) t4_list;
165 #ifdef TCP_OFFLOAD
166 static struct mtx t4_uld_list_lock;
167 static SLIST_HEAD(, uld_info) t4_uld_list;
168 #endif
169
170 /*
171  * Tunables.  See tweak_tunables() too.
172  *
173  * Each tunable is set to a default value here if it's known at compile-time.
174  * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175  * provide a reasonable default when the driver is loaded.
176  *
177  * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
178  * T5 are under hw.cxl.
179  */
180
181 /*
182  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183  */
184 #define NTXQ_10G 16
185 static int t4_ntxq10g = -1;
186 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188 #define NRXQ_10G 8
189 static int t4_nrxq10g = -1;
190 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192 #define NTXQ_1G 4
193 static int t4_ntxq1g = -1;
194 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196 #define NRXQ_1G 2
197 static int t4_nrxq1g = -1;
198 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200 static int t4_rsrv_noflowq = 0;
201 TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
202
203 #ifdef TCP_OFFLOAD
204 #define NOFLDTXQ_10G 8
205 static int t4_nofldtxq10g = -1;
206 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
207
208 #define NOFLDRXQ_10G 2
209 static int t4_nofldrxq10g = -1;
210 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
211
212 #define NOFLDTXQ_1G 2
213 static int t4_nofldtxq1g = -1;
214 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
215
216 #define NOFLDRXQ_1G 1
217 static int t4_nofldrxq1g = -1;
218 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
219 #endif
220
221 /*
222  * Holdoff parameters for 10G and 1G ports.
223  */
224 #define TMR_IDX_10G 1
225 static int t4_tmr_idx_10g = TMR_IDX_10G;
226 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
227
228 #define PKTC_IDX_10G (-1)
229 static int t4_pktc_idx_10g = PKTC_IDX_10G;
230 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
231
232 #define TMR_IDX_1G 1
233 static int t4_tmr_idx_1g = TMR_IDX_1G;
234 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
235
236 #define PKTC_IDX_1G (-1)
237 static int t4_pktc_idx_1g = PKTC_IDX_1G;
238 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
239
240 /*
241  * Size (# of entries) of each tx and rx queue.
242  */
243 static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
244 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
245
246 static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
247 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
248
249 /*
250  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
251  */
252 static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
253 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
254
255 /*
256  * Configuration file.
257  */
258 #define DEFAULT_CF      "default"
259 #define FLASH_CF        "flash"
260 #define UWIRE_CF        "uwire"
261 #define FPGA_CF         "fpga"
262 static char t4_cfg_file[32] = DEFAULT_CF;
263 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
264
265 /*
266  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
267  * encouraged respectively).
268  */
269 static unsigned int t4_fw_install = 1;
270 TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
271
272 /*
273  * ASIC features that will be used.  Disable the ones you don't want so that the
274  * chip resources aren't wasted on features that will not be used.
275  */
276 static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
277 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
278
279 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
280 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
281
282 static int t4_toecaps_allowed = -1;
283 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
284
285 static int t4_rdmacaps_allowed = 0;
286 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
287
288 static int t4_iscsicaps_allowed = 0;
289 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
290
291 static int t4_fcoecaps_allowed = 0;
292 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
293
294 static int t5_write_combine = 0;
295 TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
296
/*
 * Interrupt vector and per-port queue counts chosen for the adapter.
 * Filled in by cfg_itype_and_nqueues() and consumed during t4_attach().
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};
313
/*
 * Software state for one hardware filter.  fs holds the specification
 * the filter was created with; valid/locked/pending track its life
 * cycle against firmware replies (see set_filter_wr/del_filter_wr).
 */
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};
323
/*
 * Bits selecting which aspects of the MAC/VI configuration to (re)apply;
 * presumably the flags argument of update_mac_settings() -- confirm at
 * the call sites (not visible in this chunk).
 */
enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff	/* apply everything */
};
334
335 static int map_bars_0_and_4(struct adapter *);
336 static int map_bar_2(struct adapter *);
337 static void setup_memwin(struct adapter *);
338 static int validate_mem_range(struct adapter *, uint32_t, int);
339 static int fwmtype_to_hwmtype(int);
340 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
341     uint32_t *);
342 static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
343 static uint32_t position_memwin(struct adapter *, int, uint32_t);
344 static int cfg_itype_and_nqueues(struct adapter *, int, int,
345     struct intrs_and_queues *);
346 static int prep_firmware(struct adapter *);
347 static int partition_resources(struct adapter *, const struct firmware *,
348     const char *);
349 static int get_params__pre_init(struct adapter *);
350 static int get_params__post_init(struct adapter *);
351 static int set_params__post_init(struct adapter *);
352 static void t4_set_desc(struct adapter *);
353 static void build_medialist(struct port_info *);
354 static int update_mac_settings(struct port_info *, int);
355 static int cxgbe_init_synchronized(struct port_info *);
356 static int cxgbe_uninit_synchronized(struct port_info *);
357 static int setup_intr_handlers(struct adapter *);
358 static int adapter_full_init(struct adapter *);
359 static int adapter_full_uninit(struct adapter *);
360 static int port_full_init(struct port_info *);
361 static int port_full_uninit(struct port_info *);
362 static void quiesce_eq(struct adapter *, struct sge_eq *);
363 static void quiesce_iq(struct adapter *, struct sge_iq *);
364 static void quiesce_fl(struct adapter *, struct sge_fl *);
365 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
366     driver_intr_t *, void *, char *);
367 static int t4_free_irq(struct adapter *, struct irq *);
368 static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
369     unsigned int);
370 static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
371 static void cxgbe_tick(void *);
372 static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
373     struct mbuf *);
374 static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
375 static int fw_msg_not_handled(struct adapter *, const __be64 *);
376 static int t4_sysctls(struct adapter *);
377 static int cxgbe_sysctls(struct port_info *);
378 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
379 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
380 static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
381 static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
382 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
383 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
384 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
385 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
386 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
387 static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
388 #ifdef SBUF_DRAIN
389 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
390 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
391 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
392 static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
393 static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
394 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
395 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
396 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
397 static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
398 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
399 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
400 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
401 static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
402 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
403 static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
404 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
405 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
406 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
407 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
408 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
409 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
410 static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
411 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
412 static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
413 static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
414 #endif
415 static inline void txq_start(struct ifnet *, struct sge_txq *);
416 static uint32_t fconf_to_mode(uint32_t);
417 static uint32_t mode_to_fconf(uint32_t);
418 static uint32_t fspec_to_fconf(struct t4_filter_specification *);
419 static int get_filter_mode(struct adapter *, uint32_t *);
420 static int set_filter_mode(struct adapter *, uint32_t);
421 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
422 static int get_filter(struct adapter *, struct t4_filter *);
423 static int set_filter(struct adapter *, struct t4_filter *);
424 static int del_filter(struct adapter *, struct t4_filter *);
425 static void clear_filter(struct filter_entry *);
426 static int set_filter_wr(struct adapter *, int);
427 static int del_filter_wr(struct adapter *, int);
428 static int get_sge_context(struct adapter *, struct t4_sge_context *);
429 static int load_fw(struct adapter *, struct t4_data *);
430 static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
431 static int read_i2c(struct adapter *, struct t4_i2c_data *);
432 static int set_sched_class(struct adapter *, struct t4_sched_params *);
433 static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
434 #ifdef TCP_OFFLOAD
435 static int toe_capability(struct port_info *, int);
436 #endif
437 static int mod_event(module_t, int, void *);
438
/*
 * PCI device IDs claimed by t4_probe() and t5_probe() respectively.
 */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};
483
484 #ifdef TCP_OFFLOAD
485 /*
486  * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
487  * exactly the same for both rxq and ofld_rxq.
488  */
489 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
490 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
491 #endif
492
493 /* No easy way to include t4_msg.h before adapter.h so we check this way */
494 CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
495 CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
496
497 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
498
499 static int
500 t4_probe(device_t dev)
501 {
502         int i;
503         uint16_t v = pci_get_vendor(dev);
504         uint16_t d = pci_get_device(dev);
505         uint8_t f = pci_get_function(dev);
506
507         if (v != PCI_VENDOR_ID_CHELSIO)
508                 return (ENXIO);
509
510         /* Attach only to PF0 of the FPGA */
511         if (d == 0xa000 && f != 0)
512                 return (ENXIO);
513
514         for (i = 0; i < nitems(t4_pciids); i++) {
515                 if (d == t4_pciids[i].device) {
516                         device_set_desc(dev, t4_pciids[i].desc);
517                         return (BUS_PROBE_DEFAULT);
518                 }
519         }
520
521         return (ENXIO);
522 }
523
524 static int
525 t5_probe(device_t dev)
526 {
527         int i;
528         uint16_t v = pci_get_vendor(dev);
529         uint16_t d = pci_get_device(dev);
530         uint8_t f = pci_get_function(dev);
531
532         if (v != PCI_VENDOR_ID_CHELSIO)
533                 return (ENXIO);
534
535         /* Attach only to PF0 of the FPGA */
536         if (d == 0xb000 && f != 0)
537                 return (ENXIO);
538
539         for (i = 0; i < nitems(t5_pciids); i++) {
540                 if (d == t5_pciids[i].device) {
541                         device_set_desc(dev, t5_pciids[i].desc);
542                         return (BUS_PROBE_DEFAULT);
543                 }
544         }
545
546         return (ENXIO);
547 }
548
/*
 * Attach routine shared by the t4nex and t5nex drivers.
 *
 * Brings the adapter to the point where its ports can attach: PCIe
 * setup, register BARs, firmware preparation, queue/interrupt sizing,
 * and one child device per port.  If anything fails after the nexus
 * character device has been created, the adapter is left in "recovery
 * mode": attach reports success so the cdev remains usable by the
 * userland tool for diagnostics and repair.
 */
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Enable DMA, raise the max read request size, and allow the
	 * device to use PCIe relaxed ordering.  Record the max payload
	 * size advertised in the device control register.
	 */
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

	/*
	 * Publish this adapter on the global list (see the lock order
	 * note near t4_list_lock above).
	 */
	mtx_lock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	mtx_unlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	/* Install catch-all handlers; real ones are registered later. */
	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

		/* 40G ports use the 10G holdoff tunables too. */
		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		/*
		 * Queue 0 can be reserved for non-flowid traffic only
		 * when the port has more than one txq.
		 */
		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	/* Attach the per-port (cxgbe/cxl) children added above. */
	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}
849
/*
 * Idempotent.  This is also the error-recovery path of t4_attach, so it must
 * tolerate a partially initialized softc (most resources are checked for
 * NULL/initialized before being released).
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        /* Quiesce the hardware before tearing anything down. */
        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        /* Remove the nexus cdev so userland (cxgbetool) can't get in. */
        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        /* Detach the cxgbe port children before freeing shared state. */
        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        /* Tell the firmware we are going away. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
        /* free(9) tolerates NULL, so unallocated arrays are fine here. */
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                /* Unlink this adapter from the global t4_list. */
                mtx_lock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                mtx_unlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);

        /* Wipe the softc so a repeat call finds nothing left to release. */
        bzero(sc, sizeof(*sc));

        return (0);
}
944
945
946 static int
947 cxgbe_probe(device_t dev)
948 {
949         char buf[128];
950         struct port_info *pi = device_get_softc(dev);
951
952         snprintf(buf, sizeof(buf), "port %d", pi->port_id);
953         device_set_desc_copy(dev, buf);
954
955         return (BUS_PROBE_DEFAULT);
956 }
957
/*
 * Interface capabilities advertised by a cxgbe port, and the subset that is
 * enabled by default (currently all of them).
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define T4_CAP_ENABLE (T4_CAP)
962
/*
 * Attach routine for a cxgbe port (child of the t4/t5 nexus device).
 * Allocates and configures the port's ifnet, attaches it to the network
 * stack, and registers the port's sysctl nodes.
 */
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct ifnet *ifp;

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        pi->ifp = ifp;
        ifp->if_softc = pi;

        callout_init(&pi->tick, CALLOUT_MPSAFE);

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        /* Driver entry points for the network stack. */
        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter))
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        /* All base capabilities are enabled by default. */
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /* Initialize ifmedia for this port */
        ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(pi);

        ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
        if (is_offload(pi->adapter)) {
                device_printf(dev,
                    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
                    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
        } else
#endif
                device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

        cxgbe_sysctls(pi);

        return (0);
}
1017
/*
 * Detach routine for a cxgbe port.  Marks the port doomed, waits for its
 * turn under the adapter's synchronized-operation protocol, then tears the
 * ifnet down.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Only one synchronized operation at a time; wait for ours. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
#ifdef INVARIANTS
        sc->last_op = "t4detach";
        sc->last_op_thr = curthread;
#endif
        ADAPTER_UNLOCK(sc);

        /* Stop the tick callout under the lock; drain it outside. */
        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&pi->tick);
        PORT_UNLOCK(pi);
        callout_drain(&pi->tick);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(pi);
        port_full_uninit(pi);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release the busy claim taken above and wake any waiters. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
1059
1060 static void
1061 cxgbe_init(void *arg)
1062 {
1063         struct port_info *pi = arg;
1064         struct adapter *sc = pi->adapter;
1065
1066         if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1067                 return;
1068         cxgbe_init_synchronized(pi);
1069         end_synchronized_op(sc, 0);
1070 }
1071
1072 static int
1073 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1074 {
1075         int rc = 0, mtu, flags;
1076         struct port_info *pi = ifp->if_softc;
1077         struct adapter *sc = pi->adapter;
1078         struct ifreq *ifr = (struct ifreq *)data;
1079         uint32_t mask;
1080
1081         switch (cmd) {
1082         case SIOCSIFMTU:
1083                 mtu = ifr->ifr_mtu;
1084                 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1085                         return (EINVAL);
1086
1087                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1088                 if (rc)
1089                         return (rc);
1090                 ifp->if_mtu = mtu;
1091                 if (pi->flags & PORT_INIT_DONE) {
1092                         t4_update_fl_bufsize(ifp);
1093                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1094                                 rc = update_mac_settings(pi, XGMAC_MTU);
1095                 }
1096                 end_synchronized_op(sc, 0);
1097                 break;
1098
1099         case SIOCSIFFLAGS:
1100                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1101                 if (rc)
1102                         return (rc);
1103
1104                 if (ifp->if_flags & IFF_UP) {
1105                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1106                                 flags = pi->if_flags;
1107                                 if ((ifp->if_flags ^ flags) &
1108                                     (IFF_PROMISC | IFF_ALLMULTI)) {
1109                                         rc = update_mac_settings(pi,
1110                                             XGMAC_PROMISC | XGMAC_ALLMULTI);
1111                                 }
1112                         } else
1113                                 rc = cxgbe_init_synchronized(pi);
1114                         pi->if_flags = ifp->if_flags;
1115                 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1116                         rc = cxgbe_uninit_synchronized(pi);
1117                 end_synchronized_op(sc, 0);
1118                 break;
1119
1120         case SIOCADDMULTI:      
1121         case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1122                 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1123                 if (rc)
1124                         return (rc);
1125                 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1126                         rc = update_mac_settings(pi, XGMAC_MCADDRS);
1127                 end_synchronized_op(sc, LOCK_HELD);
1128                 break;
1129
1130         case SIOCSIFCAP:
1131                 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1132                 if (rc)
1133                         return (rc);
1134
1135                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1136                 if (mask & IFCAP_TXCSUM) {
1137                         ifp->if_capenable ^= IFCAP_TXCSUM;
1138                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1139
1140                         if (IFCAP_TSO4 & ifp->if_capenable &&
1141                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1142                                 ifp->if_capenable &= ~IFCAP_TSO4;
1143                                 if_printf(ifp,
1144                                     "tso4 disabled due to -txcsum.\n");
1145                         }
1146                 }
1147                 if (mask & IFCAP_TXCSUM_IPV6) {
1148                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1149                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1150
1151                         if (IFCAP_TSO6 & ifp->if_capenable &&
1152                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1153                                 ifp->if_capenable &= ~IFCAP_TSO6;
1154                                 if_printf(ifp,
1155                                     "tso6 disabled due to -txcsum6.\n");
1156                         }
1157                 }
1158                 if (mask & IFCAP_RXCSUM)
1159                         ifp->if_capenable ^= IFCAP_RXCSUM;
1160                 if (mask & IFCAP_RXCSUM_IPV6)
1161                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1162
1163                 /*
1164                  * Note that we leave CSUM_TSO alone (it is always set).  The
1165                  * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1166                  * sending a TSO request our way, so it's sufficient to toggle
1167                  * IFCAP_TSOx only.
1168                  */
1169                 if (mask & IFCAP_TSO4) {
1170                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1171                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
1172                                 if_printf(ifp, "enable txcsum first.\n");
1173                                 rc = EAGAIN;
1174                                 goto fail;
1175                         }
1176                         ifp->if_capenable ^= IFCAP_TSO4;
1177                 }
1178                 if (mask & IFCAP_TSO6) {
1179                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1180                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1181                                 if_printf(ifp, "enable txcsum6 first.\n");
1182                                 rc = EAGAIN;
1183                                 goto fail;
1184                         }
1185                         ifp->if_capenable ^= IFCAP_TSO6;
1186                 }
1187                 if (mask & IFCAP_LRO) {
1188 #if defined(INET) || defined(INET6)
1189                         int i;
1190                         struct sge_rxq *rxq;
1191
1192                         ifp->if_capenable ^= IFCAP_LRO;
1193                         for_each_rxq(pi, i, rxq) {
1194                                 if (ifp->if_capenable & IFCAP_LRO)
1195                                         rxq->iq.flags |= IQ_LRO_ENABLED;
1196                                 else
1197                                         rxq->iq.flags &= ~IQ_LRO_ENABLED;
1198                         }
1199 #endif
1200                 }
1201 #ifdef TCP_OFFLOAD
1202                 if (mask & IFCAP_TOE) {
1203                         int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1204
1205                         rc = toe_capability(pi, enable);
1206                         if (rc != 0)
1207                                 goto fail;
1208
1209                         ifp->if_capenable ^= mask;
1210                 }
1211 #endif
1212                 if (mask & IFCAP_VLAN_HWTAGGING) {
1213                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1214                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1215                                 rc = update_mac_settings(pi, XGMAC_VLANEX);
1216                 }
1217                 if (mask & IFCAP_VLAN_MTU) {
1218                         ifp->if_capenable ^= IFCAP_VLAN_MTU;
1219
1220                         /* Need to find out how to disable auto-mtu-inflation */
1221                 }
1222                 if (mask & IFCAP_VLAN_HWTSO)
1223                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1224                 if (mask & IFCAP_VLAN_HWCSUM)
1225                         ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1226
1227 #ifdef VLAN_CAPABILITIES
1228                 VLAN_CAPABILITIES(ifp);
1229 #endif
1230 fail:
1231                 end_synchronized_op(sc, 0);
1232                 break;
1233
1234         case SIOCSIFMEDIA:
1235         case SIOCGIFMEDIA:
1236                 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1237                 break;
1238
1239         default:
1240                 rc = ether_ioctl(ifp, cmd, data);
1241         }
1242
1243         return (rc);
1244 }
1245
/*
 * if_transmit handler.  Picks a tx queue for the mbuf (using the flowid when
 * present, while steering flowid traffic away from the queues reserved for
 * non-flowid traffic) and either transmits directly or enqueues in the
 * queue's buf_ring for transmission by whoever holds the queue lock.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
        struct buf_ring *br;
        int rc;

        M_ASSERTPKTHDR(m);

        /* Drop immediately if the link is down. */
        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Flowid traffic skips the first rsrv_noflowq queues of the port. */
        if (m->m_flags & M_FLOWID)
                txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
                    + pi->rsrv_noflowq);
        br = txq->br;

        if (TXQ_TRYLOCK(txq) == 0) {
                struct sge_eq *eq = &txq->eq;

                /*
                 * It is possible that t4_eth_tx finishes up and releases the
                 * lock between the TRYLOCK above and the drbr_enqueue here.  We
                 * need to make sure that this mbuf doesn't just sit there in
                 * the drbr.
                 */

                rc = drbr_enqueue(ifp, br, m);
                if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
                    !(eq->flags & EQ_DOOMED))
                        callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
                return (rc);
        }

        /*
         * txq->m is the mbuf that is held up due to a temporary shortage of
         * resources and it should be put on the wire first.  Then what's in
         * drbr and finally the mbuf that was just passed in to us.
         *
         * Return code should indicate the fate of the mbuf that was passed in
         * this time.
         */

        TXQ_LOCK_ASSERT_OWNED(txq);
        if (drbr_needs_enqueue(ifp, br) || txq->m) {

                /* Queued for transmission. */

                rc = drbr_enqueue(ifp, br, m);
                m = txq->m ? txq->m : drbr_dequeue(ifp, br);
                (void) t4_eth_tx(ifp, txq, m);
                TXQ_UNLOCK(txq);
                return (rc);
        }

        /* Direct transmission. */
        rc = t4_eth_tx(ifp, txq, m);
        if (rc != 0 && txq->m)
                rc = 0; /* held, will be transmitted soon (hopefully) */

        TXQ_UNLOCK(txq);
        return (rc);
}
1313
1314 static void
1315 cxgbe_qflush(struct ifnet *ifp)
1316 {
1317         struct port_info *pi = ifp->if_softc;
1318         struct sge_txq *txq;
1319         int i;
1320         struct mbuf *m;
1321
1322         /* queues do not exist if !PORT_INIT_DONE. */
1323         if (pi->flags & PORT_INIT_DONE) {
1324                 for_each_txq(pi, i, txq) {
1325                         TXQ_LOCK(txq);
1326                         m_freem(txq->m);
1327                         txq->m = NULL;
1328                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1329                                 m_freem(m);
1330                         TXQ_UNLOCK(txq);
1331                 }
1332         }
1333         if_qflush(ifp);
1334 }
1335
1336 static int
1337 cxgbe_media_change(struct ifnet *ifp)
1338 {
1339         struct port_info *pi = ifp->if_softc;
1340
1341         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1342
1343         return (EOPNOTSUPP);
1344 }
1345
1346 static void
1347 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1348 {
1349         struct port_info *pi = ifp->if_softc;
1350         struct ifmedia_entry *cur = pi->media.ifm_cur;
1351         int speed = pi->link_cfg.speed;
1352         int data = (pi->port_type << 8) | pi->mod_type;
1353
1354         if (cur->ifm_data != data) {
1355                 build_medialist(pi);
1356                 cur = pi->media.ifm_cur;
1357         }
1358
1359         ifmr->ifm_status = IFM_AVALID;
1360         if (!pi->link_cfg.link_ok)
1361                 return;
1362
1363         ifmr->ifm_status |= IFM_ACTIVE;
1364
1365         /* active and current will differ iff current media is autoselect. */
1366         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1367                 return;
1368
1369         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1370         if (speed == SPEED_10000)
1371                 ifmr->ifm_active |= IFM_10G_T;
1372         else if (speed == SPEED_1000)
1373                 ifmr->ifm_active |= IFM_1000_T;
1374         else if (speed == SPEED_100)
1375                 ifmr->ifm_active |= IFM_100_TX;
1376         else if (speed == SPEED_10)
1377                 ifmr->ifm_active |= IFM_10_T;
1378         else
1379                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1380                             speed));
1381 }
1382
/*
 * Stop the adapter after a fatal error: disable the SGE's global enable bit
 * and mask all interrupts, then announce the failure.
 */
void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
1391
/*
 * Map BAR0 (the adapter's register space) and BAR4 (MSI-X BAR).  Resources
 * allocated here are released in t4_detach.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        /* The kernel doorbell (in register space) is always usable. */
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
1417
/*
 * Map BAR2 (the userspace doorbell BAR) and record which doorbell mechanisms
 * are usable in sc->doorbells.
 */
static int
map_bar_2(struct adapter *sc)
{

        /*
         * T4: only iWARP driver uses the userspace doorbells.  There is no need
         * to map it if RDMA is disabled.
         */
        if (is_t4(sc) && sc->rdmacaps == 0)
                return (0);

        sc->udbs_rid = PCIR_BAR(2);
        sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE);
        if (sc->udbs_res == NULL) {
                device_printf(sc->dev, "cannot map doorbell BAR.\n");
                return (ENXIO);
        }
        sc->udbs_base = rman_get_virtual(sc->udbs_res);

        if (is_t5(sc)) {
                setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
                if (t5_write_combine) {
                        int rc;

                        /*
                         * Enable write combining on BAR2.  This is the
                         * userspace doorbell BAR and is split into 128B
                         * (UDBS_SEG_SIZE) doorbell regions, each associated
                         * with an egress queue.  The first 64B has the doorbell
                         * and the second 64B can be used to submit a tx work
                         * request with an implicit doorbell.
                         */

                        rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
                            rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
                        if (rc == 0) {
                                /* WC works: prefer it over plain UDB. */
                                clrbit(&sc->doorbells, DOORBELL_UDB);
                                setbit(&sc->doorbells, DOORBELL_WCWR);
                                setbit(&sc->doorbells, DOORBELL_UDBWC);
                        } else {
                                device_printf(sc->dev,
                                    "couldn't enable write combining: %d\n",
                                    rc);
                        }

                        t4_write_reg(sc, A_SGE_STAT_CFG,
                            V_STATSOURCE_T5(7) | V_STATMODE(0));
                }
#endif
        }

        return (0);
}
1473
/*
 * PCIe memory window (base, aperture) settings.  Windows 0 and 1 are the
 * same on both chips; window 2 differs between T4 and T5.
 */
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1485
/*
 * Program the chip's PCIe memory window decoders with the base/aperture
 * settings from the appropriate (T4 or T5) table.
 */
static void
setup_memwin(struct adapter *sc)
{
        const struct memwin *mw;
        int i, n;
        uint32_t bar0;

        if (is_t4(sc)) {
                /*
                 * Read low 32b of bar0 indirectly via the hardware backdoor
                 * mechanism.  Works from within PCI passthrough environments
                 * too, where rman_get_start() can return a different value.  We
                 * need to program the T4 memory window decoders with the actual
                 * addresses that will be coming across the PCIe link.
                 */
                bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
                bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

                mw = &t4_memwin[0];
                n = nitems(t4_memwin);
        } else {
                /* T5 uses the relative offset inside the PCIe BAR */
                bar0 = 0;

                mw = &t5_memwin[0];
                n = nitems(t5_memwin);
        }

        for (i = 0; i < n; i++, mw++) {
                /* Aperture is encoded as log2(size) - 10 in the WINDOW field. */
                t4_write_reg(sc,
                    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
                    (mw->base + bar0) | V_BIR(0) |
                    V_WINDOW(ilog2(mw->aperture) - 10));
        }

        /* flush */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1524
1525 /*
1526  * Verify that the memory range specified by the addr/len pair is valid and lies
1527  * entirely within a single region (EDCx or MCx).
1528  */
1529 static int
1530 validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1531 {
1532         uint32_t em, addr_len, maddr, mlen;
1533
1534         /* Memory can only be accessed in naturally aligned 4 byte units */
1535         if (addr & 3 || len & 3 || len == 0)
1536                 return (EINVAL);
1537
1538         /* Enabled memories */
1539         em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1540         if (em & F_EDRAM0_ENABLE) {
1541                 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1542                 maddr = G_EDRAM0_BASE(addr_len) << 20;
1543                 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1544                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1545                     addr + len <= maddr + mlen)
1546                         return (0);
1547         }
1548         if (em & F_EDRAM1_ENABLE) {
1549                 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1550                 maddr = G_EDRAM1_BASE(addr_len) << 20;
1551                 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1552                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1553                     addr + len <= maddr + mlen)
1554                         return (0);
1555         }
1556         if (em & F_EXT_MEM_ENABLE) {
1557                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1558                 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1559                 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1560                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1561                     addr + len <= maddr + mlen)
1562                         return (0);
1563         }
1564         if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1565                 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1566                 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1567                 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1568                 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1569                     addr + len <= maddr + mlen)
1570                         return (0);
1571         }
1572
1573         return (EFAULT);
1574 }
1575
1576 static int
1577 fwmtype_to_hwmtype(int mtype)
1578 {
1579
1580         switch (mtype) {
1581         case FW_MEMTYPE_EDC0:
1582                 return (MEM_EDC0);
1583         case FW_MEMTYPE_EDC1:
1584                 return (MEM_EDC1);
1585         case FW_MEMTYPE_EXTMEM:
1586                 return (MEM_MC0);
1587         case FW_MEMTYPE_EXTMEM1:
1588                 return (MEM_MC1);
1589         default:
1590                 panic("%s: cannot translate fw mtype %d.", __func__, mtype);
1591         }
1592 }
1593
/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
        uint32_t em, addr_len, maddr, mlen;

        /* Memory can only be accessed in naturally aligned 4 byte units */
        if (off & 3 || len & 3 || len == 0)
                return (EINVAL);

        em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
        /*
         * BAR registers encode base and size in 1MB units, hence the << 20.
         * NOTE(review): FW_MEMTYPE_EXTMEM maps to MEM_MC0, matched here by
         * the MEM_MC case — presumably MEM_MC0 is an alias of MEM_MC; verify
         * against the enum definition.
         */
        switch (fwmtype_to_hwmtype(mtype)) {
        case MEM_EDC0:
                if (!(em & F_EDRAM0_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
                maddr = G_EDRAM0_BASE(addr_len) << 20;
                mlen = G_EDRAM0_SIZE(addr_len) << 20;
                break;
        case MEM_EDC1:
                if (!(em & F_EDRAM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
                maddr = G_EDRAM1_BASE(addr_len) << 20;
                mlen = G_EDRAM1_SIZE(addr_len) << 20;
                break;
        case MEM_MC:
                if (!(em & F_EXT_MEM_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
                maddr = G_EXT_MEM_BASE(addr_len) << 20;
                mlen = G_EXT_MEM_SIZE(addr_len) << 20;
                break;
        case MEM_MC1:
                /* The second external memory region exists on T5+ only. */
                if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
                maddr = G_EXT_MEM1_BASE(addr_len) << 20;
                mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
                break;
        default:
                return (EINVAL);
        }

        if (mlen > 0 && off < mlen && off + len <= mlen) {
                *addr = maddr + off;    /* global address */
                return (0);
        }

        return (EFAULT);
}
1650
1651 static void
1652 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1653 {
1654         const struct memwin *mw;
1655
1656         if (is_t4(sc)) {
1657                 KASSERT(win >= 0 && win < nitems(t4_memwin),
1658                     ("%s: incorrect memwin# (%d)", __func__, win));
1659                 mw = &t4_memwin[win];
1660         } else {
1661                 KASSERT(win >= 0 && win < nitems(t5_memwin),
1662                     ("%s: incorrect memwin# (%d)", __func__, win));
1663                 mw = &t5_memwin[win];
1664         }
1665
1666         if (base != NULL)
1667                 *base = mw->base;
1668         if (aperture != NULL)
1669                 *aperture = mw->aperture;
1670 }
1671
1672 /*
1673  * Positions the memory window such that it can be used to access the specified
1674  * address in the chip's address space.  The return value is the offset of addr
1675  * from the start of the window.
1676  */
1677 static uint32_t
1678 position_memwin(struct adapter *sc, int n, uint32_t addr)
1679 {
1680         uint32_t start, pf;
1681         uint32_t reg;
1682
1683         KASSERT(n >= 0 && n <= 3,
1684             ("%s: invalid window %d.", __func__, n));
1685         KASSERT((addr & 3) == 0,
1686             ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1687
1688         if (is_t4(sc)) {
1689                 pf = 0;
1690                 start = addr & ~0xf;    /* start must be 16B aligned */
1691         } else {
1692                 pf = V_PFNUM(sc->pf);
1693                 start = addr & ~0x7f;   /* start must be 128B aligned */
1694         }
1695         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1696
1697         t4_write_reg(sc, reg, start | pf);
1698         t4_read_reg(sc, reg);
1699
1700         return (addr - start);
1701 }
1702
1703 static int
1704 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1705     struct intrs_and_queues *iaq)
1706 {
1707         int rc, itype, navail, nrxq10g, nrxq1g, n;
1708         int nofldrxq10g = 0, nofldrxq1g = 0;
1709
1710         bzero(iaq, sizeof(*iaq));
1711
1712         iaq->ntxq10g = t4_ntxq10g;
1713         iaq->ntxq1g = t4_ntxq1g;
1714         iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1715         iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1716         iaq->rsrv_noflowq = t4_rsrv_noflowq;
1717 #ifdef TCP_OFFLOAD
1718         if (is_offload(sc)) {
1719                 iaq->nofldtxq10g = t4_nofldtxq10g;
1720                 iaq->nofldtxq1g = t4_nofldtxq1g;
1721                 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1722                 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1723         }
1724 #endif
1725
1726         for (itype = INTR_MSIX; itype; itype >>= 1) {
1727
1728                 if ((itype & t4_intr_types) == 0)
1729                         continue;       /* not allowed */
1730
1731                 if (itype == INTR_MSIX)
1732                         navail = pci_msix_count(sc->dev);
1733                 else if (itype == INTR_MSI)
1734                         navail = pci_msi_count(sc->dev);
1735                 else
1736                         navail = 1;
1737 restart:
1738                 if (navail == 0)
1739                         continue;
1740
1741                 iaq->intr_type = itype;
1742                 iaq->intr_flags = 0;
1743
1744                 /*
1745                  * Best option: an interrupt vector for errors, one for the
1746                  * firmware event queue, and one each for each rxq (NIC as well
1747                  * as offload).
1748                  */
1749                 iaq->nirq = T4_EXTRA_INTR;
1750                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1751                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1752                 if (iaq->nirq <= navail &&
1753                     (itype != INTR_MSI || powerof2(iaq->nirq))) {
1754                         iaq->intr_flags |= INTR_DIRECT;
1755                         goto allocate;
1756                 }
1757
1758                 /*
1759                  * Second best option: an interrupt vector for errors, one for
1760                  * the firmware event queue, and one each for either NIC or
1761                  * offload rxq's.
1762                  */
1763                 iaq->nirq = T4_EXTRA_INTR;
1764                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1765                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1766                 if (iaq->nirq <= navail &&
1767                     (itype != INTR_MSI || powerof2(iaq->nirq)))
1768                         goto allocate;
1769
1770                 /*
1771                  * Next best option: an interrupt vector for errors, one for the
1772                  * firmware event queue, and at least one per port.  At this
1773                  * point we know we'll have to downsize nrxq or nofldrxq to fit
1774                  * what's available to us.
1775                  */
1776                 iaq->nirq = T4_EXTRA_INTR;
1777                 iaq->nirq += n10g + n1g;
1778                 if (iaq->nirq <= navail) {
1779                         int leftover = navail - iaq->nirq;
1780
1781                         if (n10g > 0) {
1782                                 int target = max(nrxq10g, nofldrxq10g);
1783
1784                                 n = 1;
1785                                 while (n < target && leftover >= n10g) {
1786                                         leftover -= n10g;
1787                                         iaq->nirq += n10g;
1788                                         n++;
1789                                 }
1790                                 iaq->nrxq10g = min(n, nrxq10g);
1791 #ifdef TCP_OFFLOAD
1792                                 if (is_offload(sc))
1793                                         iaq->nofldrxq10g = min(n, nofldrxq10g);
1794 #endif
1795                         }
1796
1797                         if (n1g > 0) {
1798                                 int target = max(nrxq1g, nofldrxq1g);
1799
1800                                 n = 1;
1801                                 while (n < target && leftover >= n1g) {
1802                                         leftover -= n1g;
1803                                         iaq->nirq += n1g;
1804                                         n++;
1805                                 }
1806                                 iaq->nrxq1g = min(n, nrxq1g);
1807 #ifdef TCP_OFFLOAD
1808                                 if (is_offload(sc))
1809                                         iaq->nofldrxq1g = min(n, nofldrxq1g);
1810 #endif
1811                         }
1812
1813                         if (itype != INTR_MSI || powerof2(iaq->nirq))
1814                                 goto allocate;
1815                 }
1816
1817                 /*
1818                  * Least desirable option: one interrupt vector for everything.
1819                  */
1820                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1821 #ifdef TCP_OFFLOAD
1822                 if (is_offload(sc))
1823                         iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1824 #endif
1825
1826 allocate:
1827                 navail = iaq->nirq;
1828                 rc = 0;
1829                 if (itype == INTR_MSIX)
1830                         rc = pci_alloc_msix(sc->dev, &navail);
1831                 else if (itype == INTR_MSI)
1832                         rc = pci_alloc_msi(sc->dev, &navail);
1833
1834                 if (rc == 0) {
1835                         if (navail == iaq->nirq)
1836                                 return (0);
1837
1838                         /*
1839                          * Didn't get the number requested.  Use whatever number
1840                          * the kernel is willing to allocate (it's in navail).
1841                          */
1842                         device_printf(sc->dev, "fewer vectors than requested, "
1843                             "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1844                             itype, iaq->nirq, navail);
1845                         pci_release_msi(sc->dev);
1846                         goto restart;
1847                 }
1848
1849                 device_printf(sc->dev,
1850                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1851                     itype, rc, iaq->nirq, navail);
1852         }
1853
1854         device_printf(sc->dev,
1855             "failed to find a usable interrupt type.  "
1856             "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1857             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1858
1859         return (ENXIO);
1860 }
1861
/*
 * FW_VERSION(chip) packs the per-chip major/minor/micro/build firmware
 * version constants into a single 32-bit value using the fw_hdr field
 * encodings.  FW_INTFVER(chip, intf) expands to the compiled-in interface
 * version constant for the given chip and interface (NIC, OFLD, ...).
 */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1868
/*
 * Per-chip firmware information table.  Each entry names the firmware KLD
 * (which also carries the default config file) and the firmware module, and
 * embeds the firmware header this driver was compiled against so that
 * compatibility with whatever is on the card/in the KLD can be checked.
 */
struct fw_info {
        uint8_t chip;           /* CHELSIO_T4 / CHELSIO_T5 */
        char *kld_name;         /* KLD with fw + default config file */
        char *fw_mod_name;      /* firmware module within the KLD */
        struct fw_hdr fw_hdr;   /* XXX: waste of space, need a sparse struct */
} fw_info[] = {
        {
                .chip = CHELSIO_T4,
                .kld_name = "t4fw_cfg",
                .fw_mod_name = "t4fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T4,
                        .fw_ver = htobe32_const(FW_VERSION(T4)),
                        .intfver_nic = FW_INTFVER(T4, NIC),
                        .intfver_vnic = FW_INTFVER(T4, VNIC),
                        .intfver_ofld = FW_INTFVER(T4, OFLD),
                        .intfver_ri = FW_INTFVER(T4, RI),
                        .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
                },
        }, {
                .chip = CHELSIO_T5,
                .kld_name = "t5fw_cfg",
                .fw_mod_name = "t5fw",
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T5,
                        .fw_ver = htobe32_const(FW_VERSION(T5)),
                        .intfver_nic = FW_INTFVER(T5, NIC),
                        .intfver_vnic = FW_INTFVER(T5, VNIC),
                        .intfver_ofld = FW_INTFVER(T5, OFLD),
                        .intfver_ri = FW_INTFVER(T5, RI),
                        .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
                        .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
                },
        }
};
1909
1910 static struct fw_info *
1911 find_fw_info(int chip)
1912 {
1913         int i;
1914
1915         for (i = 0; i < nitems(fw_info); i++) {
1916                 if (fw_info[i].chip == chip)
1917                         return (&fw_info[i]);
1918         }
1919         return (NULL);
1920 }
1921
1922 /*
1923  * Is the given firmware API compatible with the one the driver was compiled
1924  * with?
1925  */
1926 static int
1927 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1928 {
1929
1930         /* short circuit if it's the exact same firmware version */
1931         if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1932                 return (1);
1933
1934         /*
1935          * XXX: Is this too conservative?  Perhaps I should limit this to the
1936          * features that are supported in the driver.
1937          */
1938 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1939         if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1940             SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1941             SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1942                 return (1);
1943 #undef SAME_INTF
1944
1945         return (0);
1946 }
1947
1948 /*
1949  * The firmware in the KLD is usable, but should it be installed?  This routine
1950  * explains itself in detail if it indicates the KLD firmware should be
1951  * installed.
1952  */
1953 static int
1954 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1955 {
1956         const char *reason;
1957
1958         if (!card_fw_usable) {
1959                 reason = "incompatible or unusable";
1960                 goto install;
1961         }
1962
1963         if (k > c) {
1964                 reason = "older than the version bundled with this driver";
1965                 goto install;
1966         }
1967
1968         if (t4_fw_install == 2 && k != c) {
1969                 reason = "different than the version bundled with this driver";
1970                 goto install;
1971         }
1972
1973         return (0);
1974
1975 install:
1976         if (t4_fw_install == 0) {
1977                 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1978                     "but the driver is prohibited from installing a different "
1979                     "firmware on the card.\n",
1980                     G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1981                     G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1982
1983                 return (0);
1984         }
1985
1986         device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1987             "installing firmware %u.%u.%u.%u on card.\n",
1988             G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1989             G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1990             G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1991             G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1992
1993         return (1);
1994 }
/*
 * Establish contact with the firmware and determine if we are the master driver
 * or not, and whether we are responsible for chip initialization.
 *
 * On success the adapter flags MASTER_PF and FW_OK may be set, the running
 * firmware/TP versions are recorded in sc->params, and (if we are the master
 * of an uninitialized device) the chip is configured via the config file.
 * Returns 0 on success or a positive errno.
 */
static int
prep_firmware(struct adapter *sc)
{
        const struct firmware *fw = NULL, *default_cfg;
        int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
        enum dev_state state;
        struct fw_info *fw_info;
        struct fw_hdr *card_fw;         /* fw on the card */
        const struct fw_hdr *kld_fw;    /* fw in the KLD */
        const struct fw_hdr *drv_fw;    /* fw header the driver was compiled
                                           against */

        /* Contact firmware. */
        rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
        if (rc < 0 || state == DEV_STATE_ERR) {
                rc = -rc;
                device_printf(sc->dev,
                    "failed to connect to the firmware: %d, %d.\n", rc, state);
                return (rc);
        }
        /*
         * Non-negative rc identifies the PF holding the master role
         * (per t4_fw_hello's contract); it is ours iff it equals our mbox.
         */
        pf = rc;
        if (pf == sc->mbox)
                sc->flags |= MASTER_PF;
        else if (state == DEV_STATE_UNINIT) {
                /*
                 * We didn't get to be the master so we definitely won't be
                 * configuring the chip.  It's a bug if someone else hasn't
                 * configured it already.
                 */
                device_printf(sc->dev, "couldn't be master(%d), "
                    "device not already initialized either(%d).\n", rc, state);
                return (EDOOFUS);
        }

        /* This is the firmware whose headers the driver was compiled against */
        fw_info = find_fw_info(chip_id(sc));
        if (fw_info == NULL) {
                device_printf(sc->dev,
                    "unable to look up firmware information for chip %d.\n",
                    chip_id(sc));
                return (EINVAL);
        }
        drv_fw = &fw_info->fw_hdr;

        /*
         * The firmware KLD contains many modules.  The KLD name is also the
         * name of the module that contains the default config file.
         */
        default_cfg = firmware_get(fw_info->kld_name);

        /*
         * Read the header of the firmware on the card.  M_ZERO means the
         * version fields read as 0 if the flash read below fails.
         */
        card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_read_flash(sc, FLASH_FW_START,
            sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
        if (rc == 0)
                card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
        else {
                device_printf(sc->dev,
                    "Unable to read card's firmware header: %d\n", rc);
                card_fw_usable = 0;
        }

        /* This is the firmware in the KLD */
        fw = firmware_get(fw_info->fw_mod_name);
        if (fw != NULL) {
                kld_fw = (const void *)fw->data;
                kld_fw_usable = fw_compatible(drv_fw, kld_fw);
        } else {
                kld_fw = NULL;
                kld_fw_usable = 0;
        }

        if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
            (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
                /*
                 * Common case: the firmware on the card is an exact match and
                 * the KLD is an exact match too, or the KLD is
                 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
                 * here -- use cxgbetool loadfw if you want to reinstall the
                 * same firmware as the one on the card.
                 */
        } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
            should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
            be32toh(card_fw->fw_ver))) {

                rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "failed to install firmware: %d\n", rc);
                        goto done;
                }

                /* Installed successfully, update the cached header too. */
                memcpy(card_fw, kld_fw, sizeof(*card_fw));
                card_fw_usable = 1;
                need_fw_reset = 0;      /* already reset as part of load_fw */
        }

        if (!card_fw_usable) {
                uint32_t d, c, k;

                d = ntohl(drv_fw->fw_ver);
                c = ntohl(card_fw->fw_ver);
                k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

                device_printf(sc->dev, "Cannot find a usable firmware: "
                    "fw_install %d, chip state %d, "
                    "driver compiled with %d.%d.%d.%d, "
                    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
                    t4_fw_install, state,
                    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
                    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
                    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
                    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
                    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
                    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
                rc = EINVAL;
                goto done;
        }

        /* We're using whatever's on the card and it's known to be good. */
        sc->params.fw_vers = ntohl(card_fw->fw_ver);
        snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
            G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
            G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
            G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
        t4_get_tp_version(sc, &sc->params.tp_vers);

        /* Reset device */
        if (need_fw_reset &&
            (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
                device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
                /* NOTE(review): bye is skipped on timeout/EIO, presumably
                 * because the firmware is unresponsive — confirm. */
                if (rc != ETIMEDOUT && rc != EIO)
                        t4_fw_bye(sc, sc->mbox);
                goto done;
        }
        sc->flags |= FW_OK;

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /* Partition adapter resources as specified in the config file. */
        if (state == DEV_STATE_UNINIT) {

                KASSERT(sc->flags & MASTER_PF,
                    ("%s: trying to change chip settings when not master.",
                    __func__));

                rc = partition_resources(sc, default_cfg, fw_info->kld_name);
                if (rc != 0)
                        goto done;      /* error message displayed already */

                t4_tweak_chip_settings(sc);

                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        device_printf(sc->dev, "fw init failed: %d.\n", rc);
                        goto done;
                }
        } else {
                /* Someone else configured the chip; record that fact. */
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
                sc->cfcsum = 0;
        }

done:
        free(card_fw, M_CXGBE);
        if (fw != NULL)
                firmware_put(fw, FIRMWARE_UNLOAD);
        if (default_cfg != NULL)
                firmware_put(default_cfg, FIRMWARE_UNLOAD);

        return (rc);
}
2175
/*
 * Build the 32-bit parameter identifiers used with t4_query_params /
 * t4_set_params: FW_PARAM_DEV for device-wide parameters, FW_PARAM_PFVF for
 * per-PF/VF parameters.
 */
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2182
/*
 * Partition chip resources for use between various PFs, VFs, etc.
 *
 * Selects a configuration file (user tunable, card-specific override, KLD
 * module, or the copy on the card's flash as a last resort), uploads it to
 * card memory if needed, and asks the firmware to process it.  default_cfg
 * is the config from the firmware KLD (may be NULL); name_prefix is the KLD
 * name used to build alternate config module names.  Returns 0 on success.
 */
static int
partition_resources(struct adapter *sc, const struct firmware *default_cfg,
    const char *name_prefix)
{
        const struct firmware *cfg = NULL;
        int rc = 0;
        struct fw_caps_config_cmd caps;
        uint32_t mtype, moff, finicsum, cfcsum;

        /*
         * Figure out what configuration file to use.  Pick the default config
         * file for the card if the user hasn't specified one explicitly.
         */
        snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
        if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
                /* Card specific overrides go here. */
                if (pci_get_device(sc->dev) == 0x440a)
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
                if (is_fpga(sc))
                        snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
        }

        /*
         * We need to load another module if the profile is anything except
         * "default" or "flash".
         */
        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
            strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                char s[32];

                /* Module name is "<kld_name>_<profile>". */
                snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
                cfg = firmware_get(s);
                if (cfg == NULL) {
                        if (default_cfg != NULL) {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the default config file instead.\n",
                                    s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", DEFAULT_CF);
                        } else {
                                device_printf(sc->dev,
                                    "unable to load module \"%s\" for "
                                    "configuration profile \"%s\", will use "
                                    "the config file on the card's flash "
                                    "instead.\n", s, sc->cfg_file);
                                snprintf(sc->cfg_file, sizeof(sc->cfg_file),
                                    "%s", FLASH_CF);
                        }
                }
        }

        if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
            default_cfg == NULL) {
                device_printf(sc->dev,
                    "default config file not available, will use the config "
                    "file on the card's flash instead.\n");
                snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
        }

        if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
                u_int cflen, i, n;
                const uint32_t *cfdata;
                uint32_t param, val, addr, off, mw_base, mw_aperture;

                KASSERT(cfg != NULL || default_cfg != NULL,
                    ("%s: no config to upload", __func__));

                /*
                 * Ask the firmware where it wants us to upload the config file.
                 */
                param = FW_PARAM_DEV(CF);
                rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
                if (rc != 0) {
                        /* No support for config file?  Shouldn't happen. */
                        device_printf(sc->dev,
                            "failed to query config file location: %d.\n", rc);
                        goto done;
                }
                mtype = G_FW_PARAMS_PARAM_Y(val);
                moff = G_FW_PARAMS_PARAM_Z(val) << 16;

                /*
                 * XXX: sheer laziness.  We deliberately added 4 bytes of
                 * useless stuffing/comments at the end of the config file so
                 * it's ok to simply throw away the last remaining bytes when
                 * the config file is not an exact multiple of 4.  This also
                 * helps with the validate_mt_off_len check.
                 */
                if (cfg != NULL) {
                        cflen = cfg->datasize & ~3;
                        cfdata = cfg->data;
                } else {
                        cflen = default_cfg->datasize & ~3;
                        cfdata = default_cfg->data;
                }

                if (cflen > FLASH_CFG_MAX_SIZE) {
                        device_printf(sc->dev,
                            "config file too long (%d, max allowed is %d).  "
                            "Will try to use the config on the card, if any.\n",
                            cflen, FLASH_CFG_MAX_SIZE);
                        goto use_config_on_flash;
                }

                rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
                if (rc != 0) {
                        device_printf(sc->dev,
                            "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
                            "Will try to use the config on the card, if any.\n",
                            __func__, mtype, moff, cflen, rc);
                        goto use_config_on_flash;
                }

                /*
                 * Copy the config file into card memory through memory
                 * window 2, repositioning the window as needed.
                 */
                memwin_info(sc, 2, &mw_base, &mw_aperture);
                while (cflen) {
                        off = position_memwin(sc, 2, addr);
                        n = min(cflen, mw_aperture - off);
                        for (i = 0; i < n; i += 4)
                                t4_write_reg(sc, mw_base + off + i, *cfdata++);
                        cflen -= n;
                        addr += n;
                }
        } else {
use_config_on_flash:
                mtype = FW_MEMTYPE_FLASH;
                moff = t4_flash_cfg_addr(sc);
        }

        /* Tell the firmware where the config file is and pre-process it. */
        bzero(&caps, sizeof(caps));
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_READ);
        caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
            V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
            V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to pre-process config file: %d "
                    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
                goto done;
        }

        /* A checksum mismatch is reported but is not treated as fatal. */
        finicsum = be32toh(caps.finicsum);
        cfcsum = be32toh(caps.cfcsum);
        if (finicsum != cfcsum) {
                device_printf(sc->dev,
                    "WARNING: config file checksum mismatch: %08x %08x\n",
                    finicsum, cfcsum);
        }
        sc->cfcsum = cfcsum;

#define LIMIT_CAPS(x) do { \
        caps.x &= htobe16(t4_##x##_allowed); \
} while (0)

        /*
         * Let the firmware know what features will (not) be used so it can tune
         * things accordingly.
         */
        LIMIT_CAPS(linkcaps);
        LIMIT_CAPS(niccaps);
        LIMIT_CAPS(toecaps);
        LIMIT_CAPS(rdmacaps);
        LIMIT_CAPS(iscsicaps);
        LIMIT_CAPS(fcoecaps);
#undef LIMIT_CAPS

        /* Write the (possibly reduced) capabilities back to the firmware. */
        caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
            F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
        caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
        rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to process config file: %d.\n", rc);
        }
done:
        if (cfg != NULL)
                firmware_put(cfg, FIRMWARE_UNLOAD);
        return (rc);
}
2368
2369 /*
2370  * Retrieve parameters that are needed (or nice to have) very early.
2371  */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	/* Port vector and core clock are needed before further setup. */
	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = bitcount32(val[0]);	/* one set bit per port */
	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		/* Zero the params so later consumers see an empty devlog. */
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		/* Address field is in units of 16 bytes, hence << 4. */
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}
2413
2414 /*
2415  * Retrieve various parameters that are of interest to the driver.  The device
2416  * has been initialized by the firmware at this point.
2417  */
2418 static int
2419 get_params__post_init(struct adapter *sc)
2420 {
2421         int rc;
2422         uint32_t param[7], val[7];
2423         struct fw_caps_config_cmd caps;
2424
2425         param[0] = FW_PARAM_PFVF(IQFLINT_START);
2426         param[1] = FW_PARAM_PFVF(EQ_START);
2427         param[2] = FW_PARAM_PFVF(FILTER_START);
2428         param[3] = FW_PARAM_PFVF(FILTER_END);
2429         param[4] = FW_PARAM_PFVF(L2T_START);
2430         param[5] = FW_PARAM_PFVF(L2T_END);
2431         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2432         if (rc != 0) {
2433                 device_printf(sc->dev,
2434                     "failed to query parameters (post_init): %d.\n", rc);
2435                 return (rc);
2436         }
2437
2438         sc->sge.iq_start = val[0];
2439         sc->sge.eq_start = val[1];
2440         sc->tids.ftid_base = val[2];
2441         sc->tids.nftids = val[3] - val[2] + 1;
2442         sc->params.ftid_min = val[2];
2443         sc->params.ftid_max = val[3];
2444         sc->vres.l2t.start = val[4];
2445         sc->vres.l2t.size = val[5] - val[4] + 1;
2446         KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2447             ("%s: L2 table size (%u) larger than expected (%u)",
2448             __func__, sc->vres.l2t.size, L2T_SIZE));
2449
2450         /* get capabilites */
2451         bzero(&caps, sizeof(caps));
2452         caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2453             F_FW_CMD_REQUEST | F_FW_CMD_READ);
2454         caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2455         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2456         if (rc != 0) {
2457                 device_printf(sc->dev,
2458                     "failed to get card capabilities: %d.\n", rc);
2459                 return (rc);
2460         }
2461
2462 #define READ_CAPS(x) do { \
2463         sc->x = htobe16(caps.x); \
2464 } while (0)
2465         READ_CAPS(linkcaps);
2466         READ_CAPS(niccaps);
2467         READ_CAPS(toecaps);
2468         READ_CAPS(rdmacaps);
2469         READ_CAPS(iscsicaps);
2470         READ_CAPS(fcoecaps);
2471
2472         if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
2473                 param[0] = FW_PARAM_PFVF(ETHOFLD_START);
2474                 param[1] = FW_PARAM_PFVF(ETHOFLD_END);
2475                 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2476                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
2477                 if (rc != 0) {
2478                         device_printf(sc->dev,
2479                             "failed to query NIC parameters: %d.\n", rc);
2480                         return (rc);
2481                 }
2482                 sc->tids.etid_base = val[0];
2483                 sc->params.etid_min = val[0];
2484                 sc->tids.netids = val[1] - val[0] + 1;
2485                 sc->params.netids = sc->tids.netids;
2486                 sc->params.eo_wr_cred = val[2];
2487                 sc->params.ethoffload = 1;
2488         }
2489
2490         if (sc->toecaps) {
2491                 /* query offload-related parameters */
2492                 param[0] = FW_PARAM_DEV(NTID);
2493                 param[1] = FW_PARAM_PFVF(SERVER_START);
2494                 param[2] = FW_PARAM_PFVF(SERVER_END);
2495                 param[3] = FW_PARAM_PFVF(TDDP_START);
2496                 param[4] = FW_PARAM_PFVF(TDDP_END);
2497                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2498                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2499                 if (rc != 0) {
2500                         device_printf(sc->dev,
2501                             "failed to query TOE parameters: %d.\n", rc);
2502                         return (rc);
2503                 }
2504                 sc->tids.ntids = val[0];
2505                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2506                 sc->tids.stid_base = val[1];
2507                 sc->tids.nstids = val[2] - val[1] + 1;
2508                 sc->vres.ddp.start = val[3];
2509                 sc->vres.ddp.size = val[4] - val[3] + 1;
2510                 sc->params.ofldq_wr_cred = val[5];
2511                 sc->params.offload = 1;
2512         }
2513         if (sc->rdmacaps) {
2514                 param[0] = FW_PARAM_PFVF(STAG_START);
2515                 param[1] = FW_PARAM_PFVF(STAG_END);
2516                 param[2] = FW_PARAM_PFVF(RQ_START);
2517                 param[3] = FW_PARAM_PFVF(RQ_END);
2518                 param[4] = FW_PARAM_PFVF(PBL_START);
2519                 param[5] = FW_PARAM_PFVF(PBL_END);
2520                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2521                 if (rc != 0) {
2522                         device_printf(sc->dev,
2523                             "failed to query RDMA parameters(1): %d.\n", rc);
2524                         return (rc);
2525                 }
2526                 sc->vres.stag.start = val[0];
2527                 sc->vres.stag.size = val[1] - val[0] + 1;
2528                 sc->vres.rq.start = val[2];
2529                 sc->vres.rq.size = val[3] - val[2] + 1;
2530                 sc->vres.pbl.start = val[4];
2531                 sc->vres.pbl.size = val[5] - val[4] + 1;
2532
2533                 param[0] = FW_PARAM_PFVF(SQRQ_START);
2534                 param[1] = FW_PARAM_PFVF(SQRQ_END);
2535                 param[2] = FW_PARAM_PFVF(CQ_START);
2536                 param[3] = FW_PARAM_PFVF(CQ_END);
2537                 param[4] = FW_PARAM_PFVF(OCQ_START);
2538                 param[5] = FW_PARAM_PFVF(OCQ_END);
2539                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2540                 if (rc != 0) {
2541                         device_printf(sc->dev,
2542                             "failed to query RDMA parameters(2): %d.\n", rc);
2543                         return (rc);
2544                 }
2545                 sc->vres.qp.start = val[0];
2546                 sc->vres.qp.size = val[1] - val[0] + 1;
2547                 sc->vres.cq.start = val[2];
2548                 sc->vres.cq.size = val[3] - val[2] + 1;
2549                 sc->vres.ocq.start = val[4];
2550                 sc->vres.ocq.size = val[5] - val[4] + 1;
2551         }
2552         if (sc->iscsicaps) {
2553                 param[0] = FW_PARAM_PFVF(ISCSI_START);
2554                 param[1] = FW_PARAM_PFVF(ISCSI_END);
2555                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2556                 if (rc != 0) {
2557                         device_printf(sc->dev,
2558                             "failed to query iSCSI parameters: %d.\n", rc);
2559                         return (rc);
2560                 }
2561                 sc->vres.iscsi.start = val[0];
2562                 sc->vres.iscsi.size = val[1] - val[0] + 1;
2563         }
2564
2565         /*
2566          * We've got the params we wanted to query via the firmware.  Now grab
2567          * some others directly from the chip.
2568          */
2569         rc = t4_read_chip_settings(sc);
2570
2571         return (rc);
2572 }
2573
2574 static int
2575 set_params__post_init(struct adapter *sc)
2576 {
2577         uint32_t param, val;
2578
2579         /* ask for encapsulated CPLs */
2580         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2581         val = 1;
2582         (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2583
2584         return (0);
2585 }
2586
2587 #undef FW_PARAM_PFVF
2588 #undef FW_PARAM_DEV
2589
2590 static void
2591 t4_set_desc(struct adapter *sc)
2592 {
2593         char buf[128];
2594         struct adapter_params *p = &sc->params;
2595
2596         snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2597             "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2598             chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2599
2600         device_set_desc_copy(sc->dev, buf);
2601 }
2602
2603 static void
2604 build_medialist(struct port_info *pi)
2605 {
2606         struct ifmedia *media = &pi->media;
2607         int data, m;
2608
2609         PORT_LOCK(pi);
2610
2611         ifmedia_removeall(media);
2612
2613         m = IFM_ETHER | IFM_FDX;
2614         data = (pi->port_type << 8) | pi->mod_type;
2615
2616         switch(pi->port_type) {
2617         case FW_PORT_TYPE_BT_XFI:
2618                 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2619                 break;
2620
2621         case FW_PORT_TYPE_BT_XAUI:
2622                 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2623                 /* fall through */
2624
2625         case FW_PORT_TYPE_BT_SGMII:
2626                 ifmedia_add(media, m | IFM_1000_T, data, NULL);
2627                 ifmedia_add(media, m | IFM_100_TX, data, NULL);
2628                 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2629                 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2630                 break;
2631
2632         case FW_PORT_TYPE_CX4:
2633                 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2634                 ifmedia_set(media, m | IFM_10G_CX4);
2635                 break;
2636
2637         case FW_PORT_TYPE_QSFP_10G:
2638         case FW_PORT_TYPE_SFP:
2639         case FW_PORT_TYPE_FIBER_XFI:
2640         case FW_PORT_TYPE_FIBER_XAUI:
2641                 switch (pi->mod_type) {
2642
2643                 case FW_PORT_MOD_TYPE_LR:
2644                         ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2645                         ifmedia_set(media, m | IFM_10G_LR);
2646                         break;
2647
2648                 case FW_PORT_MOD_TYPE_SR:
2649                         ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2650                         ifmedia_set(media, m | IFM_10G_SR);
2651                         break;
2652
2653                 case FW_PORT_MOD_TYPE_LRM:
2654                         ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2655                         ifmedia_set(media, m | IFM_10G_LRM);
2656                         break;
2657
2658                 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2659                 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2660                         ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2661                         ifmedia_set(media, m | IFM_10G_TWINAX);
2662                         break;
2663
2664                 case FW_PORT_MOD_TYPE_NONE:
2665                         m &= ~IFM_FDX;
2666                         ifmedia_add(media, m | IFM_NONE, data, NULL);
2667                         ifmedia_set(media, m | IFM_NONE);
2668                         break;
2669
2670                 case FW_PORT_MOD_TYPE_NA:
2671                 case FW_PORT_MOD_TYPE_ER:
2672                 default:
2673                         device_printf(pi->dev,
2674                             "unknown port_type (%d), mod_type (%d)\n",
2675                             pi->port_type, pi->mod_type);
2676                         ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2677                         ifmedia_set(media, m | IFM_UNKNOWN);
2678                         break;
2679                 }
2680                 break;
2681
2682         case FW_PORT_TYPE_QSFP:
2683                 switch (pi->mod_type) {
2684
2685                 case FW_PORT_MOD_TYPE_LR:
2686                         ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2687                         ifmedia_set(media, m | IFM_40G_LR4);
2688                         break;
2689
2690                 case FW_PORT_MOD_TYPE_SR:
2691                         ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2692                         ifmedia_set(media, m | IFM_40G_SR4);
2693                         break;
2694
2695                 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2696                 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2697                         ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2698                         ifmedia_set(media, m | IFM_40G_CR4);
2699                         break;
2700
2701                 case FW_PORT_MOD_TYPE_NONE:
2702                         m &= ~IFM_FDX;
2703                         ifmedia_add(media, m | IFM_NONE, data, NULL);
2704                         ifmedia_set(media, m | IFM_NONE);
2705                         break;
2706
2707                 default:
2708                         device_printf(pi->dev,
2709                             "unknown port_type (%d), mod_type (%d)\n",
2710                             pi->port_type, pi->mod_type);
2711                         ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2712                         ifmedia_set(media, m | IFM_UNKNOWN);
2713                         break;
2714                 }
2715                 break;
2716
2717         default:
2718                 device_printf(pi->dev,
2719                     "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2720                     pi->mod_type);
2721                 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2722                 ifmedia_set(media, m | IFM_UNKNOWN);
2723                 break;
2724         }
2725
2726         PORT_UNLOCK(pi);
2727 }
2728
2729 #define FW_MAC_EXACT_CHUNK      7
2730
2731 /*
2732  * Program the port's XGMAC based on parameters in ifnet.  The caller also
2733  * indicates which parameters should be programmed (the rest are left alone).
2734  */
static int
update_mac_settings(struct port_info *pi, int flags)
{
	int rc;
	struct ifnet *ifp = pi->ifp;
	struct adapter *sc = pi->adapter;
	/* -1 tells t4_set_rxmode to leave that particular setting alone. */
	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT(flags, ("%s: not told what to update.", __func__));

	if (flags & XGMAC_MTU)
		mtu = ifp->if_mtu;

	if (flags & XGMAC_PROMISC)
		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

	if (flags & XGMAC_ALLMULTI)
		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

	if (flags & XGMAC_VLANEX)
		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
	    vlanex, false);
	if (rc) {
		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
		return (rc);
	}

	if (flags & XGMAC_UCADDR) {
		uint8_t ucaddr[ETHER_ADDR_LEN];

		/* Program the interface lladdr as the unicast filter. */
		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
		    ucaddr, true, true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "change_mac failed: %d\n", rc);
			return (rc);
		} else {
			/* Non-negative return is the new filter index. */
			pi->xact_addr_filt = rc;
			rc = 0;
		}
	}

	if (flags & XGMAC_MCADDRS) {
		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
		int del = 1;	/* del is nonzero only for the first chunk */
		uint64_t hash = 0;
		struct ifmultiaddr *ifma;
		int i = 0, j;

		/*
		 * Walk the multicast list under the maddr lock, pushing
		 * addresses to the firmware FW_MAC_EXACT_CHUNK at a time.
		 */
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mcaddr[i++] =
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

			if (i == FW_MAC_EXACT_CHUNK) {
				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
				    del, i, mcaddr, NULL, &hash, 0);
				if (rc < 0) {
					rc = -rc;
					for (j = 0; j < i; j++) {
						if_printf(ifp,
						    "failed to add mc address"
						    " %02x:%02x:%02x:"
						    "%02x:%02x:%02x rc=%d\n",
						    mcaddr[j][0], mcaddr[j][1],
						    mcaddr[j][2], mcaddr[j][3],
						    mcaddr[j][4], mcaddr[j][5],
						    rc);
					}
					goto mcfail;
				}
				del = 0;
				i = 0;
			}
		}
		if (i > 0) {
			/* Flush the final, partially filled chunk. */
			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
			    del, i, mcaddr, NULL, &hash, 0);
			if (rc < 0) {
				rc = -rc;
				for (j = 0; j < i; j++) {
					if_printf(ifp,
					    "failed to add mc address"
					    " %02x:%02x:%02x:"
					    "%02x:%02x:%02x rc=%d\n",
					    mcaddr[j][0], mcaddr[j][1],
					    mcaddr[j][2], mcaddr[j][3],
					    mcaddr[j][4], mcaddr[j][5],
					    rc);
				}
				goto mcfail;
			}
		}

		/* Program the hash that t4_alloc_mac_filt accumulated. */
		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
		if_maddr_runlock(ifp);
	}

	return (rc);
}
2844
2845 int
2846 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2847     char *wmesg)
2848 {
2849         int rc, pri;
2850
2851 #ifdef WITNESS
2852         /* the caller thinks it's ok to sleep, but is it really? */
2853         if (flags & SLEEP_OK)
2854                 pause("t4slptst", 1);
2855 #endif
2856
2857         if (INTR_OK)
2858                 pri = PCATCH;
2859         else
2860                 pri = 0;
2861
2862         ADAPTER_LOCK(sc);
2863         for (;;) {
2864
2865                 if (pi && IS_DOOMED(pi)) {
2866                         rc = ENXIO;
2867                         goto done;
2868                 }
2869
2870                 if (!IS_BUSY(sc)) {
2871                         rc = 0;
2872                         break;
2873                 }
2874
2875                 if (!(flags & SLEEP_OK)) {
2876                         rc = EBUSY;
2877                         goto done;
2878                 }
2879
2880                 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2881                         rc = EINTR;
2882                         goto done;
2883                 }
2884         }
2885
2886         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2887         SET_BUSY(sc);
2888 #ifdef INVARIANTS
2889         sc->last_op = wmesg;
2890         sc->last_op_thr = curthread;
2891 #endif
2892
2893 done:
2894         if (!(flags & HOLD_LOCK) || rc)
2895                 ADAPTER_UNLOCK(sc);
2896
2897         return (rc);
2898 }
2899
2900 void
2901 end_synchronized_op(struct adapter *sc, int flags)
2902 {
2903
2904         if (flags & LOCK_HELD)
2905                 ADAPTER_LOCK_ASSERT_OWNED(sc);
2906         else
2907                 ADAPTER_LOCK(sc);
2908
2909         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2910         CLR_BUSY(sc);
2911         wakeup(&sc->flags);
2912         ADAPTER_UNLOCK(sc);
2913 }
2914
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* Lazily complete adapter-wide, then port-wide, initialization. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc); /* error message displayed already */

	/* Program the MAC with everything (MTU, filters, etc.). */
	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	/* Enable the virtual interface for both rx and tx. */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the once-a-second port tick. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);	/* undo partial bring-up */

	return (rc);
}
2967
2968 /*
2969  * Idempotent.
2970  */
static int
cxgbe_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/*
	 * Disable the VI so that all its data in either direction is discarded
	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
	 * tick) intact as the TP can deliver negative advice or data that it's
	 * holding in its RAM (for an offloaded connection) even after the VI is
	 * disabled.
	 */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
	if (rc) {
		if_printf(ifp, "disable_vi failed: %d\n", rc);
		return (rc);
	}

	/* Mark the port closed. */
	clrbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Clear link state and report the link as down to the OS. */
	pi->link_cfg.link_ok = 0;
	pi->link_cfg.speed = 0;
	pi->linkdnrc = -1;
	t4_os_link_changed(sc, pi->port_id, 0, -1);

	return (0);
}
3005
3006 /*
3007  * It is ok for this function to fail midway and return right away.  t4_detach
3008  * will walk the entire sc->irq list and clean up whatever is valid.
3009  */
static int
setup_intr_handlers(struct adapter *sc)
{
	int rc, rid, p, q;
	char s[8];
	struct irq *irq;
	struct port_info *pi;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif

	/*
	 * Setup interrupts.
	 */
	irq = &sc->irq[0];
	rid = sc->intr_type == INTR_INTX ? 0 : 1;	/* INTx uses rid 0 */
	if (sc->intr_count == 1) {
		/* Single vector: one handler services everything. */
		KASSERT(!(sc->flags & INTR_DIRECT),
		    ("%s: single interrupt && INTR_DIRECT?", __func__));

		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
		if (rc != 0)
			return (rc);
	} else {
		/* Multiple interrupts. */
		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
		    ("%s: too few intr.", __func__));

		/* The first one is always error intr */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/* The second one is always the firmware event queue */
		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
		    "evt");
		if (rc != 0)
			return (rc);
		irq++;
		rid++;

		/*
		 * Note that if INTR_DIRECT is not set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queues will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, p) {
			pi = sc->port[p];

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			/* One vector per NIC rx queue, named "port.queue". */
			rxq = &sc->sge.rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				snprintf(s, sizeof(s), "%d.%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
				    s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}

#ifdef TCP_OFFLOAD
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if (!(sc->flags & INTR_DIRECT))
				continue;
ofld_queues:
			/* One vector per offload rx queue, named "port,queue". */
			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				snprintf(s, sizeof(s), "%d,%d", p, q);
				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
				    ofld_rxq, s);
				if (rc != 0)
					return (rc);
				irq++;
				rid++;
			}
#endif
		}
	}

	return (0);
}
3109
static int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	/* One taskqueue, with one thread, per slot in sc->tq. */
	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);	/* undo any partial setup */

	return (rc);
}
3147
3148 static int
3149 adapter_full_uninit(struct adapter *sc)
3150 {
3151         int i;
3152
3153         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3154
3155         t4_teardown_adapter_queues(sc);
3156
3157         for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3158                 taskqueue_free(sc->tq[i]);
3159                 sc->tq[i] = NULL;
3160         }
3161
3162         sc->flags &= ~FULL_INIT_DONE;
3163
3164         return (0);
3165 }
3166
3167 static int
3168 port_full_init(struct port_info *pi)
3169 {
3170         struct adapter *sc = pi->adapter;
3171         struct ifnet *ifp = pi->ifp;
3172         uint16_t *rss;
3173         struct sge_rxq *rxq;
3174         int rc, i, j;
3175
3176         ASSERT_SYNCHRONIZED_OP(sc);
3177         KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3178             ("%s: PORT_INIT_DONE already", __func__));
3179
3180         sysctl_ctx_init(&pi->ctx);
3181         pi->flags |= PORT_SYSCTL_CTX;
3182
3183         /*
3184          * Allocate tx/rx/fl queues for this port.
3185          */
3186         rc = t4_setup_port_queues(pi);
3187         if (rc != 0)
3188                 goto done;      /* error message displayed already */
3189
3190         /*
3191          * Setup RSS for this port.  Save a copy of the RSS table for later use.
3192          */
3193         rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3194         for (i = 0; i < pi->rss_size;) {
3195                 for_each_rxq(pi, j, rxq) {
3196                         rss[i++] = rxq->iq.abs_id;
3197                         if (i == pi->rss_size)
3198                                 break;
3199                 }
3200         }
3201
3202         rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3203             pi->rss_size);
3204         if (rc != 0) {
3205                 if_printf(ifp, "rss_config failed: %d\n", rc);
3206                 goto done;
3207         }
3208
3209         pi->rss = rss;
3210         pi->flags |= PORT_INIT_DONE;
3211 done:
3212         if (rc != 0)
3213                 port_full_uninit(pi);
3214
3215         return (rc);
3216 }
3217
3218 /*
3219  * Idempotent.
3220  */
3221 static int
3222 port_full_uninit(struct port_info *pi)
3223 {
3224         struct adapter *sc = pi->adapter;
3225         int i;
3226         struct sge_rxq *rxq;
3227         struct sge_txq *txq;
3228 #ifdef TCP_OFFLOAD
3229         struct sge_ofld_rxq *ofld_rxq;
3230         struct sge_wrq *ofld_txq;
3231 #endif
3232
3233         if (pi->flags & PORT_INIT_DONE) {
3234
3235                 /* Need to quiesce queues.  XXX: ctrl queues? */
3236
3237                 for_each_txq(pi, i, txq) {
3238                         quiesce_eq(sc, &txq->eq);
3239                 }
3240
3241 #ifdef TCP_OFFLOAD
3242                 for_each_ofld_txq(pi, i, ofld_txq) {
3243                         quiesce_eq(sc, &ofld_txq->eq);
3244                 }
3245 #endif
3246
3247                 for_each_rxq(pi, i, rxq) {
3248                         quiesce_iq(sc, &rxq->iq);
3249                         quiesce_fl(sc, &rxq->fl);
3250                 }
3251
3252 #ifdef TCP_OFFLOAD
3253                 for_each_ofld_rxq(pi, i, ofld_rxq) {
3254                         quiesce_iq(sc, &ofld_rxq->iq);
3255                         quiesce_fl(sc, &ofld_rxq->fl);
3256                 }
3257 #endif
3258                 free(pi->rss, M_CXGBE);
3259         }
3260
3261         t4_teardown_port_queues(pi);
3262         pi->flags &= ~PORT_INIT_DONE;
3263
3264         return (0);
3265 }
3266
/*
 * Quiesce an egress queue before it is destroyed: mark it doomed so no new
 * work is started on it, wait out any in-flight credit flush, then let the
 * tx callout and tx task run to completion.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;	/* refuses further tx work from this point */

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	/*
	 * Drain the tx callout and then give anything it may have kicked off
	 * a moment to finish.  Both steps are acknowledged best-effort.
	 */
	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	/* Wait for any queued tx task on this eq's channel to finish. */
	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3286
3287 static void
3288 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3289 {
3290         (void) sc;      /* unused */
3291
3292         /* Synchronize with the interrupt handler */
3293         while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3294                 pause("iqfree", 1);
3295 }
3296
/*
 * Quiesce a freelist: mark it doomed so the starvation handler stops
 * touching it, then drain the adapter-wide starvation callout.  The
 * sfl_lock/FL lock ordering here matches the callout's, which is why
 * both are taken before setting FL_DOOMED.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;	/* starvation handler will skip this fl now */
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	/* A doomed fl must no longer be on the starving-freelists list. */
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3310
3311 static int
3312 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3313     driver_intr_t *handler, void *arg, char *name)
3314 {
3315         int rc;
3316
3317         irq->rid = rid;
3318         irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3319             RF_SHAREABLE | RF_ACTIVE);
3320         if (irq->res == NULL) {
3321                 device_printf(sc->dev,
3322                     "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3323                 return (ENOMEM);
3324         }
3325
3326         rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3327             NULL, handler, arg, &irq->tag);
3328         if (rc != 0) {
3329                 device_printf(sc->dev,
3330                     "failed to setup interrupt for rid %d, name %s: %d\n",
3331                     rid, name, rc);
3332         } else if (name)
3333                 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3334
3335         return (rc);
3336 }
3337
3338 static int
3339 t4_free_irq(struct adapter *sc, struct irq *irq)
3340 {
3341         if (irq->tag)
3342                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3343         if (irq->res)
3344                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3345
3346         bzero(irq, sizeof(*irq));
3347
3348         return (0);
3349 }
3350
3351 static void
3352 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3353     unsigned int end)
3354 {
3355         uint32_t *p = (uint32_t *)(buf + start);
3356
3357         for ( ; start <= end; start += sizeof(uint32_t))
3358                 *p++ = t4_read_reg(sc, start);
3359 }
3360
/*
 * Fill buf with a register dump of the chip and record the dump version in
 * regs->version.  The set of ranges dumped depends on whether this is a T4
 * or a T5 adapter; each table below is a flat list of {first, last} register
 * address pairs (inclusive, byte addresses of 32-bit registers).
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* T4 register ranges: pairs of {start, end} addresses. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11110,
		0x11118, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* T5 register ranges: same {start, end} pair layout as above. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x11110,
		0x11118, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d00, 0x30d00,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e00, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32200, 0x32200,
		0x32208, 0x32240,
		0x32248, 0x32280,
		0x32288, 0x322c0,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d00, 0x34d00,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e00, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36200, 0x36200,
		0x36208, 0x36240,
		0x36248, 0x36280,
		0x36288, 0x362c0,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d00, 0x38d00,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e00, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a200, 0x3a200,
		0x3a208, 0x3a240,
		0x3a248, 0x3a280,
		0x3a288, 0x3a2c0,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd00, 0x3cd00,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de00, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e200, 0x3e200,
		0x3e208, 0x3e240,
		0x3e248, 0x3e280,
		0x3e288, 0x3e2c0,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Pick the table that matches the chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	/* Version encodes chip id and revision so userland can parse buf. */
	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	/* Walk the pairs; each iteration dumps one inclusive range. */
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
4038
/*
 * Periodic per-port callout: refresh the hardware port statistics and
 * publish them into the ifnet counters, then reschedule itself for one
 * second later.  Stops rescheduling once the interface is down.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	/* Pull the latest MAC statistics for this port into pi->stats. */
	t4_get_port_stats(sc, pi->tx_chan, s);

	/* Pause frames are counted by the MAC but excluded from the ifnet
	 * packet/byte counters (64 bytes assumed per pause frame). */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	/* Input drops: overflow and truncation counters for all 4 buffer
	 * groups, plus per-channel tunnel congestion drops added below. */
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;
	for (i = 0; i < 4; i++) {
		if (pi->rx_chan_map & (1 << i)) {
			uint32_t v;

			/*
			 * XXX: indirect reads from the same ADDR/DATA pair can
			 * race with each other.
			 */
			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
			ifp->if_iqdrops += v;
		}
	}

	/* Output drops: hardware drops plus each tx queue's buf_ring drops. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	/* Re-arm for the next tick (1 second). */
	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
4092
/*
 * Default handler for CPL messages whose opcode has no registered handler
 * (installed in every cpl_handler[] slot by default).  Panics under
 * INVARIANTS; otherwise logs the stray opcode and frees the payload mbuf.
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
        panic("%s: opcode 0x%02x on iq %p with payload %p",
            __func__, rss->opcode, iq, m);
#else
        log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
            __func__, rss->opcode, iq, m);
        m_freem(m);
#endif
        return (EDOOFUS);
}
4107
4108 int
4109 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4110 {
4111         uintptr_t *loc, new;
4112
4113         if (opcode >= nitems(sc->cpl_handler))
4114                 return (EINVAL);
4115
4116         new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4117         loc = (uintptr_t *) &sc->cpl_handler[opcode];
4118         atomic_store_rel_ptr(loc, new);
4119
4120         return (0);
4121 }
4122
/*
 * Default handler for ingress-queue async notifications with no
 * registered handler.  Panics under INVARIANTS, otherwise just logs.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
        panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
        log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
            __func__, iq, ctrl);
#endif
        return (EDOOFUS);
}
4135
4136 int
4137 t4_register_an_handler(struct adapter *sc, an_handler_t h)
4138 {
4139         uintptr_t *loc, new;
4140
4141         new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4142         loc = (uintptr_t *) &sc->an_handler;
4143         atomic_store_rel_ptr(loc, new);
4144
4145         return (0);
4146 }
4147
/*
 * Default handler for firmware messages of a type with no registered
 * handler.  rpl points at the data[] member of the containing
 * cpl_fw6_msg; member2struct() recovers the enclosing CPL so the
 * message type can be reported.  Panics under INVARIANTS.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
        __be64 *r = __DECONST(__be64 *, rpl);
        struct cpl_fw6_msg *cpl = member2struct(cpl_fw6_msg, data, r);

#ifdef INVARIANTS
        panic("%s: fw_msg type %d", __func__, cpl->type);
#else
        log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
        return (EDOOFUS);
}
4161
4162 int
4163 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4164 {
4165         uintptr_t *loc, new;
4166
4167         if (type >= nitems(sc->fw_msg_handler))
4168                 return (EINVAL);
4169
4170         /*
4171          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4172          * handler dispatch table.  Reject any attempt to install a handler for
4173          * this subtype.
4174          */
4175         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4176                 return (EINVAL);
4177
4178         new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4179         loc = (uintptr_t *) &sc->fw_msg_handler[type];
4180         atomic_store_rel_ptr(loc, new);
4181
4182         return (0);
4183 }
4184
4185 static int
4186 t4_sysctls(struct adapter *sc)
4187 {
4188         struct sysctl_ctx_list *ctx;
4189         struct sysctl_oid *oid;
4190         struct sysctl_oid_list *children, *c0;
4191         static char *caps[] = {
4192                 "\20\1PPP\2QFC\3DCBX",                  /* caps[0] linkcaps */
4193                 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"        /* caps[1] niccaps */
4194                     "\6HASHFILTER\7ETHOFLD",
4195                 "\20\1TOE",                             /* caps[2] toecaps */
4196                 "\20\1RDDP\2RDMAC",                     /* caps[3] rdmacaps */
4197                 "\20\1INITIATOR_PDU\2TARGET_PDU"        /* caps[4] iscsicaps */
4198                     "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4199                     "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4200                 "\20\1INITIATOR\2TARGET\3CTRL_OFLD"     /* caps[5] fcoecaps */
4201                     "\4PO_INITIAOR\5PO_TARGET"
4202         };
4203         static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4204
4205         ctx = device_get_sysctl_ctx(sc->dev);
4206
4207         /*
4208          * dev.t4nex.X.
4209          */
4210         oid = device_get_sysctl_tree(sc->dev);
4211         c0 = children = SYSCTL_CHILDREN(oid);
4212
4213         sc->sc_do_rxcopy = 1;
4214         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4215             &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4216
4217         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4218             sc->params.nports, "# of ports");
4219
4220         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4221             NULL, chip_rev(sc), "chip hardware revision");
4222
4223         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4224             CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4225
4226         SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4227             CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4228
4229         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4230             sc->cfcsum, "config file checksum");
4231
4232         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4233             CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4234             sysctl_bitfield, "A", "available doorbells");
4235
4236         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4237             CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4238             sysctl_bitfield, "A", "available link capabilities");
4239
4240         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4241             CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4242             sysctl_bitfield, "A", "available NIC capabilities");
4243
4244         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4245             CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4246             sysctl_bitfield, "A", "available TCP offload capabilities");
4247
4248         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4249             CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4250             sysctl_bitfield, "A", "available RDMA capabilities");
4251
4252         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4253             CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4254             sysctl_bitfield, "A", "available iSCSI capabilities");
4255
4256         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4257             CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4258             sysctl_bitfield, "A", "available FCoE capabilities");
4259
4260         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4261             sc->params.vpd.cclk, "core clock frequency (in KHz)");
4262
4263         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4264             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4265             sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4266             "interrupt holdoff timer values (us)");
4267
4268         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4269             CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4270             sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4271             "interrupt holdoff packet counter values");
4272
4273         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4274             NULL, sc->tids.nftids, "number of filters");
4275
4276         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4277             CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4278             "chip temperature (in Celsius)");
4279
4280         t4_sge_sysctls(sc, ctx, children);
4281
4282 #ifdef SBUF_DRAIN
4283         /*
4284          * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4285          */
4286         oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4287             CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4288             "logs and miscellaneous information");
4289         children = SYSCTL_CHILDREN(oid);
4290
4291         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4292             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4293             sysctl_cctrl, "A", "congestion control");
4294
4295         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4296             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4297             sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4298
4299         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4300             CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4301             sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4302
4303         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4304             CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4305             sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4306
4307         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4308             CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4309             sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4310
4311         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4312             CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4313             sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4314
4315         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4316             CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4317             sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4318
4319         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4320             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4321             sysctl_cim_la, "A", "CIM logic analyzer");
4322
4323         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4324             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4325             sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4326
4327         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4328             CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4329             sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4330
4331         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4332             CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4333             sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4334
4335         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4336             CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4337             sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4338
4339         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4340             CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4341             sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4342
4343         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4344             CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4345             sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4346
4347         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4348             CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4349             sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4350
4351         if (is_t5(sc)) {
4352                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4353                     CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4354                     sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4355
4356                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4357                     CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4358                     sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4359         }
4360
4361         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4362             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4363             sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4364
4365         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4366             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4367             sysctl_cim_qcfg, "A", "CIM queue configuration");
4368
4369         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4370             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4371             sysctl_cpl_stats, "A", "CPL statistics");
4372
4373         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4374             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4375             sysctl_ddp_stats, "A", "DDP statistics");
4376
4377         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4378             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4379             sysctl_devlog, "A", "firmware's device log");
4380
4381         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4382             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4383             sysctl_fcoe_stats, "A", "FCoE statistics");
4384
4385         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4386             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4387             sysctl_hw_sched, "A", "hardware scheduler ");
4388
4389         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4390             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4391             sysctl_l2t, "A", "hardware L2 table");
4392
4393         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4394             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4395             sysctl_lb_stats, "A", "loopback statistics");
4396
4397         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4398             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4399             sysctl_meminfo, "A", "memory regions");
4400
4401         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4402             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4403             sysctl_mps_tcam, "A", "MPS TCAM entries");
4404
4405         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4406             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4407             sysctl_path_mtus, "A", "path MTUs");
4408
4409         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4410             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4411             sysctl_pm_stats, "A", "PM statistics");
4412
4413         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4414             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4415             sysctl_rdma_stats, "A", "RDMA statistics");
4416
4417         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4418             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4419             sysctl_tcp_stats, "A", "TCP statistics");
4420
4421         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4422             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4423             sysctl_tids, "A", "TID information");
4424
4425         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4426             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4427             sysctl_tp_err_stats, "A", "TP error statistics");
4428
4429         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4430             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4431             sysctl_tp_la, "A", "TP logic analyzer");
4432
4433         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4434             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4435             sysctl_tx_rate, "A", "Tx rate");
4436
4437         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4438             CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4439             sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4440
4441         if (is_t5(sc)) {
4442                 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4443                     CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4444                     sysctl_wcwr_stats, "A", "write combined work requests");
4445         }
4446 #endif
4447
4448 #ifdef TCP_OFFLOAD
4449         if (is_offload(sc)) {
4450                 /*
4451                  * dev.t4nex.X.toe.
4452                  */
4453                 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4454                     NULL, "TOE parameters");
4455                 children = SYSCTL_CHILDREN(oid);
4456
4457                 sc->tt.sndbuf = 256 * 1024;
4458                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4459                     &sc->tt.sndbuf, 0, "max hardware send buffer size");
4460
4461                 sc->tt.ddp = 0;
4462                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4463                     &sc->tt.ddp, 0, "DDP allowed");
4464
4465                 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4466                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4467                     &sc->tt.indsz, 0, "DDP max indicate size allowed");
4468
4469                 sc->tt.ddp_thres =
4470                     G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4471                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4472                     &sc->tt.ddp_thres, 0, "DDP threshold");
4473
4474                 sc->tt.rx_coalesce = 1;
4475                 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4476                     CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4477         }
4478 #endif
4479
4480
4481         return (0);
4482 }
4483
/*
 * Set up the per-port sysctl tree (dev.cxgbe.X.*): queue counts and
 * first-queue indices, holdoff and queue-size knobs, BT PHY nodes where
 * applicable, and a "stats" subtree exposing the MAC statistics.
 * Called once during port attach; always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;
        struct adapter *sc = pi->adapter;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
           CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
        /* BT PHY nodes exist only on BT_XAUI ports; arg2 selects the op. */
        if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
                    "PHY temperature (in Celsius)");
                SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
                    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
                    "PHY firmware version");
        }
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
            CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
            "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
                    &pi->nofldrxq, 0,
                    "# of rx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
                    &pi->nofldtxq, 0,
                    "# of tx queues for offloaded TCP connections");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
                    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
                    "index of first TOE rx queue");
                SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
                    CTLFLAG_RD, &pi->first_ofld_txq, 0,
                    "index of first TOE tx queue");
        }
#endif

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Adds a node backed by sysctl_handle_t4_reg64 with a register offset as
 * arg2, so the value is read from the hardware on each access.
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Adds a node backed directly by a pi->stats field. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /*
         * We get these from port_stats, which cxgbe_tick refreshes once a
         * second, so they may be stale by up to 1s.
         */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
4711
4712 static int
4713 sysctl_int_array(SYSCTL_HANDLER_ARGS)
4714 {
4715         int rc, *i;
4716         struct sbuf sb;
4717
4718         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4719         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4720                 sbuf_printf(&sb, "%d ", *i);
4721         sbuf_trim(&sb);
4722         sbuf_finish(&sb);
4723         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4724         sbuf_delete(&sb);
4725         return (rc);
4726 }
4727
4728 static int
4729 sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4730 {
4731         int rc;
4732         struct sbuf *sb;
4733
4734         rc = sysctl_wire_old_buffer(req, 0);
4735         if (rc != 0)
4736                 return(rc);
4737
4738         sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4739         if (sb == NULL)
4740                 return (ENOMEM);
4741
4742         sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4743         rc = sbuf_finish(sb);
4744         sbuf_delete(sb);
4745
4746         return (rc);
4747 }
4748
/*
 * Read a value from the 10GBASE-T PHY over MDIO and report it via sysctl.
 * arg2 (op) selects which register is read; op == 0 is divided by 256
 * before being reported.  NOTE(review): the device/register numbers below
 * are undocumented magic (see the XXX) — presumably op 0 is a temperature
 * and op != 0 some other PHY status word; confirm against the PHY datasheet.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	/* Serialize against other users of the adapter/mailbox. */
	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
4773
4774 static int
4775 sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4776 {
4777         struct port_info *pi = arg1;
4778         int rc, val;
4779
4780         val = pi->rsrv_noflowq;
4781         rc = sysctl_handle_int(oidp, &val, 0, req);
4782         if (rc != 0 || req->newptr == NULL)
4783                 return (rc);
4784
4785         if ((val >= 1) && (pi->ntxq > 1))
4786                 pi->rsrv_noflowq = 1;
4787         else
4788                 pi->rsrv_noflowq = 0;
4789
4790         return (rc);
4791 }
4792
/*
 * Get/set the interrupt holdoff timer index for all of the port's rx
 * queues (including offload rx queues when TCP_OFFLOAD is configured).
 * Unlike the pktc index this can be changed while the queues are live:
 * the new value is pushed into each queue's iq.intr_params, which the
 * interrupt path reads.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	/* Must be one of the SGE's configured timer indices. */
	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Packet-count threshold stays enabled iff a pktc index is set. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		/* Release store so the interrupt path sees a coherent value. */
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4841
4842 static int
4843 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4844 {
4845         struct port_info *pi = arg1;
4846         struct adapter *sc = pi->adapter;
4847         int idx, rc;
4848
4849         idx = pi->pktc_idx;
4850
4851         rc = sysctl_handle_int(oidp, &idx, 0, req);
4852         if (rc != 0 || req->newptr == NULL)
4853                 return (rc);
4854
4855         if (idx < -1 || idx >= SGE_NCOUNTERS)
4856                 return (EINVAL);
4857
4858         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4859             "t4pktc");
4860         if (rc)
4861                 return (rc);
4862
4863         if (pi->flags & PORT_INIT_DONE)
4864                 rc = EBUSY; /* cannot be changed once the queues are created */
4865         else
4866                 pi->pktc_idx = idx;
4867
4868         end_synchronized_op(sc, LOCK_HELD);
4869         return (rc);
4870 }
4871
4872 static int
4873 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4874 {
4875         struct port_info *pi = arg1;
4876         struct adapter *sc = pi->adapter;
4877         int qsize, rc;
4878
4879         qsize = pi->qsize_rxq;
4880
4881         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4882         if (rc != 0 || req->newptr == NULL)
4883                 return (rc);
4884
4885         if (qsize < 128 || (qsize & 7))
4886                 return (EINVAL);
4887
4888         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4889             "t4rxqs");
4890         if (rc)
4891                 return (rc);
4892
4893         if (pi->flags & PORT_INIT_DONE)
4894                 rc = EBUSY; /* cannot be changed once the queues are created */
4895         else
4896                 pi->qsize_rxq = qsize;
4897
4898         end_synchronized_op(sc, LOCK_HELD);
4899         return (rc);
4900 }
4901
4902 static int
4903 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4904 {
4905         struct port_info *pi = arg1;
4906         struct adapter *sc = pi->adapter;
4907         int qsize, rc;
4908
4909         qsize = pi->qsize_txq;
4910
4911         rc = sysctl_handle_int(oidp, &qsize, 0, req);
4912         if (rc != 0 || req->newptr == NULL)
4913                 return (rc);
4914
4915         /* bufring size must be powerof2 */
4916         if (qsize < 128 || !powerof2(qsize))
4917                 return (EINVAL);
4918
4919         rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4920             "t4txqs");
4921         if (rc)
4922                 return (rc);
4923
4924         if (pi->flags & PORT_INIT_DONE)
4925                 rc = EBUSY; /* cannot be changed once the queues are created */
4926         else
4927                 pi->qsize_txq = qsize;
4928
4929         end_synchronized_op(sc, LOCK_HELD);
4930         return (rc);
4931 }
4932
4933 static int
4934 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4935 {
4936         struct adapter *sc = arg1;
4937         int reg = arg2;
4938         uint64_t val;
4939
4940         val = t4_read_reg64(sc, reg);
4941
4942         return (sysctl_handle_64(oidp, &val, 0, req));
4943 }
4944
/*
 * Query the chip temperature from the firmware (DEV_DIAG_TMP parameter)
 * and report it via sysctl.  A firmware reading of 0 means "unknown" and
 * is reported as -1.  NOTE(review): units are whatever the firmware
 * returns — presumably degrees Celsius; confirm against the firmware docs.
 */
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}
4969
4970 #ifdef SBUF_DRAIN
/*
 * Dump the TP congestion control table: for each of the NCCTRL_WIN
 * windows, the 16 per-MTU increment values, the additive window, and the
 * multiplicative decrement factor (shown via the dec_fac lookup table).
 */
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	/* b_wnd is a 3-bit code; these are the corresponding factors. */
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	/* Two output lines per window: MTUs 0-7, then MTUs 8-15 + params. */
	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5008
/*
 * Names for the CIM queues, indexed the same way as the qid argument to
 * sysctl_cim_ibq_obq(): the CIM_NUM_IBQ inbound queues first, then the
 * outbound queues (the last two exist only on T5 and later).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
5014
/*
 * Dump the contents of one CIM inbound or outbound queue as hex words.
 * arg2 is a combined queue id: [0, CIM_NUM_IBQ) selects an IBQ, anything
 * above selects an OBQ (after subtracting CIM_NUM_IBQ).  The t4_read_cim_*
 * helpers return a negative errno on failure or the number of 32-bit
 * words actually read on success.
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	if (rc < 0) {
		rc = -rc;	/* negative return is -errno */
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* qname is indexed by the original combined id, hence arg2. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5070
5071 static int
5072 sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5073 {
5074         struct adapter *sc = arg1;
5075         u_int cfg;
5076         struct sbuf *sb;
5077         uint32_t *buf, *p;
5078         int rc;
5079
5080         rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5081         if (rc != 0)
5082                 return (rc);
5083
5084         rc = sysctl_wire_old_buffer(req, 0);
5085         if (rc != 0)
5086                 return (rc);
5087
5088         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5089         if (sb == NULL)
5090                 return (ENOMEM);
5091
5092         buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5093             M_ZERO | M_WAITOK);
5094
5095         rc = -t4_cim_read_la(sc, buf, NULL);
5096         if (rc != 0)
5097                 goto done;
5098
5099         sbuf_printf(sb, "Status   Data      PC%s",
5100             cfg & F_UPDBGLACAPTPCONLY ? "" :
5101             "     LS0Stat  LS0Addr             LS0Data");
5102
5103         KASSERT((sc->params.cim_la_size & 7) == 0,
5104             ("%s: p will walk off the end of buf", __func__));
5105
5106         for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5107                 if (cfg & F_UPDBGLACAPTPCONLY) {
5108                         sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5109                             p[6], p[7]);
5110                         sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5111                             (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5112                             p[4] & 0xff, p[5] >> 8);
5113                         sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5114                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5115                             p[1] & 0xf, p[2] >> 4);
5116                 } else {
5117                         sbuf_printf(sb,
5118                             "\n  %02x   %x%07x %x%07x %08x %08x "
5119                             "%08x%08x%08x%08x",
5120                             (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5121                             p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5122                             p[6], p[7]);
5123                 }
5124         }
5125
5126         rc = sbuf_finish(sb);
5127         sbuf_delete(sb);
5128 done:
5129         free(buf, M_CXGBE);
5130         return (rc);
5131 }
5132
/*
 * Dump the CIM MA logic analyzer.  t4_cim_read_ma_la() fills two arrays
 * of CIM_MALA_SIZE entries, 5 words each, back to back in buf; the first
 * is shown as raw hex and the second decoded field by field.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Two captures of CIM_MALA_SIZE entries x 5 words each. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	/* First capture: raw, most-significant word first. */
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* Second capture: decoded (p continues past the first array). */
	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5176
5177 static int
5178 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5179 {
5180         struct adapter *sc = arg1;
5181         u_int i;
5182         struct sbuf *sb;
5183         uint32_t *buf, *p;
5184         int rc;
5185
5186         rc = sysctl_wire_old_buffer(req, 0);
5187         if (rc != 0)
5188                 return (rc);
5189
5190         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5191         if (sb == NULL)
5192                 return (ENOMEM);
5193
5194         buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5195             M_ZERO | M_WAITOK);
5196
5197         t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5198         p = buf;
5199
5200         sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5201         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5202                 sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5203                     (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5204                     p[4], p[3], p[2], p[1], p[0]);
5205         }
5206
5207         sbuf_printf(sb, "\n\nCntl ID               Data");
5208         for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5209                 sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5210                     (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5211         }
5212
5213         rc = sbuf_finish(sb);
5214         sbuf_delete(sb);
5215         free(buf, M_CXGBE);
5216         return (rc);
5217 }
5218
/*
 * Display the CIM queue configuration: base, size, threshold (IBQs only),
 * read/write pointers and occupancy stats for every IBQ and OBQ.  T4 and
 * T5 use different shadow-register addresses and T5 has extra OBQs.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	/* Arrays sized for the worst case (T5); only nq entries are used. */
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* 4 status words per queue, then 2 write pointers per OBQ. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");

	/* Inbound queues first (these have thresholds)... */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	/* ...then outbound queues (p continues; wr tracks obq_wr). */
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5277
5278 static int
5279 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5280 {
5281         struct adapter *sc = arg1;
5282         struct sbuf *sb;
5283         int rc;
5284         struct tp_cpl_stats stats;
5285
5286         rc = sysctl_wire_old_buffer(req, 0);
5287         if (rc != 0)
5288                 return (rc);
5289
5290         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5291         if (sb == NULL)
5292                 return (ENOMEM);
5293
5294         t4_tp_get_cpl_stats(sc, &stats);
5295
5296         sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5297             "channel 3\n");
5298         sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5299                    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5300         sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5301                    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5302
5303         rc = sbuf_finish(sb);
5304         sbuf_delete(sb);
5305
5306         return (rc);
5307 }
5308
5309 static int
5310 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5311 {
5312         struct adapter *sc = arg1;
5313         struct sbuf *sb;
5314         int rc;
5315         struct tp_usm_stats stats;
5316
5317         rc = sysctl_wire_old_buffer(req, 0);
5318         if (rc != 0)
5319                 return(rc);
5320
5321         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5322         if (sb == NULL)
5323                 return (ENOMEM);
5324
5325         t4_get_usm_stats(sc, &stats);
5326
5327         sbuf_printf(sb, "Frames: %u\n", stats.frames);
5328         sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5329         sbuf_printf(sb, "Drops:  %u", stats.drops);
5330
5331         rc = sbuf_finish(sb);
5332         sbuf_delete(sb);
5333
5334         return (rc);
5335 }
5336
/*
 * Textual names for firmware devlog severity levels, indexed by the
 * FW_DEVLOG_LEVEL_* value (sparse designated initializers; callers guard
 * out-of-range indices with nitems()).
 */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};
5345
/*
 * Textual names for firmware devlog facilities, indexed by the
 * FW_DEVLOG_FACILITY_* value (sparse designated initializers; callers
 * guard out-of-range indices with nitems()).
 */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_CF]		= "CF",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5372
/*
 * Dump the firmware device log.  The log lives in adapter memory as a
 * circular buffer of fw_devlog_e entries (big-endian); we read the whole
 * region, find the oldest entry by timestamp, and print the ring starting
 * there.  If the firmware never told us where the log is, fall back to
 * hard-coded defaults (EDC0 @ 0x84000, 32KB).
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0, m;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		/* Defaults for firmwares that don't report log location. */
		dparams->memtype = FW_MEMTYPE_EDC0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	m = fwmtype_to_hwmtype(dparams->memtype);
	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
	if (rc != 0)
		goto done;

	/*
	 * Byte-swap every used entry in place and remember the index of
	 * the oldest one (smallest timestamp) — that's where the circular
	 * log wraps, so printing starts there.
	 */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/*
		 * NOTE(review): e->fmt is a format string supplied by the
		 * firmware, not a literal — trusted here because it comes
		 * from the device's own firmware image.
		 */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;	/* wrap around the circular log */
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5458
5459 static int
5460 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5461 {
5462         struct adapter *sc = arg1;
5463         struct sbuf *sb;
5464         int rc;
5465         struct tp_fcoe_stats stats[4];
5466
5467         rc = sysctl_wire_old_buffer(req, 0);
5468         if (rc != 0)
5469                 return (rc);
5470
5471         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5472         if (sb == NULL)
5473                 return (ENOMEM);
5474
5475         t4_get_fcoe_stats(sc, 0, &stats[0]);
5476         t4_get_fcoe_stats(sc, 1, &stats[1]);
5477         t4_get_fcoe_stats(sc, 2, &stats[2]);
5478         t4_get_fcoe_stats(sc, 3, &stats[3]);
5479
5480         sbuf_printf(sb, "                   channel 0        channel 1        "
5481             "channel 2        channel 3\n");
5482         sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5483             stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5484             stats[3].octetsDDP);
5485         sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5486             stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5487         sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5488             stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5489             stats[3].framesDrop);
5490
5491         rc = sbuf_finish(sb);
5492         sbuf_delete(sb);
5493
5494         return (rc);
5495 }
5496
/*
 * Display the hardware tx schedulers: per-scheduler mode (flow vs class
 * pacing), channel mapping, rate, and inter-packet gaps from the pace
 * table.  Zero values mean the corresponding limit is disabled.
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
	    "Class IPG (0.1 ns)   Flow IPG (us)");

	/* Two map bits per scheduler select its channel. */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		if (kbps)
			sbuf_printf(sb, "%9u     ", kbps);
		else
			sbuf_printf(sb, " disabled     ");

		if (ipg)
			sbuf_printf(sb, "%13u        ", ipg);
		else
			sbuf_printf(sb, "     disabled        ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, "  disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5546
/*
 * Display MPS loopback port statistics, two loopback ports per column
 * pair.  The stat_name table must match the field order of struct
 * lb_port_stats starting at .octets — the loop walks both structs as
 * flat uint64_t arrays.
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Loopback ports in pairs: (0,1) then (2,3). */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		/* Walk both stat structs field by field as uint64_t's. */
		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s                       Loopback %u"
		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
				   *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5594
5595 static int
5596 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5597 {
5598         int rc = 0;
5599         struct port_info *pi = arg1;
5600         struct sbuf *sb;
5601         static const char *linkdnreasons[] = {
5602                 "non-specific", "remote fault", "autoneg failed", "reserved3",
5603                 "PHY overheated", "unknown", "rx los", "reserved7"
5604         };
5605
5606         rc = sysctl_wire_old_buffer(req, 0);
5607         if (rc != 0)
5608                 return(rc);
5609         sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5610         if (sb == NULL)
5611                 return (ENOMEM);
5612
5613         if (pi->linkdnrc < 0)
5614                 sbuf_printf(sb, "n/a");
5615         else if (pi->linkdnrc < nitems(linkdnreasons))
5616                 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5617         else
5618                 sbuf_printf(sb, "%d", pi->linkdnrc);
5619
5620         rc = sbuf_finish(sb);
5621         sbuf_delete(sb);
5622
5623         return (rc);
5624 }
5625
/*
 * A memory region used by sysctl_meminfo: [base, limit] address range
 * plus an index into the region-name table.
 */
struct mem_desc {
	unsigned int base;	/* start address */
	unsigned int limit;	/* last address (inclusive) */
	unsigned int idx;	/* index into the region[] name table */
};
5631
5632 static int
5633 mem_desc_cmp(const void *a, const void *b)
5634 {
5635         return ((const struct mem_desc *)a)->base -
5636                ((const struct mem_desc *)b)->base;
5637 }
5638
/*
 * Print one labelled memory region as "name  start-end [size]".  A region
 * whose computed size is zero (to == from - 1, modulo 2^32) is suppressed.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
        unsigned int len = to - from + 1;

        if (len != 0) {
                /* XXX: humanize_number(3) in libkern would make this nicer */
                sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, len);
        }
}
5652
5653 static int
5654 sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5655 {
5656         struct adapter *sc = arg1;
5657         struct sbuf *sb;
5658         int rc, i, n;
5659         uint32_t lo, hi, used, alloc;
5660         static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5661         static const char *region[] = {
5662                 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5663                 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5664                 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5665                 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5666                 "RQUDP region:", "PBL region:", "TXPBL region:",
5667                 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5668                 "On-chip queues:"
5669         };
5670         struct mem_desc avail[4];
5671         struct mem_desc mem[nitems(region) + 3];        /* up to 3 holes */
5672         struct mem_desc *md = mem;
5673
5674         rc = sysctl_wire_old_buffer(req, 0);
5675         if (rc != 0)
5676                 return (rc);
5677
5678         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5679         if (sb == NULL)
5680                 return (ENOMEM);
5681
5682         for (i = 0; i < nitems(mem); i++) {
5683                 mem[i].limit = 0;
5684                 mem[i].idx = i;
5685         }
5686
5687         /* Find and sort the populated memory ranges */
5688         i = 0;
5689         lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5690         if (lo & F_EDRAM0_ENABLE) {
5691                 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5692                 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5693                 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5694                 avail[i].idx = 0;
5695                 i++;
5696         }
5697         if (lo & F_EDRAM1_ENABLE) {
5698                 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5699                 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5700                 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5701                 avail[i].idx = 1;
5702                 i++;
5703         }
5704         if (lo & F_EXT_MEM_ENABLE) {
5705                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5706                 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5707                 avail[i].limit = avail[i].base +
5708                     (G_EXT_MEM_SIZE(hi) << 20);
5709                 avail[i].idx = is_t4(sc) ? 2 : 3;       /* Call it MC for T4 */
5710                 i++;
5711         }
5712         if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5713                 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5714                 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5715                 avail[i].limit = avail[i].base +
5716                     (G_EXT_MEM1_SIZE(hi) << 20);
5717                 avail[i].idx = 4;
5718                 i++;
5719         }
5720         if (!i)                                    /* no memory available */
5721                 return 0;
5722         qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5723
5724         (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5725         (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5726         (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5727         (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5728         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5729         (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5730         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5731         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5732         (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5733
5734         /* the next few have explicit upper bounds */
5735         md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5736         md->limit = md->base - 1 +
5737                     t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5738                     G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5739         md++;
5740
5741         md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5742         md->limit = md->base - 1 +
5743                     t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5744                     G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5745         md++;
5746
5747         if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5748                 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5749                 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5750                 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5751         } else {
5752                 md->base = 0;
5753                 md->idx = nitems(region);  /* hide it */
5754         }
5755         md++;
5756
5757 #define ulp_region(reg) \
5758         md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5759         (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5760
5761         ulp_region(RX_ISCSI);
5762         ulp_region(RX_TDDP);
5763         ulp_region(TX_TPT);
5764         ulp_region(RX_STAG);
5765         ulp_region(RX_RQ);
5766         ulp_region(RX_RQUDP);
5767         ulp_region(RX_PBL);
5768         ulp_region(TX_PBL);
5769 #undef ulp_region
5770
5771         md->base = 0;
5772         md->idx = nitems(region);
5773         if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5774                 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5775                 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5776                     A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5777         }
5778         md++;
5779
5780         md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5781         md->limit = md->base + sc->tids.ntids - 1;
5782         md++;
5783         md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5784         md->limit = md->base + sc->tids.ntids - 1;
5785         md++;
5786
5787         md->base = sc->vres.ocq.start;
5788         if (sc->vres.ocq.size)
5789                 md->limit = md->base + sc->vres.ocq.size - 1;
5790         else
5791                 md->idx = nitems(region);  /* hide it */
5792         md++;
5793
5794         /* add any address-space holes, there can be up to 3 */
5795         for (n = 0; n < i - 1; n++)
5796                 if (avail[n].limit < avail[n + 1].base)
5797                         (md++)->base = avail[n].limit;
5798         if (avail[n].limit)
5799                 (md++)->base = avail[n].limit;
5800
5801         n = md - mem;
5802         qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5803
5804         for (lo = 0; lo < i; lo++)
5805                 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5806                                 avail[lo].limit - 1);
5807
5808         sbuf_printf(sb, "\n");
5809         for (i = 0; i < n; i++) {
5810                 if (mem[i].idx >= nitems(region))
5811                         continue;                        /* skip holes */
5812                 if (!mem[i].limit)
5813                         mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5814                 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5815                                 mem[i].limit);
5816         }
5817
5818         sbuf_printf(sb, "\n");
5819         lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5820         hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5821         mem_region_show(sb, "uP RAM:", lo, hi);
5822
5823         lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5824         hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5825         mem_region_show(sb, "uP Extmem2:", lo, hi);
5826
5827         lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5828         sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5829                    G_PMRXMAXPAGE(lo),
5830                    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5831                    (lo & F_PMRXNUMCHN) ? 2 : 1);
5832
5833         lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5834         hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5835         sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5836                    G_PMTXMAXPAGE(lo),
5837                    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5838                    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5839         sbuf_printf(sb, "%u p-structs\n",
5840                    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5841
5842         for (i = 0; i < 4; i++) {
5843                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5844                 if (is_t4(sc)) {
5845                         used = G_USED(lo);
5846                         alloc = G_ALLOC(lo);
5847                 } else {
5848                         used = G_T5_USED(lo);
5849                         alloc = G_T5_ALLOC(lo);
5850                 }
5851                 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5852                            i, used, alloc);
5853         }
5854         for (i = 0; i < 4; i++) {
5855                 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5856                 if (is_t4(sc)) {
5857                         used = G_USED(lo);
5858                         alloc = G_ALLOC(lo);
5859                 } else {
5860                         used = G_T5_USED(lo);
5861                         alloc = G_T5_ALLOC(lo);
5862                 }
5863                 sbuf_printf(sb,
5864                            "\nLoopback %d using %u pages out of %u allocated",
5865                            i, used, alloc);
5866         }
5867
5868         rc = sbuf_finish(sb);
5869         sbuf_delete(sb);
5870
5871         return (rc);
5872 }
5873
/*
 * Convert a TCAM entry's (x, y) pair into an Ethernet address and a mask.
 * The mask is the bitwise OR of x and y; the address is copied from the low
 * 48 bits of y (the memcpy skips the 2 most-significant bytes of the
 * big-endian 64-bit value).
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
        *mask = x | y;
        y = htobe64(y);
        memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
5881
/*
 * Sysctl handler: dump the MPS TCAM -- one line per populated entry showing
 * the Ethernet address, mask, valid bit, port map, PF/VF, replication map
 * (fetched from firmware when the entry replicates) and per-port priorities.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc, i, n;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        sbuf_printf(sb,
            "Idx  Ethernet address     Mask     Vld Ports PF"
            "  VF              Replication             P0 P1 P2 P3  ML");
        /* T4 and T5 have different TCAM sizes. */
        n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
            NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
        for (i = 0; i < n; i++) {
                uint64_t tcamx, tcamy, mask;
                uint32_t cls_lo, cls_hi;
                uint8_t addr[ETHER_ADDR_LEN];

                tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
                tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
                cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
                cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

                /* x and y overlapping presumably marks an unused entry -- skipped */
                if (tcamx & tcamy)
                        continue;

                tcamxy2valmask(tcamx, tcamy, addr, &mask);
                sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
                           "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
                           addr[3], addr[4], addr[5], (uintmax_t)mask,
                           (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
                           G_PORTMAP(cls_hi), G_PF(cls_lo),
                           (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

                if (cls_lo & F_REPLICATE) {
                        struct fw_ldst_cmd ldst_cmd;

                        /* Ask the firmware for this entry's replication map. */
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
                        ldst_cmd.op_to_addrspace =
                            htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
                                F_FW_CMD_REQUEST | F_FW_CMD_READ |
                                V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
                        ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
                        ldst_cmd.u.mps.fid_ctl =
                            htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
                                V_FW_LDST_CMD_CTL(i));

                        /* Mailbox access requires the synchronized-op lock. */
                        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4mps");
                        if (rc)
                                break;
                        rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
                            sizeof(ldst_cmd), &ldst_cmd);
                        end_synchronized_op(sc, 0);

                        if (rc != 0) {
                                /* note the error inline and keep going */
                                sbuf_printf(sb,
                                    " ------------ error %3u ------------", rc);
                                rc = 0;
                        } else {
                                sbuf_printf(sb, " %08x %08x %08x %08x",
                                    be32toh(ldst_cmd.u.mps.rplc127_96),
                                    be32toh(ldst_cmd.u.mps.rplc95_64),
                                    be32toh(ldst_cmd.u.mps.rplc63_32),
                                    be32toh(ldst_cmd.u.mps.rplc31_0));
                        }
                } else
                        sbuf_printf(sb, "%36s", "");

                sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
                    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
                    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
        }

        /* If the loop broke with an error, still finish the sbuf cleanly. */
        if (rc)
                (void) sbuf_finish(sb);
        else
                rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
5971
5972 static int
5973 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5974 {
5975         struct adapter *sc = arg1;
5976         struct sbuf *sb;
5977         int rc;
5978         uint16_t mtus[NMTUS];
5979
5980         rc = sysctl_wire_old_buffer(req, 0);
5981         if (rc != 0)
5982                 return (rc);
5983
5984         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5985         if (sb == NULL)
5986                 return (ENOMEM);
5987
5988         t4_read_mtu_tbl(sc, mtus, NULL);
5989
5990         sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5991             mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5992             mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5993             mtus[14], mtus[15]);
5994
5995         rc = sbuf_finish(sb);
5996         sbuf_delete(sb);
5997
5998         return (rc);
5999 }
6000
6001 static int
6002 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6003 {
6004         struct adapter *sc = arg1;
6005         struct sbuf *sb;
6006         int rc, i;
6007         uint32_t cnt[PM_NSTATS];
6008         uint64_t cyc[PM_NSTATS];
6009         static const char *rx_stats[] = {
6010                 "Read:", "Write bypass:", "Write mem:", "Flush:"
6011         };
6012         static const char *tx_stats[] = {
6013                 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6014         };
6015
6016         rc = sysctl_wire_old_buffer(req, 0);
6017         if (rc != 0)
6018                 return (rc);
6019
6020         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6021         if (sb == NULL)
6022                 return (ENOMEM);
6023
6024         t4_pmtx_get_stats(sc, cnt, cyc);
6025         sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6026         for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6027                 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6028                     cyc[i]);
6029
6030         t4_pmrx_get_stats(sc, cnt, cyc);
6031         sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6032         for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6033                 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6034                     cyc[i]);
6035
6036         rc = sbuf_finish(sb);
6037         sbuf_delete(sb);
6038
6039         return (rc);
6040 }
6041
6042 static int
6043 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6044 {
6045         struct adapter *sc = arg1;
6046         struct sbuf *sb;
6047         int rc;
6048         struct tp_rdma_stats stats;
6049
6050         rc = sysctl_wire_old_buffer(req, 0);
6051         if (rc != 0)
6052                 return (rc);
6053
6054         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6055         if (sb == NULL)
6056                 return (ENOMEM);
6057
6058         t4_tp_get_rdma_stats(sc, &stats);
6059         sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
6060         sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
6061
6062         rc = sbuf_finish(sb);
6063         sbuf_delete(sb);
6064
6065         return (rc);
6066 }
6067
6068 static int
6069 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6070 {
6071         struct adapter *sc = arg1;
6072         struct sbuf *sb;
6073         int rc;
6074         struct tp_tcp_stats v4, v6;
6075
6076         rc = sysctl_wire_old_buffer(req, 0);
6077         if (rc != 0)
6078                 return (rc);
6079
6080         sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6081         if (sb == NULL)
6082                 return (ENOMEM);
6083
6084         t4_tp_get_tcp_stats(sc, &v4, &v6);
6085         sbuf_printf(sb,
6086             "                                IP                 IPv6\n");
6087         sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6088             v4.tcpOutRsts, v6.tcpOutRsts);
6089         sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6090             v4.tcpInSegs, v6.tcpInSegs);
6091         sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6092             v4.tcpOutSegs, v6.tcpOutSegs);
6093         sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6094             v4.tcpRetransSegs, v6.tcpRetransSegs);
6095
6096         rc = sbuf_finish(sb);
6097         sbuf_delete(sb);
6098
6099         return (rc);
6100 }
6101
/*
 * Sysctl handler: report the TID ranges (ATID/TID/STID/FTID/ETID) and their
 * usage counts.  The TID range layout depends on whether the LE hash filter
 * is enabled (F_HASHEN) and where the server index splits the table.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tid_info *t = &sc->tids;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        if (t->natids) {
                sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
                    t->atids_in_use);
        }

        if (t->ntids) {
                if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
                        /* hash filter enabled: TIDs may be split in two ranges */
                        uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

                        if (b) {
                                sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        } else {
                                sbuf_printf(sb, "TID range: %u-%u",
                                    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
                                    t->ntids - 1);
                        }
                } else
                        sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
                sbuf_printf(sb, ", in use: %u\n",
                    atomic_load_acq_int(&t->tids_in_use));
        }

        if (t->nstids) {
                sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
                    t->stid_base + t->nstids - 1, t->stids_in_use);
        }

        if (t->nftids) {
                sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
                    t->ftid_base + t->nftids - 1);
        }

        if (t->netids) {
                sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
                    t->etid_base + t->netids - 1);
        }

        /* active-connection counts straight from the LE database */
        sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
            t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6166
/*
 * Sysctl handler: report the TP error counters, one column per channel for
 * the per-channel counters, followed by the two global offload counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        struct tp_err_stats stats;

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_tp_get_err_stats(sc, &stats);

        sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
                      "channel 3\n");
        sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
            stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
            stats.macInErrs[3]);
        sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
            stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
            stats.hdrInErrs[3]);
        sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
            stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
            stats.tcpInErrs[3]);
        sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
            stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
            stats.tcp6InErrs[3]);
        sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
            stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
            stats.tnlCongDrops[3]);
        sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
            stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
            stats.tnlTxDrops[3]);
        sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
            stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
            stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
        sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
            stats.ofldChanDrops[0], stats.ofldChanDrops[1],
            stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
        sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
            stats.ofldNoNeigh, stats.ofldCongDefer);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6219
/*
 * Describes one bit-field within a 64-bit logic-analyzer word: 'start' is
 * the LSB position and 'width' the number of bits (see field_desc_show).
 */
struct field_desc {
        const char *name;
        u_int start;
        u_int width;
};
6225
/*
 * Decode the 64-bit value 'v' using the NULL-terminated field table 'f',
 * appending "name: value" pairs to the sbuf.  Output wraps before column 79;
 * continuation lines are indented by 8 spaces.
 */
static void
field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
{
        char buf[32];
        int line_size = 0;

        while (f->name) {
                uint64_t mask = (1ULL << f->width) - 1;
                int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
                    ((uintmax_t)v >> f->start) & mask);

                /* wrap before the line would exceed ~79 columns */
                if (line_size + len >= 79) {
                        line_size = 8;
                        sbuf_printf(sb, "\n        ");
                }
                sbuf_printf(sb, "%s ", buf);
                line_size += len + 1;
                f++;
        }
        sbuf_printf(sb, "\n");
}
6247
6248 static struct field_desc tp_la0[] = {
6249         { "RcfOpCodeOut", 60, 4 },
6250         { "State", 56, 4 },
6251         { "WcfState", 52, 4 },
6252         { "RcfOpcSrcOut", 50, 2 },
6253         { "CRxError", 49, 1 },
6254         { "ERxError", 48, 1 },
6255         { "SanityFailed", 47, 1 },
6256         { "SpuriousMsg", 46, 1 },
6257         { "FlushInputMsg", 45, 1 },
6258         { "FlushInputCpl", 44, 1 },
6259         { "RssUpBit", 43, 1 },
6260         { "RssFilterHit", 42, 1 },
6261         { "Tid", 32, 10 },
6262         { "InitTcb", 31, 1 },
6263         { "LineNumber", 24, 7 },
6264         { "Emsg", 23, 1 },
6265         { "EdataOut", 22, 1 },
6266         { "Cmsg", 21, 1 },
6267         { "CdataOut", 20, 1 },
6268         { "EreadPdu", 19, 1 },
6269         { "CreadPdu", 18, 1 },
6270         { "TunnelPkt", 17, 1 },
6271         { "RcfPeerFin", 16, 1 },
6272         { "RcfReasonOut", 12, 4 },
6273         { "TxCchannel", 10, 2 },
6274         { "RcfTxChannel", 8, 2 },
6275         { "RxEchannel", 6, 2 },
6276         { "RcfRxChannel", 5, 1 },
6277         { "RcfDataOutSrdy", 4, 1 },
6278         { "RxDvld", 3, 1 },
6279         { "RxOoDvld", 2, 1 },
6280         { "RxCongestion", 1, 1 },
6281         { "TxCongestion", 0, 1 },
6282         { NULL }
6283 };
6284
6285 static struct field_desc tp_la1[] = {
6286         { "CplCmdIn", 56, 8 },
6287         { "CplCmdOut", 48, 8 },
6288         { "ESynOut", 47, 1 },
6289         { "EAckOut", 46, 1 },
6290         { "EFinOut", 45, 1 },
6291         { "ERstOut", 44, 1 },
6292         { "SynIn", 43, 1 },
6293         { "AckIn", 42, 1 },
6294         { "FinIn", 41, 1 },
6295         { "RstIn", 40, 1 },
6296         { "DataIn", 39, 1 },
6297         { "DataInVld", 38, 1 },
6298         { "PadIn", 37, 1 },
6299         { "RxBufEmpty", 36, 1 },
6300         { "RxDdp", 35, 1 },
6301         { "RxFbCongestion", 34, 1 },
6302         { "TxFbCongestion", 33, 1 },
6303         { "TxPktSumSrdy", 32, 1 },
6304         { "RcfUlpType", 28, 4 },
6305         { "Eread", 27, 1 },
6306         { "Ebypass", 26, 1 },
6307         { "Esave", 25, 1 },
6308         { "Static0", 24, 1 },
6309         { "Cread", 23, 1 },
6310         { "Cbypass", 22, 1 },
6311         { "Csave", 21, 1 },
6312         { "CPktOut", 20, 1 },
6313         { "RxPagePoolFull", 18, 2 },
6314         { "RxLpbkPkt", 17, 1 },
6315         { "TxLpbkPkt", 16, 1 },
6316         { "RxVfValid", 15, 1 },
6317         { "SynLearned", 14, 1 },
6318         { "SetDelEntry", 13, 1 },
6319         { "SetInvEntry", 12, 1 },
6320         { "CpcmdDvld", 11, 1 },
6321         { "CpcmdSave", 10, 1 },
6322         { "RxPstructsFull", 8, 2 },
6323         { "EpcmdDvld", 7, 1 },
6324         { "EpcmdFlush", 6, 1 },
6325         { "EpcmdTrimPrefix", 5, 1 },
6326         { "EpcmdTrimPostfix", 4, 1 },
6327         { "ERssIp4Pkt", 3, 1 },
6328         { "ERssIp6Pkt", 2, 1 },
6329         { "ERssTcpUdpPkt", 1, 1 },
6330         { "ERssFceFipPkt", 0, 1 },
6331         { NULL }
6332 };
6333
6334 static struct field_desc tp_la2[] = {
6335         { "CplCmdIn", 56, 8 },
6336         { "MpsVfVld", 55, 1 },
6337         { "MpsPf", 52, 3 },
6338         { "MpsVf", 44, 8 },
6339         { "SynIn", 43, 1 },
6340         { "AckIn", 42, 1 },
6341         { "FinIn", 41, 1 },
6342         { "RstIn", 40, 1 },
6343         { "DataIn", 39, 1 },
6344         { "DataInVld", 38, 1 },
6345         { "PadIn", 37, 1 },
6346         { "RxBufEmpty", 36, 1 },
6347         { "RxDdp", 35, 1 },
6348         { "RxFbCongestion", 34, 1 },
6349         { "TxFbCongestion", 33, 1 },
6350         { "TxPktSumSrdy", 32, 1 },
6351         { "RcfUlpType", 28, 4 },
6352         { "Eread", 27, 1 },
6353         { "Ebypass", 26, 1 },
6354         { "Esave", 25, 1 },
6355         { "Static0", 24, 1 },
6356         { "Cread", 23, 1 },
6357         { "Cbypass", 22, 1 },
6358         { "Csave", 21, 1 },
6359         { "CPktOut", 20, 1 },
6360         { "RxPagePoolFull", 18, 2 },
6361         { "RxLpbkPkt", 17, 1 },
6362         { "TxLpbkPkt", 16, 1 },
6363         { "RxVfValid", 15, 1 },
6364         { "SynLearned", 14, 1 },
6365         { "SetDelEntry", 13, 1 },
6366         { "SetInvEntry", 12, 1 },
6367         { "CpcmdDvld", 11, 1 },
6368         { "CpcmdSave", 10, 1 },
6369         { "RxPstructsFull", 8, 2 },
6370         { "EpcmdDvld", 7, 1 },
6371         { "EpcmdFlush", 6, 1 },
6372         { "EpcmdTrimPrefix", 5, 1 },
6373         { "EpcmdTrimPostfix", 4, 1 },
6374         { "ERssIp4Pkt", 3, 1 },
6375         { "ERssIp6Pkt", 2, 1 },
6376         { "ERssTcpUdpPkt", 1, 1 },
6377         { "ERssFceFipPkt", 0, 1 },
6378         { NULL }
6379 };
6380
/*
 * Print one TP LA entry in single-word mode: decode *p with the tp_la0
 * field table.  'idx' is unused in this mode.
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

        field_desc_show(sb, *p, tp_la0);
}
6387
/*
 * Print one TP LA entry in two-word mode (DBGLAMODE 2): both words decode
 * with tp_la0.  The second word of the final entry is suppressed when it is
 * all-ones (presumably an unused slot marker -- TODO confirm).
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

        if (idx)
                sbuf_printf(sb, "\n");
        field_desc_show(sb, p[0], tp_la0);
        if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
                field_desc_show(sb, p[1], tp_la0);
}
6398
/*
 * Print one TP LA entry in two-word mode (DBGLAMODE 3): the first word
 * decodes with tp_la0; bit 17 of it selects whether the second word decodes
 * with tp_la2 or tp_la1.  Same last-entry suppression as tp_la_show2.
 */
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{

        if (idx)
                sbuf_printf(sb, "\n");
        field_desc_show(sb, p[0], tp_la0);
        if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
                field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}
6409
/*
 * Sysctl handler: dump the TP logic analyzer.  The capture mode (DBGLAMODE
 * in A_TP_DBG_LA_CONFIG) selects how many 64-bit words make up an entry and
 * which decode routine to use.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        uint64_t *buf, *p;
        int rc;
        u_int i, inc;
        void (*show_func)(struct sbuf *, uint64_t *, int);

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        if (sb == NULL)
                return (ENOMEM);

        /* M_WAITOK: this allocation does not fail */
        buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

        t4_tp_read_la(sc, buf, NULL);
        p = buf;

        /* modes 2 and 3 use two words per entry; everything else uses one */
        switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
        case 2:
                inc = 2;
                show_func = tp_la_show2;
                break;
        case 3:
                inc = 2;
                show_func = tp_la_show3;
                break;
        default:
                inc = 1;
                show_func = tp_la_show;
        }

        for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
                (*show_func)(sb, p, i);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
        free(buf, M_CXGBE);
        return (rc);
}
6455
/*
 * Sysctl handler: report per-channel NIC and offload Tx byte rates.
 * Only channels 0-3 are printed (assumes NCHAN >= 4 -- TODO confirm).
 */
static int
sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct sbuf *sb;
        int rc;
        u64 nrate[NCHAN], orate[NCHAN];

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                return (rc);

        sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
        if (sb == NULL)
                return (ENOMEM);

        t4_get_chan_txrate(sc, nrate, orate);
        sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
                 "channel 3\n");
        sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
            nrate[0], nrate[1], nrate[2], nrate[3]);
        sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
            orate[0], orate[1], orate[2], orate[3]);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);

        return (rc);
}
6485
6486 static int
6487 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6488 {
6489         struct adapter *sc = arg1;
6490         struct sbuf *sb;
6491         uint32_t *buf, *p;
6492         int rc, i;
6493
6494         rc = sysctl_wire_old_buffer(req, 0);
6495         if (rc != 0)
6496                 return (rc);
6497
6498         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6499         if (sb == NULL)
6500                 return (ENOMEM);
6501
6502         buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6503             M_ZERO | M_WAITOK);
6504
6505         t4_ulprx_read_la(sc, buf);
6506         p = buf;
6507
6508         sbuf_printf(sb, "      Pcmd        Type   Message"
6509             "                Data");
6510         for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6511                 sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6512                     p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6513         }
6514
6515         rc = sbuf_finish(sb);
6516         sbuf_delete(sb);
6517         free(buf, M_CXGBE);
6518         return (rc);
6519 }
6520
6521 static int
6522 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6523 {
6524         struct adapter *sc = arg1;
6525         struct sbuf *sb;
6526         int rc, v;
6527
6528         rc = sysctl_wire_old_buffer(req, 0);
6529         if (rc != 0)
6530                 return (rc);
6531
6532         sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6533         if (sb == NULL)
6534                 return (ENOMEM);
6535
6536         v = t4_read_reg(sc, A_SGE_STAT_CFG);
6537         if (G_STATSOURCE_T5(v) == 7) {
6538                 if (G_STATMODE(v) == 0) {
6539                         sbuf_printf(sb, "total %d, incomplete %d",
6540                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6541                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6542                 } else if (G_STATMODE(v) == 1) {
6543                         sbuf_printf(sb, "total %d, data overflow %d",
6544                             t4_read_reg(sc, A_SGE_STAT_TOTAL),
6545                             t4_read_reg(sc, A_SGE_STAT_MATCH));
6546                 }
6547         }
6548         rc = sbuf_finish(sb);
6549         sbuf_delete(sb);
6550
6551         return (rc);
6552 }
6553 #endif
6554
6555 static inline void
6556 txq_start(struct ifnet *ifp, struct sge_txq *txq)
6557 {
6558         struct buf_ring *br;
6559         struct mbuf *m;
6560
6561         TXQ_LOCK_ASSERT_OWNED(txq);
6562
6563         br = txq->br;
6564         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6565         if (m)
6566                 t4_eth_tx(ifp, txq, m);
6567 }
6568
6569 void
6570 t4_tx_callout(void *arg)
6571 {
6572         struct sge_eq *eq = arg;
6573         struct adapter *sc;
6574
6575         if (EQ_TRYLOCK(eq) == 0)
6576                 goto reschedule;
6577
6578         if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6579                 EQ_UNLOCK(eq);
6580 reschedule:
6581                 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6582                         callout_schedule(&eq->tx_callout, 1);
6583                 return;
6584         }
6585
6586         EQ_LOCK_ASSERT_OWNED(eq);
6587
6588         if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6589
6590                 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6591                         struct sge_txq *txq = arg;
6592                         struct port_info *pi = txq->ifp->if_softc;
6593
6594                         sc = pi->adapter;
6595                 } else {
6596                         struct sge_wrq *wrq = arg;
6597
6598                         sc = wrq->adapter;
6599                 }
6600
6601                 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6602         }
6603
6604         EQ_UNLOCK(eq);
6605 }
6606
/*
 * Taskqueue handler that services an egress queue: NIC tx queues are
 * restarted via txq_start, work-request queues via t4_wrq_tx_locked.
 *
 * NOTE(review): arg is cast to both sge_txq/sge_wrq and sge_eq, which
 * presumes the eq is at the start of both containers — verify against the
 * struct definitions.
 */
void
t4_tx_task(void *arg, int count)
{
	struct sge_eq *eq = arg;

	EQ_LOCK(eq);
	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
		struct sge_txq *txq = arg;
		txq_start(txq->ifp, txq);
	} else {
		struct sge_wrq *wrq = arg;
		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
	}
	EQ_UNLOCK(eq);
}
6622
6623 static uint32_t
6624 fconf_to_mode(uint32_t fconf)
6625 {
6626         uint32_t mode;
6627
6628         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6629             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6630
6631         if (fconf & F_FRAGMENTATION)
6632                 mode |= T4_FILTER_IP_FRAGMENT;
6633
6634         if (fconf & F_MPSHITTYPE)
6635                 mode |= T4_FILTER_MPS_HIT_TYPE;
6636
6637         if (fconf & F_MACMATCH)
6638                 mode |= T4_FILTER_MAC_IDX;
6639
6640         if (fconf & F_ETHERTYPE)
6641                 mode |= T4_FILTER_ETH_TYPE;
6642
6643         if (fconf & F_PROTOCOL)
6644                 mode |= T4_FILTER_IP_PROTO;
6645
6646         if (fconf & F_TOS)
6647                 mode |= T4_FILTER_IP_TOS;
6648
6649         if (fconf & F_VLAN)
6650                 mode |= T4_FILTER_VLAN;
6651
6652         if (fconf & F_VNIC_ID)
6653                 mode |= T4_FILTER_VNIC;
6654
6655         if (fconf & F_PORT)
6656                 mode |= T4_FILTER_PORT;
6657
6658         if (fconf & F_FCOE)
6659                 mode |= T4_FILTER_FCoE;
6660
6661         return (mode);
6662 }
6663
6664 static uint32_t
6665 mode_to_fconf(uint32_t mode)
6666 {
6667         uint32_t fconf = 0;
6668
6669         if (mode & T4_FILTER_IP_FRAGMENT)
6670                 fconf |= F_FRAGMENTATION;
6671
6672         if (mode & T4_FILTER_MPS_HIT_TYPE)
6673                 fconf |= F_MPSHITTYPE;
6674
6675         if (mode & T4_FILTER_MAC_IDX)
6676                 fconf |= F_MACMATCH;
6677
6678         if (mode & T4_FILTER_ETH_TYPE)
6679                 fconf |= F_ETHERTYPE;
6680
6681         if (mode & T4_FILTER_IP_PROTO)
6682                 fconf |= F_PROTOCOL;
6683
6684         if (mode & T4_FILTER_IP_TOS)
6685                 fconf |= F_TOS;
6686
6687         if (mode & T4_FILTER_VLAN)
6688                 fconf |= F_VLAN;
6689
6690         if (mode & T4_FILTER_VNIC)
6691                 fconf |= F_VNIC_ID;
6692
6693         if (mode & T4_FILTER_PORT)
6694                 fconf |= F_PORT;
6695
6696         if (mode & T4_FILTER_FCoE)
6697                 fconf |= F_FCOE;
6698
6699         return (fconf);
6700 }
6701
6702 static uint32_t
6703 fspec_to_fconf(struct t4_filter_specification *fs)
6704 {
6705         uint32_t fconf = 0;
6706
6707         if (fs->val.frag || fs->mask.frag)
6708                 fconf |= F_FRAGMENTATION;
6709
6710         if (fs->val.matchtype || fs->mask.matchtype)
6711                 fconf |= F_MPSHITTYPE;
6712
6713         if (fs->val.macidx || fs->mask.macidx)
6714                 fconf |= F_MACMATCH;
6715
6716         if (fs->val.ethtype || fs->mask.ethtype)
6717                 fconf |= F_ETHERTYPE;
6718
6719         if (fs->val.proto || fs->mask.proto)
6720                 fconf |= F_PROTOCOL;
6721
6722         if (fs->val.tos || fs->mask.tos)
6723                 fconf |= F_TOS;
6724
6725         if (fs->val.vlan_vld || fs->mask.vlan_vld)
6726                 fconf |= F_VLAN;
6727
6728         if (fs->val.vnic_vld || fs->mask.vnic_vld)
6729                 fconf |= F_VNIC_ID;
6730
6731         if (fs->val.iport || fs->mask.iport)
6732                 fconf |= F_PORT;
6733
6734         if (fs->val.fcoe || fs->mask.fcoe)
6735                 fconf |= F_FCOE;
6736
6737         return (fconf);
6738 }
6739
/*
 * Report the current global filter mode to userland.  Reads the live
 * TP_VLAN_PRI_MAP value from the chip and resynchronizes the cached copy
 * if it has drifted.
 */
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	/* TP_VLAN_PRI_MAP is an indirect register behind the TP PIO pair. */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
		sc->params.tp.vlan_pri_map = fconf;
	}

	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6766
/*
 * Change the global filter mode.  Refused while any filters exist or (with
 * TCP_OFFLOAD) while offload is active, since existing state depends on the
 * current mode.  The actual hardware update is not implemented yet, so this
 * currently always fails with ENOTSUP after validation.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6804
/*
 * Read the hit counter of filter 'fid' directly from its TCB in adapter
 * memory, using memory window 0.  Callers in this file invoke this with
 * the adapter lock held (see get_filter) since window 0 is repositioned.
 *
 * NOTE(review): the hit counter lives at different TCB offsets/widths on
 * T4 (64-bit at offset 16) vs T5 (32-bit at offset 24) — presumably per
 * the chip's TCB layout; confirm against the TCB documentation.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	/* Point window 0 at this filter's TCB. */
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		/* 32-bit counter; be32toh operates on the low 32 bits. */
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
6824
/*
 * Return information about the first valid filter at index >= t->idx.
 * "No such filter" is reported by setting t->idx to 0xffffffff rather
 * than by an error return — this function returns 0 unless the
 * synchronized op itself fails.
 */
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			/* Hit counts only exist if the filter asked for them. */
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
6863
/*
 * Create a hardware filter at index t->idx.  Validates the request against
 * the adapter's capabilities and current filter mode, allocates the filter
 * table on first use, sends the filter work request, and then sleeps until
 * the firmware reply (t4_filter_rpl) marks the entry valid or failed.
 *
 * Returns 0 on success, EINPROGRESS if interrupted while waiting for the
 * firmware, or an errno describing the validation/setup failure.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * IPv6 filter idx must be 4 aligned.
	 * NOTE(review): "t->idx + 4 >= nfilters" also rejects
	 * idx + 4 == nfilters, which would fit exactly — possibly an
	 * intentional conservatism, possibly off-by-one; confirm upstream.
	 */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Lazily allocate the filter table on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter (type 1) occupies 4 consecutive slots; an IPv4
	 * filter only the first.  All required slots must be free/unlocked.
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/* Wait (interruptibly) for the firmware reply to settle the entry. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
6978
/*
 * Delete the hardware filter at index t->idx.  Sends the delete work
 * request and then sleeps until the firmware reply clears the entry.
 * On success t->fs is filled with the deleted filter's specification.
 *
 * Returns 0 on success, EINPROGRESS if interrupted while waiting, or an
 * errno describing the failure.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	/* Deleting a slot that holds no valid filter is a silent no-op. */
	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/* Wait for the firmware reply; 'valid' still set means EIO. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
7046
7047 static void
7048 clear_filter(struct filter_entry *f)
7049 {
7050         if (f->l2t)
7051                 t4_l2t_release(f->l2t);
7052
7053         bzero(f, sizeof (*f));
7054 }
7055
/*
 * Build and send the FW_FILTER_WR work request that programs filter 'fidx'
 * into the hardware.  Allocates an L2T switching entry first if the filter
 * rewrites the destination MAC or VLAN.  Marks the entry pending; the
 * firmware reply (t4_filter_rpl) completes it.
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Hardware tid = filter region base + index within the region. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Pack the filter specification into the work request fields. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Firmware replies are steered to the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* The reply handler clears 'pending' and adjusts ftids_in_use. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7158
7159 static int
7160 del_filter_wr(struct adapter *sc, int fidx)
7161 {
7162         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7163         struct wrqe *wr;
7164         struct fw_filter_wr *fwr;
7165         unsigned int ftid;
7166
7167         ftid = sc->tids.ftid_base + fidx;
7168
7169         wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7170         if (wr == NULL)
7171                 return (ENOMEM);
7172         fwr = wrtod(wr);
7173         bzero(fwr, sizeof (*fwr));
7174
7175         t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7176
7177         f->pending = 1;
7178         t4_wrq_tx(sc, wr);
7179         return (0);
7180 }
7181
/*
 * Handler for the firmware's reply to a filter work request (delivered as
 * a CPL_SET_TCB_RPL on the firmware event queue).  Completes the pending
 * add/delete started by set_filter_wr/del_filter_wr and wakes the thread
 * sleeping in set_filter/del_filter.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Only tids in the filter region are handled here. */
	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		/* The WR's cookie carries the firmware status code. */
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted or failed: release the slot either way. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Wake whoever is sleeping in set_filter/del_filter. */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7224
/*
 * Read an SGE context (egress/ingress/freelist/congestion manager) for the
 * cxgbetool ioctl.  Prefers the firmware mailbox read; falls back to the
 * register-backdoor read if the firmware is unavailable or the read fails.
 */
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly via
	 * the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7257
/*
 * Copy a firmware image in from userland and flash it onto the card.
 * Refused once the adapter is fully initialized (EBUSY).
 *
 * NOTE(review): fw->len is user-controlled and unvalidated before the
 * M_WAITOK allocation; a sanity bound on the length may be worthwhile.
 * Also, malloc(9) with M_WAITOK does not return NULL, so the NULL check
 * below is effectively dead code.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7288
/*
 * Copy a range of adapter memory out to userland through PCIe memory
 * window 'win'.  The range is validated first; the window is then slid
 * along the range, reading up to one aperture's worth per iteration into
 * a bounce buffer that is copyout()'d to mr->data (a user address).
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	/* Bounce buffer: one window aperture at most. */
	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7329
7330 static int
7331 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7332 {
7333         int rc;
7334
7335         if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7336                 return (EINVAL);
7337
7338         if (i2cd->len > sizeof(i2cd->data))
7339                 return (EFBIG);
7340
7341         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7342         if (rc)
7343                 return (rc);
7344         rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7345             i2cd->offset, i2cd->len, &i2cd->data[0]);
7346         end_synchronized_op(sc, 0);
7347
7348         return (rc);
7349 }
7350
/*
 * Range check with "unset" semantics: a negative value means the caller
 * did not set the parameter and is always accepted; otherwise the value
 * must lie within [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{

	if (val < 0)
		return (1);
	return (lo <= val && val <= hi);
}
7357
7358 static int
7359 set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7360 {
7361         int fw_subcmd, fw_type, rc;
7362
7363         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7364         if (rc)
7365                 return (rc);
7366
7367         if (!(sc->flags & FULL_INIT_DONE)) {
7368                 rc = EAGAIN;
7369                 goto done;
7370         }
7371
7372         /*
7373          * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7374          * sub-command and type are in common locations.)
7375          */
7376         if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7377                 fw_subcmd = FW_SCHED_SC_CONFIG;
7378         else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7379                 fw_subcmd = FW_SCHED_SC_PARAMS;
7380         else {
7381                 rc = EINVAL;
7382                 goto done;
7383         }
7384         if (p->type == SCHED_CLASS_TYPE_PACKET)
7385                 fw_type = FW_SCHED_TYPE_PKTSCHED;
7386         else {
7387                 rc = EINVAL;
7388                 goto done;
7389         }
7390
7391         if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7392                 /* Vet our parameters ..*/
7393                 if (p->u.config.minmax < 0) {
7394                         rc = EINVAL;
7395                         goto done;
7396                 }
7397
7398                 /* And pass the request to the firmware ...*/
7399                 rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
7400                 goto done;
7401         }
7402
7403         if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7404                 int fw_level;
7405                 int fw_mode;
7406                 int fw_rateunit;
7407                 int fw_ratemode;
7408
7409                 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7410                         fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7411                 else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7412                         fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7413                 else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7414                         fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7415                 else {
7416                         rc = EINVAL;
7417                         goto done;
7418                 }
7419
7420                 if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7421                         fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7422                 else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7423                         fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7424                 else {
7425                         rc = EINVAL;
7426                         goto done;
7427                 }
7428
7429                 if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7430                         fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7431                 else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7432                         fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7433                 else {
7434                         rc = EINVAL;
7435                         goto done;
7436                 }
7437
7438                 if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7439                         fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7440                 else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7441                         fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7442                 else {
7443                         rc = EINVAL;
7444                         goto done;
7445                 }
7446
7447                 /* Vet our parameters ... */
7448                 if (!in_range(p->u.params.channel, 0, 3) ||
7449                     !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7450                     !in_range(p->u.params.minrate, 0, 10000000) ||
7451                     !in_range(p->u.params.maxrate, 0, 10000000) ||
7452                     !in_range(p->u.params.weight, 0, 100)) {
7453                         rc = ERANGE;
7454                         goto done;
7455                 }
7456
7457                 /*
7458                  * Translate any unset parameters into the firmware's
7459                  * nomenclature and/or fail the call if the parameters
7460                  * are required ...
7461                  */
7462                 if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7463                     p->u.params.channel < 0 || p->u.params.cl < 0) {
7464                         rc = EINVAL;
7465                         goto done;
7466                 }
7467                 if (p->u.params.minrate < 0)
7468                         p->u.params.minrate = 0;
7469                 if (p->u.params.maxrate < 0) {
7470                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7471                             p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7472                                 rc = EINVAL;
7473                                 goto done;
7474                         } else
7475                                 p->u.params.maxrate = 0;
7476                 }
7477                 if (p->u.params.weight < 0) {
7478                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7479                                 rc = EINVAL;
7480                                 goto done;
7481                         } else
7482                                 p->u.params.weight = 0;
7483                 }
7484                 if (p->u.params.pktsize < 0) {
7485                         if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7486                             p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7487                                 rc = EINVAL;
7488                                 goto done;
7489                         } else
7490                                 p->u.params.pktsize = 0;
7491                 }
7492
7493                 /* See what the firmware thinks of the request ... */
7494                 rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7495                     fw_rateunit, fw_ratemode, p->u.params.channel,
7496                     p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7497                     p->u.params.weight, p->u.params.pktsize);
7498                 goto done;
7499         }
7500
7501         rc = EINVAL;
7502 done:
7503         end_synchronized_op(sc, 0);
7504         return (rc);
7505 }
7506
7507 static int
7508 set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
7509 {
7510         struct port_info *pi = NULL;
7511         struct sge_txq *txq;
7512         uint32_t fw_mnem, fw_queue, fw_class;
7513         int i, rc;
7514
7515         rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
7516         if (rc)
7517                 return (rc);
7518
7519         if (!(sc->flags & FULL_INIT_DONE)) {
7520                 rc = EAGAIN;
7521                 goto done;
7522         }
7523
7524         if (p->port >= sc->params.nports) {
7525                 rc = EINVAL;
7526                 goto done;
7527         }
7528
7529         pi = sc->port[p->port];
7530         if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
7531                 rc = EINVAL;
7532                 goto done;
7533         }
7534
7535         /*
7536          * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
7537          * Scheduling Class in this case).
7538          */
7539         fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
7540             V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
7541         fw_class = p->cl < 0 ? 0xffffffff : p->cl;
7542
7543         /*
7544          * If op.queue is non-negative, then we're only changing the scheduling
7545          * on a single specified TX queue.
7546          */
7547         if (p->queue >= 0) {
7548                 txq = &sc->sge.txq[pi->first_txq + p->queue];
7549                 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7550                 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7551                     &fw_class);
7552                 goto done;
7553         }
7554
7555         /*
7556          * Change the scheduling on all the TX queues for the
7557          * interface.
7558          */
7559         for_each_txq(pi, i, txq) {
7560                 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7561                 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7562                     &fw_class);
7563                 if (rc)
7564                         goto done;
7565         }
7566
7567         rc = 0;
7568 done:
7569         end_synchronized_op(sc, 0);
7570         return (rc);
7571 }
7572
7573 int
7574 t4_os_find_pci_capability(struct adapter *sc, int cap)
7575 {
7576         int i;
7577
7578         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7579 }
7580
7581 int
7582 t4_os_pci_save_state(struct adapter *sc)
7583 {
7584         device_t dev;
7585         struct pci_devinfo *dinfo;
7586
7587         dev = sc->dev;
7588         dinfo = device_get_ivars(dev);
7589
7590         pci_cfg_save(dev, dinfo, 0);
7591         return (0);
7592 }
7593
7594 int
7595 t4_os_pci_restore_state(struct adapter *sc)
7596 {
7597         device_t dev;
7598         struct pci_devinfo *dinfo;
7599
7600         dev = sc->dev;
7601         dinfo = device_get_ivars(dev);
7602
7603         pci_cfg_restore(dev, dinfo);
7604         return (0);
7605 }
7606
7607 void
7608 t4_os_portmod_changed(const struct adapter *sc, int idx)
7609 {
7610         struct port_info *pi = sc->port[idx];
7611         static const char *mod_str[] = {
7612                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7613         };
7614
7615         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7616                 if_printf(pi->ifp, "transceiver unplugged.\n");
7617         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7618                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7619         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7620                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7621         else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7622                 if_printf(pi->ifp, "%s transceiver inserted.\n",
7623                     mod_str[pi->mod_type]);
7624         } else {
7625                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7626                     pi->mod_type);
7627         }
7628 }
7629
7630 void
7631 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7632 {
7633         struct port_info *pi = sc->port[idx];
7634         struct ifnet *ifp = pi->ifp;
7635
7636         if (link_stat) {
7637                 pi->linkdnrc = -1;
7638                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7639                 if_link_state_change(ifp, LINK_STATE_UP);
7640         } else {
7641                 if (reason >= 0)
7642                         pi->linkdnrc = reason;
7643                 if_link_state_change(ifp, LINK_STATE_DOWN);
7644         }
7645 }
7646
7647 void
7648 t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7649 {
7650         struct adapter *sc;
7651
7652         mtx_lock(&t4_list_lock);
7653         SLIST_FOREACH(sc, &t4_list, link) {
7654                 /*
7655                  * func should not make any assumptions about what state sc is
7656                  * in - the only guarantee is that sc->sc_lock is a valid lock.
7657                  */
7658                 func(sc, arg);
7659         }
7660         mtx_unlock(&t4_list_lock);
7661 }
7662
/*
 * Open handler for the adapter's control device node.  Always succeeds;
 * access control is performed per-command via priv_check() in t4_ioctl().
 */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7668
/*
 * Close handler for the adapter's control device node.  No per-open state
 * is kept, so there is nothing to tear down.
 */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
       return (0);
}
7674
/*
 * ioctl handler for the adapter's control device node (used by cxgbetool).
 * Requires PRIV_DRIVER; most commands are dispatched to dedicated helpers
 * that do their own locking via begin_synchronized_op().
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	/* rc is 0 here; cases that just 'break' report success. */
	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/*
		 * NOTE(review): only edata->addr is bounds-checked.  For an
		 * 8-byte read at addr == mmio_len - 4 the upper half would be
		 * past the mapped window — confirm addr + size <= mmio_len is
		 * guaranteed by the callers or should be checked here.
		 */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Same bounds-check caveat as CHELSIO_T4_GETREG above. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* Reject values that don't fit in 32 bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		/*
		 * Zero the MAC statistics for one port and, if its queues
		 * exist, the driver-maintained per-queue counters as well.
		 * NOTE(review): the queue counters are cleared without taking
		 * the queue locks — presumably a benign race with the data
		 * path; confirm that is acceptable here.
		 */
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			/* The port's control queue counters too. */
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
7828
7829 #ifdef TCP_OFFLOAD
/*
 * Enable or disable the TCP offload capability on a port.  Must be called
 * within a synchronized operation (asserted below).  Enabling brings the
 * port fully up if needed and activates the TOM ULD the first time any
 * port turns TOE on; sc->offload_map tracks which ports have it enabled.
 * Returns 0 on success or an errno.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* The port's queues must exist before TOE can be enabled. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		/* Already enabled on this port; nothing to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		/* First enable on the adapter: activate the TOM ULD. */
		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				/* EAGAIN here means the ULD isn't registered. */
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		/* Already disabled on this port; nothing to do. */
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
7878
7879 /*
7880  * Add an upper layer driver to the global list.
7881  */
7882 int
7883 t4_register_uld(struct uld_info *ui)
7884 {
7885         int rc = 0;
7886         struct uld_info *u;
7887
7888         mtx_lock(&t4_uld_list_lock);
7889         SLIST_FOREACH(u, &t4_uld_list, link) {
7890             if (u->uld_id == ui->uld_id) {
7891                     rc = EEXIST;
7892                     goto done;
7893             }
7894         }
7895
7896         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7897         ui->refcount = 0;
7898 done:
7899         mtx_unlock(&t4_uld_list_lock);
7900         return (rc);
7901 }
7902
7903 int
7904 t4_unregister_uld(struct uld_info *ui)
7905 {
7906         int rc = EINVAL;
7907         struct uld_info *u;
7908
7909         mtx_lock(&t4_uld_list_lock);
7910
7911         SLIST_FOREACH(u, &t4_uld_list, link) {
7912             if (u == ui) {
7913                     if (ui->refcount > 0) {
7914                             rc = EBUSY;
7915                             goto done;
7916                     }
7917
7918                     SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7919                     rc = 0;
7920                     goto done;
7921             }
7922         }
7923 done:
7924         mtx_unlock(&t4_uld_list_lock);
7925         return (rc);
7926 }
7927
7928 int
7929 t4_activate_uld(struct adapter *sc, int id)
7930 {
7931         int rc = EAGAIN;
7932         struct uld_info *ui;
7933
7934         ASSERT_SYNCHRONIZED_OP(sc);
7935
7936         mtx_lock(&t4_uld_list_lock);
7937
7938         SLIST_FOREACH(ui, &t4_uld_list, link) {
7939                 if (ui->uld_id == id) {
7940                         rc = ui->activate(sc);
7941                         if (rc == 0)
7942                                 ui->refcount++;
7943                         goto done;
7944                 }
7945         }
7946 done:
7947         mtx_unlock(&t4_uld_list_lock);
7948
7949         return (rc);
7950 }
7951
7952 int
7953 t4_deactivate_uld(struct adapter *sc, int id)
7954 {
7955         int rc = EINVAL;
7956         struct uld_info *ui;
7957
7958         ASSERT_SYNCHRONIZED_OP(sc);
7959
7960         mtx_lock(&t4_uld_list_lock);
7961
7962         SLIST_FOREACH(ui, &t4_uld_list, link) {
7963                 if (ui->uld_id == id) {
7964                         rc = ui->deactivate(sc);
7965                         if (rc == 0)
7966                                 ui->refcount--;
7967                         goto done;
7968                 }
7969         }
7970 done:
7971         mtx_unlock(&t4_uld_list_lock);
7972
7973         return (rc);
7974 }
7975 #endif
7976
7977 /*
7978  * Come up with reasonable defaults for some of the tunables, provided they're
7979  * not set by the user (in which case we'll use the values as is).
7980  */
7981 static void
7982 tweak_tunables(void)
7983 {
7984         int nc = mp_ncpus;      /* our snapshot of the number of CPUs */
7985
7986         if (t4_ntxq10g < 1)
7987                 t4_ntxq10g = min(nc, NTXQ_10G);
7988
7989         if (t4_ntxq1g < 1)
7990                 t4_ntxq1g = min(nc, NTXQ_1G);
7991
7992         if (t4_nrxq10g < 1)
7993                 t4_nrxq10g = min(nc, NRXQ_10G);
7994
7995         if (t4_nrxq1g < 1)
7996                 t4_nrxq1g = min(nc, NRXQ_1G);
7997
7998 #ifdef TCP_OFFLOAD
7999         if (t4_nofldtxq10g < 1)
8000                 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
8001
8002         if (t4_nofldtxq1g < 1)
8003                 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
8004
8005         if (t4_nofldrxq10g < 1)
8006                 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
8007
8008         if (t4_nofldrxq1g < 1)
8009                 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
8010
8011         if (t4_toecaps_allowed == -1)
8012                 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
8013 #else
8014         if (t4_toecaps_allowed == -1)
8015                 t4_toecaps_allowed = 0;
8016 #endif
8017
8018         if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
8019                 t4_tmr_idx_10g = TMR_IDX_10G;
8020
8021         if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
8022                 t4_pktc_idx_10g = PKTC_IDX_10G;
8023
8024         if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
8025                 t4_tmr_idx_1g = TMR_IDX_1G;
8026
8027         if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
8028                 t4_pktc_idx_1g = PKTC_IDX_1G;
8029
8030         if (t4_qsize_txq < 128)
8031                 t4_qsize_txq = 128;
8032
8033         if (t4_qsize_rxq < 128)
8034                 t4_qsize_rxq = 128;
8035         while (t4_qsize_rxq & 7)
8036                 t4_qsize_rxq++;
8037
8038         t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
8039 }
8040
/*
 * Module load/unload handler shared by the t4nex and t5nex drivers.  The
 * 'loaded' refcount ensures the global state (adapter list, ULD list,
 * their locks, SGE module data, tunables) is initialized exactly once on
 * the first load and torn down only on the last unload.  Unload fails
 * with EBUSY while any adapter or ULD is still on its list.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first load initializes the globals. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload tears the globals down. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
#ifdef TCP_OFFLOAD
		/* Refuse to unload while any ULD is still registered. */
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		mtx_destroy(&t4_uld_list_lock);
#endif
		/* Refuse to unload while any adapter is still attached. */
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
8087
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus driver: attaches to the PCI device, needs the firmware module. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus driver: same structure as t4nex, shares mod_event. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers hanging off the nexus devices. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);