]> CyberLeo.Net >> Repos - FreeBSD/stable/9.git/blob - sys/dev/cxgbe/t4_main.c
MFC r226173, r227843, r227848 and r227908:
[FreeBSD/stable/9.git] / sys / dev / cxgbe / t4_main.c
1 /*-
2  * Copyright (c) 2011 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include "opt_inet.h"
32
33 #include <sys/param.h>
34 #include <sys/conf.h>
35 #include <sys/priv.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/taskqueue.h>
42 #include <sys/pciio.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45 #include <dev/pci/pci_private.h>
46 #include <sys/firmware.h>
47 #include <sys/sbuf.h>
48 #include <sys/smp.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_types.h>
55 #include <net/if_dl.h>
56 #include <net/if_vlan_var.h>
57
58 #include "common/t4_hw.h"
59 #include "common/common.h"
60 #include "common/t4_msg.h"
61 #include "common/t4_regs.h"
62 #include "common/t4_regs_values.h"
63 #include "common/t4fw_interface.h"
64 #include "t4_ioctl.h"
65 #include "t4_l2t.h"
66
/*
 * T4 bus driver interface ("t4nex").  The nexus device owns the PCI
 * function; each Ethernet port is attached to it as a "cxgbe" child
 * (see t4_attach below, which calls device_add_child for every port).
 */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)	/* softc is the whole adapter */
};
83
84
85 /* T4 port (cxgbe) interface */
86 static int cxgbe_probe(device_t);
87 static int cxgbe_attach(device_t);
88 static int cxgbe_detach(device_t);
89 static device_method_t cxgbe_methods[] = {
90         DEVMETHOD(device_probe,         cxgbe_probe),
91         DEVMETHOD(device_attach,        cxgbe_attach),
92         DEVMETHOD(device_detach,        cxgbe_detach),
93         { 0, 0 }
94 };
95 static driver_t cxgbe_driver = {
96         "cxgbe",
97         cxgbe_methods,
98         sizeof(struct port_info)
99 };
100
/* Character device ("t4nex") used for the driver's ioctl interface. */
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
113
/* ifnet + media interface (installed on each port's ifnet in cxgbe_attach) */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static void cxgbe_start(struct ifnet *);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

/* malloc(9) type used for all of this driver's allocations */
MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
124
125 /*
126  * Tunables.
127  */
128 SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters");
129
130 static int force_firmware_install = 0;
131 TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
132 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
133     &force_firmware_install, 0, "install firmware on every attach.");
134
135 /*
136  * Holdoff timer and packet counter values.
137  */
138 static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
139 static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
140
141 /*
142  * Max # of tx and rx queues to use for each 10G and 1G port.
143  */
144 static unsigned int max_ntxq_10g = 8;
145 TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
146 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
147     &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");
148
149 static unsigned int max_nrxq_10g = 8;
150 TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
151 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
152     &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port).");
153
154 static unsigned int max_ntxq_1g = 2;
155 TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
156 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
157     &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");
158
159 static unsigned int max_nrxq_1g = 2;
160 TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
161 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
162     &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port).");
163
164 /*
165  * Holdoff parameters for 10G and 1G ports.
166  */
167 static unsigned int tmr_idx_10g = 1;
168 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
169 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
170     &tmr_idx_10g, 0,
171     "default timer index for interrupt holdoff (10G ports).");
172
173 static int pktc_idx_10g = 2;
174 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
175 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
176     &pktc_idx_10g, 0,
177     "default pkt counter index for interrupt holdoff (10G ports).");
178
179 static unsigned int tmr_idx_1g = 1;
180 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
181 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
182     &tmr_idx_1g, 0,
183     "default timer index for interrupt holdoff (1G ports).");
184
185 static int pktc_idx_1g = 2;
186 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
187 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
188     &pktc_idx_1g, 0,
189     "default pkt counter index for interrupt holdoff (1G ports).");
190
191 /*
192  * Size (# of entries) of each tx and rx queue.
193  */
194 static unsigned int qsize_txq = TX_EQ_QSIZE;
195 TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
196 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
197     &qsize_txq, 0, "default queue size of NIC tx queues.");
198
199 static unsigned int qsize_rxq = RX_IQ_QSIZE;
200 TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
201 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
202     &qsize_rxq, 0, "default queue size of NIC rx queues.");
203
204 /*
205  * Interrupt types allowed.
206  */
207 static int intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
208 TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
209 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
210     "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");
211
212 /*
213  * Force the driver to use the same set of interrupts for all ports.
214  */
215 static int intr_shared = 0;
216 TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared);
217 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN,
218     &intr_shared, 0, "interrupts shared between all ports");
219
220 static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC;
221 TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode);
222 SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN,
223     &filter_mode, 0, "default global filter mode.");
224
/*
 * Result of cfg_itype_and_nqueues(): the interrupt type and the number of
 * vectors and per-port queues the driver decided to use (see t4_attach).
 */
struct intrs_and_queues {
        int intr_type;          /* INTx, MSI, or MSI-X */
        int nirq;               /* Number of vectors */
        int intr_shared;        /* Interrupts shared between all ports */
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
};
234
/*
 * Host-side state for one hardware filter (see get/set/del_filter and
 * set_filter_wr/del_filter_wr below).
 */
struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;	/* the filter specification itself */
};
244
/*
 * Memory window apertures (bytes) and offsets; presumably programmed by
 * setup_memwin() — TODO confirm against that function's body.
 */
enum {
        MEMWIN0_APERTURE = 2048,
        MEMWIN0_BASE     = 0x1b800,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN2_APERTURE = 65536,
        MEMWIN2_BASE     = 0x30000,
};
253
/*
 * Bit flags describing which MAC settings update_mac_settings() should
 * push to the hardware (e.g. XGMAC_MTU after an MTU change in cxgbe_ioctl).
 */
enum {
        XGMAC_MTU       = (1 << 0),
        XGMAC_PROMISC   = (1 << 1),
        XGMAC_ALLMULTI  = (1 << 2),
        XGMAC_VLANEX    = (1 << 3),
        XGMAC_UCADDR    = (1 << 4),
        XGMAC_MCADDRS   = (1 << 5),

        XGMAC_ALL       = 0xffff	/* refresh everything */
};
264
/* Forward declarations of this file's internal helpers. */
static int map_bars(struct adapter *);
static void setup_memwin(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int get_devlog_params(struct adapter *, struct devlog_params *);
static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
static int get_params(struct adapter *, struct fw_caps_config_cmd *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_locked(struct port_info *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_locked(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int first_port_up(struct adapter *);
static int last_port_down(struct adapter *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    iq_intr_handler_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
/* Not static: presumably called from another file in the driver — TODO confirm */
void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int t4_mod_event(module_t, int, void *);
314
/*
 * PCI device ids of supported cards.  "mpf" is compared against
 * pci_get_function() in t4_probe, so only that PCI function attaches;
 * presumably the master physical function — TODO confirm.
 */
struct t4_pciids {
        uint16_t device;
        uint8_t mpf;
        char *desc;
} t4_pciids[] = {
        {0xa000, 0, "Chelsio Terminator 4 FPGA"},
        {0x4400, 4, "Chelsio T440-dbg"},
        {0x4401, 4, "Chelsio T420-CR"},
        {0x4402, 4, "Chelsio T422-CR"},
        {0x4403, 4, "Chelsio T440-CR"},
        {0x4404, 4, "Chelsio T420-BCH"},
        {0x4405, 4, "Chelsio T440-BCH"},
        {0x4406, 4, "Chelsio T440-CH"},
        {0x4407, 4, "Chelsio T420-SO"},
        {0x4408, 4, "Chelsio T420-CX"},
        {0x4409, 4, "Chelsio T420-BT"},
        {0x440a, 4, "Chelsio T404-BT"},
};
333
334 static int
335 t4_probe(device_t dev)
336 {
337         int i;
338         uint16_t v = pci_get_vendor(dev);
339         uint16_t d = pci_get_device(dev);
340
341         if (v != PCI_VENDOR_ID_CHELSIO)
342                 return (ENXIO);
343
344         for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
345                 if (d == t4_pciids[i].device &&
346                     pci_get_function(dev) == t4_pciids[i].mpf) {
347                         device_set_desc(dev, t4_pciids[i].desc);
348                         return (BUS_PROBE_DEFAULT);
349                 }
350         }
351
352         return (ENXIO);
353 }
354
/*
 * Attach the T4 nexus device: set up PCI, map the BARs, bring the
 * firmware to a usable state, size the interrupt/queue configuration,
 * and add one "cxgbe" child device per usable port.  On any failure
 * t4_detach() (which is idempotent, see below) is called to undo
 * whatever was set up.
 */
static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, n10g, n1g, rqidx, tqidx;
        struct fw_caps_config_cmd caps;
        uint32_t p, v;
        struct intrs_and_queues iaq;
        struct sge *s;

        sc = device_get_softc(dev);
        sc->dev = dev;
        sc->pf = pci_get_function(dev);
        sc->mbox = sc->pf;	/* mailbox number is the PF number */

        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                /* PCIe device: bump MRRS and enable relaxed ordering */
                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
                v |= PCIM_EXP_CTL_RELAXED_ORD_ENABLE;
                pci_write_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, v, 2);
        }

        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);

        rc = map_bars(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

        /* Prepare the adapter for operation */
        rc = -t4_prep_adapter(sc);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /* Do this really early */
        sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
            GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
        sc->cdev->si_drv1 = sc;

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /* Read firmware devlog parameters (best effort, result ignored) */
        (void) get_devlog_params(sc, &sc->params.devlog);

        /* Get device capabilities and select which ones we'll use */
        rc = get_capabilities(sc, &caps);
        if (rc != 0) {
                device_printf(dev,
                    "failed to initialize adapter capabilities: %d.\n", rc);
                goto done;
        }

        /* Choose the global RSS mode. */
        rc = -t4_config_glbl_rss(sc, sc->mbox,
            FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
            F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
            F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
            F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
        if (rc != 0) {
                device_printf(dev,
                    "failed to select global RSS mode: %d.\n", rc);
                goto done;
        }

        /* These are total (sum of all ports) limits for a bus driver */
        rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
            128,        /* max # of egress queues */
            64,         /* max # of egress Ethernet or control queues */
            64,         /* max # of ingress queues with fl/interrupt */
            0,          /* max # of ingress queues without interrupt */
            0,          /* PCIe traffic class */
            4,          /* max # of virtual interfaces */
            M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
            FW_CMD_CAP_PF, FW_CMD_CAP_PF);
        if (rc != 0) {
                device_printf(dev,
                    "failed to configure pf/vf resources: %d.\n", rc);
                goto done;
        }

        /* Need this before sge_init */
        for (i = 0; i < SGE_NTIMERS; i++)
                sc->sge.timer_val[i] = min(intr_timer[i], 200U);
        for (i = 0; i < SGE_NCOUNTERS; i++)
                sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);

        /* Also need the cooked value of cclk before sge_init */
        p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
        rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to obtain core clock value: %d.\n", rc);
                goto done;
        }
        sc->params.vpd.cclk = v;

        t4_sge_init(sc);

        /* Program the global filter mode (tunable, see filter_mode above) */
        t4_set_filter_mode(sc, filter_mode);
        t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG,
            V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP),
            V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP));
        t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR,
            F_LOOKUPEVERYPKT);

        /* get basic stuff going */
        rc = -t4_early_init(sc, sc->mbox);
        if (rc != 0) {
                device_printf(dev, "early init failed: %d.\n", rc);
                goto done;
        }

        rc = get_params(sc, &caps);
        if (rc != 0)
                goto done; /* error message displayed already */

        /* These are finalized by FW initialization, load their values now */
        v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
        sc->params.tp.tre = G_TIMERRESOLUTION(v);
        sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
        t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

        /* tweak some settings */
        t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
            V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
            V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
        t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
        t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
            F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);

        setup_memwin(sc);

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;

                /* Allocate the vi and initialize parameters like mac addr */
                rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);

                if (is_10G_port(pi)) {
                        n10g++;
                        pi->tmr_idx = tmr_idx_10g;
                        pi->pktc_idx = pktc_idx_10g;
                } else {
                        n1g++;
                        pi->tmr_idx = tmr_idx_1g;
                        pi->pktc_idx = pktc_idx_1g;
                }

                pi->xact_addr_filt = -1;

                /* rx queue size must be >= 128 and a multiple of 8 */
                pi->qsize_rxq = max(qsize_rxq, 128);
                while (pi->qsize_rxq & 7)
                        pi->qsize_rxq++;
                pi->qsize_txq = max(qsize_txq, 128);

                if (pi->qsize_rxq != qsize_rxq) {
                        device_printf(dev,
                            "using %d instead of %d as the rx queue size.\n",
                            pi->qsize_rxq, qsize_rxq);
                }
                if (pi->qsize_txq != qsize_txq) {
                        device_printf(dev,
                            "using %d instead of %d as the tx queue size.\n",
                            pi->qsize_txq, qsize_txq);
                }

                pi->dev = device_add_child(dev, "cxgbe", -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                device_set_softc(pi->dev, pi);

                setbit(&sc->registered_device_map, i);
        }

        if (sc->registered_device_map == 0) {
                device_printf(dev, "no usable ports\n");
                rc = ENXIO;
                goto done;
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
        rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        s = &sc->sge;
        s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports;    /* control queues, 1 per port */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
        if (iaq.intr_shared)
                sc->flags |= INTR_SHARED;
        s->niq += NINTRQ(sc);           /* interrupt queues */

        /* M_WAITOK: none of these allocations can fail */
        s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->l2t = t4_init_l2t(M_WAITOK);

        t4_sysctls(sc);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                if (pi == NULL)
                        continue;

                pi->first_rxq = rqidx;
                pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;

                pi->first_txq = tqidx;
                pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;

                rqidx += pi->nrxq;
                tqidx += pi->ntxq;
        }

        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

#ifdef INVARIANTS
        device_printf(dev,
            "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
            sc, sc->params.nports, sc->params.portvec,
            sc->intr_type, sc->intr_count);
#endif
        t4_set_desc(sc);

done:
        /* t4_detach is idempotent, so it is safe on a partial setup */
        if (rc != 0)
                t4_detach(dev);

        return (rc);
}
660
/*
 * Idempotent: safe to call on a partially attached adapter (t4_attach
 * invokes it on any failure), and safe to call repeatedly because the
 * softc is zeroed on the way out, so a second call finds nothing left
 * to release.
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i;

        sc = device_get_softc(dev);

        if (sc->cdev)
                destroy_dev(sc->cdev);

        /* Detach and free the per-port children first. */
        bus_generic_detach(dev);
        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi, M_CXGBE);
                }
        }

        /* Only say goodbye if the firmware was reached successfully. */
        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

        /* free(9) accepts NULL, so these are safe even if never allocated. */
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.intrq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        mtx_destroy(&sc->sc_lock);

        /* Zero the softc so a repeat call is a no-op (idempotence). */
        bzero(sc, sizeof(*sc));

        return (0);
}
721
722
723 static int
724 cxgbe_probe(device_t dev)
725 {
726         char buf[128];
727         struct port_info *pi = device_get_softc(dev);
728
729         snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
730         device_set_desc_copy(dev, buf);
731
732         return (BUS_PROBE_DEFAULT);
733 }
734
/*
 * Interface capabilities advertised on each port's ifnet, and the subset
 * enabled by default (everything except TSO6).
 */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO)
#define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
739
740 static int
741 cxgbe_attach(device_t dev)
742 {
743         struct port_info *pi = device_get_softc(dev);
744         struct ifnet *ifp;
745
746         /* Allocate an ifnet and set it up */
747         ifp = if_alloc(IFT_ETHER);
748         if (ifp == NULL) {
749                 device_printf(dev, "Cannot allocate ifnet\n");
750                 return (ENOMEM);
751         }
752         pi->ifp = ifp;
753         ifp->if_softc = pi;
754
755         callout_init(&pi->tick, CALLOUT_MPSAFE);
756         pi->tq = taskqueue_create("cxgbe_taskq", M_NOWAIT,
757             taskqueue_thread_enqueue, &pi->tq);
758         if (pi->tq == NULL) {
759                 device_printf(dev, "failed to allocate port task queue\n");
760                 if_free(pi->ifp);
761                 return (ENOMEM);
762         }
763         taskqueue_start_threads(&pi->tq, 1, PI_NET, "%s taskq",
764             device_get_nameunit(dev));
765
766         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
767         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
768
769         ifp->if_init = cxgbe_init;
770         ifp->if_ioctl = cxgbe_ioctl;
771         ifp->if_start = cxgbe_start;
772         ifp->if_transmit = cxgbe_transmit;
773         ifp->if_qflush = cxgbe_qflush;
774
775         ifp->if_snd.ifq_drv_maxlen = 1024;
776         IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
777         IFQ_SET_READY(&ifp->if_snd);
778
779         ifp->if_capabilities = T4_CAP;
780         ifp->if_capenable = T4_CAP_ENABLE;
781         ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
782
783         /* Initialize ifmedia for this port */
784         ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
785             cxgbe_media_status);
786         build_medialist(pi);
787
788         ether_ifattach(ifp, pi->hw_addr);
789
790 #ifdef INVARIANTS
791         device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
792 #endif
793
794         cxgbe_sysctls(pi);
795
796         return (0);
797 }
798
/*
 * Detach a port.  Marks the port DOOMED so if_ioctl/if_init back off,
 * then single-threads against other adapter operations by acquiring the
 * BUSY flag before tearing the ifnet down.
 */
static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        int rc;

        /* Tell if_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(pi);
        wakeup(&sc->flags);
        /* Wait our turn: only one thread may hold BUSY at a time. */
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
        SET_BUSY(sc);
        ADAPTER_UNLOCK(sc);

        /* Failure here is reported but does not abort the detach. */
        rc = cxgbe_uninit_synchronized(pi);
        if (rc != 0)
                device_printf(dev, "port uninit failed: %d.\n", rc);

        taskqueue_free(pi->tq);

        ifmedia_removeall(&pi->media);
        ether_ifdetach(pi->ifp);
        if_free(pi->ifp);

        /* Release BUSY and hand it to exactly one waiter. */
        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup_one(&sc->flags);
        ADAPTER_UNLOCK(sc);

        return (0);
}
832
833 static void
834 cxgbe_init(void *arg)
835 {
836         struct port_info *pi = arg;
837         struct adapter *sc = pi->adapter;
838
839         ADAPTER_LOCK(sc);
840         cxgbe_init_locked(pi); /* releases adapter lock */
841         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
842 }
843
/*
 * ioctl handler.  The adapter lock gates configuration changes: IS_DOOMED
 * means detach has started (fail with ENXIO), IS_BUSY means another thread
 * owns the hardware for a configuration operation (fail with EBUSY).  Note
 * that cxgbe_init_locked()/cxgbe_uninit_locked() are entered with the
 * adapter lock held and release it themselves (see the NOTOWNED assert).
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc) {
fail:
			/* Common error exit; adapter lock is held here. */
			ADAPTER_UNLOCK(sc);
			return (rc);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			rc = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Resize rx buffers to match the new MTU. */
				t4_update_fl_bufsize(ifp);
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_MTU);
				PORT_UNLOCK(pi);
			}
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(pi)) {
			rc = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/*
				 * Already up and running; only react to a
				 * change in promiscuous/allmulti state.
				 */
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						rc = EBUSY;
						goto fail;
					}
					PORT_LOCK(pi);
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
					PORT_UNLOCK(pi);
				}
				ADAPTER_UNLOCK(sc);
			} else
				rc = cxgbe_init_locked(pi); /* drops the lock */
			pi->if_flags = ifp->if_flags; /* remember for next time */
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_locked(pi); /* drops the lock */
		else
			ADAPTER_UNLOCK(sc);

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(pi);
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
			PORT_UNLOCK(pi);
		}
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (rc)
			goto fail;

		/* mask has a bit set for every capability being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO requires txcsum; drop TSO if txcsum went away. */
			if (IFCAP_TSO & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
				if_printf(ifp,
				    "tso disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;

			if (IFCAP_TSO & ifp->if_capenable) {
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= CSUM_TSO;
				else {
					/* Refuse TSO without txcsum. */
					ifp->if_capenable &= ~IFCAP_TSO;
					ifp->if_hwassist &= ~CSUM_TSO;
					if_printf(ifp,
					    "enable txcsum first.\n");
					rc = EAGAIN;
				}
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_LRO) {
#ifdef INET
			int i;
			struct sge_rxq *rxq;

			/* Propagate the LRO toggle to every rx queue. */
			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->flags |= RXQ_LRO_ENABLED;
				else
					rxq->flags &= ~RXQ_LRO_ENABLED;
			}
#endif
		}
#ifndef TCP_OFFLOAD_DISABLE
		if (mask & IFCAP_TOE4) {
			/* TOE cannot be toggled via ioctl (yet). */
			rc = EOPNOTSUPP;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(pi);
				rc = update_mac_settings(pi, XGMAC_VLANEX);
				PORT_UNLOCK(pi);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media queries/changes don't need the adapter lock. */
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
1016
1017 static void
1018 cxgbe_start(struct ifnet *ifp)
1019 {
1020         struct port_info *pi = ifp->if_softc;
1021         struct sge_txq *txq;
1022         int i;
1023
1024         for_each_txq(pi, i, txq) {
1025                 if (TXQ_TRYLOCK(txq)) {
1026                         txq_start(ifp, txq);
1027                         TXQ_UNLOCK(txq);
1028                 }
1029         }
1030 }
1031
/*
 * if_transmit method.  Picks a tx queue (hashed by flowid when the mbuf
 * carries one), then either transmits directly or enqueues on the queue's
 * buf_ring when the queue is contended or already has backlog.
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	/* Interface is down: drop silently. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		return (0);
	}

	/* Spread flows across this port's tx queues. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		/*
		 * XXX: make sure that this packet really is sent out.  There is
		 * a small race where t4_eth_tx may stop draining the drbr and
		 * goes away, just before we enqueued this mbuf.
		 */

		return (drbr_enqueue(ifp, br, m));
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		/* Drain the held-back mbuf (or the ring head) first. */
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1091
1092 static void
1093 cxgbe_qflush(struct ifnet *ifp)
1094 {
1095         struct port_info *pi = ifp->if_softc;
1096         struct sge_txq *txq;
1097         int i;
1098         struct mbuf *m;
1099
1100         /* queues do not exist if !IFF_DRV_RUNNING. */
1101         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1102                 for_each_txq(pi, i, txq) {
1103                         TXQ_LOCK(txq);
1104                         m_freem(txq->m);
1105                         while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1106                                 m_freem(m);
1107                         TXQ_UNLOCK(txq);
1108                 }
1109         }
1110         if_qflush(ifp);
1111 }
1112
1113 static int
1114 cxgbe_media_change(struct ifnet *ifp)
1115 {
1116         struct port_info *pi = ifp->if_softc;
1117
1118         device_printf(pi->dev, "%s unimplemented.\n", __func__);
1119
1120         return (EOPNOTSUPP);
1121 }
1122
1123 static void
1124 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1125 {
1126         struct port_info *pi = ifp->if_softc;
1127         struct ifmedia_entry *cur = pi->media.ifm_cur;
1128         int speed = pi->link_cfg.speed;
1129         int data = (pi->port_type << 8) | pi->mod_type;
1130
1131         if (cur->ifm_data != data) {
1132                 build_medialist(pi);
1133                 cur = pi->media.ifm_cur;
1134         }
1135
1136         ifmr->ifm_status = IFM_AVALID;
1137         if (!pi->link_cfg.link_ok)
1138                 return;
1139
1140         ifmr->ifm_status |= IFM_ACTIVE;
1141
1142         /* active and current will differ iff current media is autoselect. */
1143         if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1144                 return;
1145
1146         ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1147         if (speed == SPEED_10000)
1148                 ifmr->ifm_active |= IFM_10G_T;
1149         else if (speed == SPEED_1000)
1150                 ifmr->ifm_active |= IFM_1000_T;
1151         else if (speed == SPEED_100)
1152                 ifmr->ifm_active |= IFM_100_TX;
1153         else if (speed == SPEED_10)
1154                 ifmr->ifm_active |= IFM_10_T;
1155         else
1156                 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1157                             speed));
1158 }
1159
/*
 * Fatal-error handler.  Clears the SGE global-enable bit, masks the
 * adapter's interrupts, and logs an emergency message.  The adapter is
 * left stopped; no recovery is attempted here.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1168
/*
 * Map the device's register BAR (BAR0) and MSI-X BAR (BAR4).
 * Returns 0 on success, ENXIO if either mapping fails.
 *
 * NOTE(review): if the MSI-X BAR mapping fails, regs_res is not released
 * here — presumably the attach failure path / detach releases it.  Verify
 * against the caller.
 */
static int
map_bars(struct adapter *sc)
{
	/* BAR0: main register space. */
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);

	/* BAR4: MSI-X tables. */
	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1193
1194 static void
1195 setup_memwin(struct adapter *sc)
1196 {
1197         u_long bar0;
1198
1199         bar0 = rman_get_start(sc->regs_res);
1200
1201         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1202                      (bar0 + MEMWIN0_BASE) | V_BIR(0) |
1203                      V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1204
1205         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1206                      (bar0 + MEMWIN1_BASE) | V_BIR(0) |
1207                      V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1208
1209         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1210                      (bar0 + MEMWIN2_BASE) | V_BIR(0) |
1211                      V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1212 }
1213
1214 static int
1215 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1216     struct intrs_and_queues *iaq)
1217 {
1218         int rc, itype, navail, nc, nrxq10g, nrxq1g;
1219
1220         bzero(iaq, sizeof(*iaq));
1221         nc = mp_ncpus;  /* our snapshot of the number of CPUs */
1222
1223         for (itype = INTR_MSIX; itype; itype >>= 1) {
1224
1225                 if ((itype & intr_types) == 0)
1226                         continue;       /* not allowed */
1227
1228                 if (itype == INTR_MSIX)
1229                         navail = pci_msix_count(sc->dev);
1230                 else if (itype == INTR_MSI)
1231                         navail = pci_msi_count(sc->dev);
1232                 else
1233                         navail = 1;
1234
1235                 if (navail == 0)
1236                         continue;
1237
1238                 iaq->intr_type = itype;
1239
1240                 iaq->ntxq10g = min(nc, max_ntxq_10g);
1241                 iaq->ntxq1g = min(nc, max_ntxq_1g);
1242
1243                 nrxq10g = min(nc, max_nrxq_10g);
1244                 nrxq1g = min(nc, max_nrxq_1g);
1245
1246                 iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR;
1247                 if (iaq->nirq <= navail && intr_shared == 0) {
1248
1249                         if (itype == INTR_MSI && !powerof2(iaq->nirq))
1250                                 goto share;
1251
1252                         /* One for err, one for fwq, and one for each rxq */
1253
1254                         iaq->intr_shared = 0;
1255                         iaq->nrxq10g = nrxq10g;
1256                         iaq->nrxq1g = nrxq1g;
1257
1258                 } else {
1259 share:
1260                         iaq->intr_shared = 1;
1261
1262                         if (navail >= nc + T4_EXTRA_INTR) {
1263                                 if (itype == INTR_MSIX)
1264                                         navail = nc + T4_EXTRA_INTR;
1265
1266                                 /* navail is and must remain a pow2 for MSI */
1267                                 if (itype == INTR_MSI) {
1268                                         KASSERT(powerof2(navail),
1269                                             ("%d not power of 2", navail));
1270
1271                                         while (navail / 2 >= nc + T4_EXTRA_INTR)
1272                                                 navail /= 2;
1273                                 }
1274                         }
1275                         iaq->nirq = navail;     /* total # of interrupts */
1276
1277                         /*
1278                          * If we have multiple vectors available reserve one
1279                          * exclusively for errors.  The rest will be shared by
1280                          * the fwq and data.
1281                          */
1282                         if (navail > 1)
1283                                 navail--;
1284                         iaq->nrxq10g = min(nrxq10g, navail);
1285                         iaq->nrxq1g = min(nrxq1g, navail);
1286                 }
1287
1288                 navail = iaq->nirq;
1289                 rc = 0;
1290                 if (itype == INTR_MSIX)
1291                         rc = pci_alloc_msix(sc->dev, &navail);
1292                 else if (itype == INTR_MSI)
1293                         rc = pci_alloc_msi(sc->dev, &navail);
1294
1295                 if (rc == 0) {
1296                         if (navail == iaq->nirq)
1297                                 return (0);
1298
1299                         /*
1300                          * Didn't get the number requested.  Use whatever number
1301                          * the kernel is willing to allocate (it's in navail).
1302                          */
1303                         pci_release_msi(sc->dev);
1304                         goto share;
1305                 }
1306
1307                 device_printf(sc->dev,
1308                     "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1309                     itype, rc, iaq->nirq, navail);
1310         }
1311
1312         device_printf(sc->dev,
1313             "failed to find a usable interrupt type.  "
1314             "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types,
1315             pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1316
1317         return (ENXIO);
1318 }
1319
/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 *
 * Returns 0 with FW_OK set in sc->flags on success; EAGAIN if no usable
 * firmware could be found, or the (positive) firmware error otherwise.
 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw;
	int rc;
	enum dev_state state;

	/* Check firmware version and install a different one if necessary */
	rc = t4_check_fw_version(sc);
	if (rc != 0 || force_firmware_install) {
		uint32_t v = 0;	/* version of the firmware module, if found */

		fw = firmware_get(T4_FWNAME);
		if (fw != NULL) {
			const struct fw_hdr *hdr = (const void *)fw->data;

			v = ntohl(hdr->fw_ver);

			/*
			 * The firmware module will not be used if it isn't the
			 * same major version as what the driver was compiled
			 * with.  This check trumps force_firmware_install.
			 */
			if (G_FW_HDR_FW_VER_MAJOR(v) != FW_VERSION_MAJOR) {
				device_printf(sc->dev,
				    "Found firmware image but version %d "
				    "can not be used with this driver (%d)\n",
				    G_FW_HDR_FW_VER_MAJOR(v), FW_VERSION_MAJOR);

				firmware_put(fw, FIRMWARE_UNLOAD);
				fw = NULL;
			}
		}

		/*
		 * No usable module, and the firmware on the card is either
		 * unusable (rc < 0 from the version check) or an install was
		 * forced: give up.
		 */
		if (fw == NULL && (rc < 0 || force_firmware_install)) {
			device_printf(sc->dev, "No usable firmware. "
			    "card has %d.%d.%d, driver compiled with %d.%d.%d, "
			    "force_firmware_install%s set",
			    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
			    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
			    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
			    FW_VERSION_MAJOR, FW_VERSION_MINOR,
			    FW_VERSION_MICRO,
			    force_firmware_install ? "" : " not");
			return (EAGAIN);
		}

		/*
		 * Always upgrade, even for minor/micro/build mismatches.
		 * Downgrade only for a major version mismatch or if
		 * force_firmware_install was specified.
		 */
		if (fw != NULL && (rc < 0 || force_firmware_install ||
		    v > sc->params.fw_vers)) {
			device_printf(sc->dev,
			    "installing firmware %d.%d.%d.%d on card.\n",
			    G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
			    G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));

			rc = -t4_load_fw(sc, fw->data, fw->datasize);
			if (rc != 0) {
				device_printf(sc->dev,
				    "failed to install firmware: %d\n", rc);
				firmware_put(fw, FIRMWARE_UNLOAD);
				return (rc);
			} else {
				/* refresh */
				(void) t4_check_fw_version(sc);
			}
		}

		if (fw != NULL)
			firmware_put(fw, FIRMWARE_UNLOAD);
	}

	/* Contact firmware, request master */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
	if (rc < 0) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d.\n", rc);
		return (rc);
	}

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		/* Say goodbye unless the firmware itself timed out/errored. */
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		return (rc);
	}

	/* Record the running firmware version for sysctl/ddb consumption. */
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	sc->flags |= FW_OK;

	return (0);
}
1426
1427 static int
1428 get_devlog_params(struct adapter *sc, struct devlog_params *dlog)
1429 {
1430         struct fw_devlog_cmd devlog_cmd;
1431         uint32_t meminfo;
1432         int rc;
1433
1434         bzero(&devlog_cmd, sizeof(devlog_cmd));
1435         devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1436             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1437         devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd));
1438         rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd),
1439             &devlog_cmd);
1440         if (rc != 0) {
1441                 device_printf(sc->dev,
1442                     "failed to get devlog parameters: %d.\n", rc);
1443                 bzero(dlog, sizeof (*dlog));
1444                 return (rc);
1445         }
1446
1447         meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog);
1448         dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo);
1449         dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4;
1450         dlog->size = be32toh(devlog_cmd.memsize_devlog);
1451
1452         return (0);
1453 }
1454
1455 static int
1456 get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
1457 {
1458         int rc;
1459
1460         bzero(caps, sizeof(*caps));
1461         caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1462             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1463         caps->retval_len16 = htobe32(FW_LEN16(*caps));
1464
1465         rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
1466         if (rc != 0)
1467                 return (rc);
1468
1469         if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
1470                 caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);
1471
1472         caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1473             F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1474         rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);
1475
1476         return (rc);
1477 }
1478
/*
 * Query firmware run-time parameters: the port vector, queue/filter ranges,
 * and — when the corresponding capability was granted — the TOE, RDMA, and
 * iSCSI resource ranges.  Results are stored in sc->params, sc->sge,
 * sc->tids, and sc->vres.  Returns 0 on success or the (positive) firmware
 * error from the first failing query.
 */
static int
get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
{
	int rc;
	uint32_t params[7], val[7];

#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(IQFLINT_START);
	params[2] = FW_PARAM_PFVF(EQ_START);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters: %d.\n", rc);
		goto done;
	}

	sc->params.portvec = val[0];
	/* Count set bits in the port vector (Kernighan's trick). */
	sc->params.nports = 0;
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->sge.iq_start = val[1];
	sc->sge.eq_start = val[2];
	sc->tids.ftid_base = val[3];
	sc->tids.nftids = val[4] - val[3] + 1;	/* range is inclusive */

	if (caps->toecaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			goto done;
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (caps->rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters: %d.\n", rc);
			goto done;
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;
	}
	if (caps->iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			goto done;
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

done:
	return (rc);
}
1577
1578 static void
1579 t4_set_desc(struct adapter *sc)
1580 {
1581         char buf[128];
1582         struct adapter_params *p = &sc->params;
1583
1584         snprintf(buf, sizeof(buf),
1585             "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %d %s, S/N:%s, E/C:%s",
1586             p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
1587             p->pci.width, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
1588             (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), p->vpd.sn, p->vpd.ec);
1589
1590         device_set_desc_copy(sc->dev, buf);
1591 }
1592
/*
 * Rebuild the port's ifmedia list from its transceiver type (port_type) and,
 * for pluggable cages, the currently inserted module (mod_type).  Called with
 * the media list possibly already populated, so it starts by clearing it.
 */
static void
build_medialist(struct port_info *pi)
{
        struct ifmedia *media = &pi->media;
        int data, m;

        PORT_LOCK(pi);

        /* Start from scratch; the module may have changed. */
        ifmedia_removeall(media);

        m = IFM_ETHER | IFM_FDX;
        /* Stash port and module type in the media entry's driver data word. */
        data = (pi->port_type << 8) | pi->mod_type;

        switch(pi->port_type) {
        case FW_PORT_TYPE_BT_XFI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                break;

        case FW_PORT_TYPE_BT_XAUI:
                ifmedia_add(media, m | IFM_10G_T, data, NULL);
                /* fall through */

        case FW_PORT_TYPE_BT_SGMII:
                /* BaseT ports get the lower speeds plus autonegotiation. */
                ifmedia_add(media, m | IFM_1000_T, data, NULL);
                ifmedia_add(media, m | IFM_100_TX, data, NULL);
                ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
                ifmedia_set(media, IFM_ETHER | IFM_AUTO);
                break;

        case FW_PORT_TYPE_CX4:
                ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
                ifmedia_set(media, m | IFM_10G_CX4);
                break;

        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
                /* Media depends on what's plugged into the cage. */
                switch (pi->mod_type) {

                case FW_PORT_MOD_TYPE_LR:
                        ifmedia_add(media, m | IFM_10G_LR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LR);
                        break;

                case FW_PORT_MOD_TYPE_SR:
                        ifmedia_add(media, m | IFM_10G_SR, data, NULL);
                        ifmedia_set(media, m | IFM_10G_SR);
                        break;

                case FW_PORT_MOD_TYPE_LRM:
                        ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
                        ifmedia_set(media, m | IFM_10G_LRM);
                        break;

                case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
                case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
                        ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
                        ifmedia_set(media, m | IFM_10G_TWINAX);
                        break;

                case FW_PORT_MOD_TYPE_NONE:
                        /* Empty cage: no link, so drop the FDX bit too. */
                        m &= ~IFM_FDX;
                        ifmedia_add(media, m | IFM_NONE, data, NULL);
                        ifmedia_set(media, m | IFM_NONE);
                        break;

                case FW_PORT_MOD_TYPE_NA:
                case FW_PORT_MOD_TYPE_ER:
                default:
                        ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                        ifmedia_set(media, m | IFM_UNKNOWN);
                        break;
                }
                break;

        case FW_PORT_TYPE_KX4:
        case FW_PORT_TYPE_KX:
        case FW_PORT_TYPE_KR:
        default:
                /* Backplane and anything unrecognized. */
                ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
                ifmedia_set(media, m | IFM_UNKNOWN);
                break;
        }

        PORT_UNLOCK(pi);
}
1679
1680 /*
1681  * Program the port's XGMAC based on parameters in ifnet.  The caller also
1682  * indicates which parameters should be programmed (the rest are left alone).
1683  */
1684 static int
1685 update_mac_settings(struct port_info *pi, int flags)
1686 {
1687         int rc;
1688         struct ifnet *ifp = pi->ifp;
1689         struct adapter *sc = pi->adapter;
1690         int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
1691
1692         PORT_LOCK_ASSERT_OWNED(pi);
1693         KASSERT(flags, ("%s: not told what to update.", __func__));
1694
1695         if (flags & XGMAC_MTU)
1696                 mtu = ifp->if_mtu;
1697
1698         if (flags & XGMAC_PROMISC)
1699                 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
1700
1701         if (flags & XGMAC_ALLMULTI)
1702                 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
1703
1704         if (flags & XGMAC_VLANEX)
1705                 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
1706
1707         rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
1708             vlanex, false);
1709         if (rc) {
1710                 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
1711                 return (rc);
1712         }
1713
1714         if (flags & XGMAC_UCADDR) {
1715                 uint8_t ucaddr[ETHER_ADDR_LEN];
1716
1717                 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
1718                 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
1719                     ucaddr, true, true);
1720                 if (rc < 0) {
1721                         rc = -rc;
1722                         if_printf(ifp, "change_mac failed: %d\n", rc);
1723                         return (rc);
1724                 } else {
1725                         pi->xact_addr_filt = rc;
1726                         rc = 0;
1727                 }
1728         }
1729
1730         if (flags & XGMAC_MCADDRS) {
1731                 const uint8_t *mcaddr;
1732                 int del = 1;
1733                 uint64_t hash = 0;
1734                 struct ifmultiaddr *ifma;
1735
1736                 if_maddr_rlock(ifp);
1737                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1738                         if (ifma->ifma_addr->sa_family != AF_LINK)
1739                                 continue;
1740                         mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1741
1742                         rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
1743                             &mcaddr, NULL, &hash, 0);
1744                         if (rc < 0) {
1745                                 rc = -rc;
1746                                 if_printf(ifp, "failed to add mc address"
1747                                     " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
1748                                     mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
1749                                     mcaddr[4], mcaddr[5], rc);
1750                                 goto mcfail;
1751                         }
1752                         del = 0;
1753                 }
1754
1755                 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
1756                 if (rc != 0)
1757                         if_printf(ifp, "failed to set mc address hash: %d", rc);
1758 mcfail:
1759                 if_maddr_runlock(ifp);
1760         }
1761
1762         return (rc);
1763 }
1764
1765 static int
1766 cxgbe_init_locked(struct port_info *pi)
1767 {
1768         struct adapter *sc = pi->adapter;
1769         int rc = 0;
1770
1771         ADAPTER_LOCK_ASSERT_OWNED(sc);
1772
1773         while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
1774                 if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
1775                         rc = EINTR;
1776                         goto done;
1777                 }
1778         }
1779         if (IS_DOOMED(pi)) {
1780                 rc = ENXIO;
1781                 goto done;
1782         }
1783         KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
1784
1785         /* Give up the adapter lock, port init code can sleep. */
1786         SET_BUSY(sc);
1787         ADAPTER_UNLOCK(sc);
1788
1789         rc = cxgbe_init_synchronized(pi);
1790
1791 done:
1792         ADAPTER_LOCK(sc);
1793         KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
1794         CLR_BUSY(sc);
1795         wakeup_one(&sc->flags);
1796         ADAPTER_UNLOCK(sc);
1797         return (rc);
1798 }
1799
/*
 * Do the actual (sleepable) work of bringing a port up: adapter-wide setup if
 * this is the first port, then this port's queues, RSS table, MAC settings,
 * link, and virtual interface.  Must be called without the adapter lock.
 * On any failure the port is torn back down before returning.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;
        int rc = 0, i;
        uint16_t *rss;
        struct sge_rxq *rxq;

        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

        if (isset(&sc->open_device_map, pi->port_id)) {
                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("mismatch between open_device_map and if_drv_flags"));
                return (0);     /* already running */
        }

        /* First port up does the adapter-wide queue/interrupt setup. */
        if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0))
                return (rc);    /* error message displayed already */

        /*
         * Allocate tx/rx/fl queues for this port.
         */
        rc = t4_setup_eth_queues(pi);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * Setup RSS for this port.  The table is filled with the absolute ids
         * of this port's rx queues, repeated as needed to cover rss_size.
         */
        rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
        for_each_rxq(pi, i, rxq) {
                rss[i] = rxq->iq.abs_id;
        }
        rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
            pi->nrxq);
        free(rss, M_CXGBE);
        if (rc != 0) {
                if_printf(ifp, "rss_config failed: %d\n", rc);
                goto done;
        }

        /* Program everything the XGMAC takes from the ifnet. */
        PORT_LOCK(pi);
        rc = update_mac_settings(pi, XGMAC_ALL);
        PORT_UNLOCK(pi);
        if (rc)
                goto done;      /* error message displayed already */

        rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
        if (rc != 0) {
                if_printf(ifp, "start_link failed: %d\n", rc);
                goto done;
        }

        rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
        if (rc != 0) {
                if_printf(ifp, "enable_vi failed: %d\n", rc);
                goto done;
        }
        pi->flags |= VI_ENABLED;

        /* all ok */
        setbit(&sc->open_device_map, pi->port_id);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Start the periodic stats/housekeeping callout. */
        callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
        if (rc != 0)
                cxgbe_uninit_synchronized(pi);  /* idempotent cleanup */

        return (rc);
}
1873
/*
 * Bring the port down.  Mirror image of cxgbe_init_locked: called with the
 * adapter lock held, waits for its turn, marks the adapter busy, drops the
 * lock for the sleepable teardown, then clears busy and wakes the next
 * waiter.  Returns with the adapter lock released.
 */
static int
cxgbe_uninit_locked(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        int rc;

        ADAPTER_LOCK_ASSERT_OWNED(sc);

        while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
                if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
                        rc = EINTR;
                        goto done;
                }
        }
        if (IS_DOOMED(pi)) {
                rc = ENXIO;
                goto done;
        }
        KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
        SET_BUSY(sc);
        ADAPTER_UNLOCK(sc);

        rc = cxgbe_uninit_synchronized(pi);

        ADAPTER_LOCK(sc);
        KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
        CLR_BUSY(sc);
        wakeup_one(&sc->flags);
done:
        /* Error paths jump here still holding the lock and with BUSY unset. */
        ADAPTER_UNLOCK(sc);
        return (rc);
}
1906
1907 /*
1908  * Idempotent.
1909  */
1910 static int
1911 cxgbe_uninit_synchronized(struct port_info *pi)
1912 {
1913         struct adapter *sc = pi->adapter;
1914         struct ifnet *ifp = pi->ifp;
1915         int rc;
1916
1917         /*
1918          * taskqueue_drain may cause a deadlock if the adapter lock is held.
1919          */
1920         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1921
1922         /*
1923          * Clear this port's bit from the open device map, and then drain
1924          * tasks and callouts.
1925          */
1926         clrbit(&sc->open_device_map, pi->port_id);
1927
1928         PORT_LOCK(pi);
1929         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1930         callout_stop(&pi->tick);
1931         PORT_UNLOCK(pi);
1932         callout_drain(&pi->tick);
1933
1934         /*
1935          * Stop and then free the queues' resources, including the queues
1936          * themselves.
1937          *
1938          * XXX: we could just stop the queues here (on ifconfig down) and free
1939          * them later (on port detach), but having up/down go through the entire
1940          * allocate/activate/deactivate/free sequence is a good way to find
1941          * leaks and bugs.
1942          */
1943         rc = t4_teardown_eth_queues(pi);
1944         if (rc != 0)
1945                 if_printf(ifp, "teardown failed: %d\n", rc);
1946
1947         if (pi->flags & VI_ENABLED) {
1948                 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
1949                 if (rc)
1950                         if_printf(ifp, "disable_vi failed: %d\n", rc);
1951                 else
1952                         pi->flags &= ~VI_ENABLED;
1953         }
1954
1955         pi->link_cfg.link_ok = 0;
1956         pi->link_cfg.speed = 0;
1957         t4_os_link_changed(sc, pi->port_id, 0);
1958
1959         if (sc->open_device_map == 0)
1960                 last_port_down(sc);
1961
1962         return (0);
1963 }
1964
/* Helper: allocate an IRQ and bail out of first_port_up on failure. */
#define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
        rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
        if (rc != 0) \
                goto done; \
} while (0)
/*
 * Adapter-wide initialization performed when the first port is brought up:
 * set up the queues that belong to the adapter itself and hook up all
 * interrupt handlers.  Undone by last_port_down (which is also called here
 * on failure, as it is idempotent).
 */
static int
first_port_up(struct adapter *sc)
{
        int rc, i, rid, p, q;
        char s[8];
        struct irq *irq;
        struct sge_iq *intrq;

        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

        /*
         * queues that belong to the adapter (not any particular port).
         */
        rc = t4_setup_adapter_queues(sc);
        if (rc != 0)
                goto done;

        /*
         * Setup interrupts.  irq and rid advance together through sc->irq[];
         * resource ids start at 0 for INTx and 1 for MSI/MSI-X.
         */
        irq = &sc->irq[0];
        rid = sc->intr_type == INTR_INTX ? 0 : 1;
        if (sc->intr_count == 1) {
                KASSERT(sc->flags & INTR_SHARED,
                    ("%s: single interrupt but not shared?", __func__));

                /* One handler services everything. */
                T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
        } else {
                /* Multiple interrupts.  The first one is always error intr */
                T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
                irq++;
                rid++;

                /* Firmware event queue normally has an interrupt of its own */
                if (sc->intr_count > T4_EXTRA_INTR) {
                        T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
                            "evt");
                        irq++;
                        rid++;
                }

                intrq = &sc->sge.intrq[0];
                if (sc->flags & INTR_SHARED) {

                        /* All ports share these interrupt queues */

                        for (i = 0; i < NINTRQ(sc); i++) {
                                snprintf(s, sizeof(s), "*.%d", i);
                                T4_ALLOC_IRQ(sc, irq, rid, t4_intr, intrq, s);
                                irq++;
                                rid++;
                                intrq++;
                        }
                } else {

                        /* Each port has its own set of interrupt queues */

                        for (p = 0; p < sc->params.nports; p++) {
                                for (q = 0; q < sc->port[p]->nrxq; q++) {
                                        snprintf(s, sizeof(s), "%d.%d", p, q);
                                        T4_ALLOC_IRQ(sc, irq, rid, t4_intr,
                                            intrq, s);
                                        irq++;
                                        rid++;
                                        intrq++;
                                }
                        }
                }
        }

        t4_intr_enable(sc);
        sc->flags |= FULL_INIT_DONE;

done:
        /* last_port_down is idempotent and safe on partial setup. */
        if (rc != 0)
                last_port_down(sc);

        return (rc);
}
#undef T4_ALLOC_IRQ
2050
2051 /*
2052  * Idempotent.
2053  */
2054 static int
2055 last_port_down(struct adapter *sc)
2056 {
2057         int i;
2058
2059         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2060
2061         t4_intr_disable(sc);
2062
2063         t4_teardown_adapter_queues(sc);
2064
2065         for (i = 0; i < sc->intr_count; i++)
2066                 t4_free_irq(sc, &sc->irq[i]);
2067
2068         sc->flags &= ~FULL_INIT_DONE;
2069
2070         return (0);
2071 }
2072
/*
 * Allocate an interrupt resource for the given rid and install the handler.
 * On bus_setup_intr failure the resource stays recorded in *irq (tag NULL),
 * so a later t4_free_irq still releases it.  Returns 0 or an errno.
 */
static int
t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
    iq_intr_handler_t *handler, void *arg, char *name)
{
        int rc;

        irq->rid = rid;
        irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (irq->res == NULL) {
                device_printf(sc->dev,
                    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
                return (ENOMEM);
        }

        /* Filter is NULL: handler runs as an ithread. */
        rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
            NULL, handler, arg, &irq->tag);
        if (rc != 0) {
                device_printf(sc->dev,
                    "failed to setup interrupt for rid %d, name %s: %d\n",
                    rid, name, rc);
        } else if (name)
                bus_describe_intr(sc->dev, irq->res, irq->tag, name);

        return (rc);
}
2099
2100 static int
2101 t4_free_irq(struct adapter *sc, struct irq *irq)
2102 {
2103         if (irq->tag)
2104                 bus_teardown_intr(sc->dev, irq->res, irq->tag);
2105         if (irq->res)
2106                 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
2107
2108         bzero(irq, sizeof(*irq));
2109
2110         return (0);
2111 }
2112
2113 static void
2114 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
2115     unsigned int end)
2116 {
2117         uint32_t *p = (uint32_t *)(buf + start);
2118
2119         for ( ; start <= end; start += sizeof(uint32_t))
2120                 *p++ = t4_read_reg(sc, start);
2121 }
2122
/*
 * Fill buf with a dump of all interesting T4 registers, covered by the
 * inclusive [start, end] pairs in reg_ranges below.  The version word
 * encodes the dump format (4) and the chip revision.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
        int i;
        /* Pairs of inclusive register offsets: start0, end0, start1, end1... */
        static const unsigned int reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e240, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e640, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea40, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee40, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f240, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f640, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa40, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe40, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        /* Dump format version 4, with the chip revision in bits 10+. */
        regs->version = 4 | (sc->params.rev << 10);
        for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
                reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
2349
/*
 * Periodic (1 Hz) callout for a running port: pull the hardware port stats
 * and mirror them into the ifnet counters, then reschedule.  Stops itself
 * when the interface is no longer IFF_DRV_RUNNING.
 */
static void
cxgbe_tick(void *arg)
{
        struct port_info *pi = arg;
        struct ifnet *ifp = pi->ifp;
        struct sge_txq *txq;
        int i, drops;
        struct port_stats *s = &pi->stats;

        PORT_LOCK(pi);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(pi);
                return; /* without scheduling another callout */
        }

        t4_get_port_stats(pi->adapter, pi->tx_chan, s);

        ifp->if_opackets = s->tx_frames;
        ifp->if_ipackets = s->rx_frames;
        ifp->if_obytes = s->tx_octets;
        ifp->if_ibytes = s->rx_octets;
        ifp->if_omcasts = s->tx_mcast_frames;
        ifp->if_imcasts = s->rx_mcast_frames;
        ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
            s->rx_ovflow3;

        /* Send-queue drops: hardware tx drops plus each txq's buf_ring drops. */
        drops = s->tx_drop;
        for_each_txq(pi, i, txq)
                drops += txq->br->br_drops;
        ifp->if_snd.ifq_drops = drops;

        ifp->if_oerrors = s->tx_error_frames;
        ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
            s->rx_fcs_err + s->rx_len_err;

        callout_schedule(&pi->tick, hz);
        PORT_UNLOCK(pi);
}
2388
/*
 * Register the adapter-level sysctl nodes under the device's sysctl tree
 * (dev.t4nex.X.*).  All nodes are torn down automatically with the device's
 * sysctl context.  Always returns 0.
 */
static int
t4_sysctls(struct adapter *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(sc->dev);
        oid = device_get_sysctl_tree(sc->dev);
        children = SYSCTL_CHILDREN(oid);

        /* Read-only adapter identification and capability info. */
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
            &sc->params.nports, 0, "# of ports");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
            &sc->params.rev, 0, "chip hardware revision");

        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
            CTLFLAG_RD, &sc->fw_version, 0, "firmware version");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD,
            &sc->params.offload, 0, "hardware is capable of TCP offload");

        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
            &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");

        /* Interrupt holdoff settings, rendered by sysctl_int_array. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
            CTLTYPE_STRING | CTLFLAG_RD, &intr_timer, sizeof(intr_timer),
            sysctl_int_array, "A", "interrupt holdoff timer values (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
            CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount),
            sysctl_int_array, "A", "interrupt holdoff packet counter values");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
            sysctl_devlog, "A", "device log");

        return (0);
}
2429
/*
 * Register the per-port sysctl tree: dev.cxgbe.X.* holds queue geometry and
 * tuning knobs, dev.cxgbe.X.stats.* holds hardware MPS port counters plus a
 * few software-maintained buffer-group statistics.  The sysctl context is
 * owned by the device and torn down on detach.  Always returns 0.
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *oid;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(pi->dev);

        /*
         * dev.cxgbe.X.
         */
        oid = device_get_sysctl_tree(pi->dev);
        children = SYSCTL_CHILDREN(oid);

        /* Read-only queue geometry decided at attach time. */
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
            &pi->nrxq, 0, "# of rx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
            &pi->ntxq, 0, "# of tx queues");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
            &pi->first_rxq, 0, "index of first rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
            &pi->first_txq, 0, "index of first tx queue");

        /* Writable knobs; validation lives in the handler functions. */
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
            "holdoff timer index");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
            "holdoff packet counter index");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
            "rx queue size");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
            CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
            "tx queue size");

        /*
         * dev.cxgbe.X.stats.
         */
        oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
            NULL, "port statistics");
        children = SYSCTL_CHILDREN(oid);

/*
 * Each hardware stat below is exported by reading a 64-bit register at the
 * given offset via sysctl_handle_t4_reg64 (arg1 = adapter, arg2 = reg).
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
        SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
            CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
            sysctl_handle_t4_reg64, "QU", desc)

        SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
            "# of tx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

        /* Note: RX stat registers are also indexed by tx_chan. */
        SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
            "# of frames received with bad FCS",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
            "# of frames received with length error",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
            "# of rx frames in this range",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
        SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
            PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Software-maintained counters exported directly from pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
        SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
            &pi->stats.name, desc)

        /* We get these from port_stats and they may be stale by up to 1s */
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
            "# drops due to buffer-group 0 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
            "# drops due to buffer-group 1 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
            "# drops due to buffer-group 2 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
            "# drops due to buffer-group 3 overflows");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
            "# of buffer-group 0 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
            "# of buffer-group 1 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
            "# of buffer-group 2 truncated packets");
        SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
            "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

        return (0);
}
2626
2627 static int
2628 sysctl_int_array(SYSCTL_HANDLER_ARGS)
2629 {
2630         int rc, *i;
2631         struct sbuf sb;
2632
2633         sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
2634         for (i = arg1; arg2; arg2 -= sizeof(int), i++)
2635                 sbuf_printf(&sb, "%d ", *i);
2636         sbuf_trim(&sb);
2637         sbuf_finish(&sb);
2638         rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2639         sbuf_delete(&sb);
2640         return (rc);
2641 }
2642
2643 static int
2644 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
2645 {
2646         struct port_info *pi = arg1;
2647         struct adapter *sc = pi->adapter;
2648         struct sge_rxq *rxq;
2649         int idx, rc, i;
2650
2651         idx = pi->tmr_idx;
2652
2653         rc = sysctl_handle_int(oidp, &idx, 0, req);
2654         if (rc != 0 || req->newptr == NULL)
2655                 return (rc);
2656
2657         if (idx < 0 || idx >= SGE_NTIMERS)
2658                 return (EINVAL);
2659
2660         ADAPTER_LOCK(sc);
2661         rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2662         if (rc == 0) {
2663                 for_each_rxq(pi, i, rxq) {
2664                         rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
2665                             V_QINTR_CNT_EN(pi->pktc_idx != -1);
2666                 }
2667                 pi->tmr_idx = idx;
2668         }
2669
2670         ADAPTER_UNLOCK(sc);
2671         return (rc);
2672 }
2673
2674 static int
2675 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
2676 {
2677         struct port_info *pi = arg1;
2678         struct adapter *sc = pi->adapter;
2679         int idx, rc;
2680
2681         idx = pi->pktc_idx;
2682
2683         rc = sysctl_handle_int(oidp, &idx, 0, req);
2684         if (rc != 0 || req->newptr == NULL)
2685                 return (rc);
2686
2687         if (idx < -1 || idx >= SGE_NCOUNTERS)
2688                 return (EINVAL);
2689
2690         ADAPTER_LOCK(sc);
2691         rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2692         if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2693                 rc = EBUSY; /* can be changed only when port is down */
2694
2695         if (rc == 0)
2696                 pi->pktc_idx = idx;
2697
2698         ADAPTER_UNLOCK(sc);
2699         return (rc);
2700 }
2701
2702 static int
2703 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
2704 {
2705         struct port_info *pi = arg1;
2706         struct adapter *sc = pi->adapter;
2707         int qsize, rc;
2708
2709         qsize = pi->qsize_rxq;
2710
2711         rc = sysctl_handle_int(oidp, &qsize, 0, req);
2712         if (rc != 0 || req->newptr == NULL)
2713                 return (rc);
2714
2715         if (qsize < 128 || (qsize & 7))
2716                 return (EINVAL);
2717
2718         ADAPTER_LOCK(sc);
2719         rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2720         if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2721                 rc = EBUSY; /* can be changed only when port is down */
2722
2723         if (rc == 0)
2724                 pi->qsize_rxq = qsize;
2725
2726         ADAPTER_UNLOCK(sc);
2727         return (rc);
2728 }
2729
2730 static int
2731 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
2732 {
2733         struct port_info *pi = arg1;
2734         struct adapter *sc = pi->adapter;
2735         int qsize, rc;
2736
2737         qsize = pi->qsize_txq;
2738
2739         rc = sysctl_handle_int(oidp, &qsize, 0, req);
2740         if (rc != 0 || req->newptr == NULL)
2741                 return (rc);
2742
2743         if (qsize < 128)
2744                 return (EINVAL);
2745
2746         ADAPTER_LOCK(sc);
2747         rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
2748         if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2749                 rc = EBUSY; /* can be changed only when port is down */
2750
2751         if (rc == 0)
2752                 pi->qsize_txq = qsize;
2753
2754         ADAPTER_UNLOCK(sc);
2755         return (rc);
2756 }
2757
2758 static int
2759 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
2760 {
2761         struct adapter *sc = arg1;
2762         int reg = arg2;
2763         uint64_t val;
2764
2765         val = t4_read_reg64(sc, reg);
2766
2767         return (sysctl_handle_64(oidp, &val, 0, req));
2768 }
2769
/* Printable names for firmware devlog severity levels, indexed by level. */
const char *devlog_level_strings[] = {
        [FW_DEVLOG_LEVEL_EMERG]         = "EMERG",
        [FW_DEVLOG_LEVEL_CRIT]          = "CRIT",
        [FW_DEVLOG_LEVEL_ERR]           = "ERR",
        [FW_DEVLOG_LEVEL_NOTICE]        = "NOTICE",
        [FW_DEVLOG_LEVEL_INFO]          = "INFO",
        [FW_DEVLOG_LEVEL_DEBUG]         = "DEBUG"
};
2778
/* Printable names for firmware devlog facilities, indexed by facility. */
const char *devlog_facility_strings[] = {
        [FW_DEVLOG_FACILITY_CORE]       = "CORE",
        [FW_DEVLOG_FACILITY_SCHED]      = "SCHED",
        [FW_DEVLOG_FACILITY_TIMER]      = "TIMER",
        [FW_DEVLOG_FACILITY_RES]        = "RES",
        [FW_DEVLOG_FACILITY_HW]         = "HW",
        [FW_DEVLOG_FACILITY_FLR]        = "FLR",
        [FW_DEVLOG_FACILITY_DMAQ]       = "DMAQ",
        [FW_DEVLOG_FACILITY_PHY]        = "PHY",
        [FW_DEVLOG_FACILITY_MAC]        = "MAC",
        [FW_DEVLOG_FACILITY_PORT]       = "PORT",
        [FW_DEVLOG_FACILITY_VI]         = "VI",
        [FW_DEVLOG_FACILITY_FILTER]     = "FILTER",
        [FW_DEVLOG_FACILITY_ACL]        = "ACL",
        [FW_DEVLOG_FACILITY_TM]         = "TM",
        [FW_DEVLOG_FACILITY_QFC]        = "QFC",
        [FW_DEVLOG_FACILITY_DCB]        = "DCB",
        [FW_DEVLOG_FACILITY_ETH]        = "ETH",
        [FW_DEVLOG_FACILITY_OFLD]       = "OFLD",
        [FW_DEVLOG_FACILITY_RI]         = "RI",
        [FW_DEVLOG_FACILITY_ISCSI]      = "ISCSI",
        [FW_DEVLOG_FACILITY_FCOE]       = "FCOE",
        [FW_DEVLOG_FACILITY_FOISCSI]    = "FOISCSI",
        [FW_DEVLOG_FACILITY_FOFCOE]     = "FOFCOE"
};
2804
/*
 * Sysctl handler that dumps the firmware's device log.  The log is a
 * circular buffer of fw_devlog_e entries in adapter memory (location and
 * size come from sc->params.devlog); entries are printed oldest first.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
        struct adapter *sc = arg1;
        struct devlog_params *dparams = &sc->params.devlog;
        struct fw_devlog_e *buf, *e;
        int i, j, rc, nentries, first = 0;
        struct sbuf *sb;
        uint64_t ftstamp = UINT64_MAX;

        /* No devlog parameters means the firmware log isn't available. */
        if (dparams->start == 0)
                return (ENXIO);

        nentries = dparams->size / sizeof(struct fw_devlog_e);

        buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
        if (buf == NULL)
                return (ENOMEM);

        /* Snapshot the entire log region into the local buffer. */
        rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
            (void *)buf);
        if (rc != 0)
                goto done;

        /*
         * Byte-swap each entry from the firmware's big-endian layout and
         * locate the oldest entry (lowest timestamp) — that is where the
         * circular log logically begins.
         */
        for (i = 0; i < nentries; i++) {
                e = &buf[i];

                if (e->timestamp == 0)
                        break;  /* end */

                e->timestamp = be64toh(e->timestamp);
                e->seqno = be32toh(e->seqno);
                for (j = 0; j < 8; j++)
                        e->params[j] = be32toh(e->params[j]);

                if (e->timestamp < ftstamp) {
                        ftstamp = e->timestamp;
                        first = i;
                }
        }

        if (buf[first].timestamp == 0)
                goto done;      /* nothing in the log */

        rc = sysctl_wire_old_buffer(req, 0);
        if (rc != 0)
                goto done;

        sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
        sbuf_printf(sb, "\n%10s  %15s  %8s  %8s  %s\n",
            "Seq#", "Tstamp", "Level", "Facility", "Message");

        /* Walk the circular buffer once, starting at the oldest entry. */
        i = first;
        do {
                e = &buf[i];
                if (e->timestamp == 0)
                        break;  /* end */

                sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
                    e->seqno, e->timestamp,
                    (e->level < ARRAY_SIZE(devlog_level_strings) ?
                        devlog_level_strings[e->level] : "UNKNOWN"),
                    (e->facility < ARRAY_SIZE(devlog_facility_strings) ?
                        devlog_facility_strings[e->facility] : "UNKNOWN"));
                /* e->fmt is a firmware-supplied format; params fill it in. */
                sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
                    e->params[2], e->params[3], e->params[4],
                    e->params[5], e->params[6], e->params[7]);

                if (++i == nentries)
                        i = 0;
        } while (i != first);

        rc = sbuf_finish(sb);
        sbuf_delete(sb);
done:
        free(buf, M_CXGBE);
        return (rc);
}
2883
2884 static inline void
2885 txq_start(struct ifnet *ifp, struct sge_txq *txq)
2886 {
2887         struct buf_ring *br;
2888         struct mbuf *m;
2889
2890         TXQ_LOCK_ASSERT_OWNED(txq);
2891
2892         br = txq->br;
2893         m = txq->m ? txq->m : drbr_dequeue(ifp, br);
2894         if (m)
2895                 t4_eth_tx(ifp, txq, m);
2896 }
2897
2898 void
2899 cxgbe_txq_start(void *arg, int count)
2900 {
2901         struct sge_txq *txq = arg;
2902
2903         TXQ_LOCK(txq);
2904         if (txq->eq.flags & EQ_CRFLUSHED) {
2905                 txq->eq.flags &= ~EQ_CRFLUSHED;
2906                 txq_start(txq->ifp, txq);
2907         } else
2908                 wakeup_one(txq);        /* txq is going away, wakeup free_txq */
2909         TXQ_UNLOCK(txq);
2910 }
2911
2912 static uint32_t
2913 fconf_to_mode(uint32_t fconf)
2914 {
2915         uint32_t mode;
2916
2917         mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
2918             T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
2919
2920         if (fconf & F_FRAGMENTATION)
2921                 mode |= T4_FILTER_IP_FRAGMENT;
2922
2923         if (fconf & F_MPSHITTYPE)
2924                 mode |= T4_FILTER_MPS_HIT_TYPE;
2925
2926         if (fconf & F_MACMATCH)
2927                 mode |= T4_FILTER_MAC_IDX;
2928
2929         if (fconf & F_ETHERTYPE)
2930                 mode |= T4_FILTER_ETH_TYPE;
2931
2932         if (fconf & F_PROTOCOL)
2933                 mode |= T4_FILTER_IP_PROTO;
2934
2935         if (fconf & F_TOS)
2936                 mode |= T4_FILTER_IP_TOS;
2937
2938         if (fconf & F_VLAN)
2939                 mode |= T4_FILTER_IVLAN;
2940
2941         if (fconf & F_VNIC_ID)
2942                 mode |= T4_FILTER_OVLAN;
2943
2944         if (fconf & F_PORT)
2945                 mode |= T4_FILTER_PORT;
2946
2947         if (fconf & F_FCOE)
2948                 mode |= T4_FILTER_FCoE;
2949
2950         return (mode);
2951 }
2952
2953 static uint32_t
2954 mode_to_fconf(uint32_t mode)
2955 {
2956         uint32_t fconf = 0;
2957
2958         if (mode & T4_FILTER_IP_FRAGMENT)
2959                 fconf |= F_FRAGMENTATION;
2960
2961         if (mode & T4_FILTER_MPS_HIT_TYPE)
2962                 fconf |= F_MPSHITTYPE;
2963
2964         if (mode & T4_FILTER_MAC_IDX)
2965                 fconf |= F_MACMATCH;
2966
2967         if (mode & T4_FILTER_ETH_TYPE)
2968                 fconf |= F_ETHERTYPE;
2969
2970         if (mode & T4_FILTER_IP_PROTO)
2971                 fconf |= F_PROTOCOL;
2972
2973         if (mode & T4_FILTER_IP_TOS)
2974                 fconf |= F_TOS;
2975
2976         if (mode & T4_FILTER_IVLAN)
2977                 fconf |= F_VLAN;
2978
2979         if (mode & T4_FILTER_OVLAN)
2980                 fconf |= F_VNIC_ID;
2981
2982         if (mode & T4_FILTER_PORT)
2983                 fconf |= F_PORT;
2984
2985         if (mode & T4_FILTER_FCoE)
2986                 fconf |= F_FCOE;
2987
2988         return (fconf);
2989 }
2990
2991 static uint32_t
2992 fspec_to_fconf(struct t4_filter_specification *fs)
2993 {
2994         uint32_t fconf = 0;
2995
2996         if (fs->val.frag || fs->mask.frag)
2997                 fconf |= F_FRAGMENTATION;
2998
2999         if (fs->val.matchtype || fs->mask.matchtype)
3000                 fconf |= F_MPSHITTYPE;
3001
3002         if (fs->val.macidx || fs->mask.macidx)
3003                 fconf |= F_MACMATCH;
3004
3005         if (fs->val.ethtype || fs->mask.ethtype)
3006                 fconf |= F_ETHERTYPE;
3007
3008         if (fs->val.proto || fs->mask.proto)
3009                 fconf |= F_PROTOCOL;
3010
3011         if (fs->val.tos || fs->mask.tos)
3012                 fconf |= F_TOS;
3013
3014         if (fs->val.ivlan_vld || fs->mask.ivlan_vld)
3015                 fconf |= F_VLAN;
3016
3017         if (fs->val.ovlan_vld || fs->mask.ovlan_vld)
3018                 fconf |= F_VNIC_ID;
3019
3020         if (fs->val.iport || fs->mask.iport)
3021                 fconf |= F_PORT;
3022
3023         if (fs->val.fcoe || fs->mask.fcoe)
3024                 fconf |= F_FCOE;
3025
3026         return (fconf);
3027 }
3028
3029 static int
3030 get_filter_mode(struct adapter *sc, uint32_t *mode)
3031 {
3032         uint32_t fconf;
3033
3034         t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
3035             A_TP_VLAN_PRI_MAP);
3036
3037         *mode = fconf_to_mode(fconf);
3038
3039         return (0);
3040 }
3041
3042 static int
3043 set_filter_mode(struct adapter *sc, uint32_t mode)
3044 {
3045         uint32_t fconf;
3046         int rc;
3047
3048         fconf = mode_to_fconf(mode);
3049
3050         ADAPTER_LOCK(sc);
3051         if (IS_BUSY(sc)) {
3052                 rc = EAGAIN;
3053                 goto done;
3054         }
3055
3056         if (sc->tids.ftids_in_use > 0) {
3057                 rc = EBUSY;
3058                 goto done;
3059         }
3060
3061         rc = -t4_set_filter_mode(sc, fconf);
3062 done:
3063         ADAPTER_UNLOCK(sc);
3064         return (rc);
3065 }
3066
/*
 * Read the hit count for filter `fid` out of its TCB in adapter memory,
 * via PCIe memory window 0.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
        /* Base of the TCB region in adapter memory. */
        uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
        uint64_t hits;

        /* Point memory window 0 at this filter's TCB. */
        t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
            tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
        /* Read back to make sure the window move has taken effect. */
        t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
        /* Offset 16 into the TCB; presumably the hit counter field —
         * confirm against the TCB layout in the hardware documentation. */
        hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);

        return (be64toh(hits));
}
3080
3081 static int
3082 get_filter(struct adapter *sc, struct t4_filter *t)
3083 {
3084         int i, nfilters = sc->tids.nftids;
3085         struct filter_entry *f;
3086
3087         ADAPTER_LOCK_ASSERT_OWNED(sc);
3088
3089         if (IS_BUSY(sc))
3090                 return (EAGAIN);
3091
3092         if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
3093             t->idx >= nfilters) {
3094                 t->idx = 0xffffffff;
3095                 return (0);
3096         }
3097
3098         f = &sc->tids.ftid_tab[t->idx];
3099         for (i = t->idx; i < nfilters; i++, f++) {
3100                 if (f->valid) {
3101                         t->idx = i;
3102                         t->l2tidx = f->l2t ? f->l2t->idx : 0;
3103                         t->smtidx = f->smtidx;
3104                         if (f->fs.hitcnts)
3105                                 t->hits = get_filter_hits(sc, t->idx);
3106                         else
3107                                 t->hits = UINT64_MAX;
3108                         t->fs = f->fs;
3109
3110                         return (0);
3111                 }
3112         }
3113
3114         t->idx = 0xffffffff;
3115         return (0);
3116 }
3117
/*
 * Validate and install a filter at index t->idx.  Checks the request
 * against the global filter mode and various limits, allocates the filter
 * table on first use, verifies the target slot(s) are free, then hands the
 * work request off via set_filter_wr.  Caller must hold the adapter lock.
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
        uint32_t fconf;
        unsigned int nfilters, nports;
        struct filter_entry *f;
        int i;

        ADAPTER_LOCK_ASSERT_OWNED(sc);

        nfilters = sc->tids.nftids;
        nports = sc->params.nports;

        if (nfilters == 0)
                return (ENOTSUP);

        if (!(sc->flags & FULL_INIT_DONE))
                return (EAGAIN);

        if (t->idx >= nfilters)
                return (EINVAL);

        /* Validate against the global filter mode */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
            A_TP_VLAN_PRI_MAP);
        /* Every field the spec matches on must be enabled in hardware. */
        if ((fconf | fspec_to_fconf(&t->fs)) != fconf)
                return (E2BIG);

        if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
                return (EINVAL);

        if (t->fs.val.iport >= nports)
                return (EINVAL);

        /* Can't specify an iq if not steering to it */
        if (!t->fs.dirsteer && t->fs.iq)
                return (EINVAL);

        /* IPv6 filter idx must be 4 aligned */
        if (t->fs.type == 1 &&
            ((t->idx & 0x3) || t->idx + 4 >= nfilters))
                return (EINVAL);

        /* Allocate the filter table lazily, on the first filter. */
        if (sc->tids.ftid_tab == NULL) {
                KASSERT(sc->tids.ftids_in_use == 0,
                    ("%s: no memory allocated but filters_in_use > 0",
                    __func__));

                sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
                    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
                if (sc->tids.ftid_tab == NULL)
                        return (ENOMEM);
        }

        /*
         * An IPv6 filter (type 1) occupies 4 consecutive slots; check them
         * all.  An IPv4 filter uses one slot (break after the first).
         */
        for (i = 0; i < 4; i++) {
                f = &sc->tids.ftid_tab[t->idx + i];

                if (f->pending || f->valid)
                        return (EBUSY);
                if (f->locked)
                        return (EPERM);

                if (t->fs.type == 0)
                        break;
        }

        f = &sc->tids.ftid_tab[t->idx];
        f->fs = t->fs;

        return set_filter_wr(sc, t->idx);
}
3189
3190 static int
3191 del_filter(struct adapter *sc, struct t4_filter *t)
3192 {
3193         unsigned int nfilters;
3194         struct filter_entry *f;
3195
3196         ADAPTER_LOCK_ASSERT_OWNED(sc);
3197
3198         if (IS_BUSY(sc))
3199                 return (EAGAIN);
3200
3201         nfilters = sc->tids.nftids;
3202
3203         if (nfilters == 0)
3204                 return (ENOTSUP);
3205
3206         if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
3207             t->idx >= nfilters)
3208                 return (EINVAL);
3209
3210         if (!(sc->flags & FULL_INIT_DONE))
3211                 return (EAGAIN);
3212
3213         f = &sc->tids.ftid_tab[t->idx];
3214
3215         if (f->pending)
3216                 return (EBUSY);
3217         if (f->locked)
3218                 return (EPERM);
3219
3220         if (f->valid) {
3221                 t->fs = f->fs;  /* extra info for the caller */
3222                 return del_filter_wr(sc, t->idx);
3223         }
3224
3225         return (0);
3226 }
3227
3228 static void
3229 clear_filter(struct filter_entry *f)
3230 {
3231         if (f->l2t)
3232                 t4_l2t_release(f->l2t);
3233
3234         bzero(f, sizeof (*f));
3235 }
3236
3237 static int
3238 set_filter_wr(struct adapter *sc, int fidx)
3239 {
3240         int rc;
3241         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
3242         struct mbuf *m;
3243         struct fw_filter_wr *fwr;
3244         unsigned int ftid;
3245
3246         ADAPTER_LOCK_ASSERT_OWNED(sc);
3247
3248         if (f->fs.newdmac || f->fs.newvlan) {
3249                 /* This filter needs an L2T entry; allocate one. */
3250                 f->l2t = t4_l2t_alloc_switching(sc->l2t);
3251                 if (f->l2t == NULL)
3252                         return (EAGAIN);
3253                 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
3254                     f->fs.dmac)) {
3255                         t4_l2t_release(f->l2t);
3256                         f->l2t = NULL;
3257                         return (ENOMEM);
3258                 }
3259         }
3260
3261         ftid = sc->tids.ftid_base + fidx;
3262
3263         m = m_gethdr(M_NOWAIT, MT_DATA);
3264         if (m == NULL)
3265                 return (ENOMEM);
3266
3267         fwr = mtod(m, struct fw_filter_wr *);
3268         m->m_len = m->m_pkthdr.len = sizeof(*fwr);
3269         bzero(fwr, sizeof (*fwr));
3270
3271         fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
3272         fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
3273         fwr->tid_to_iq =
3274             htobe32(V_FW_FILTER_WR_TID(ftid) |
3275                 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
3276                 V_FW_FILTER_WR_NOREPLY(0) |
3277                 V_FW_FILTER_WR_IQ(f->fs.iq));
3278         fwr->del_filter_to_l2tix =
3279             htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
3280                 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
3281                 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
3282                 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
3283                 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
3284                 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
3285                 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
3286                 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
3287                 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
3288                     f->fs.newvlan == VLAN_REWRITE) |
3289                 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
3290                     f->fs.newvlan == VLAN_REWRITE) |
3291                 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
3292                 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
3293                 V_FW_FILTER_WR_PRIO(f->fs.prio) |
3294                 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
3295         fwr->ethtype = htobe16(f->fs.val.ethtype);
3296         fwr->ethtypem = htobe16(f->fs.mask.ethtype);
3297         fwr->frag_to_ovlan_vldm =
3298             (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
3299                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
3300                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
3301                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
3302                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
3303                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
3304         fwr->smac_sel = 0;
3305         fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
3306             V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.intrq[0].abs_id));
3307         fwr->maci_to_matchtypem =
3308             htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
3309                 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
3310                 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
3311                 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
3312                 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
3313                 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
3314                 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
3315                 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
3316         fwr->ptcl = f->fs.val.proto;
3317         fwr->ptclm = f->fs.mask.proto;
3318         fwr->ttyp = f->fs.val.tos;
3319         fwr->ttypm = f->fs.mask.tos;
3320         fwr->ivlan = htobe16(f->fs.val.ivlan);
3321         fwr->ivlanm = htobe16(f->fs.mask.ivlan);
3322         fwr->ovlan = htobe16(f->fs.val.ovlan);
3323         fwr->ovlanm = htobe16(f->fs.mask.ovlan);
3324         bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
3325         bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
3326         bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
3327         bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
3328         fwr->lp = htobe16(f->fs.val.dport);
3329         fwr->lpm = htobe16(f->fs.mask.dport);
3330         fwr->fp = htobe16(f->fs.val.sport);
3331         fwr->fpm = htobe16(f->fs.mask.sport);
3332         if (f->fs.newsmac)
3333                 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
3334
3335         f->pending = 1;
3336         sc->tids.ftids_in_use++;
3337         rc = t4_mgmt_tx(sc, m);
3338         if (rc != 0) {
3339                 sc->tids.ftids_in_use--;
3340                 m_freem(m);
3341                 clear_filter(f);
3342         }
3343         return (rc);
3344 }
3345
3346 static int
3347 del_filter_wr(struct adapter *sc, int fidx)
3348 {
3349         struct filter_entry *f = &sc->tids.ftid_tab[fidx];
3350         struct mbuf *m;
3351         struct fw_filter_wr *fwr;
3352         unsigned int rc, ftid;
3353
3354         ADAPTER_LOCK_ASSERT_OWNED(sc);
3355
3356         ftid = sc->tids.ftid_base + fidx;
3357
3358         m = m_gethdr(M_NOWAIT, MT_DATA);
3359         if (m == NULL)
3360                 return (ENOMEM);
3361
3362         fwr = mtod(m, struct fw_filter_wr *);
3363         m->m_len = m->m_pkthdr.len = sizeof(*fwr);
3364         bzero(fwr, sizeof (*fwr));
3365
3366         t4_mk_filtdelwr(ftid, fwr, sc->sge.intrq[0].abs_id);
3367
3368         f->pending = 1;
3369         rc = t4_mgmt_tx(sc, m);
3370         if (rc != 0) {
3371                 f->pending = 0;
3372                 m_freem(m);
3373         }
3374         return (rc);
3375 }
3376
3377 /* XXX move intr handlers to main.c and make this static */
3378 void
3379 filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl)
3380 {
3381         unsigned int idx = GET_TID(rpl);
3382
3383         if (idx >= sc->tids.ftid_base &&
3384             (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
3385                 unsigned int rc = G_COOKIE(rpl->cookie);
3386                 struct filter_entry *f = &sc->tids.ftid_tab[idx];
3387
3388                 if (rc == FW_FILTER_WR_FLT_DELETED) {
3389                         /*
3390                          * Clear the filter when we get confirmation from the
3391                          * hardware that the filter has been deleted.
3392                          */
3393                         clear_filter(f);
3394                         sc->tids.ftids_in_use--;
3395                 } else if (rc == FW_FILTER_WR_SMT_TBL_FULL) {
3396                         device_printf(sc->dev,
3397                             "filter %u setup failed due to full SMT\n", idx);
3398                         clear_filter(f);
3399                         sc->tids.ftids_in_use--;
3400                 } else if (rc == FW_FILTER_WR_FLT_ADDED) {
3401                         f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
3402                         f->pending = 0;  /* asynchronous setup completed */
3403                         f->valid = 1;
3404                 } else {
3405                         /*
3406                          * Something went wrong.  Issue a warning about the
3407                          * problem and clear everything out.
3408                          */
3409                         device_printf(sc->dev,
3410                             "filter %u setup failed with error %u\n", idx, rc);
3411                         clear_filter(f);
3412                         sc->tids.ftids_in_use--;
3413                 }
3414         }
3415 }
3416
3417 static int
3418 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
3419 {
3420         int rc = EINVAL;
3421
3422         if (cntxt->cid > M_CTXTQID)
3423                 return (rc);
3424
3425         if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
3426             cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
3427                 return (rc);
3428
3429         if (sc->flags & FW_OK) {
3430                 ADAPTER_LOCK(sc);       /* Avoid parallel t4_wr_mbox */
3431                 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
3432                     &cntxt->data[0]);
3433                 ADAPTER_UNLOCK(sc);
3434         }
3435
3436         if (rc != 0) {
3437                 /* Read via firmware failed or wasn't even attempted */
3438
3439                 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
3440                     &cntxt->data[0]);
3441         }
3442
3443         return (rc);
3444 }
3445
3446 int
3447 t4_os_find_pci_capability(struct adapter *sc, int cap)
3448 {
3449         int i;
3450
3451         return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
3452 }
3453
3454 int
3455 t4_os_pci_save_state(struct adapter *sc)
3456 {
3457         device_t dev;
3458         struct pci_devinfo *dinfo;
3459
3460         dev = sc->dev;
3461         dinfo = device_get_ivars(dev);
3462
3463         pci_cfg_save(dev, dinfo, 0);
3464         return (0);
3465 }
3466
3467 int
3468 t4_os_pci_restore_state(struct adapter *sc)
3469 {
3470         device_t dev;
3471         struct pci_devinfo *dinfo;
3472
3473         dev = sc->dev;
3474         dinfo = device_get_ivars(dev);
3475
3476         pci_cfg_restore(dev, dinfo);
3477         return (0);
3478 }
3479
3480 void
3481 t4_os_portmod_changed(const struct adapter *sc, int idx)
3482 {
3483         struct port_info *pi = sc->port[idx];
3484         static const char *mod_str[] = {
3485                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
3486         };
3487
3488         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
3489                 if_printf(pi->ifp, "transceiver unplugged.\n");
3490         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
3491                 if_printf(pi->ifp, "unknown transceiver inserted.\n");
3492         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
3493                 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
3494         else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) {
3495                 if_printf(pi->ifp, "%s transceiver inserted.\n",
3496                     mod_str[pi->mod_type]);
3497         } else {
3498                 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
3499                     pi->mod_type);
3500         }
3501 }
3502
3503 void
3504 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
3505 {
3506         struct port_info *pi = sc->port[idx];
3507         struct ifnet *ifp = pi->ifp;
3508
3509         if (link_stat) {
3510                 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
3511                 if_link_state_change(ifp, LINK_STATE_UP);
3512         } else
3513                 if_link_state_change(ifp, LINK_STATE_DOWN);
3514 }
3515
/* cdev open: nothing to set up, the node is always usable. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
        return (0);
}
3521
/* cdev close: nothing to tear down. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
        return (0);
}
3527
3528 static int
3529 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
3530     struct thread *td)
3531 {
3532         int rc;
3533         struct adapter *sc = dev->si_drv1;
3534
3535         rc = priv_check(td, PRIV_DRIVER);
3536         if (rc != 0)
3537                 return (rc);
3538
3539         switch (cmd) {
3540         case CHELSIO_T4_GETREG: {
3541                 struct t4_reg *edata = (struct t4_reg *)data;
3542
3543                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
3544                         return (EFAULT);
3545
3546                 if (edata->size == 4)
3547                         edata->val = t4_read_reg(sc, edata->addr);
3548                 else if (edata->size == 8)
3549                         edata->val = t4_read_reg64(sc, edata->addr);
3550                 else
3551                         return (EINVAL);
3552
3553                 break;
3554         }
3555         case CHELSIO_T4_SETREG: {
3556                 struct t4_reg *edata = (struct t4_reg *)data;
3557
3558                 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
3559                         return (EFAULT);
3560
3561                 if (edata->size == 4) {
3562                         if (edata->val & 0xffffffff00000000)
3563                                 return (EINVAL);
3564                         t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
3565                 } else if (edata->size == 8)
3566                         t4_write_reg64(sc, edata->addr, edata->val);
3567                 else
3568                         return (EINVAL);
3569                 break;
3570         }
3571         case CHELSIO_T4_REGDUMP: {
3572                 struct t4_regdump *regs = (struct t4_regdump *)data;
3573                 int reglen = T4_REGDUMP_SIZE;
3574                 uint8_t *buf;
3575
3576                 if (regs->len < reglen) {
3577                         regs->len = reglen; /* hint to the caller */
3578                         return (ENOBUFS);
3579                 }
3580
3581                 regs->len = reglen;
3582                 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
3583                 t4_get_regs(sc, regs, buf);
3584                 rc = copyout(buf, regs->data, reglen);
3585                 free(buf, M_CXGBE);
3586                 break;
3587         }
3588         case CHELSIO_T4_GET_FILTER_MODE:
3589                 rc = get_filter_mode(sc, (uint32_t *)data);
3590                 break;
3591         case CHELSIO_T4_SET_FILTER_MODE:
3592                 rc = set_filter_mode(sc, *(uint32_t *)data);
3593                 break;
3594         case CHELSIO_T4_GET_FILTER:
3595                 ADAPTER_LOCK(sc);
3596                 rc = get_filter(sc, (struct t4_filter *)data);
3597                 ADAPTER_UNLOCK(sc);
3598                 break;
3599         case CHELSIO_T4_SET_FILTER:
3600                 ADAPTER_LOCK(sc);
3601                 rc = set_filter(sc, (struct t4_filter *)data);
3602                 ADAPTER_UNLOCK(sc);
3603                 break;
3604         case CHELSIO_T4_DEL_FILTER:
3605                 ADAPTER_LOCK(sc);
3606                 rc = del_filter(sc, (struct t4_filter *)data);
3607                 ADAPTER_UNLOCK(sc);
3608                 break;
3609         case CHELSIO_T4_GET_SGE_CONTEXT:
3610                 rc = get_sge_context(sc, (struct t4_sge_context *)data);
3611                 break;
3612         default:
3613                 rc = EINVAL;
3614         }
3615
3616         return (rc);
3617 }
3618
3619 static int
3620 t4_mod_event(module_t mod, int cmd, void *arg)
3621 {
3622
3623         if (cmd == MOD_LOAD)
3624                 t4_sge_modload();
3625
3626         return (0);
3627 }
3628
/* devclass storage for the nexus (t4nex) and port (cxgbe) drivers. */
static devclass_t t4_devclass;
static devclass_t cxgbe_devclass;

/* t4nex attaches to the PCI bus; t4_mod_event runs SGE setup at MOD_LOAD. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
MODULE_VERSION(t4nex, 1);

/* cxgbe (one instance per port) attaches under the t4nex nexus device. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);