/**************************************************************************

SPDX-License-Identifier: BSD-2-Clause-FreeBSD

Copyright (c) 2007-2009, Chelsio Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.

2. Neither the name of the Chelsio Corporation nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/pciio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/ioccom.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/netdump/netdump.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <cxgb_include.h>
static int cxgb_setup_interrupts(adapter_t *);
static void cxgb_teardown_interrupts(adapter_t *);
static void cxgb_init(void *);
static int cxgb_init_locked(struct port_info *);
static int cxgb_uninit_locked(struct port_info *);
static int cxgb_uninit_synchronized(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgb_media_change(struct ifnet *);
static int cxgb_ifm_type(int);
static void cxgb_build_medialist(struct port_info *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static uint64_t cxgb_get_counter(struct ifnet *, ift_counter);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_tick_handler(void *, int);
static void cxgb_tick(void *);
static void link_check_callout(void *);
static void check_link_status(void *, int);
static void setup_rss(adapter_t *sc);
static int alloc_filters(struct adapter *);
static int setup_hw_filters(struct adapter *);
static int set_filter(struct adapter *, int, const struct filter_info *);
static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
    unsigned int, u64, u64);
static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
    unsigned int, u64, u64);

static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
static int cxgb_get_regs_len(void);
static void touch_bars(device_t dev);
static void cxgb_update_mac_settings(struct port_info *p);
static int toe_capability(struct port_info *, int);
static device_method_t cxgb_controller_methods[] = {
        DEVMETHOD(device_probe,         cxgb_controller_probe),
        DEVMETHOD(device_attach,        cxgb_controller_attach),
        DEVMETHOD(device_detach,        cxgb_controller_detach),

        DEVMETHOD_END
};

static driver_t cxgb_controller_driver = {
        "cxgbc",
        cxgb_controller_methods,
        sizeof(struct adapter)
};

static int cxgbc_mod_event(module_t, int, void *);
static devclass_t cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass,
    cxgbc_mod_event, 0);
MODULE_VERSION(cxgbc, 1);
MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);
/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

static device_method_t cxgb_port_methods[] = {
        DEVMETHOD(device_probe,         cxgb_port_probe),
        DEVMETHOD(device_attach,        cxgb_port_attach),
        DEVMETHOD(device_detach,        cxgb_port_detach),

        DEVMETHOD_END
};

static driver_t cxgb_port_driver = {
        "cxgb",
        cxgb_port_methods,
        sizeof(struct port_info)
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

static struct cdevsw cxgb_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       cxgb_extension_open,
        .d_close =      cxgb_extension_close,
        .d_ioctl =      cxgb_extension_ioctl,
};

static devclass_t cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
MODULE_VERSION(cxgb, 1);

NETDUMP_DEFINE(cxgb);
static struct mtx t3_list_lock;
static SLIST_HEAD(, adapter) t3_list;

static struct mtx t3_uld_list_lock;
static SLIST_HEAD(, uld_info) t3_uld_list;
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");
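/*
 * Usage note: as CTLFLAG_RDTUN parameters, these are boot-time tunables
 * (e.g. hw.cxgb.msi_allowed="1" in /boot/loader.conf) and are read-only
 * via sysctl(8) at runtime.
 */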
/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
 */
static int multiq = 1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
    "use min(ncpus/ports, 8) queue-sets per port");

/*
 * By default the driver will not update the firmware unless
 * it was compiled against a newer version.
 */
static int force_fw_update = 0;
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
    "update firmware even if up to date");

int cxgb_use_16k_clusters = -1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue");

static int nfilters = -1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
    &nfilters, 0, "max number of entries in the filter table");
enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,

        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_JUMBO_ENTRIES = 32
};
u32 report_filter_id:1;

enum { FILTER_NO_VLAN_PRI = 7 };

#define EEPROM_MAGIC 0x38E2F10C

#define PORT_MASK ((1 << MAX_NPORTS) - 1)
/* Table for probing the cards.  The desc field isn't actually used */
static struct cxgb_ident {
        uint16_t        vendor;
        uint16_t        device;
        int             index;
        char            *desc;
} cxgb_identifiers[] = {
        {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
        {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
        {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
        {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
        {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
        {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
        {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
        {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
        {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
        {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
        {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
        {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
        {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
        {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
        {0, 0, 0, NULL}
};

static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
static char
t3rev2char(struct adapter *adapter)
{
        char rev = 'z';

        switch (adapter->params.rev) {
static struct cxgb_ident *
cxgb_get_ident(device_t dev)
{
        struct cxgb_ident *id;

        for (id = cxgb_identifiers; id->desc != NULL; id++) {
                if ((id->vendor == pci_get_vendor(dev)) &&
                    (id->device == pci_get_device(dev))) {
                        return (id);
                }
        }
        return (NULL);
}
static const struct adapter_info *
cxgb_get_adapter_info(device_t dev)
{
        struct cxgb_ident *id;
        const struct adapter_info *ai;

        id = cxgb_get_ident(dev);
        if (id == NULL)
                return (NULL);

        ai = t3_get_adapter_info(id->index);
        return (ai);
}
static int
cxgb_controller_probe(device_t dev)
{
        const struct adapter_info *ai;
        char *ports, buf[80];
        int nports;

        ai = cxgb_get_adapter_info(dev);
        if (ai == NULL)
                return (ENXIO);

        nports = ai->nports0 + ai->nports1;
        if (nports == 1)
                ports = "port";
        else
                ports = "ports";

        snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
        device_set_desc_copy(dev, buf);
        return (BUS_PROBE_DEFAULT);
}
#define FW_FNAME "cxgb_t3fw"
#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"

static int
upgrade_fw(adapter_t *sc)
{
        const struct firmware *fw;
        int status;
        u32 vers;

        if ((fw = firmware_get(FW_FNAME)) == NULL) {
                device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
                return (ENOENT);
        }

        device_printf(sc->dev, "installing firmware on card\n");
        status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);

        if (status != 0) {
                device_printf(sc->dev, "failed to install firmware: %d\n",
                    status);
        } else {
                t3_get_fw_version(sc, &vers);
                snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
                    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
                    G_FW_VERSION_MICRO(vers));
        }

        firmware_put(fw, FIRMWARE_UNLOAD);
        return (status);
}
/*
 * The cxgb_controller_attach function is responsible for the initial
 * bringup of the device.  Its responsibilities include:
 *
 *  1. Determine if the device supports MSI or MSI-X.
 *  2. Allocate bus resources so that we can access the Base Address Register.
 *  3. Create and initialize mutexes for the controller and its control
 *     logic such as SGE and MDIO.
 *  4. Call hardware specific setup routine for the adapter as a whole.
 *  5. Allocate the BAR for doing MSI-X.
 *  6. Setup the line interrupt iff MSI-X is not supported.
 *  7. Create the driver's taskq.
 *  8. Start one task queue service thread.
 *  9. Check if the firmware and SRAM are up-to-date.  They will be
 *     auto-updated later (before FULL_INIT_DONE), if required.
 * 10. Create a child device for each MAC (port).
 * 11. Initialize T3 private state.
 * 12. Trigger the LED.
 * 13. Setup offload iff supported.
 * 14. Reset/restart the tick callout.
 *
 * NOTE: Any modification or deviation from this list MUST be reflected in
 * the above comment.  Failure to do so will result in problems on various
 * error conditions including link flapping.
 */
static int
cxgb_controller_attach(device_t dev)
{
        device_t child;
        const struct adapter_info *ai;
        struct adapter *sc;
        int i, error = 0;
        uint32_t vers;
        int port_qsets = 1;
        int reg, msi_needed;
        char buf[80];

        sc = device_get_softc(dev);
        sc->dev = dev;
        ai = cxgb_get_adapter_info(dev);

        snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
            device_get_unit(dev));
        ADAPTER_LOCK_INIT(sc, sc->lockbuf);

        snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
            device_get_unit(dev));
        snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
            device_get_unit(dev));
        snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
            device_get_unit(dev));

        MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
        MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
        MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);

        mtx_lock(&t3_list_lock);
        SLIST_INSERT_HEAD(&t3_list, sc, link);
        mtx_unlock(&t3_list_lock);

        /* find the PCIe link width and set max read request to 4KB */
        if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
                uint16_t lnk;

                lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2);
                sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4;
                if (sc->link_width < 8 &&
                    (ai->caps & SUPPORTED_10000baseT_Full)) {
                        device_printf(sc->dev,
                            "PCIe x%d Link, expect reduced performance\n",
                            sc->link_width);
                }

                pci_set_max_read_req(dev, 4096);
        }

        touch_bars(dev);
        pci_enable_busmaster(dev);

        /*
         * Allocate the registers and make them available to the driver.
         * The registers that we care about for NIC mode are in BAR 0
         */
        sc->regs_rid = PCIR_BAR(0);
        if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE)) == NULL) {
                device_printf(dev, "Cannot allocate BAR region 0\n");
                error = ENXIO;
                goto out;
        }

        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);

        for (i = 0; i < MAX_NPORTS; i++)
                sc->port[i].adapter = sc;

        if (t3_prep_adapter(sc, ai, 1) < 0) {
                printf("prep adapter failed\n");
                error = ENODEV;
                goto out;
        }
        sc->udbs_rid = PCIR_BAR(2);
        if (is_offload(sc) &&
            ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->udbs_rid, RF_ACTIVE)) == NULL)) {
                device_printf(dev, "Cannot allocate BAR region 1\n");
                error = ENXIO;
                goto out;
        }

        /* Allocate the BAR for doing MSI-X.  If it succeeds, try to allocate
         * enough messages for the queue sets.  If that fails, try falling
         * back to MSI.  If that fails, then try falling back to the legacy
         * interrupt pin model.
         */
        sc->msix_regs_rid = 0x20;
        if ((msi_allowed >= 2) &&
            (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {

                if (multiq)
                        port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus);
                msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1;

                if (pci_msix_count(dev) == 0 ||
                    (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 ||
                    sc->msi_count != msi_needed) {
                        device_printf(dev, "alloc msix failed - "
                            "msi_count=%d, msi_needed=%d, err=%d; "
                            "will try MSI\n", sc->msi_count,
                            msi_needed, error);
                        sc->msi_count = 0;
                        port_qsets = 1;
                        pci_release_msi(dev);
                        bus_release_resource(dev, SYS_RES_MEMORY,
                            sc->msix_regs_rid, sc->msix_regs_res);
                        sc->msix_regs_res = NULL;
                } else {
                        sc->flags |= USING_MSIX;
                        sc->cxgb_intr = cxgb_async_intr;
                        device_printf(dev,
                            "using MSI-X interrupts (%u vectors)\n",
                            sc->msi_count);
                }
        }

        if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
                sc->msi_count = 1;
                if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) {
                        device_printf(dev, "alloc msi failed - "
                            "err=%d; will try INTx\n", error);
                        sc->msi_count = 0;
                        port_qsets = 1;
                        pci_release_msi(dev);
                } else {
                        sc->flags |= USING_MSI;
                        sc->cxgb_intr = t3_intr_msi;
                        device_printf(dev, "using MSI interrupts\n");
                }
        }
        if (sc->msi_count == 0) {
                device_printf(dev, "using line interrupts\n");
                sc->cxgb_intr = t3b_intr;
        }

        /* Create a private taskqueue thread for handling driver events */
        sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
            taskqueue_thread_enqueue, &sc->tq);
        if (sc->tq == NULL) {
                device_printf(dev, "failed to allocate controller task queue\n");
                goto out;
        }

        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(dev));
        TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);

        /* Create a periodic callout for checking adapter status */
        callout_init(&sc->cxgb_tick_ch, 1);

        if (t3_check_fw_version(sc) < 0 || force_fw_update) {
                /*
                 * Warn user that a firmware update will be attempted in init.
                 */
                device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
                    FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
                sc->flags &= ~FW_UPTODATE;
        } else {
                sc->flags |= FW_UPTODATE;
        }
        if (t3_check_tpsram_version(sc) < 0) {
                /*
                 * Warn user that a firmware update will be attempted in init.
                 */
                device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
                    t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
                sc->flags &= ~TPS_UPTODATE;
        } else {
                sc->flags |= TPS_UPTODATE;
        }

        /*
         * Create a child device for each MAC.  The ethernet attachment
         * will be done in these children.
         */
        for (i = 0; i < (sc)->params.nports; i++) {
                struct port_info *pi;

                if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
                        device_printf(dev, "failed to add child port\n");
                        error = EINVAL;
                        goto out;
                }
                pi = &sc->port[i];
                pi->adapter = sc;
                pi->nqsets = port_qsets;
                pi->first_qset = i*port_qsets;
                pi->port_id = i;
                pi->tx_chan = i >= ai->nports0;
                pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
                sc->rxpkt_map[pi->txpkt_intf] = i;
                sc->port[i].tx_chan = i >= ai->nports0;
                sc->portdev[i] = child;
                device_set_softc(child, pi);
        }
        if ((error = bus_generic_attach(dev)) != 0)
                goto out;

        /* initialize sge private state */
        t3_sge_init_adapter(sc);

        error = t3_get_fw_version(sc, &vers);
        if (error)
                goto out;

        snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
            G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
            G_FW_VERSION_MICRO(vers));

        snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
            ai->desc, is_offload(sc) ? "R" : "",
            sc->params.vpd.ec, sc->params.vpd.sn);
        device_set_desc_copy(dev, buf);

        snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
            sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
            sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]);

        device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
        callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
        t3_add_attach_sysctls(sc);

        for (i = 0; i < NUM_CPL_HANDLERS; i++)
                sc->cpl_handler[i] = cpl_not_handled;

        error = cxgb_setup_interrupts(sc);
/*
 * The cxgb_controller_detach routine is called when the device is
 * unloaded from the system.
 */
static int
cxgb_controller_detach(device_t dev)
{
        struct adapter *sc;

        sc = device_get_softc(dev);

        cxgb_free(sc);

        return (0);
}
/*
 * cxgb_free() is called by the cxgb_controller_detach() routine
 * to tear down the structures that were built up in
 * cxgb_controller_attach(), and should be the final piece of work
 * done when fully unloading the driver.
 *
 * 1. Shutting down the threads started by the cxgb_controller_attach()
 *    routine.
 * 2. Stopping the lower level device and all callouts (cxgb_down_locked()).
 * 3. Detaching all of the port devices created during the
 *    cxgb_controller_attach() routine.
 * 4. Removing the device children created via cxgb_controller_attach().
 * 5. Releasing PCI resources associated with the device.
 * 6. Turning off the offload support, iff it was turned on.
 * 7. Destroying the mutexes created in cxgb_controller_attach().
 */
static void
cxgb_free(struct adapter *sc)
{
        int i, nqsets = 0;

        ADAPTER_LOCK(sc);
        sc->flags |= CXGB_SHUTDOWN;
        ADAPTER_UNLOCK(sc);

        /*
         * Make sure all child devices are gone.
         */
        bus_generic_detach(sc->dev);
        for (i = 0; i < (sc)->params.nports; i++) {
                if (sc->portdev[i] &&
                    device_delete_child(sc->dev, sc->portdev[i]) != 0)
                        device_printf(sc->dev, "failed to delete child port\n");
                nqsets += sc->port[i].nqsets;
        }

        /*
         * At this point, it is as if cxgb_port_detach has run on all ports, and
         * cxgb_down has run on the adapter.  All interrupts have been silenced,
         * all open devices have been closed.
         */
        KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
            __func__, sc->open_device_map));
        for (i = 0; i < sc->params.nports; i++) {
                KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
                    __func__, i));
        }

        /*
         * Finish off the adapter's callouts.
         */
        callout_drain(&sc->cxgb_tick_ch);
        callout_drain(&sc->sge_timer_ch);

        /*
         * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
         * sysctls are cleaned up by the kernel linker.
         */
        if (sc->flags & FULL_INIT_DONE) {
                t3_free_sge_resources(sc, nqsets);
                sc->flags &= ~FULL_INIT_DONE;
        }

        /*
         * Release all interrupt resources.
         */
        cxgb_teardown_interrupts(sc);
        if (sc->flags & (USING_MSI | USING_MSIX)) {
                device_printf(sc->dev, "releasing msi message(s)\n");
                pci_release_msi(sc->dev);
        } else {
                device_printf(sc->dev, "no msi message to release\n");
        }

        if (sc->msix_regs_res != NULL) {
                bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
                    sc->msix_regs_res);
        }

        /*
         * Free the adapter's taskqueue.
         */
        if (sc->tq != NULL) {
                taskqueue_free(sc->tq);
                sc->tq = NULL;
        }

        free(sc->filters, M_DEVBUF);

        if (sc->udbs_res != NULL)
                bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->regs_res != NULL)
                bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        MTX_DESTROY(&sc->mdio_lock);
        MTX_DESTROY(&sc->sge.reg_lock);
        MTX_DESTROY(&sc->elmer_lock);
        mtx_lock(&t3_list_lock);
        SLIST_REMOVE(&t3_list, sc, adapter, link);
        mtx_unlock(&t3_list_lock);
        ADAPTER_LOCK_DEINIT(sc);
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        u_int ntxq = SGE_TXQ_PER_SET;

        if ((err = t3_sge_alloc(sc)) != 0) {
                device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
                return (err);
        }

        if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
                irq_idx = -1;

        for (i = 0; i < (sc)->params.nports; i++) {
                struct port_info *pi = &sc->port[i];

                for (j = 0; j < pi->nqsets; j++, qset_idx++) {
                        err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
                            (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
                            &sc->params.sge.qset[qset_idx], ntxq, pi);
                        if (err) {
                                t3_free_sge_resources(sc, qset_idx);
                                device_printf(sc->dev,
                                    "t3_sge_alloc_qset failed with %d\n", err);
                                return (err);
                        }
                }
        }

        return (0);
}
static void
cxgb_teardown_interrupts(adapter_t *sc)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                if (sc->msix_intr_tag[i] == NULL) {

                        /* Should have been setup fully or not at all */
                        KASSERT(sc->msix_irq_res[i] == NULL &&
                            sc->msix_irq_rid[i] == 0,
                            ("%s: half-done interrupt (%d).", __func__, i));

                        continue;
                }

                bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
                    sc->msix_intr_tag[i]);
                bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
                    sc->msix_irq_res[i]);

                sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
                sc->msix_irq_rid[i] = 0;
        }

        if (sc->intr_tag) {
                KASSERT(sc->irq_res != NULL,
                    ("%s: half-done interrupt.", __func__));

                bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
                bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
                    sc->irq_res);

                sc->irq_res = sc->intr_tag = NULL;
                sc->irq_rid = 0;
        }
}
static int
cxgb_setup_interrupts(adapter_t *sc)
{
        struct resource *res;
        void *tag;
        int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);

        sc->irq_rid = intr_flag ? 1 : 0;
        sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->irq_res == NULL) {
                device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
                    intr_flag, sc->irq_rid);
                err = EINVAL;
                sc->irq_rid = 0;
        } else {
                err = bus_setup_intr(sc->dev, sc->irq_res,
                    INTR_MPSAFE | INTR_TYPE_NET, NULL,
                    sc->cxgb_intr, sc, &sc->intr_tag);

                if (err) {
                        device_printf(sc->dev,
                            "Cannot set up interrupt (%x, %u, %d)\n",
                            intr_flag, sc->irq_rid, err);
                        bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
                            sc->irq_res);
                        sc->irq_res = sc->intr_tag = NULL;
                        sc->irq_rid = 0;
                }
        }

        /* That's all for INTx or MSI */
        if (!(intr_flag & USING_MSIX) || err)
                return (err);

        bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
        for (i = 0; i < sc->msi_count - 1; i++) {
                rid = i + 2;
                res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
                    RF_SHAREABLE | RF_ACTIVE);
                if (res == NULL) {
                        device_printf(sc->dev, "Cannot allocate interrupt "
                            "for message %d\n", rid);
                        err = EINVAL;
                        break;
                }

                err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
                    NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
                if (err) {
                        device_printf(sc->dev, "Cannot set up interrupt "
                            "for message %d (%d)\n", rid, err);
                        bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
                        break;
                }

                sc->msix_irq_rid[i] = rid;
                sc->msix_irq_res[i] = res;
                sc->msix_intr_tag[i] = tag;
                bus_describe_intr(sc->dev, res, tag, "qs%d", i);
        }

        if (err)
                cxgb_teardown_interrupts(sc);

        return (err);
}
static int
cxgb_port_probe(device_t dev)
{
        struct port_info *p;
        char buf[80];
        const char *desc;

        p = device_get_softc(dev);
        desc = p->phy.desc;
        snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
        device_set_desc_copy(dev, buf);
        return (BUS_PROBE_DEFAULT);
}

static int
cxgb_makedev(struct port_info *pi)
{

        pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
            UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));

        if (pi->port_cdev == NULL)
                return (ENOMEM);

        pi->port_cdev->si_drv1 = (void *)pi;

        return (0);
}
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define CXGB_CAP_ENABLE CXGB_CAP
static int
cxgb_port_attach(device_t dev)
{
        struct port_info *p;
        struct ifnet *ifp;
        int err;
        struct adapter *sc;

        p = device_get_softc(dev);
        sc = p->adapter;
        snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
            device_get_unit(device_get_parent(dev)), p->port_id);
        PORT_LOCK_INIT(p, p->lockbuf);

        callout_init(&p->link_check_ch, 1);
        TASK_INIT(&p->link_check_task, 0, check_link_status, p);

        /* Allocate an ifnet object and set it up */
        ifp = p->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_init = cxgb_init;
        ifp->if_softc = p;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = cxgb_ioctl;
        ifp->if_transmit = cxgb_transmit;
        ifp->if_qflush = cxgb_qflush;
        ifp->if_get_counter = cxgb_get_counter;

        ifp->if_capabilities = CXGB_CAP;
        if (is_offload(sc))
                ifp->if_capabilities |= IFCAP_TOE4;
        ifp->if_capenable = CXGB_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        /*
         * Disable TSO on 4-port - it isn't supported by the firmware.
         */
        if (sc->params.nports > 2) {
                ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
                ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
                ifp->if_hwassist &= ~CSUM_TSO;
        }

        ether_ifattach(ifp, p->hw_addr);

        /* Attach driver netdump methods. */
        NETDUMP_SET(ifp, cxgb);

#ifdef DEFAULT_JUMBO
        if (sc->params.nports <= 2)
                ifp->if_mtu = ETHERMTU_JUMBO;
#endif
        if ((err = cxgb_makedev(p)) != 0) {
                printf("makedev failed %d\n", err);
                return (err);
        }

        /* Create a list of media supported by this port */
        ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
            cxgb_media_status);
        cxgb_build_medialist(p);

        t3_sge_init_port(p);

        return (err);
}
/*
 * cxgb_port_detach() is called via the device_detach methods when
 * cxgb_free() calls the bus_generic_detach.  It is responsible for
 * removing the device from the view of the kernel, i.e. from all
 * interfaces lists etc.  This routine is only called when the driver is
 * being unloaded, not when the link goes down.
 */
static int
cxgb_port_detach(device_t dev)
{
        struct port_info *p;
        struct adapter *sc;
        int i;

        p = device_get_softc(dev);
        sc = p->adapter;

        /* Tell cxgb_ioctl and if_init that the port is going away */
        ADAPTER_LOCK(sc);
        SET_DOOMED(p);
        wakeup(&sc->flags);
        while (IS_BUSY(sc))
                mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0);
        SET_BUSY(sc);
        ADAPTER_UNLOCK(sc);

        if (p->port_cdev != NULL)
                destroy_dev(p->port_cdev);

        cxgb_uninit_synchronized(p);
        ether_ifdetach(p->ifp);

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_qset *qs = &sc->sge.qs[i];
                struct sge_txq *txq = &qs->txq[TXQ_ETH];

                callout_drain(&txq->txq_watchdog);
                callout_drain(&txq->txq_timer);
        }

        PORT_LOCK_DEINIT(p);
        if_free(p->ifp);
        p->ifp = NULL;

        ADAPTER_LOCK(sc);
        CLR_BUSY(sc);
        wakeup_one(&sc->flags);
        ADAPTER_UNLOCK(sc);
        return (0);
}
void
t3_fatal_err(struct adapter *sc)
{
        u_int fw_status[4];

        if (sc->flags & FULL_INIT_DONE) {
                t3_sge_stop(sc);
                t3_write_reg(sc, A_XGM_TX_CTRL, 0);
                t3_write_reg(sc, A_XGM_RX_CTRL, 0);
                t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
                t3_intr_disable(sc);
        }
        device_printf(sc->dev, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
                device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                    fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
}
int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
        device_t dev;
        struct pci_devinfo *dinfo;
        pcicfgregs *cfg;
        uint32_t status;
        uint8_t ptr;

        dev = sc->dev;
        dinfo = device_get_ivars(dev);
        cfg = &dinfo->cfg;

        status = pci_read_config(dev, PCIR_STATUS, 2);
        if (!(status & PCIM_STATUS_CAPPRESENT))
                return (0);

        switch (cfg->hdrtype & PCIM_HDRTYPE) {
        case 0:
        case 1:
                ptr = PCIR_CAP_PTR;
                break;
        case 2:
                ptr = PCIR_CAP_PTR_2;
                break;
        default:
                return (0);
        }
        ptr = pci_read_config(dev, ptr, 1);

        while (ptr != 0) {
                if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
                        return (ptr);
                ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
        }

        return (0);
}

int
t3_os_pci_save_state(struct adapter *sc)
{
        device_t dev;
        struct pci_devinfo *dinfo;

        dev = sc->dev;
        dinfo = device_get_ivars(dev);

        pci_cfg_save(dev, dinfo, 0);
        return (0);
}

int
t3_os_pci_restore_state(struct adapter *sc)
{
        device_t dev;
        struct pci_devinfo *dinfo;

        dev = sc->dev;
        dinfo = device_get_ivars(dev);

        pci_cfg_restore(dev, dinfo);
        return (0);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@sc: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_status: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
    int duplex, int fc, int mac_was_reset)
{
        struct port_info *pi = &adapter->port[port_id];
        struct ifnet *ifp = pi->ifp;

        /* no race with detach, so ifp should always be good */
        KASSERT(ifp, ("%s: if detached.", __func__));

        /* Reapply mac settings if they were lost due to a reset */
        if (mac_was_reset) {
                PORT_LOCK(pi);
                cxgb_update_mac_settings(pi);
                PORT_UNLOCK(pi);
        }

        if (link_status) {
                ifp->if_baudrate = IF_Mbps(speed);
                if_link_state_change(ifp, LINK_STATE_UP);
        } else
                if_link_state_change(ifp, LINK_STATE_DOWN);
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@phy: the PHY reporting the module change
 *	@mod_type: new module type
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
        };
        struct port_info *pi = &adap->port[port_id];
        int mod = pi->phy.modtype;

        if (mod != pi->media.ifm_cur->ifm_data)
                cxgb_build_medialist(pi);

        if (mod == phy_modtype_none)
                if_printf(pi->ifp, "PHY module unplugged\n");
        else {
                KASSERT(mod < ARRAY_SIZE(mod_str),
                    ("invalid PHY module type %d", mod));
                if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
        }
}
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

        /*
         * The ifnet might not be allocated before this gets called,
         * as this is called early on in attach by t3_prep_adapter;
         * save the address off in the port structure.
         */
        if (cxgb_debug)
                printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
        bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}
/*
 * Programs the XGMAC based on the settings in the ifnet.  These settings
 * include MTU, MAC address, mcast addresses, etc.
 */
static void
cxgb_update_mac_settings(struct port_info *p)
{
        struct ifnet *ifp = p->ifp;
        struct t3_rx_mode rm;
        struct cmac *mac = &p->mac;
        int mtu, hwtagging;

        PORT_LOCK_ASSERT_OWNED(p);

        bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);

        mtu = ifp->if_mtu;
        if (ifp->if_capenable & IFCAP_VLAN_MTU)
                mtu += ETHER_VLAN_ENCAP_LEN;

        hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;

        t3_mac_set_mtu(mac, mtu);
        t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
        t3_mac_set_address(mac, 0, p->hw_addr);
        t3_init_rx_mode(&rm, p);
        t3_mac_set_rx_mode(mac, &rm);
}
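/*
 * Poll the qset 0 response queue until at least @n management replies have
 * arrived on top of @init_cnt, giving up after a few polls.
 */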
static int
await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
    int n)
{
        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
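/*
 * Write every SMT, L2T and routing-table entry once and wait for all the
 * replies.  This initializes the parity of those TP memories so that later
 * parity errors are real errors.
 */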
static int
init_tp_parity(struct adapter *adap)
{
        int i;
        struct mbuf *m;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                m = m_gethdr(M_WAITOK, MT_DATA);
                req = mtod(m, struct cpl_smt_write_req *);
                m->m_len = m->m_pkthdr.len = sizeof(*req);
                memset(req, 0, sizeof(*req));
                req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, m);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                m = m_gethdr(M_WAITOK, MT_DATA);
                req = mtod(m, struct cpl_l2t_write_req *);
                m->m_len = m->m_pkthdr.len = sizeof(*req);
                memset(req, 0, sizeof(*req));
                req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, m);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                m = m_gethdr(M_WAITOK, MT_DATA);
                req = mtod(m, struct cpl_rte_write_req *);
                m->m_len = m->m_pkthdr.len = sizeof(*req);
                memset(req, 0, sizeof(*req));
                req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, m);
        }

        m = m_gethdr(M_WAITOK, MT_DATA);
        greq = mtod(m, struct cpl_set_tcb_field *);
        m->m_len = m->m_pkthdr.len = sizeof(*greq);
        memset(greq, 0, sizeof(*greq));
        greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = htobe64(1);
        t3_mgmt_tx(adap, m);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return (i);
}
/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
        int i;
        u_int nq[2];
        uint8_t cpus[SGE_QSETS + 1];
        uint16_t rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff;

        nq[0] = nq[1] = 0;
        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                nq[pi->tx_chan] += pi->nqsets;
        }
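        /*
         * Each half of the RSS table steers one of the two channels:
         * spread that channel's entries round-robin over its qsets.
         */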
        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = nq[0] ? i % nq[0] : 0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
        }

        /* Calculate the reverse RSS map table */
        for (i = 0; i < SGE_QSETS; ++i)
                adap->rrss_map[i] = 0xff;
        for (i = 0; i < RSS_TABLE_SIZE; ++i)
                if (adap->rrss_map[rspq_map[i]] == 0xff)
                        adap->rrss_map[rspq_map[i]] = i;

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
            F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
            F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
            cpus, rspq_map);
}
static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
    int hi, int port)
{
        struct mbuf *m;
        struct mngt_pktsched_wr *req;

        m = m_gethdr(M_NOWAIT, MT_DATA);
        if (m) {
                req = mtod(m, struct mngt_pktsched_wr *);
                req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
                req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
                req->sched = sched;
                req->idx = qidx;
                req->min = lo;
                req->max = hi;
                req->binding = port;
                m->m_len = m->m_pkthdr.len = sizeof(*req);
                t3_mgmt_tx(adap, m);
        }
}
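/*
 * Bind each port's queue sets to that port's Tx channel scheduler via
 * management packet-scheduler commands.
 */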
static int
bind_qsets(adapter_t *sc)
{
        int i, j;

        for (i = 0; i < (sc)->params.nports; ++i) {
                const struct port_info *pi = adap2pinfo(sc, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
                            -1, pi->tx_chan);
                }
        }

        return (0);
}
static void
update_tpeeprom(struct adapter *adap)
{
        const struct firmware *tpeeprom;
        uint32_t version;
        unsigned int major, minor;
        int ret, len;
        char rev, name[32];

        t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);

        major = G_TP_VERSION_MAJOR(version);
        minor = G_TP_VERSION_MINOR(version);
        if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
                return;

        rev = t3rev2char(adap);
        snprintf(name, sizeof(name), TPEEPROM_NAME, rev);

        tpeeprom = firmware_get(name);
        if (tpeeprom == NULL) {
                device_printf(adap->dev,
                    "could not load TP EEPROM: unable to load %s\n",
                    name);
                return;
        }

        len = tpeeprom->datasize - 4;

        ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
        if (ret)
                goto release_tpeeprom;

        if (len != TP_SRAM_LEN) {
                device_printf(adap->dev,
                    "%s length is wrong len=%d expected=%d\n", name,
                    len, TP_SRAM_LEN);
                return;
        }

        ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
            TP_SRAM_OFFSET);

        if (!ret) {
                device_printf(adap->dev,
                    "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
                    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        } else
                device_printf(adap->dev,
                    "Protocol SRAM image update in EEPROM failed\n");

release_tpeeprom:
        firmware_put(tpeeprom, FIRMWARE_UNLOAD);

        return;
}
static int
update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        int ret;
        char rev, name[32];

        rev = t3rev2char(adap);
        snprintf(name, sizeof(name), TPSRAM_NAME, rev);

        update_tpeeprom(adap);

        tpsram = firmware_get(name);
        if (tpsram == NULL) {
                device_printf(adap->dev, "could not load TP SRAM\n");
                return (EINVAL);
        } else
                device_printf(adap->dev, "updating TP SRAM\n");

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret)
                device_printf(adap->dev, "loading protocol SRAM failed\n");

release_tpsram:
        firmware_put(tpsram, FIRMWARE_UNLOAD);

        return (ret);
}
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 */
static int
cxgb_up(struct adapter *sc)
{
        int err = 0;
        unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;

        KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
            __func__, sc->open_device_map));

        if ((sc->flags & FULL_INIT_DONE) == 0) {

                ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

                if ((sc->flags & FW_UPTODATE) == 0)
                        if ((err = upgrade_fw(sc)))
                                goto out;

                if ((sc->flags & TPS_UPTODATE) == 0)
                        if ((err = update_tpsram(sc)))
                                goto out;

                if (is_offload(sc) && nfilters != 0) {
                        sc->params.mc5.nservers = 0;

                        if (nfilters < 0)
                                sc->params.mc5.nfilters = mxf;
                        else
                                sc->params.mc5.nfilters = min(nfilters, mxf);
                }

                err = t3_init_hw(sc, 0);
                if (err)
                        goto out;

                t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(sc);
                if (err)
                        goto out;

                alloc_filters(sc);
                setup_rss(sc);

                t3_add_configured_sysctls(sc);
                sc->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(sc);
        t3_sge_start(sc);
        t3_intr_enable(sc);

        if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
            is_offload(sc) && init_tp_parity(sc) == 0)
                sc->flags |= TP_PARITY_INIT;

        if (sc->flags & TP_PARITY_INIT) {
                t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(sc->flags & QUEUES_BOUND)) {
                bind_qsets(sc);
                setup_hw_filters(sc);
                sc->flags |= QUEUES_BOUND;
        }

        t3_sge_reset_adapter(sc);
out:
        return (err);
}
/*
 * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 * during controller_detach, not here.
 */
static void
cxgb_down(struct adapter *sc)
{
        t3_sge_stop(sc);
        t3_intr_disable(sc);
}
/*
 * if_init for cxgb ports.
 */
static void
cxgb_init(void *arg)
{
        struct port_info *p = arg;
        struct adapter *sc = p->adapter;

        ADAPTER_LOCK(sc);
        cxgb_init_locked(p); /* releases adapter lock */
        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
}
static int
cxgb_init_locked(struct port_info *p)
{
        struct adapter *sc = p->adapter;
        struct ifnet *ifp = p->ifp;
        struct cmac *mac = &p->mac;
        int i, rc = 0, may_sleep = 0, gave_up_lock = 0;

        ADAPTER_LOCK_ASSERT_OWNED(sc);

        while (!IS_DOOMED(p) && IS_BUSY(sc)) {
                gave_up_lock = 1;
                if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
                        rc = EINTR;
                        goto done;
                }
        }
        if (IS_DOOMED(p)) {
                rc = ENXIO;
                goto done;
        }
        KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));

        /*
         * The code that runs during one-time adapter initialization can sleep
         * so it's important not to hold any locks across it.
         */
        may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;

        if (may_sleep) {
                SET_BUSY(sc);
                gave_up_lock = 1;
                ADAPTER_UNLOCK(sc);
        }

        if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
                goto done;

        PORT_LOCK(p);
        if (isset(&sc->open_device_map, p->port_id) &&
            (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                PORT_UNLOCK(p);
                goto done;
        }
        t3_port_intr_enable(sc, p->port_id);
        if (!mac->multiport)
                t3_mac_init(mac);
        cxgb_update_mac_settings(p);
        t3_link_start(&p->phy, mac, &p->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        PORT_UNLOCK(p);

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_qset *qs = &sc->sge.qs[i];
                struct sge_txq *txq = &qs->txq[TXQ_ETH];

                callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
                    txq->txq_watchdog.c_cpu);
        }

        /* all ok */
        setbit(&sc->open_device_map, p->port_id);
        callout_reset(&p->link_check_ch,
            p->phy.caps & SUPPORTED_LINK_IRQ ? hz * 3 : hz / 4,
            link_check_callout, p);

done:
        if (may_sleep) {
                ADAPTER_LOCK(sc);
                KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
                CLR_BUSY(sc);
        }
        if (gave_up_lock)
                wakeup_one(&sc->flags);
        ADAPTER_UNLOCK(sc);
        return (rc);
}
static int
cxgb_uninit_locked(struct port_info *p)
{
        struct adapter *sc = p->adapter;
        int rc;

        ADAPTER_LOCK_ASSERT_OWNED(sc);

        while (!IS_DOOMED(p) && IS_BUSY(sc)) {
                if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
                        rc = EINTR;
                        goto done;
                }
        }
        if (IS_DOOMED(p)) {
                rc = ENXIO;
                goto done;
        }
        KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
        SET_BUSY(sc);
        ADAPTER_UNLOCK(sc);

        rc = cxgb_uninit_synchronized(p);

        ADAPTER_LOCK(sc);
        KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
        CLR_BUSY(sc);
        wakeup_one(&sc->flags);
done:
        ADAPTER_UNLOCK(sc);
        return (rc);
}
/*
 * Called on "ifconfig down", and from port_detach
 */
static int
cxgb_uninit_synchronized(struct port_info *pi)
{
        struct adapter *sc = pi->adapter;
        struct ifnet *ifp = pi->ifp;

        /*
         * taskqueue_drain may cause a deadlock if the adapter lock is held.
         */
        ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

        /*
         * Clear this port's bit from the open device map, and then drain all
         * the tasks that can access/manipulate this port's port_info or ifp.
         * We disable this port's interrupts here and so the slow/ext
         * interrupt tasks won't be enqueued.  The tick task will continue to
         * be enqueued every second but the runs after this drain will not see
         * this port in the open device map.
         *
         * A well behaved task must take open_device_map into account and ignore
         * ports that are not open.
         */
        clrbit(&sc->open_device_map, pi->port_id);
        t3_port_intr_disable(sc, pi->port_id);
        taskqueue_drain(sc->tq, &sc->slow_intr_task);
        taskqueue_drain(sc->tq, &sc->tick_task);

        callout_drain(&pi->link_check_ch);
        taskqueue_drain(sc->tq, &pi->link_check_task);

        PORT_LOCK(pi);
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        /* disable pause frames */
        t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);

        /* Reset RX FIFO HWM */
        t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset,
            V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);

        DELAY(100 * 1000);

        /* Wait for TXFIFO empty */
        t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
            F_TXFIFO_EMPTY, 1, 20, 5);

        DELAY(100 * 1000);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);

        pi->phy.ops->power_down(&pi->phy, 1);

        PORT_UNLOCK(pi);

        pi->link_config.link_ok = 0;
        t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);

        if (sc->open_device_map == 0)
                cxgb_down(pi->adapter);

        return (0);
}
/*
 * Mark lro enabled or disabled in all qsets for this port
 */
static int
cxgb_set_lro(struct port_info *p, int enabled)
{
        int i;
        struct adapter *adp = p->adapter;
        struct sge_qset *q;

        for (i = 0; i < p->nqsets; i++) {
                q = &adp->sge.qs[p->first_qset + i];
                q->lro.enabled = (enabled != 0);
        }
        return (0);
}
static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
        struct port_info *p = ifp->if_softc;
        struct adapter *sc = p->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        int flags, error = 0, mtu;
        uint32_t mask;

        switch (command) {
        case SIOCSIFMTU:
                ADAPTER_LOCK(sc);
                error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
                if (error) {
fail:
                        ADAPTER_UNLOCK(sc);
                        return (error);
                }

                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
                        error = EINVAL;
                } else {
                        ifp->if_mtu = mtu;
                        PORT_LOCK(p);
                        cxgb_update_mac_settings(p);
                        PORT_UNLOCK(p);
                }
                ADAPTER_UNLOCK(sc);
                break;
        case SIOCSIFFLAGS:
                ADAPTER_LOCK(sc);
                if (IS_DOOMED(p)) {
                        error = ENXIO;
                        goto fail;
                }
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                flags = p->if_flags;
                                if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
                                    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
                                        if (IS_BUSY(sc)) {
                                                error = EBUSY;
                                                goto fail;
                                        }
                                        PORT_LOCK(p);
                                        cxgb_update_mac_settings(p);
                                        PORT_UNLOCK(p);
                                }
                                ADAPTER_UNLOCK(sc);
                        } else
                                error = cxgb_init_locked(p);
                        p->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        error = cxgb_uninit_locked(p);
                else
                        ADAPTER_UNLOCK(sc);

                ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                ADAPTER_LOCK(sc);
                error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
                if (error)
                        goto fail;

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        PORT_LOCK(p);
                        cxgb_update_mac_settings(p);
                        PORT_UNLOCK(p);
                }
                ADAPTER_UNLOCK(sc);
                break;
        case SIOCSIFCAP:
                ADAPTER_LOCK(sc);
                error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
                if (error)
                        goto fail;

                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
                 * sending a TSO request our way, so it's sufficient to toggle
                 * IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                error = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                error = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
                        ifp->if_capenable ^= IFCAP_LRO;

                        /* Safe to do this even if cxgb_up not called yet */
                        cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE4) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE4;

                        error = toe_capability(p, enable);
                        if (error == 0)
                                ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                PORT_LOCK(p);
                                cxgb_update_mac_settings(p);
                                PORT_UNLOCK(p);
                        }
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                PORT_LOCK(p);
                                cxgb_update_mac_settings(p);
                                PORT_UNLOCK(p);
                        }
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
                ADAPTER_UNLOCK(sc);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &p->media, command);
                break;
        default:
                error = ether_ioctl(ifp, command, data);
        }

        return (error);
}
static int
cxgb_media_change(struct ifnet *ifp)
{
        return (EOPNOTSUPP);
}

/*
 * Translates phy->modtype to the correct Ethernet media subtype.
 */
static int
cxgb_ifm_type(int mod)
{
        switch (mod) {
        case phy_modtype_sr:
                return (IFM_10G_SR);
        case phy_modtype_lr:
                return (IFM_10G_LR);
        case phy_modtype_lrm:
                return (IFM_10G_LRM);
        case phy_modtype_twinax:
                return (IFM_10G_TWINAX);
        case phy_modtype_twinax_long:
                return (IFM_10G_TWINAX_LONG);
        case phy_modtype_none:
                return (IFM_NONE);
        case phy_modtype_unknown:
                return (IFM_UNKNOWN);
        }

        KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
        return (IFM_UNKNOWN);
}
/*
 * Rebuilds the ifmedia list for this port, and sets the current media.
 */
static void
cxgb_build_medialist(struct port_info *p)
{
        struct cphy *phy = &p->phy;
        struct ifmedia *media = &p->media;
        int mod = phy->modtype;
        int m = IFM_ETHER | IFM_FDX;

        PORT_LOCK(p);

        ifmedia_removeall(media);
        if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
                /* Copper (RJ45) */

                if (phy->caps & SUPPORTED_10000baseT_Full)
                        ifmedia_add(media, m | IFM_10G_T, mod, NULL);

                if (phy->caps & SUPPORTED_1000baseT_Full)
                        ifmedia_add(media, m | IFM_1000_T, mod, NULL);

                if (phy->caps & SUPPORTED_100baseT_Full)
                        ifmedia_add(media, m | IFM_100_TX, mod, NULL);

                if (phy->caps & SUPPORTED_10baseT_Full)
                        ifmedia_add(media, m | IFM_10_T, mod, NULL);

                ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
                ifmedia_set(media, IFM_ETHER | IFM_AUTO);

        } else if (phy->caps & SUPPORTED_TP) {
                /* Copper (CX4) */

                KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
                    ("%s: unexpected cap 0x%x", __func__, phy->caps));

                ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
                ifmedia_set(media, m | IFM_10G_CX4);

        } else if (phy->caps & SUPPORTED_FIBRE &&
            phy->caps & SUPPORTED_10000baseT_Full) {
                /* 10G optical (but includes SFP+ twinax) */

                m |= cxgb_ifm_type(mod);
                if (IFM_SUBTYPE(m) == IFM_NONE)
                        m &= ~IFM_FDX;

                ifmedia_add(media, m, mod, NULL);
                ifmedia_set(media, m);

        } else if (phy->caps & SUPPORTED_FIBRE &&
            phy->caps & SUPPORTED_1000baseT_Full) {
                /* 1G optical */

                /* XXX: Lie and claim to be SX, could actually be any 1G-X */
                ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
                ifmedia_set(media, m | IFM_1000_SX);

        } else {
                KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
                    phy->caps));
        }

        PORT_UNLOCK(p);
}
static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct port_info *p = ifp->if_softc;
        struct ifmedia_entry *cur = p->media.ifm_cur;
        int speed = p->link_config.speed;

        if (cur->ifm_data != p->phy.modtype) {
                cxgb_build_medialist(p);
                cur = p->media.ifm_cur;
        }

        ifmr->ifm_status = IFM_AVALID;
        if (!p->link_config.link_ok)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;

        /*
         * active and current will differ iff current media is autoselect.  That
         * can happen only for copper RJ45.
         */
        if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
                return;
        KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
            ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
        if (speed == SPEED_10000)
                ifmr->ifm_active |= IFM_10G_T;
        else if (speed == SPEED_1000)
                ifmr->ifm_active |= IFM_1000_T;
        else if (speed == SPEED_100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (speed == SPEED_10)
                ifmr->ifm_active |= IFM_10_T;
        else
                KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
                    speed));
}
static uint64_t
cxgb_get_counter(struct ifnet *ifp, ift_counter c)
{
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct cmac *mac = &pi->mac;
        struct mac_stats *mstats = &mac->stats;

        cxgb_refresh_stats(pi);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (mstats->rx_frames);

        case IFCOUNTER_IERRORS:
                return (mstats->rx_jabber + mstats->rx_data_errs +
                    mstats->rx_sequence_errs + mstats->rx_runt +
                    mstats->rx_too_long + mstats->rx_mac_internal_errs +
                    mstats->rx_short + mstats->rx_fcs_errs);

        case IFCOUNTER_OPACKETS:
                return (mstats->tx_frames);

        case IFCOUNTER_OERRORS:
                return (mstats->tx_excess_collisions + mstats->tx_underrun +
                    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
                    mstats->tx_excess_deferral + mstats->tx_fcs_errs);

        case IFCOUNTER_COLLISIONS:
                return (mstats->tx_total_collisions);

        case IFCOUNTER_IBYTES:
                return (mstats->rx_octets);

        case IFCOUNTER_OBYTES:
                return (mstats->tx_octets);

        case IFCOUNTER_IMCASTS:
                return (mstats->rx_mcast_frames);

        case IFCOUNTER_OMCASTS:
                return (mstats->tx_mcast_frames);

        case IFCOUNTER_IQDROPS:
                return (mstats->rx_cong_drops);

        case IFCOUNTER_OQDROPS: {
                uint64_t drops = 0;
                int i;

                if (sc->flags & FULL_INIT_DONE) {
                        for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
                                drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
                }

                return (drops);
        }

        default:
                return (if_get_counter_default(ifp, c));
        }
}
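/*
 * Mask further slow-path interrupts (the read-back flushes the write) and
 * hand the actual processing off to the slow interrupt task.
 */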
static void
cxgb_async_intr(void *data)
{
        adapter_t *sc = data;

        t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
        (void) t3_read_reg(sc, A_PL_INT_ENABLE0);
        taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}
static void
link_check_callout(void *arg)
{
        struct port_info *pi = arg;
        struct adapter *sc = pi->adapter;

        if (!isset(&sc->open_device_map, pi->port_id))
                return;

        taskqueue_enqueue(sc->tq, &pi->link_check_task);
}

static void
check_link_status(void *arg, int pending)
{
        struct port_info *pi = arg;
        struct adapter *sc = pi->adapter;

        if (!isset(&sc->open_device_map, pi->port_id))
                return;

        t3_link_changed(sc, pi->port_id);
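        /*
         * Keep polling for link state if we cannot rely on a link interrupt,
         * or while the link is down or faulted.
         */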
        if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
            pi->link_config.link_ok == 0)
                callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
}
void
t3_os_link_intr(struct port_info *pi)
{
        /*
         * Schedule a link check in the near future.  If the link is flapping
         * rapidly we'll keep resetting the callout and delaying the check until
         * things stabilize a bit.
         */
        callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
}
static void
check_t3b2_mac(struct adapter *sc)
{
        int i;

        if (sc->flags & CXGB_SHUTDOWN)
                return;

        for_each_port(sc, i) {
                struct port_info *p = &sc->port[i];
                int status;
#ifdef INVARIANTS
                struct ifnet *ifp = p->ifp;
#endif

                if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
                    !p->link_config.link_ok)
                        continue;

                KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
                    ("%s: state mismatch (drv_flags %x, device_map %x)",
                    __func__, ifp->if_drv_flags, sc->open_device_map));

                PORT_LOCK(p);
                status = t3b2_mac_watchdog_task(&p->mac);
                if (status == 1)
                        p->mac.stats.num_toggled++;
                else if (status == 2) {
                        struct cmac *mac = &p->mac;

                        cxgb_update_mac_settings(p);
                        t3_link_start(&p->phy, mac, &p->link_config);
                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
                        t3_port_intr_enable(sc, p->port_id);
                        p->mac.stats.num_resets++;
                }
                PORT_UNLOCK(p);
        }
}
static void
cxgb_tick(void *arg)
{
        adapter_t *sc = (adapter_t *)arg;

        if (sc->flags & CXGB_SHUTDOWN)
                return;

        taskqueue_enqueue(sc->tq, &sc->tick_task);
        callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
}
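/*
 * Refresh this port's MAC statistics, rate-limited to at most one hardware
 * readback every 250ms.
 */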
void
cxgb_refresh_stats(struct port_info *pi)
{
        struct timeval tv;
        const struct timeval interval = {0, 250000};    /* 250ms */

        getmicrotime(&tv);
        timevalsub(&tv, &interval);
        if (timevalcmp(&tv, &pi->last_refreshed, <))
                return;

        PORT_LOCK(pi);
        t3_mac_update_stats(&pi->mac);
        PORT_UNLOCK(pi);
        getmicrotime(&pi->last_refreshed);
}
static void
cxgb_tick_handler(void *arg, int count)
{
        adapter_t *sc = (adapter_t *)arg;
        const struct adapter_params *p = &sc->params;
        int i;
        uint32_t cause, reset;

        if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
                return;

        if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
                check_t3b2_mac(sc);

        cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY);
        if (cause) {
                struct sge_qset *qs = &sc->sge.qs[0];
                uint32_t mask, v;

                v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;
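                /*
                 * One status bit per response queue: count the queues that
                 * starved, then skip ahead to the per-free-list empty bits.
                 */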
                mask = 1;
                for (i = 0; i < SGE_QSETS; i++) {
                        if (v & mask)
                                qs[i].rspq.starved++;
                        mask <<= 1;
                }

                mask <<= SGE_QSETS; /* skip RSPQXDISABLED */

                for (i = 0; i < SGE_QSETS * 2; i++) {
                        if (v & mask)
                                qs[i / 2].fl[i % 2].empty++;
                        mask <<= 1;
                }

                /* clear */
                t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
                t3_write_reg(sc, A_SG_INT_CAUSE, cause);
        }

        for (i = 0; i < sc->params.nports; i++) {
                struct port_info *pi = &sc->port[i];
                struct cmac *mac = &pi->mac;

                if (!isset(&sc->open_device_map, pi->port_id))
                        continue;

                cxgb_refresh_stats(pi);

                if (mac->multiport)
                        continue;

                /* Count rx fifo overflows, once per second */
                cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
                reset = 0;
                if (cause & F_RXFIFO_OVERFLOW) {
                        mac->stats.rx_fifo_ovfl++;
                        reset |= F_RXFIFO_OVERFLOW;
                }
                t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
        }
}
static void
touch_bars(device_t dev)
{
        /*
         * Don't enable yet
         */
#if !defined(__LP64__) && 0
        u32 v;

        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}
static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = pi->adapter;
        uint8_t *buf;

        aligned_offset = offset & ~3;
        aligned_len = (len + (offset & 3) + 3) & ~3;
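        /*
         * EEPROM writes are 32-bit words: if the caller's range is not
         * aligned, read back the partial words at each end and merge the new
         * bytes into them before writing.
         */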
2478 if (aligned_offset != offset || aligned_len != len) {
2479 buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2482 err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2483 if (!err && aligned_len > 4)
2484 err = t3_seeprom_read(adapter,
2485 aligned_offset + aligned_len - 4,
2486 (u32 *)&buf[aligned_len - 4]);
2489 memcpy(buf + (offset & 3), data, len);
2491 buf = (uint8_t *)(uintptr_t)data;
2493 err = t3_seeprom_wp(adapter, 0);
2497 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2498 err = t3_seeprom_write(adapter, aligned_offset, *p);
2499 aligned_offset += 4;
2503 err = t3_seeprom_wp(adapter, 1);
2506 free(buf, M_DEVBUF);
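/*
 * Worked example of the alignment above: a write of len 3 at offset 6
 * yields aligned_offset = 6 & ~3 = 4 and aligned_len = (3 + 2 + 3) & ~3 = 8,
 * so the two 4-byte words covering bytes 4-11 are read, the 3 payload bytes
 * are spliced in at buf + 2, and both words are written back.
 */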
static int
in_range(int val, int lo, int hi)
{
	return (val < 0 || (val <= hi && val >= lo));
}
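/*
 * Note that in_range() deliberately treats negative values as "parameter
 * not supplied" and lets them pass: in_range(-1, 0, 1) is true while
 * in_range(5, 0, 1) is false.  The CHELSIO_SET_HW_SCHED handler below
 * depends on this, using -1 to mean "leave this setting unchanged".
 */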
static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{
	return (0);
}

static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	case CHELSIO_GET_MIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
			    mid->reg_num, &val);
		} else
			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f, &val);
		if (error == 0)
			mid->val_out = val;
		break;
	}
	case CHELSIO_SET_MIIREG: {
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_write)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_VEND2)
				return (EINVAL);

			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
			    mmd, mid->reg_num, mid->val_in);
		} else
			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
			    mid->reg_num & 0x1f, mid->val_in);
		break;
	}
	case CHELSIO_SETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		t3_write_reg(sc, edata->addr, edata->val);
		break;
	}
	case CHELSIO_GETREG: {
		struct ch_reg *edata = (struct ch_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);
		edata->val = t3_read_reg(sc, edata->addr);
		break;
	}
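	/*
	 * Userland reaches these cases through ioctl(2) on the driver's
	 * character device.  A sketch (the device path and register offset
	 * are illustrative, and the privilege check above must pass):
	 *
	 *	struct ch_reg r = { .addr = 0x6c };	// 4-byte aligned
	 *	int fd = open("/dev/cxgb0", O_RDWR);
	 *	if (fd >= 0 && ioctl(fd, CHELSIO_GETREG, &r) == 0)
	 *		printf("0x%x: 0x%x\n", r.addr, r.val);
	 */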
	case CHELSIO_GET_SGE_CONTEXT: {
		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;

		mtx_lock_spin(&sc->sge.reg_lock);
		switch (ecntxt->cntxt_type) {
		case CNTXT_TYPE_EGRESS:
			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_FL:
			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_RSP:
			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		case CNTXT_TYPE_CQ:
			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
			    ecntxt->data);
			break;
		default:
			error = EINVAL;
			break;
		}
		mtx_unlock_spin(&sc->sge.reg_lock);
		break;
	}
	case CHELSIO_GET_SGE_DESC: {
		struct ch_desc *edesc = (struct ch_desc *)data;
		int ret;

		if (edesc->queue_num >= SGE_QSETS * 6)
			return (EINVAL);
		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
		    edesc->queue_num % 6, edesc->idx, edesc->data);
		if (ret < 0)
			return (EAGAIN);
		edesc->size = ret;
		break;
	}
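	/*
	 * The divide/modulo above works because each qset owns 6 descriptor
	 * rings (3 tx queues, 2 free lists, and a response queue):
	 * queue_num / 6 selects the qset and queue_num % 6 selects the ring
	 * within it, hence the SGE_QSETS * 6 bound check.
	 */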
	case CHELSIO_GET_QSET_PARAMS: {
		struct qset_params *q;
		struct ch_qset_params *t = (struct ch_qset_params *)data;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (t->qset_idx >= nqsets)
			return (EINVAL);

		i = q1 + t->qset_idx;
		q = &sc->params.sge.qset[i];
		t->rspq_size = q->rspq_size;
		t->txq_size[0] = q->txq_size[0];
		t->txq_size[1] = q->txq_size[1];
		t->txq_size[2] = q->txq_size[2];
		t->fl_size[0] = q->fl_size;
		t->fl_size[1] = q->jumbo_size;
		t->polling = q->polling;
		t->lro = q->lro;
		t->intr_lat = q->coalesce_usecs;
		t->cong_thres = q->cong_thres;
		t->qnum = i;

		if ((sc->flags & FULL_INIT_DONE) == 0)
			t->vector = 0;
		else if (sc->flags & USING_MSIX)
			t->vector = rman_get_start(sc->msix_irq_res[i]);
		else
			t->vector = rman_get_start(sc->irq_res);

		break;
	}
	case CHELSIO_GET_QSET_NUM: {
		struct ch_reg *edata = (struct ch_reg *)data;

		edata->val = pi->nqsets;
		break;
	}
	case CHELSIO_LOAD_FW: {
		uint8_t *fw_data;
		uint32_t vers;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		/*
		 * You're allowed to load a firmware only before FULL_INIT_DONE
		 *
		 * FW_UPTODATE is also set so the rest of the initialization
		 * will not overwrite what was loaded here.  This gives you the
		 * flexibility to load any firmware (and maybe shoot yourself in
		 * the foot).
		 */

		ADAPTER_LOCK(sc);
		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
			ADAPTER_UNLOCK(sc);
			return (EBUSY);
		}

		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!fw_data)
			error = ENOMEM;
		else
			error = copyin(t->buf, fw_data, t->len);

		if (!error)
			error = -t3_load_fw(sc, fw_data, t->len);

		if (t3_get_fw_version(sc, &vers) == 0) {
			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
		}

		if (!error)
			sc->flags |= FW_UPTODATE;

		free(fw_data, M_DEVBUF);
		ADAPTER_UNLOCK(sc);
		break;
	}
	case CHELSIO_LOAD_BOOT: {
		uint8_t *boot_data;
		struct ch_mem_range *t = (struct ch_mem_range *)data;

		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
		if (!boot_data)
			return (ENOMEM);

		error = copyin(t->buf, boot_data, t->len);
		if (!error)
			error = -t3_load_boot(sc, boot_data, t->len);

		free(boot_data, M_DEVBUF);
		break;
	}
	case CHELSIO_GET_PM: {
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		m->tx_pg_sz = p->tx_pg_size;
		m->tx_num_pg = p->tx_num_pgs;
		m->rx_pg_sz = p->rx_pg_size;
		m->rx_num_pg = p->rx_num_pgs;
		m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;

		break;
	}
	case CHELSIO_SET_PM: {
		struct ch_pm *m = (struct ch_pm *)data;
		struct tp_params *p = &sc->params.tp;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (sc->flags & FULL_INIT_DONE)
			return (EBUSY);

		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
			return (EINVAL);	/* not power of 2 */
		if (!(m->rx_pg_sz & 0x14000))
			return (EINVAL);	/* not 16KB or 64KB */
		if (!(m->tx_pg_sz & 0x1554000))
			return (EINVAL);
		if (m->tx_num_pg == -1)
			m->tx_num_pg = p->tx_num_pgs;
		if (m->rx_num_pg == -1)
			m->rx_num_pg = p->rx_num_pgs;
		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
			return (EINVAL);
		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
			return (EINVAL);

		p->rx_pg_size = m->rx_pg_sz;
		p->tx_pg_size = m->tx_pg_sz;
		p->rx_num_pgs = m->rx_num_pg;
		p->tx_num_pgs = m->tx_num_pg;
		break;
	}
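	/*
	 * The magic page-size masks above decode as follows: 0x14000 is
	 * 0x4000 | 0x10000, i.e. the rx page size must be 16KB or 64KB, and
	 * 0x1554000 sets every second power of two from 0x4000 up to
	 * 0x1000000, allowing tx page sizes of 16KB, 64KB, 256KB, 1MB, 4MB,
	 * or 16MB.  The power-of-2 check before them guarantees that exactly
	 * one bit of the page size is set.
	 */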
	case CHELSIO_SETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;
		int i;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (offload_running(sc))
			return (EBUSY);
		if (m->nmtus != NMTUS)
			return (EINVAL);
		if (m->mtus[0] < 81)		/* accommodate SACK */
			return (EINVAL);

		/*
		 * MTUs must be in ascending order
		 */
		for (i = 1; i < NMTUS; ++i)
			if (m->mtus[i] < m->mtus[i - 1])
				return (EINVAL);

		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
		break;
	}
	case CHELSIO_GETMTUTAB: {
		struct ch_mtus *m = (struct ch_mtus *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);

		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
		m->nmtus = NMTUS;
		break;
	}
	case CHELSIO_GET_MEM: {
		struct ch_mem_range *t = (struct ch_mem_range *)data;
		struct mc7 *mem;
		uint8_t *useraddr;
		u64 buf[32];

		/*
		 * Use these to avoid modifying len/addr in the return
		 * struct
		 */
		uint32_t len = t->len, addr = t->addr;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);	/* need the memory controllers */
		if ((addr & 0x7) || (len & 0x7))
			return (EINVAL);
		if (t->mem_id == MEM_CM)
			mem = &sc->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &sc->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &sc->pmtx;
		else
			return (EINVAL);

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t->version = 3 | (sc->params.rev << 10);

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr = (uint8_t *)t->buf;
		while (len) {
			unsigned int chunk = min(len, sizeof(buf));

			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
			if (error)
				return (-error);
			if (copyout(buf, useraddr, chunk))
				return (EFAULT);
			useraddr += chunk;
			addr += chunk;
			len -= chunk;
		}
		break;
	}
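	/*
	 * Example of the chunking above: buf is u64 buf[32], so sizeof(buf)
	 * is 256 bytes and a request for, say, 1032 bytes is served as four
	 * 256-byte reads followed by one 8-byte read, each copied out before
	 * the next is issued.
	 */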
	case CHELSIO_READ_TCAM_WORD: {
		struct ch_tcam_word *t = (struct ch_tcam_word *)data;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EIO);	/* need MC5 */
		return (-t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf));
	}
	case CHELSIO_SET_TRACE_FILTER: {
		struct ch_trace *t = (struct ch_trace *)data;
		const struct trace_params *tp;

		tp = (const struct trace_params *)&t->sip;
		if (t->config_tx)
			t3_config_trace_filter(sc, tp, 0, t->invert_match,
			    t->trace_tx);
		if (t->config_rx)
			t3_config_trace_filter(sc, tp, 1, t->invert_match,
			    t->trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED: {
		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;

		if (sc->open_device_map == 0)
			return (EAGAIN);
		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
		    p->binding);
		break;
	}
	case CHELSIO_IFCONF_GETREGS: {
		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
		int reglen = cxgb_get_regs_len();
		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);

		if (buf == NULL)
			return (ENOMEM);
		if (regs->len > reglen)
			regs->len = reglen;
		else if (regs->len < reglen)
			error = ENOBUFS;

		if (!error) {
			cxgb_get_regs(sc, regs, buf);
			error = copyout(buf, regs->data, reglen);
		}

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_HW_SCHED: {
		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
		unsigned int ticks_per_usec = core_ticks_per_usec(sc);

		if ((sc->flags & FULL_INIT_DONE) == 0)
			return (EAGAIN);	/* need TP to be initialized */
		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
		    !in_range(t->channel, 0, 1) ||
		    !in_range(t->kbps, 0, 10000000) ||
		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
		    !in_range(t->flow_ipg, 0,
		    dack_ticks_to_usec(sc, 0x7ff)))
			return (EINVAL);

		if (t->kbps >= 0) {
			error = t3_config_sched(sc, t->kbps, t->sched);
			if (error < 0)
				return (-error);
		}
		if (t->class_ipg >= 0)
			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
		if (t->flow_ipg >= 0) {
			t->flow_ipg *= 1000;	/* us -> ns */
			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
		}
		if (t->mode >= 0) {
			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);

			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    bit, t->mode ? bit : 0);
		}
		if (t->channel >= 0)
			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
			    1 << t->sched, t->channel << t->sched);
		break;
	}
	case CHELSIO_GET_EEPROM: {
		int i;
		struct ch_eeprom *e = (struct ch_eeprom *)data;
		uint8_t *buf;

		if (e->offset & 3 || e->offset >= EEPROMSIZE ||
		    e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) {
			return (EINVAL);
		}

		buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
		if (buf == NULL)
			return (ENOMEM);
		e->magic = EEPROM_MAGIC;
		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);

		if (!error)
			error = copyout(buf + e->offset, e->data, e->len);

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_CLEAR_STATS: {
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);

		PORT_LOCK(pi);
		t3_mac_update_stats(&pi->mac);
		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
		PORT_UNLOCK(pi);
		break;
	}
	case CHELSIO_GET_UP_LA: {
		struct ch_up_la *la = (struct ch_up_la *)data;
		uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT);

		if (buf == NULL)
			return (ENOMEM);
		if (la->bufsize < LA_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_la(sc, &la->stopped, &la->idx,
			    &la->bufsize, buf);
		if (!error)
			error = copyout(buf, la->data, la->bufsize);

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_GET_UP_IOQS: {
		struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data;
		uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT);
		uint32_t *v;

		if (buf == NULL)
			return (ENOMEM);
		if (ioqs->bufsize < IOQS_BUFSIZE)
			error = ENOBUFS;

		if (!error)
			error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf);

		if (!error) {
			v = (uint32_t *)buf;

			ioqs->ioq_rx_enable = *v++;
			ioqs->ioq_tx_enable = *v++;
			ioqs->ioq_rx_status = *v++;
			ioqs->ioq_tx_status = *v++;

			error = copyout(v, ioqs->data, ioqs->bufsize);
		}

		free(buf, M_DEVBUF);
		break;
	}
	case CHELSIO_SET_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);	/* No TCAM */
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);	/* mc5 not setup yet */
		if (nfilters == 0)
			return (EBUSY);		/* TOE will use TCAM */

		/* sanity checks */
		if (f->filter_id >= nfilters ||
		    (f->val.dip && f->mask.dip != 0xffffffff) ||
		    (f->val.sport && f->mask.sport != 0xffff) ||
		    (f->val.dport && f->mask.dport != 0xffff) ||
		    (f->val.vlan && f->mask.vlan != 0xfff) ||
		    (f->val.vlan_prio &&
			f->mask.vlan_prio != FILTER_NO_VLAN_PRI) ||
		    (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) ||
		    f->qset >= SGE_QSETS ||
		    sc->rrss_map[f->qset] >= RSS_TABLE_SIZE)
			return (EINVAL);

		/* Was allocated with M_WAITOK */
		KASSERT(sc->filters, ("filter table NULL\n"));

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);

		bzero(p, sizeof(*p));
		p->sip = f->val.sip;
		p->sip_mask = f->mask.sip;
		p->dip = f->val.dip;
		p->sport = f->val.sport;
		p->dport = f->val.dport;
		p->vlan = f->mask.vlan ? f->val.vlan : 0xfff;
		p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) :
		    FILTER_NO_VLAN_PRI;
		p->mac_hit = f->mac_hit;
		p->mac_vld = f->mac_addr_idx != 0xffff;
		p->mac_idx = f->mac_addr_idx;
		p->pkt_type = f->proto;
		p->report_filter_id = f->want_filter_id;
		p->pass = f->pass;
		p->rss = f->rss;
		p->qset = f->qset;

		error = set_filter(sc, f->filter_id, p);
		if (error == 0)
			p->valid = 1;
		break;
	}
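	/*
	 * Note on the encoding above: a vlan mask of 0 stores the wildcard
	 * value 0xfff in the filter entry, and an unsupplied vlan_prio is
	 * stored as FILTER_NO_VLAN_PRI.  CHELSIO_GET_FILTER below reverses
	 * both mappings when reporting a filter back to userland.
	 */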
	case CHELSIO_DEL_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);
		if (f->filter_id >= nfilters)
			return (EINVAL);

		p = &sc->filters[f->filter_id];
		if (p->locked)
			return (EPERM);
		if (!p->valid)
			return (EFAULT); /* Read "Bad address" as "Bad index" */

		bzero(p, sizeof(*p));
		p->sip = p->sip_mask = 0xffffffff;
		p->vlan = 0xfff;
		p->vlan_prio = FILTER_NO_VLAN_PRI;
		p->pkt_type = 1;
		error = set_filter(sc, f->filter_id, p);
		break;
	}
	case CHELSIO_GET_FILTER: {
		struct ch_filter *f = (struct ch_filter *)data;
		struct filter_info *p;
		unsigned int i, nfilters = sc->params.mc5.nfilters;

		if (!is_offload(sc))
			return (EOPNOTSUPP);
		if (!(sc->flags & FULL_INIT_DONE))
			return (EAGAIN);
		if (nfilters == 0 || sc->filters == NULL)
			return (EINVAL);

		i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1;
		for (; i < nfilters; i++) {
			p = &sc->filters[i];
			if (!p->valid)
				continue;

			bzero(f, sizeof(*f));

			f->filter_id = i;
			f->val.sip = p->sip;
			f->mask.sip = p->sip_mask;
			f->val.dip = p->dip;
			f->mask.dip = p->dip ? 0xffffffff : 0;
			f->val.sport = p->sport;
			f->mask.sport = p->sport ? 0xffff : 0;
			f->val.dport = p->dport;
			f->mask.dport = p->dport ? 0xffff : 0;
			f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan;
			f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff;
			f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : p->vlan_prio;
			f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ?
			    0 : FILTER_NO_VLAN_PRI;
			f->mac_hit = p->mac_hit;
			f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff;
			f->proto = p->pkt_type;
			f->want_filter_id = p->report_filter_id;
			f->pass = p->pass;
			f->rss = p->rss;
			f->qset = p->qset;

			break;
		}

		if (i == nfilters)
			f->filter_id = 0xffffffff;
		break;
	}
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	uint32_t *p = (uint32_t *)(buf + start);

	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t3_read_reg(ap, start);
}
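/*
 * reg_block_dump() preserves register offsets in the output: the value of
 * the register at address A lands at buf + A.  For example, dumping the
 * block 0x100-0x10c fills buf[0x100..0x10f] with four 32-bit values and
 * leaves the rest of the (pre-zeroed) buffer untouched.
 */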
#define T3_REGMAP_SIZE (3 * 1024)

static int
cxgb_get_regs_len(void)
{
	return (T3_REGMAP_SIZE);
}
static void
cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
{
	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, cxgb_get_regs_len());
	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
	    XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
	    XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int
alloc_filters(struct adapter *sc)
{
	struct filter_info *p;
	unsigned int nfilters = sc->params.mc5.nfilters;

	if (nfilters == 0)
		return (0);

	p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO);
	sc->filters = p;

	p = &sc->filters[nfilters - 1];
	p->vlan = 0xfff;
	p->vlan_prio = FILTER_NO_VLAN_PRI;
	p->pass = p->rss = p->valid = p->locked = 1;

	return (0);
}
static int
setup_hw_filters(struct adapter *sc)
{
	int i, rc;
	unsigned int nfilters = sc->params.mc5.nfilters;

	if (!sc->filters)
		return (0);

	t3_enable_filters(sc);

	for (i = rc = 0; i < nfilters && !rc; i++) {
		if (sc->filters[i].locked)
			rc = set_filter(sc, i, &sc->filters[i]);
	}

	return (rc);
}
static int
set_filter(struct adapter *sc, int id, const struct filter_info *f)
{
	int len;
	struct mbuf *m;
	struct ulp_txpkt *txpkt;
	struct work_request_hdr *wr;
	struct cpl_pass_open_req *oreq;
	struct cpl_set_tcb_field *sreq;

	len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq);
	KASSERT(len <= MHLEN, ("filter request too big for an mbuf"));

	id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes -
	    sc->params.mc5.nfilters;

	m = m_gethdr(M_WAITOK, MT_DATA);
	m->m_len = m->m_pkthdr.len = len;
	bzero(mtod(m, char *), len);

	wr = mtod(m, struct work_request_hdr *);
	wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC);

	oreq = (struct cpl_pass_open_req *)(wr + 1);
	txpkt = (struct ulp_txpkt *)oreq;
	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8));
	OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id));
	oreq->local_port = htons(f->dport);
	oreq->peer_port = htons(f->sport);
	oreq->local_ip = htonl(f->dip);
	oreq->peer_ip = htonl(f->sip);
	oreq->peer_netmask = htonl(f->sip_mask);
	oreq->opt0h = 0;
	oreq->opt0l = htonl(F_NO_OFFLOAD);
	oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) |
	    V_CONN_POLICY(CPL_CONN_POLICY_FILTER) |
	    V_VLAN_PRI(f->vlan_prio >> 1) |
	    V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) |
	    V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) |
	    V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4)));

	sreq = (struct cpl_set_tcb_field *)(oreq + 1);
	set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL,
	    (f->report_filter_id << 15) | (1 << 23) |
	    ((u64)f->pass << 35) | ((u64)!f->rss << 36));
	set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1);
	t3_mgmt_tx(sc, m);

	if (f->pass && !f->rss) {
		len = sizeof(*sreq);
		m = m_gethdr(M_WAITOK, MT_DATA);
		m->m_len = m->m_pkthdr.len = len;
		bzero(mtod(m, char *), len);
		sreq = mtod(m, struct cpl_set_tcb_field *);
		sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		mk_set_tcb_field(sreq, id, 25, 0x3f80000,
		    (u64)sc->rrss_map[f->qset] << 19);
		t3_mgmt_tx(sc, m);
	}
	return (0);
}
static inline void
mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid,
    unsigned int word, u64 mask, u64 val)
{
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(word);
	req->mask = htobe64(mask);
	req->val = htobe64(val);
}
static inline void
set_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid,
    unsigned int word, u64 mask, u64 val)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;

	txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT));
	txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8));
	mk_set_tcb_field(req, tid, word, mask, val);
}
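/*
 * The ULP_TXPKT header written here is what lets set_filter() above pack
 * one CPL_PASS_OPEN_REQ and two CPL_SET_TCB_FIELDs into a single atomic
 * work request: each message is prefixed with its own length in flits
 * (8-byte units), so the firmware can walk the chain.
 */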
void
t3_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t3_list_lock);
	SLIST_FOREACH(sc, &t3_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t3_list_lock);
}
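/*
 * Example use of t3_iterate() (a sketch; the callback name is
 * illustrative):
 *
 *	static void
 *	adapter_count(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *	...
 *	int n = 0;
 *	t3_iterate(adapter_count, &n);
 *
 * The callback runs with t3_list_lock held, so it must not sleep and must
 * not call anything that acquires t3_list_lock itself.
 */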
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			log(LOG_WARNING,
			    "You must enable a cxgb interface first\n");
			return (EAGAIN);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t3_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t3_tom.ko before trying "
				    "to enable TOE on a cxgb interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);

		/*
		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
		 * enabled on any port.  Need to figure out how to enable,
		 * disable, load, and unload iWARP cleanly.
		 */
		if (!isset(&sc->offload_map, MAX_NPORTS) &&
		    t3_activate_uld(sc, ULD_IWARP) == 0)
			setbit(&sc->offload_map, MAX_NPORTS);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
/*
 * Add an upper layer driver to the global list.
 */
int
t3_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
	ui->refcount = 0;
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}
int
t3_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}
int
t3_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}
int
t3_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}
static int
cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
    struct mbuf *m)
{
	m_freem(m);
	return (EDOOFUS);
}

int
t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
{
	uintptr_t *loc, new;

	if (opcode >= NUM_CPL_HANDLERS)
		return (EINVAL);

	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
	loc = (uintptr_t *)&sc->cpl_handler[opcode];
	atomic_store_rel_ptr(loc, new);

	return (0);
}
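/*
 * Sketch of how an upper layer driver uses this (the opcode and handler
 * name are illustrative):
 *
 *	t3_register_cpl_handler(sc, CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
 *	...
 *	t3_register_cpl_handler(sc, CPL_PASS_ACCEPT_REQ, NULL);
 *
 * Passing NULL reinstalls cpl_not_handled.  The atomic store lets handlers
 * be swapped while the rx path is live, without additional locking.
 */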
static int
cxgbc_mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;

	switch (cmd) {
	case MOD_LOAD:
		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
		SLIST_INIT(&t3_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t3_uld_list);
#endif
		break;

	case MOD_UNLOAD:
#ifdef TCP_OFFLOAD
		mtx_lock(&t3_uld_list_lock);
		if (!SLIST_EMPTY(&t3_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t3_uld_list_lock);
			break;
		}
		mtx_unlock(&t3_uld_list_lock);
		mtx_destroy(&t3_uld_list_lock);
#endif
		mtx_lock(&t3_list_lock);
		if (!SLIST_EMPTY(&t3_list)) {
			rc = EBUSY;
			mtx_unlock(&t3_list_lock);
			break;
		}
		mtx_unlock(&t3_list_lock);
		mtx_destroy(&t3_list_lock);
		break;
	}

	return (rc);
}
#ifdef NETDUMP
static void
cxgb_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
	struct port_info *pi;
	adapter_t *adap;

	pi = if_getsoftc(ifp);
	adap = pi->adapter;
	ADAPTER_LOCK(adap);
	*nrxr = SGE_QSETS;
	*ncl = adap->sge.qs[0].fl[1].size;
	*clsize = adap->sge.qs[0].fl[1].buf_size;
	ADAPTER_UNLOCK(adap);
}
static void
cxgb_netdump_event(struct ifnet *ifp, enum netdump_ev event)
{
	struct port_info *pi;
	struct sge_qset *qs;
	int i;

	pi = if_getsoftc(ifp);
	if (event == NETDUMP_START)
		for (i = 0; i < SGE_QSETS; i++) {
			qs = &pi->adapter->sge.qs[i];

			/* Need to reinit after netdump_mbuf_dump(). */
			qs->fl[0].zone = zone_pack;
			qs->fl[1].zone = zone_clust;
			qs->lro.enabled = 0;
		}
}
static int
cxgb_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi;
	struct sge_qset *qs;

	pi = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (ENOENT);

	qs = &pi->adapter->sge.qs[pi->first_qset];
	return (cxgb_netdump_encap(qs, &m));
}
static int
cxgb_netdump_poll(struct ifnet *ifp, int count)
{
	struct port_info *pi;
	adapter_t *adap;
	int i;

	pi = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return (ENOENT);

	adap = pi->adapter;
	for (i = 0; i < SGE_QSETS; i++)
		(void)cxgb_netdump_poll_rx(adap, &adap->sge.qs[i]);
	(void)cxgb_netdump_poll_tx(&adap->sge.qs[pi->first_qset]);
	return (0);
}
#endif /* NETDUMP */