1 /**************************************************************************
3 Copyright (c) 2007-2008, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/pciio.h>
40 #include <machine/bus.h>
41 #include <machine/resource.h>
42 #include <sys/bus_dma.h>
45 #include <sys/ioccom.h>
47 #include <sys/linker.h>
48 #include <sys/firmware.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/syslog.h>
54 #include <sys/queue.h>
55 #include <sys/taskqueue.h>
59 #include <net/ethernet.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_types.h>
65 #include <net/if_vlan_var.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77 #include <dev/pci/pci_private.h>
79 #include <cxgb_include.h>
85 #ifdef IFNET_MULTIQUEUE
86 #include <machine/intr_machdep.h>
89 static int cxgb_setup_msix(adapter_t *, int);
90 static void cxgb_teardown_msix(adapter_t *);
91 static void cxgb_init(void *);
92 static void cxgb_init_locked(struct port_info *);
93 static void cxgb_stop_locked(struct port_info *);
94 static void cxgb_set_rxmode(struct port_info *);
95 static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
96 static int cxgb_media_change(struct ifnet *);
97 static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
98 static int setup_sge_qsets(adapter_t *);
99 static void cxgb_async_intr(void *);
100 static void cxgb_ext_intr_handler(void *, int);
101 static void cxgb_tick_handler(void *, int);
102 static void cxgb_down_locked(struct adapter *sc);
103 static void cxgb_tick(void *);
104 static void setup_rss(adapter_t *sc);
106 /* Attachment glue for the PCI controller end of the device. Each port of
107 * the device is attached separately, as defined later.
109 static int cxgb_controller_probe(device_t);
110 static int cxgb_controller_attach(device_t);
111 static int cxgb_controller_detach(device_t);
112 static void cxgb_free(struct adapter *);
113 static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
115 static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf);
116 static int cxgb_get_regs_len(void);
117 static int offload_open(struct port_info *pi);
118 static void touch_bars(device_t dev);
119 static int offload_close(struct t3cdev *tdev);
120 static void cxgb_link_start(struct port_info *p);
122 static device_method_t cxgb_controller_methods[] = {
123 DEVMETHOD(device_probe, cxgb_controller_probe),
124 DEVMETHOD(device_attach, cxgb_controller_attach),
125 DEVMETHOD(device_detach, cxgb_controller_detach),
128 DEVMETHOD(bus_print_child, bus_generic_print_child),
129 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
134 static driver_t cxgb_controller_driver = {
136 cxgb_controller_methods,
137 sizeof(struct adapter)
140 static devclass_t cxgb_controller_devclass;
141 DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
144 * Attachment glue for the ports. Attachment is done directly to the
147 static int cxgb_port_probe(device_t);
148 static int cxgb_port_attach(device_t);
149 static int cxgb_port_detach(device_t);
151 static device_method_t cxgb_port_methods[] = {
152 DEVMETHOD(device_probe, cxgb_port_probe),
153 DEVMETHOD(device_attach, cxgb_port_attach),
154 DEVMETHOD(device_detach, cxgb_port_detach),
158 static driver_t cxgb_port_driver = {
164 static d_ioctl_t cxgb_extension_ioctl;
165 static d_open_t cxgb_extension_open;
166 static d_close_t cxgb_extension_close;
168 static struct cdevsw cxgb_cdevsw = {
169 .d_version = D_VERSION,
171 .d_open = cxgb_extension_open,
172 .d_close = cxgb_extension_close,
173 .d_ioctl = cxgb_extension_ioctl,
177 static devclass_t cxgb_port_devclass;
178 DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
180 #define SGE_MSIX_COUNT (SGE_QSETS + 1)
183 * The driver uses the best interrupt scheme available on a platform in the
184 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
185 * of these schemes the driver may consider as follows:
187 * msi = 2: choose from among all three options
188 * msi = 1: only consider MSI and pin interrupts
189 * msi = 0: force pin interrupts
191 static int msi_allowed = 2;
193 TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
194 SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
195 SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
196 "MSI-X, MSI, INTx selector");
199 * The driver enables offload as a default.
200 * To disable it, use ofld_disable = 1.
202 static int ofld_disable = 0;
203 TUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable);
204 SYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0,
205 "disable ULP offload");
208 * The driver uses an auto-queue algorithm by default.
209 * To disable it and force a single queue-set per port, use singleq = 1.
211 static int singleq = 0;
212 TUNABLE_INT("hw.cxgb.singleq", &singleq);
213 SYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0,
214 "use a single queue-set per port");
218 * The driver updates the firmware only when it is out of date, by default.
219 * To force an update even when the firmware is current, use force_fw_update = 1.
221 static int force_fw_update = 0;
222 TUNABLE_INT("hw.cxgb.force_fw_update", &force_fw_update);
223 SYSCTL_UINT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0,
224 "update firmware even if up to date");
226 int cxgb_use_16k_clusters = 1;
227 TUNABLE_INT("hw.cxgb.use_16k_clusters", &cxgb_use_16k_clusters);
228 SYSCTL_UINT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
229 &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");
232 MAX_TXQ_ENTRIES = 16384,
233 MAX_CTRL_TXQ_ENTRIES = 1024,
234 MAX_RSPQ_ENTRIES = 16384,
235 MAX_RX_BUFFERS = 16384,
236 MAX_RX_JUMBO_BUFFERS = 16384,
238 MIN_CTRL_TXQ_ENTRIES = 4,
239 MIN_RSPQ_ENTRIES = 32,
241 MIN_FL_JUMBO_ENTRIES = 32
256 u32 report_filter_id:1;
264 enum { FILTER_NO_VLAN_PRI = 7 };
266 #define EEPROM_MAGIC 0x38E2F10C
268 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
270 /* Table for probing the cards. The desc field isn't actually used */
276 } cxgb_identifiers[] = {
277 {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
278 {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
279 {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
280 {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
281 {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
282 {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
283 {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
284 {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
285 {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
286 {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
287 {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
291 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
295 t3rev2char(struct adapter *adapter)
299 switch(adapter->params.rev) {
314 static struct cxgb_ident *
315 cxgb_get_ident(device_t dev)
317 struct cxgb_ident *id;
319 for (id = cxgb_identifiers; id->desc != NULL; id++) {
320 if ((id->vendor == pci_get_vendor(dev)) &&
321 (id->device == pci_get_device(dev))) {
328 static const struct adapter_info *
329 cxgb_get_adapter_info(device_t dev)
331 struct cxgb_ident *id;
332 const struct adapter_info *ai;
334 id = cxgb_get_ident(dev);
338 ai = t3_get_adapter_info(id->index);
344 cxgb_controller_probe(device_t dev)
346 const struct adapter_info *ai;
347 char *ports, buf[80];
349 struct adapter *sc = device_get_softc(dev);
351 ai = cxgb_get_adapter_info(dev);
355 nports = ai->nports0 + ai->nports1;
361 snprintf(buf, sizeof(buf), "%s %sNIC, rev: %d nports: %d %s",
362 ai->desc, is_offload(sc) ? "R" : "",
363 sc->params.rev, nports, ports);
364 device_set_desc_copy(dev, buf);
365 return (BUS_PROBE_DEFAULT);
368 #define FW_FNAME "cxgb_t3fw"
369 #define TPEEPROM_NAME "t3b_tp_eeprom"
370 #define TPSRAM_NAME "t3b_protocol_sram"
373 upgrade_fw(adapter_t *sc)
375 #ifdef FIRMWARE_LATEST
376 const struct firmware *fw;
382 if ((fw = firmware_get(FW_FNAME)) == NULL) {
383 device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
386 device_printf(sc->dev, "updating firmware on card\n");
387 status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
389 device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status);
391 firmware_put(fw, FIRMWARE_UNLOAD);
397 cxgb_controller_attach(device_t dev)
400 const struct adapter_info *ai;
409 sc = device_get_softc(dev);
412 ai = cxgb_get_adapter_info(dev);
415 * XXX not really related but a recent addition
418 /* find the PCIe link width and set max read request to 4KB*/
419 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) {
421 lnk = pci_read_config(dev, reg + 0x12, 2);
422 sc->link_width = (lnk >> 4) & 0x3f;
424 pectl = pci_read_config(dev, reg + 0x8, 2);
425 pectl = (pectl & ~0x7000) | (5 << 12);
426 pci_write_config(dev, reg + 0x8, pectl, 2);
429 if (sc->link_width != 0 && sc->link_width <= 4 &&
430 (ai->nports0 + ai->nports1) <= 2) {
431 device_printf(sc->dev,
432 "PCIe x%d Link, expect reduced performance\n",
437 pci_enable_busmaster(dev);
439 * Allocate the registers and make them available to the driver.
440 * The registers that we care about for NIC mode are in BAR 0
442 sc->regs_rid = PCIR_BAR(0);
443 if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
444 &sc->regs_rid, RF_ACTIVE)) == NULL) {
445 device_printf(dev, "Cannot allocate BAR region 0\n");
448 sc->udbs_rid = PCIR_BAR(2);
449 if ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
450 &sc->udbs_rid, RF_ACTIVE)) == NULL) {
451 device_printf(dev, "Cannot allocate BAR region 1\n");
456 snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d",
457 device_get_unit(dev));
458 ADAPTER_LOCK_INIT(sc, sc->lockbuf);
460 snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d",
461 device_get_unit(dev));
462 snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d",
463 device_get_unit(dev));
464 snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d",
465 device_get_unit(dev));
467 MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN);
468 MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
469 MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF);
471 sc->bt = rman_get_bustag(sc->regs_res);
472 sc->bh = rman_get_bushandle(sc->regs_res);
473 sc->mmio_len = rman_get_size(sc->regs_res);
475 if (t3_prep_adapter(sc, ai, 1) < 0) {
476 printf("prep adapter failed\n");
480 /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
481 * enough messages for the queue sets. If that fails, try falling
482 * back to MSI. If that fails, then try falling back to the legacy
483 * interrupt pin model.
487 sc->msix_regs_rid = 0x20;
488 if ((msi_allowed >= 2) &&
489 (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
490 &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
492 msi_needed = sc->msi_count = SGE_MSIX_COUNT;
494 if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) ||
495 (sc->msi_count != msi_needed)) {
496 device_printf(dev, "msix allocation failed - msi_count = %d"
497 " msi_needed=%d will try msi err=%d\n", sc->msi_count,
500 pci_release_msi(dev);
501 bus_release_resource(dev, SYS_RES_MEMORY,
502 sc->msix_regs_rid, sc->msix_regs_res);
503 sc->msix_regs_res = NULL;
505 sc->flags |= USING_MSIX;
506 sc->cxgb_intr = t3_intr_msix;
510 if ((msi_allowed >= 1) && (sc->msi_count == 0)) {
512 if (pci_alloc_msi(dev, &sc->msi_count)) {
513 device_printf(dev, "alloc msi failed - will try INTx\n");
515 pci_release_msi(dev);
517 sc->flags |= USING_MSI;
519 sc->cxgb_intr = t3_intr_msi;
523 if (sc->msi_count == 0) {
524 device_printf(dev, "using line interrupts\n");
526 sc->cxgb_intr = t3b_intr;
529 if ((sc->flags & USING_MSIX) && !singleq)
530 port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus);
532 /* Create a private taskqueue thread for handling driver events */
533 #ifdef TASKQUEUE_CURRENT
534 sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
535 taskqueue_thread_enqueue, &sc->tq);
537 sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
538 taskqueue_thread_enqueue, &sc->tq);
540 if (sc->tq == NULL) {
541 device_printf(dev, "failed to allocate controller task queue\n");
545 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
546 device_get_nameunit(dev));
547 TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
548 TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc);
551 /* Create a periodic callout for checking adapter status */
552 callout_init(&sc->cxgb_tick_ch, TRUE);
554 if ((t3_check_fw_version(sc, &must_load) != 0 && must_load) || force_fw_update) {
556 * Warn user that a firmware update will be attempted in init.
558 device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n",
559 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
560 sc->flags &= ~FW_UPTODATE;
562 sc->flags |= FW_UPTODATE;
565 if (t3_check_tpsram_version(sc, &must_load) != 0 && must_load) {
567 * Warn user that a firmware update will be attempted in init.
569 device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n",
570 t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
571 sc->flags &= ~TPS_UPTODATE;
573 sc->flags |= TPS_UPTODATE;
577 * Create a child device for each MAC. The ethernet attachment
578 * will be done in these children.
580 for (i = 0; i < (sc)->params.nports; i++) {
581 struct port_info *pi;
583 if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
584 device_printf(dev, "failed to add child port\n");
590 pi->nqsets = port_qsets;
591 pi->first_qset = i*port_qsets;
593 pi->tx_chan = i >= ai->nports0;
594 pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i;
595 sc->rxpkt_map[pi->txpkt_intf] = i;
596 sc->port[i].tx_chan = i >= ai->nports0;
597 sc->portdev[i] = child;
598 device_set_softc(child, pi);
600 if ((error = bus_generic_attach(dev)) != 0)
603 /* initialize sge private state */
604 t3_sge_init_adapter(sc);
609 if (is_offload(sc)) {
610 setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT);
611 cxgb_adapter_ofld(sc);
613 error = t3_get_fw_version(sc, &vers);
617 snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
618 G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
619 G_FW_VERSION_MICRO(vers));
621 device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]);
622 callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
623 t3_add_attach_sysctls(sc);
632 cxgb_controller_detach(device_t dev)
636 sc = device_get_softc(dev);
644 cxgb_free(struct adapter *sc)
649 sc->flags |= CXGB_SHUTDOWN;
651 cxgb_pcpu_shutdown_threads(sc);
657 cxgb_down_locked(sc);
660 if (sc->flags & (USING_MSI | USING_MSIX)) {
661 device_printf(sc->dev, "releasing msi message(s)\n");
662 pci_release_msi(sc->dev);
664 device_printf(sc->dev, "no msi message to release\n");
667 if (sc->msix_regs_res != NULL) {
668 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
672 t3_sge_deinit_sw(sc);
674 * Wait for last callout
679 for (i = 0; i < (sc)->params.nports; ++i) {
680 if (sc->portdev[i] != NULL)
681 device_delete_child(sc->dev, sc->portdev[i]);
684 bus_generic_detach(sc->dev);
685 if (sc->tq != NULL) {
686 taskqueue_free(sc->tq);
690 if (is_offload(sc)) {
691 cxgb_adapter_unofld(sc);
692 if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT))
693 offload_close(&sc->tdev);
695 printf("cxgb_free: DEVMAP_BIT not set\n");
697 printf("not offloading set\n");
699 if (sc->flags & CXGB_OFLD_INIT)
700 cxgb_offload_deactivate(sc);
702 free(sc->filters, M_DEVBUF);
707 if (sc->udbs_res != NULL)
708 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
711 if (sc->regs_res != NULL)
712 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
715 MTX_DESTROY(&sc->mdio_lock);
716 MTX_DESTROY(&sc->sge.reg_lock);
717 MTX_DESTROY(&sc->elmer_lock);
718 ADAPTER_LOCK_DEINIT(sc);
722 * setup_sge_qsets - configure SGE Tx/Rx/response queues
723 * @sc: the controller softc
725 * Determines how many sets of SGE queues to use and initializes them.
726 * We support multiple queue sets per port if we have MSI-X, otherwise
727 * just one queue set per port.
730 setup_sge_qsets(adapter_t *sc)
732 int i, j, err, irq_idx = 0, qset_idx = 0;
733 u_int ntxq = SGE_TXQ_PER_SET;
735 if ((err = t3_sge_alloc(sc)) != 0) {
736 device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
740 if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
743 for (i = 0; i < (sc)->params.nports; i++) {
744 struct port_info *pi = &sc->port[i];
746 for (j = 0; j < pi->nqsets; j++, qset_idx++) {
747 err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
748 (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
749 &sc->params.sge.qset[qset_idx], ntxq, pi);
751 t3_free_sge_resources(sc);
752 device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n",
763 cxgb_teardown_msix(adapter_t *sc)
767 for (nqsets = i = 0; i < (sc)->params.nports; i++)
768 nqsets += sc->port[i].nqsets;
770 for (i = 0; i < nqsets; i++) {
771 if (sc->msix_intr_tag[i] != NULL) {
772 bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
773 sc->msix_intr_tag[i]);
774 sc->msix_intr_tag[i] = NULL;
776 if (sc->msix_irq_res[i] != NULL) {
777 bus_release_resource(sc->dev, SYS_RES_IRQ,
778 sc->msix_irq_rid[i], sc->msix_irq_res[i]);
779 sc->msix_irq_res[i] = NULL;
785 cxgb_setup_msix(adapter_t *sc, int msix_count)
787 int i, j, k, nqsets, rid;
789 /* The first message indicates link changes and error conditions */
791 if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
792 &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
793 device_printf(sc->dev, "Cannot allocate msix interrupt\n");
797 if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
801 cxgb_async_intr, sc, &sc->intr_tag)) {
802 device_printf(sc->dev, "Cannot set up interrupt\n");
805 for (i = k = 0; i < (sc)->params.nports; i++) {
806 nqsets = sc->port[i].nqsets;
807 for (j = 0; j < nqsets; j++, k++) {
808 struct sge_qset *qs = &sc->sge.qs[k];
812 printf("rid=%d ", rid);
813 if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
814 sc->dev, SYS_RES_IRQ, &rid,
815 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
816 device_printf(sc->dev, "Cannot allocate "
817 "interrupt for message %d\n", rid);
820 sc->msix_irq_rid[k] = rid;
821 if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
822 INTR_MPSAFE|INTR_TYPE_NET,
826 t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
827 device_printf(sc->dev, "Cannot set up "
828 "interrupt for message %d\n", rid);
831 #ifdef IFNET_MULTIQUEUE
833 int vector = rman_get_start(sc->msix_irq_res[k]);
835 device_printf(sc->dev, "binding vector=%d to cpu=%d\n", vector, k % mp_ncpus);
836 intr_bind(vector, k % mp_ncpus);
846 cxgb_port_probe(device_t dev)
852 p = device_get_softc(dev);
854 snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
855 device_set_desc_copy(dev, buf);
861 cxgb_makedev(struct port_info *pi)
864 pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
865 UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));
867 if (pi->port_cdev == NULL)
870 pi->port_cdev->si_drv1 = (void *)pi;
875 #ifndef LRO_SUPPORTED
879 #define IFCAP_LRO 0x0
883 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO)
884 /* Don't enable TSO6 yet */
885 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU | IFCAP_LRO)
887 #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
888 /* Don't enable TSO6 yet */
889 #define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
890 #define IFCAP_TSO4 0x0
891 #define IFCAP_TSO6 0x0
897 cxgb_port_attach(device_t dev)
901 int err, media_flags;
905 p = device_get_softc(dev);
907 snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
908 device_get_unit(device_get_parent(dev)), p->port_id);
909 PORT_LOCK_INIT(p, p->lockbuf);
911 /* Allocate an ifnet object and set it up */
912 ifp = p->ifp = if_alloc(IFT_ETHER);
914 device_printf(dev, "Cannot allocate ifnet\n");
919 * Note that there is currently no watchdog timer.
921 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
922 ifp->if_init = cxgb_init;
924 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
925 ifp->if_ioctl = cxgb_ioctl;
926 ifp->if_start = cxgb_start;
929 #ifdef IFNET_MULTIQUEUE
930 ifp->if_flags |= IFF_MULTIQ;
931 ifp->if_mq_start = cxgb_pcpu_start;
934 ifp->if_timer = 0; /* Disable ifnet watchdog */
935 ifp->if_watchdog = NULL;
937 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
938 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
939 IFQ_SET_READY(&ifp->if_snd);
941 ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
942 ifp->if_capabilities |= CXGB_CAP;
943 ifp->if_capenable |= CXGB_CAP_ENABLE;
944 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
946 * disable TSO on 4-port - it isn't supported by the firmware yet
948 if (p->adapter->params.nports > 2) {
949 ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
950 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
951 ifp->if_hwassist &= ~CSUM_TSO;
954 ether_ifattach(ifp, p->hw_addr);
956 * Only default to jumbo frames on 10GigE
958 if (p->adapter->params.nports <= 2)
959 ifp->if_mtu = ETHERMTU_JUMBO;
960 if ((err = cxgb_makedev(p)) != 0) {
961 printf("makedev failed %d\n", err);
964 ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
967 if (!strcmp(p->phy.desc, "10GBASE-CX4")) {
968 media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
969 } else if (!strcmp(p->phy.desc, "10GBASE-SR")) {
970 media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX;
971 } else if (!strcmp(p->phy.desc, "10GBASE-R")) {
972 media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX;
973 } else if (!strcmp(p->phy.desc, "10/100/1000BASE-T")) {
974 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL);
975 ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX,
977 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX,
979 ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
981 ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
984 } else if (!strcmp(p->phy.desc, "1000BASE-X")) {
986 * XXX: This is not very accurate. Fix when common code
987 * returns more specific value - eg 1000BASE-SX, LX, etc.
989 media_flags = IFM_ETHER | IFM_1000_SX | IFM_FDX;
991 printf("unsupported media type %s\n", p->phy.desc);
995 ifmedia_add(&p->media, media_flags, 0, NULL);
996 ifmedia_set(&p->media, media_flags);
998 ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
999 ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO);
1002 /* Get the latest mac address, User can use a LAA */
1003 bcopy(IF_LLADDR(p->ifp), p->hw_addr, ETHER_ADDR_LEN);
1004 t3_sge_init_port(p);
1005 #if defined(LINK_ATTACH)
1007 t3_link_changed(sc, p->port_id);
1013 cxgb_port_detach(device_t dev)
1015 struct port_info *p;
1017 p = device_get_softc(dev);
1020 if (p->ifp->if_drv_flags & IFF_DRV_RUNNING)
1021 cxgb_stop_locked(p);
1024 ether_ifdetach(p->ifp);
1025 printf("waiting for callout to stop ...");
1029 * the lock may be acquired in ifdetach
1031 PORT_LOCK_DEINIT(p);
1034 if (p->port_cdev != NULL)
1035 destroy_dev(p->port_cdev);
1041 t3_fatal_err(struct adapter *sc)
1045 if (sc->flags & FULL_INIT_DONE) {
1047 t3_write_reg(sc, A_XGM_TX_CTRL, 0);
1048 t3_write_reg(sc, A_XGM_RX_CTRL, 0);
1049 t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0);
1050 t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0);
1051 t3_intr_disable(sc);
1053 device_printf(sc->dev,"encountered fatal error, operation suspended\n");
1054 if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
1055 device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
1056 fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
1060 t3_os_find_pci_capability(adapter_t *sc, int cap)
1063 struct pci_devinfo *dinfo;
1069 dinfo = device_get_ivars(dev);
1072 status = pci_read_config(dev, PCIR_STATUS, 2);
1073 if (!(status & PCIM_STATUS_CAPPRESENT))
1076 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1082 ptr = PCIR_CAP_PTR_2;
1088 ptr = pci_read_config(dev, ptr, 1);
1091 if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
1093 ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1100 t3_os_pci_save_state(struct adapter *sc)
1103 struct pci_devinfo *dinfo;
1106 dinfo = device_get_ivars(dev);
1108 pci_cfg_save(dev, dinfo, 0);
1113 t3_os_pci_restore_state(struct adapter *sc)
1116 struct pci_devinfo *dinfo;
1119 dinfo = device_get_ivars(dev);
1121 pci_cfg_restore(dev, dinfo);
1126 * t3_os_link_changed - handle link status changes
1127 * @adapter: the adapter associated with the link change
1128 * @port_id: the port index whose link status has changed
1129 * @link_status: the new status of the link
1130 * @speed: the new speed setting
1131 * @duplex: the new duplex setting
1132 * @fc: the new flow-control setting
1134 * This is the OS-dependent handler for link status changes. The OS
1135 * neutral handler takes care of most of the processing for these events,
1136 * then calls this handler for any OS-specific processing.
1139 t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
1142 struct port_info *pi = &adapter->port[port_id];
1143 struct cmac *mac = &adapter->port[port_id].mac;
1147 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1148 /* Clear errors created by MAC enable */
1149 t3_set_reg_field(adapter,
1150 A_XGM_STAT_CTRL + pi->mac.offset,
1152 if_link_state_change(pi->ifp, LINK_STATE_UP);
1155 pi->phy.ops->power_down(&pi->phy, 1);
1156 t3_mac_disable(mac, MAC_DIRECTION_RX);
1157 t3_link_start(&pi->phy, mac, &pi->link_config);
1158 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1159 if_link_state_change(pi->ifp, LINK_STATE_DOWN);
1164 * t3_os_phymod_changed - handle PHY module changes
1165 * @phy: the PHY reporting the module change
1166 * @mod_type: new module type
1168 * This is the OS-dependent handler for PHY module changes. It is
1169 * invoked when a PHY module is removed or inserted for any OS-specific
1172 void t3_os_phymod_changed(struct adapter *adap, int port_id)
1174 static const char *mod_str[] = {
1175 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
1178 struct port_info *pi = &adap->port[port_id];
1180 if (pi->phy.modtype == phy_modtype_none)
1181 device_printf(adap->dev, "PHY module unplugged\n");
1183 KASSERT(pi->phy.modtype < ARRAY_SIZE(mod_str),
1184 ("invalid PHY module type %d", pi->phy.modtype));
1185 device_printf(adap->dev, "%s PHY module inserted\n",
1186 mod_str[pi->phy.modtype]);
1191 * Interrupt-context handler for external (PHY) interrupts.
1194 t3_os_ext_intr_handler(adapter_t *sc)
1197 printf("t3_os_ext_intr_handler\n");
1199 * Schedule a task to handle external interrupts as they may be slow
1200 * and we use a mutex to protect MDIO registers. We disable PHY
1201 * interrupts in the meantime and let the task reenable them when
1205 if (sc->slow_intr_mask) {
1206 sc->slow_intr_mask &= ~F_T3DBG;
1207 t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
1208 taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
1214 t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
1218 * The ifnet might not be allocated before this gets called,
1219 * as this is called early on in attach by t3_prep_adapter
1220 * save the address off in the port structure
1223 printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
1224 bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
1228 * link_start - enable a port
1229 * @p: the port to enable
1231 * Performs the MAC and PHY actions needed to enable a port.
1234 cxgb_link_start(struct port_info *p)
1237 struct t3_rx_mode rm;
1238 struct cmac *mac = &p->mac;
1243 bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);
1246 if (ifp->if_capenable & IFCAP_VLAN_MTU)
1247 mtu += ETHER_VLAN_ENCAP_LEN;
1249 hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;
1251 t3_init_rx_mode(&rm, p);
1252 if (!mac->multiport)
1254 t3_mac_set_mtu(mac, mtu);
1255 t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
1256 t3_mac_set_address(mac, 0, p->hw_addr);
1257 t3_mac_set_rx_mode(mac, &rm);
1258 t3_link_start(&p->phy, mac, &p->link_config);
1259 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
1264 await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
1269 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
1278 init_tp_parity(struct adapter *adap)
1282 struct cpl_set_tcb_field *greq;
1283 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
1285 t3_tp_set_offload_mode(adap, 1);
1287 for (i = 0; i < 16; i++) {
1288 struct cpl_smt_write_req *req;
1290 m = m_gethdr(M_WAITOK, MT_DATA);
1291 req = mtod(m, struct cpl_smt_write_req *);
1292 m->m_len = m->m_pkthdr.len = sizeof(*req);
1293 memset(req, 0, sizeof(*req));
1294 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1295 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
1297 t3_mgmt_tx(adap, m);
1300 for (i = 0; i < 2048; i++) {
1301 struct cpl_l2t_write_req *req;
1303 m = m_gethdr(M_WAITOK, MT_DATA);
1304 req = mtod(m, struct cpl_l2t_write_req *);
1305 m->m_len = m->m_pkthdr.len = sizeof(*req);
1306 memset(req, 0, sizeof(*req));
1307 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1308 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
1309 req->params = htonl(V_L2T_W_IDX(i));
1310 t3_mgmt_tx(adap, m);
1313 for (i = 0; i < 2048; i++) {
1314 struct cpl_rte_write_req *req;
1316 m = m_gethdr(M_WAITOK, MT_DATA);
1317 req = mtod(m, struct cpl_rte_write_req *);
1318 m->m_len = m->m_pkthdr.len = sizeof(*req);
1319 memset(req, 0, sizeof(*req));
1320 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1321 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
1322 req->l2t_idx = htonl(V_L2T_W_IDX(i));
1323 t3_mgmt_tx(adap, m);
1326 m = m_gethdr(M_WAITOK, MT_DATA);
1327 greq = mtod(m, struct cpl_set_tcb_field *);
1328 m->m_len = m->m_pkthdr.len = sizeof(*greq);
1329 memset(greq, 0, sizeof(*greq));
1330 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1331 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
1332 greq->mask = htobe64(1);
1333 t3_mgmt_tx(adap, m);
1335 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
1336 t3_tp_set_offload_mode(adap, 0);
 * setup_rss - configure Receive Side Steering (per-queue connection demux)
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues.  We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 *
 * NOTE(review): decimated chunk — the nq[] declaration/zeroing and the
 * final t3_config_rss() argument list are not fully visible here.
1352 setup_rss(adapter_t *adap)
1356 	uint8_t cpus[SGE_QSETS + 1];
1357 	uint16_t rspq_map[RSS_TABLE_SIZE];
1359 	for (i = 0; i < SGE_QSETS; ++i)
1361 	cpus[SGE_QSETS] = 0xff;	/* terminator for the CPU list */
/* Count the qsets per TX channel across all ports. */
1364 	for_each_port(adap, i) {
1365 		const struct port_info *pi = adap2pinfo(adap, i);
1367 		nq[pi->tx_chan] += pi->nqsets;
/* First half of the table maps to channel 0's queues, second to channel 1's. */
1369 	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
1370 		rspq_map[i] = nq[0] ? i % nq[0] : 0;
1371 		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
1373 	/* Calculate the reverse RSS map table */
1374 	for (i = 0; i < RSS_TABLE_SIZE; ++i)
1375 		if (adap->rrss_map[rspq_map[i]] == 0xff)
1376 			adap->rrss_map[rspq_map[i]] = i;
1378 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
1379 	    F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
1380 	    F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
 *
 * NOTE(review): decimated chunk — the declaration of ret and the return
 * statement are not visible; appears to be a thin wrapper around
 * t3_offload_tx().
1390 offload_tx(struct t3cdev *tdev, struct mbuf *m)
1394 	ret = t3_offload_tx(tdev, m);
/*
 * write_smt_entry - program source MAC table entry @idx with the port's
 * hardware address by sending a CPL_SMT_WRITE_REQ through the offload queue.
 * NOTE(review): decimated chunk — the early-return on mbuf allocation
 * failure and the final return value are not fully visible.
 */
1399 write_smt_entry(struct adapter *adapter, int idx)
1401 	struct port_info *pi = &adapter->port[idx];
1402 	struct cpl_smt_write_req *req;
/* Best effort: bail out quietly if no mbuf is available. */
1405 	if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
1408 	req = mtod(m, struct cpl_smt_write_req *);
1409 	m->m_pkthdr.len = m->m_len = sizeof(struct cpl_smt_write_req);
1411 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1412 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
1413 	req->mtu_idx = NMTUS - 1;  /* should be 0 but there's a T3 bug */
1415 	memset(req->src_mac1, 0, sizeof(req->src_mac1));
1416 	memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);
1418 	m_set_priority(m, 1);
1420 	offload_tx(&adapter->tdev, m);
/*
 * init_smt - write an SMT entry for every port on the adapter.
 * NOTE(review): decimated chunk — declaration of i and the return are
 * not visible.
 */
1426 init_smt(struct adapter *adapter)
1430 	for_each_port(adapter, i)
1431 		write_smt_entry(adapter, i);
/*
 * init_port_mtus - load the per-port MTUs into the TP MTU port table.
 * Port 0's MTU occupies the low 16 bits; port 1's (if present) the high 16.
 */
1436 init_port_mtus(adapter_t *adapter)
1438 	unsigned int mtus = adapter->port[0].ifp->if_mtu;
1440 	if (adapter->port[1].ifp)
1441 		mtus |= adapter->port[1].ifp->if_mtu << 16;
1442 	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/*
 * send_pktsched_cmd - issue a firmware packet-scheduler configuration
 * command (FW_MNGTOPCODE_PKTSCHED_SET) via the management queue.
 * NOTE(review): decimated chunk — the NULL check after m_gethdr and the
 * assignments of sched/idx/lo (and presumably hi) fields are not visible;
 * only the binding/port assignment survives in this view.
 */
1446 send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
1450 	struct mngt_pktsched_wr *req;
1452 	m = m_gethdr(M_DONTWAIT, MT_DATA);
1454 	req = mtod(m, struct mngt_pktsched_wr *);
1455 	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1456 	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1461 	req->binding = port;
1462 	m->m_len = m->m_pkthdr.len = sizeof(*req);
1463 	t3_mgmt_tx(adap, m);
/*
 * bind_qsets - start the per-CPU qset threads and bind each port's queue
 * sets to the hardware packet scheduler.
 * NOTE(review): decimated chunk — the trailing arguments of
 * send_pktsched_cmd() and the loop/return closings are not visible.
 */
1468 bind_qsets(adapter_t *sc)
1472 	cxgb_pcpu_startup_threads(sc);
1473 	for (i = 0; i < (sc)->params.nports; ++i) {
1474 		const struct port_info *pi = adap2pinfo(sc, i);
1476 		for (j = 0; j < pi->nqsets; ++j) {
1477 			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
/*
 * update_tpeeprom - if the protocol SRAM image in the EEPROM is out of
 * date, fetch the TP EEPROM firmware image and write it into the EEPROM.
 * The FIRMWARE_LATEST conditional selects the const-qualified firmware
 * pointer type used by newer firmware(9) APIs.
 * NOTE(review): decimated chunk — several error-path returns/gotos and
 * the t3rev2char() usage are not fully visible.
 */
1485 update_tpeeprom(struct adapter *adap)
1487 #ifdef FIRMWARE_LATEST
1488 	const struct firmware *tpeeprom;
1490 	struct firmware *tpeeprom;
1494 	unsigned int major, minor;
1498 	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);
1500 	major = G_TP_VERSION_MAJOR(version);
1501 	minor = G_TP_VERSION_MINOR(version);
/* Already up to date: nothing to do. */
1502 	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
1505 	rev = t3rev2char(adap);
1507 	tpeeprom = firmware_get(TPEEPROM_NAME);
1508 	if (tpeeprom == NULL) {
1509 		device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n",
1514 	len = tpeeprom->datasize - 4;
1516 	ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize);
1518 		goto release_tpeeprom;
1520 	if (len != TP_SRAM_LEN) {
1521 		device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", TPEEPROM_NAME, len, TP_SRAM_LEN);
1525 	ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize,
1529 		device_printf(adap->dev,
1530 		    "Protocol SRAM image updated in EEPROM to %d.%d.%d\n",
1531 		    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1533 		device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n");
1536 	firmware_put(tpeeprom, FIRMWARE_UNLOAD);
/*
 * update_tpsram - refresh the TP EEPROM image if needed, then fetch the
 * TP SRAM firmware image and load it into the chip's protocol SRAM.
 * NOTE(review): decimated chunk — the ret/rev declarations, the error
 * test after t3_check_tpsram() and the final return are not visible.
 */
1542 update_tpsram(struct adapter *adap)
1544 #ifdef FIRMWARE_LATEST
1545 	const struct firmware *tpsram;
1547 	struct firmware *tpsram;
1552 	rev = t3rev2char(adap);
1556 	update_tpeeprom(adap);
1558 	tpsram = firmware_get(TPSRAM_NAME);
1559 	if (tpsram == NULL){
1560 		device_printf(adap->dev, "could not load TP SRAM\n");
1563 		device_printf(adap->dev, "updating TP SRAM\n");
1565 	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
1567 		goto release_tpsram;
1569 	ret = t3_set_proto_sram(adap, tpsram->data);
1571 		device_printf(adap->dev, "loading protocol SRAM failed\n");
1574 	firmware_put(tpsram, FIRMWARE_UNLOAD);
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * NOTE(review): decimated chunk — many error-path gotos/labels and the
 * declaration of err are not visible in this view.
1589 cxgb_up(struct adapter *sc)
/* One-time full initialization: firmware, TP SRAM, HW, queue sets. */
1593 	if ((sc->flags & FULL_INIT_DONE) == 0) {
1595 		if ((sc->flags & FW_UPTODATE) == 0)
1596 			if ((err = upgrade_fw(sc)))
1598 		if ((sc->flags & TPS_UPTODATE) == 0)
1599 			if ((err = update_tpsram(sc)))
1601 		err = t3_init_hw(sc, 0);
1605 		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1606 		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1608 		err = setup_sge_qsets(sc);
1613 		t3_add_configured_sysctls(sc);
1614 		sc->flags |= FULL_INIT_DONE;
1619 	/* If it's MSI or INTx, allocate a single interrupt for everything */
1620 	if ((sc->flags & USING_MSIX) == 0) {
1621 		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
1622 		    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1623 			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n",
1628 			device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);
1630 		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
1634 		    sc->cxgb_intr, sc, &sc->intr_tag)) {
1635 			device_printf(sc->dev, "Cannot set up interrupt\n");
/* MSI-X path: one vector per queue set plus async. */
1640 		cxgb_setup_msix(sc, sc->msi_count);
/* T3C needs TP parity initialized once when offload is available. */
1646 	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
1647 	    is_offload(sc) && init_tp_parity(sc) == 0)
1648 		sc->flags |= TP_PARITY_INIT;
1650 	if (sc->flags & TP_PARITY_INIT) {
1651 		t3_write_reg(sc, A_TP_INT_CAUSE,
1652 		    F_CMCACHEPERR | F_ARPLUTPERR);
1653 		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
1657 	if (!(sc->flags & QUEUES_BOUND)) {
1659 		sc->flags |= QUEUES_BOUND;
1664 	CH_ERR(sc, "request_irq failed, err %d\n", err);
 * Release resources when all the ports and offloading have been stopped.
 *
 * NOTE(review): decimated chunk — the surrounding lock handling and the
 * function's closing are not visible.
1673 cxgb_down_locked(struct adapter *sc)
1677 	t3_intr_disable(sc);
/* Tear down the shared INTx/MSI interrupt, if one was set up. */
1679 	if (sc->intr_tag != NULL) {
1680 		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
1681 		sc->intr_tag = NULL;
1683 	if (sc->irq_res != NULL) {
1684 		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
1685 		    sc->irq_rid, sc->irq_res);
1686 		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
1691 	if (sc->flags & USING_MSIX)
1692 		cxgb_teardown_msix(sc);
/* Stop, then drain, both periodic callouts before draining tasks. */
1694 	callout_stop(&sc->cxgb_tick_ch);
1695 	callout_stop(&sc->sge_timer_ch);
1696 	callout_drain(&sc->cxgb_tick_ch);
1697 	callout_drain(&sc->sge_timer_ch);
1699 	if (sc->tq != NULL) {
1700 		printf("draining slow intr\n");
1702 		taskqueue_drain(sc->tq, &sc->slow_intr_task);
1703 		printf("draining ext intr\n");
1704 		taskqueue_drain(sc->tq, &sc->ext_intr_task);
1705 		printf("draining tick task\n");
1706 		taskqueue_drain(sc->tq, &sc->tick_task);
/*
 * offload_open - mark the offload device open (via the OFFLOAD_DEVMAP_BIT
 * in open_device_map), bring the adapter up if needed, enable offload mode
 * and notify registered offload clients.
 * NOTE(review): decimated chunk — the success return, error unwinding and
 * the use of adap_up are not fully visible.
 */
1712 offload_open(struct port_info *pi)
1714 	struct adapter *adapter = pi->adapter;
1715 	struct t3cdev *tdev = &adapter->tdev;
1717 	int adap_up = adapter->open_device_map & PORT_MASK;
/* Atomically set OFFLOAD_DEVMAP_BIT; bail if it was already set. */
1720 	if (atomic_cmpset_int(&adapter->open_device_map,
1721 	    (adapter->open_device_map & ~(1<<OFFLOAD_DEVMAP_BIT)),
1722 	    (adapter->open_device_map | (1<<OFFLOAD_DEVMAP_BIT))) == 0)
1725 	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1726 		printf("offload_open: DEVMAP_BIT did not get set 0x%x\n",
1727 		    adapter->open_device_map);
1728 	ADAPTER_LOCK(pi->adapter);
1730 		err = cxgb_up(adapter);
1731 	ADAPTER_UNLOCK(pi->adapter);
1735 	t3_tp_set_offload_mode(adapter, 1);
1736 	tdev->lldev = pi->ifp;
1738 	init_port_mtus(adapter);
1739 	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1740 	    adapter->params.b_wnd,
1741 	    adapter->params.rev == 0 ?
1742 	    adapter->port[0].ifp->if_mtu : 0xffff);
1744 	/* Call back all registered clients */
1745 	cxgb_add_clients(tdev);
1747 	/* restore them in case the offload module has changed them */
/* Error path: undo offload mode, clear the devmap bit, restore dummy ops. */
1749 	t3_tp_set_offload_mode(adapter, 0);
1750 	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1751 	cxgb_set_dummy_ops(tdev);
/*
 * offload_close - inverse of offload_open: notify clients of removal,
 * disable offload mode, clear the devmap bit, and bring the adapter down
 * if no other device (port or offload) remains open.
 */
1757 offload_close(struct t3cdev *tdev)
1759 	struct adapter *adapter = tdev2adap(tdev);
/* Nothing to do if offload was never opened. */
1761 	if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT))
1764 	/* Call back all registered clients */
1765 	cxgb_remove_clients(tdev);
1768 	cxgb_set_dummy_ops(tdev);
1769 	t3_tp_set_offload_mode(adapter, 0);
1770 	clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
1772 	ADAPTER_LOCK(adapter);
1773 	if (!adapter->open_device_map)
1774 		cxgb_down_locked(adapter);
1776 		ADAPTER_UNLOCK(adapter);
/*
 * cxgb_init - if_init handler; takes the port lock (not visible in this
 * decimated view) and delegates to cxgb_init_locked().
 */
1782 cxgb_init(void *arg)
1784 	struct port_info *p = arg;
1787 	cxgb_init_locked(p);
/*
 * cxgb_init_locked - bring a port up: start the adapter on first open,
 * mark the port in open_device_map, optionally open offload, refresh link
 * state, enable port interrupts and mark the ifnet running.
 * Called with the port lock held (asserted below).
 * NOTE(review): decimated chunk — the ifp local, error handling after
 * offload_open and several closing braces are not visible.
 */
1792 cxgb_init_locked(struct port_info *p)
1795 	adapter_t *sc = p->adapter;
1798 	PORT_LOCK_ASSERT_OWNED(p);
1801 	ADAPTER_LOCK(p->adapter);
/* First device open: bring the whole adapter up; back out on failure. */
1802 	if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) {
1803 		ADAPTER_UNLOCK(p->adapter);
1804 		cxgb_stop_locked(p);
1807 	if (p->adapter->open_device_map == 0) {
1810 	setbit(&p->adapter->open_device_map, p->port_id);
1811 	ADAPTER_UNLOCK(p->adapter);
1813 	if (is_offload(sc) && !ofld_disable) {
1814 		err = offload_open(p);
1817 			    "Could not initialize offload capabilities\n");
1819 #if !defined(LINK_ATTACH)
1821 	t3_link_changed(sc, p->port_id);
1823 	ifp->if_baudrate = p->link_config.speed * 1000000;
1825 	device_printf(sc->dev, "enabling interrupts on port=%d\n", p->port_id);
1826 	t3_port_intr_enable(sc, p->port_id);
1828 	t3_sge_reset_adapter(sc);
1830 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1831 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * cxgb_set_rxmode - program the MAC's receive mode (promisc/allmulti/
 * multicast filters) from the port's current state, serialized by the
 * adapter's MDIO lock.
 */
1835 cxgb_set_rxmode(struct port_info *p)
1837 	struct t3_rx_mode rm;
1838 	struct cmac *mac = &p->mac;
1840 	t3_init_rx_mode(&rm, p);
1841 	mtx_lock(&p->adapter->mdio_lock);
1842 	t3_mac_set_rx_mode(mac, &rm);
1843 	mtx_unlock(&p->adapter->mdio_lock);
/*
 * cxgb_stop_locked - bring a port down: disable its interrupts, clear the
 * running flags, quiesce pause/RX-FIFO settings, remove it from
 * open_device_map (taking the adapter down if it was the last open
 * device), then disable the MAC and power down the PHY.
 * Called with the port lock held and the adapter lock NOT held.
 * NOTE(review): decimated chunk — the ifp local and several closing
 * braces are not visible.
 */
1847 cxgb_stop_locked(struct port_info *pi)
1851 	PORT_LOCK_ASSERT_OWNED(pi);
1852 	ADAPTER_LOCK_ASSERT_NOTOWNED(pi->adapter);
1855 	t3_port_intr_disable(pi->adapter, pi->port_id);
1856 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1858 	/* disable pause frames */
1859 	t3_set_reg_field(pi->adapter, A_XGM_TX_CFG + pi->mac.offset,
1862 	/* Reset RX FIFO HWM */
1863 	t3_set_reg_field(pi->adapter, A_XGM_RXFIFO_CFG + pi->mac.offset,
1864 	    V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);
1867 	ADAPTER_LOCK(pi->adapter);
1868 	clrbit(&pi->adapter->open_device_map, pi->port_id);
1870 	if (pi->adapter->open_device_map == 0) {
1871 		cxgb_down_locked(pi->adapter);
1873 		ADAPTER_UNLOCK(pi->adapter);
1875 #if !defined(LINK_ATTACH)
1878 	/* Wait for TXFIFO empty */
1879 	t3_wait_op_done(pi->adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
1880 	    F_TXFIFO_EMPTY, 1, 20, 5);
1883 	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1885 	pi->phy.ops->power_down(&pi->phy, 1);
/*
 * cxgb_set_mtu - validate and apply a new MTU; restarts the port if it is
 * currently running so the hardware picks up the new size.
 * NOTE(review): decimated chunk — the EINVAL return on the range check,
 * the if_mtu assignment and the function's return are not visible.
 */
1891 cxgb_set_mtu(struct port_info *p, int mtu)
1893 	struct ifnet *ifp = p->ifp;
/* Reject MTUs outside [ETHERMIN, ETHERMTU_JUMBO]. */
1896 	if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1898 	else if (ifp->if_mtu != mtu) {
1901 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1902 			cxgb_stop_locked(p);
1903 			cxgb_init_locked(p);
#ifdef LRO_SUPPORTED
/*
 * Mark lro enabled or disabled in all qsets for this port
 * (called with the port lock held).
 */
1915 cxgb_set_lro(struct port_info *p, int enabled)
1918 	struct adapter *adp = p->adapter;
1921 	PORT_LOCK_ASSERT_OWNED(p);
1922 	for (i = 0; i < p->nqsets; i++) {
1923 		q = &adp->sge.qs[p->first_qset + i];
1924 		q->lro.enabled = (enabled != 0);
/*
 * cxgb_ioctl - ifnet ioctl handler: MTU changes, address/flags handling,
 * media queries and capability toggles (checksum offload, TSO, LRO, VLAN).
 * NOTE(review): heavily decimated chunk — the switch statement, several
 * case labels, lock acquisitions/releases and break statements are not
 * visible; comments below are keyed to the surviving lines only.
 */
1931 cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
1933 	struct port_info *p = ifp->if_softc;
1934 	struct ifaddr *ifa = (struct ifaddr *)data;
1935 	struct ifreq *ifr = (struct ifreq *)data;
1936 	int flags, error = 0, reinit = 0;
1940 	 * XXX need to check that we aren't in the middle of an unload
/* SIOCSIFMTU path. */
1944 		error = cxgb_set_mtu(p, ifr->ifr_mtu);
/* SIOCSIFADDR path: bring the interface up for AF_INET and arp_ifinit. */
1947 		if (ifa->ifa_addr->sa_family == AF_INET) {
1948 			ifp->if_flags |= IFF_UP;
1949 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1951 				cxgb_init_locked(p);
1954 			arp_ifinit(ifp, ifa);
1956 			error = ether_ioctl(ifp, command, data);
/* SIOCSIFFLAGS path: restart on promisc/allmulti change, stop on down. */
1960 		if (ifp->if_flags & IFF_UP) {
1961 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1962 				flags = p->if_flags;
1963 				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
1964 				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
1967 				cxgb_init_locked(p);
1968 			p->if_flags = ifp->if_flags;
1969 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1970 			cxgb_stop_locked(p);
1976 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* SIOCSIFMEDIA/SIOCGIFMEDIA path. */
1982 		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
/* SIOCSIFCAP path: toggle capabilities requested by the user. */
1986 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1987 		if (mask & IFCAP_TXCSUM) {
1988 			if (IFCAP_TXCSUM & ifp->if_capenable) {
/* Disabling TX csum also disables TSO, which depends on it. */
1989 				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
1990 				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
1991 				    | CSUM_IP | CSUM_TSO);
1993 				ifp->if_capenable |= IFCAP_TXCSUM;
1994 				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
1998 		if (mask & IFCAP_RXCSUM) {
1999 			ifp->if_capenable ^= IFCAP_RXCSUM;
2001 		if (mask & IFCAP_TSO4) {
2002 			if (IFCAP_TSO4 & ifp->if_capenable) {
2003 				ifp->if_capenable &= ~IFCAP_TSO4;
2004 				ifp->if_hwassist &= ~CSUM_TSO;
2005 			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
2006 				ifp->if_capenable |= IFCAP_TSO4;
2007 				ifp->if_hwassist |= CSUM_TSO;
2010 				printf("cxgb requires tx checksum offload"
2011 				    " be enabled to use TSO\n");
2015 #ifdef LRO_SUPPORTED
2016 		if (mask & IFCAP_LRO) {
2017 			ifp->if_capenable ^= IFCAP_LRO;
2019 			/* Safe to do this even if cxgb_up not called yet */
2020 			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
2023 		if (mask & IFCAP_VLAN_HWTAGGING) {
2024 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2025 			reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
2027 		if (mask & IFCAP_VLAN_MTU) {
2028 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2029 			reinit = ifp->if_drv_flags & IFF_DRV_RUNNING;
2031 		if (mask & IFCAP_VLAN_HWCSUM) {
2032 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
/* Reinit if a toggled capability requires it and we are running. */
2035 			cxgb_stop_locked(p);
2036 			cxgb_init_locked(p);
2040 #ifdef VLAN_CAPABILITIES
2041 		VLAN_CAPABILITIES(ifp);
/* Default: hand everything else to ether_ioctl(). */
2045 		error = ether_ioctl(ifp, command, data);
/*
 * cxgb_media_change - ifmedia change callback; media selection is not
 * supported (presumably returns an error — the return line is not
 * visible in this decimated view).
 */
2052 cxgb_media_change(struct ifnet *ifp)
2054 	if_printf(ifp, "media change not supported\n");
/*
 * cxgb_media_status - ifmedia status callback: report link validity,
 * speed (10/100/1000) and duplex from the port's link_config.
 * NOTE(review): decimated chunk — the switch's case labels and the
 * early return when the link is down are not visible.
 */
2059 cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2061 	struct port_info *p = ifp->if_softc;
2063 	ifmr->ifm_status = IFM_AVALID;
2064 	ifmr->ifm_active = IFM_ETHER;
2066 	if (!p->link_config.link_ok)
2069 	ifmr->ifm_status |= IFM_ACTIVE;
2071 	switch (p->link_config.speed) {
2073 		ifmr->ifm_active |= IFM_10_T;
2076 		ifmr->ifm_active |= IFM_100_TX;
2079 		ifmr->ifm_active |= IFM_1000_T;
2083 	if (p->link_config.duplex)
2084 		ifmr->ifm_active |= IFM_FDX;
2086 		ifmr->ifm_active |= IFM_HDX;
/*
 * cxgb_async_intr - asynchronous (slow-path) interrupt handler; defers
 * the real work to the taskqueue because it may need to sleep.
 */
2090 cxgb_async_intr(void *data)
2092 	adapter_t *sc = data;
2095 	device_printf(sc->dev, "cxgb_async_intr\n");
2097 	 * May need to sleep - defer to taskqueue
2099 	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
/*
 * cxgb_ext_intr_handler - taskqueue handler for external (PHY) interrupts:
 * run the PHY interrupt handler, then re-enable the T3DBG bit in the slow
 * interrupt mask.  The lock protecting slow_intr_mask is not visible in
 * this decimated view.
 */
2103 cxgb_ext_intr_handler(void *arg, int count)
2105 	adapter_t *sc = (adapter_t *)arg;
2108 	printf("cxgb_ext_intr_handler\n");
2110 	t3_phy_intr_handler(sc);
2112 	/* Now reenable external interrupts */
2114 	if (sc->slow_intr_mask) {
2115 		sc->slow_intr_mask |= F_T3DBG;
2116 		t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
2117 		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
/*
 * check_link_status - poll link state for every port whose PHY cannot
 * generate interrupts, and refresh if_baudrate from the negotiated speed.
 */
2123 check_link_status(adapter_t *sc)
2127 	for (i = 0; i < (sc)->params.nports; ++i) {
2128 		struct port_info *p = &sc->port[i];
2130 		if (!(p->phy.caps & SUPPORTED_IRQ))
2131 			t3_link_changed(sc, i);
2132 		p->ifp->if_baudrate = p->link_config.speed * 1000000;
/*
 * check_t3b2_mac - periodic T3B2 MAC watchdog: run the per-port watchdog
 * task and, when it reports a wedged MAC (status 2), fully reprogram and
 * restart the MAC.  Aborts early if the adapter is shutting down.
 * NOTE(review): decimated chunk — the status declaration, port locking
 * and loop-closing braces are not visible.
 */
2137 check_t3b2_mac(struct adapter *adapter)
2141 	if(adapter->flags & CXGB_SHUTDOWN)
2144 	for_each_port(adapter, i) {
2145 		struct port_info *p = &adapter->port[i];
2146 		struct ifnet *ifp = p->ifp;
2149 		if(adapter->flags & CXGB_SHUTDOWN)
2152 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2157 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING))
2158 			status = t3b2_mac_watchdog_task(&p->mac);
/* status 1: MAC was toggled; status 2: MAC needs a full reset. */
2160 			p->mac.stats.num_toggled++;
2161 		else if (status == 2) {
2162 			struct cmac *mac = &p->mac;
2163 			int mtu = ifp->if_mtu;
2165 			if (ifp->if_capenable & IFCAP_VLAN_MTU)
2166 				mtu += ETHER_VLAN_ENCAP_LEN;
2167 			t3_mac_set_mtu(mac, mtu);
2168 			t3_mac_set_address(mac, 0, p->hw_addr);
2170 			t3_link_start(&p->phy, mac, &p->link_config);
2171 			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2172 			t3_port_intr_enable(adapter, p->port_id);
2173 			p->mac.stats.num_resets++;
/*
 * cxgb_tick - periodic callout: enqueue the tick task and re-arm itself,
 * unless the adapter is shutting down.
 */
2180 cxgb_tick(void *arg)
2182 	adapter_t *sc = (adapter_t *)arg;
2184 	if(sc->flags & CXGB_SHUTDOWN)
2187 	taskqueue_enqueue(sc->tq, &sc->tick_task);
2188 	callout_reset(&sc->cxgb_tick_ch, CXGB_TICKS(sc), cxgb_tick, sc);
/*
 * cxgb_tick_handler - taskqueue body of the periodic tick: poll link
 * state (if linkpoll_period is set), run the T3B2 MAC watchdog when
 * appropriate, and accumulate MAC statistics at stats_update_period.
 * NOTE(review): decimated chunk — the check_t3b2_mac() call implied by
 * the rev/nports test, port locking and closing braces are not visible.
 */
2192 cxgb_tick_handler(void *arg, int count)
2194 	adapter_t *sc = (adapter_t *)arg;
2195 	const struct adapter_params *p = &sc->params;
2198 	if(sc->flags & CXGB_SHUTDOWN)
2202 	if (p->linkpoll_period)
2203 		check_link_status(sc);
2205 	sc->check_task_cnt++;
2208 	 * adapter lock can currently only be acquired after the
/* T3B2 with open devices gets the MAC watchdog treatment. */
2213 	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
2216 	/* Update MAC stats if it's time to do so */
2217 	if (!p->linkpoll_period ||
2218 	    (sc->check_task_cnt * p->linkpoll_period) / 10 >=
2219 	    p->stats_update_period) {
2220 		for_each_port(sc, i) {
2221 			struct port_info *port = &sc->port[i];
2223 			t3_mac_update_stats(&port->mac);
2226 		sc->check_task_cnt = 0;
/*
 * touch_bars - rewrite the upper halves of the 64-bit BARs (1/3/5) with
 * their current values.  The body is compiled out (#if ... && 0) and
 * references a Linux-style pdev/pci_*_config_dword API; dead code kept
 * for reference on 32-bit platforms.
 */
2231 touch_bars(device_t dev)
2236 #if !defined(__LP64__) && 0
2239 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v);
2240 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
2241 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
2242 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
2243 	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
2244 	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
/*
 * set_eeprom - write @len bytes at @offset into the serial EEPROM.  The
 * EEPROM is written in aligned 32-bit words, so unaligned edges are
 * handled read-modify-write via a temporary buffer; write protection is
 * lifted for the duration and restored afterwards.
 * NOTE(review): decimated chunk — error-path gotos after the reads and
 * the final return are not visible.
 */
2249 set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
2253 	u32 aligned_offset, aligned_len, *p;
2254 	struct adapter *adapter = pi->adapter;
2257 	aligned_offset = offset & ~3;
2258 	aligned_len = (len + (offset & 3) + 3) & ~3;
/* Unaligned edges: read the boundary words, then splice in the new data. */
2260 	if (aligned_offset != offset || aligned_len != len) {
2261 		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
2264 		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
2265 		if (!err && aligned_len > 4)
2266 			err = t3_seeprom_read(adapter,
2267 			    aligned_offset + aligned_len - 4,
2268 			    (u32 *)&buf[aligned_len - 4]);
2271 		memcpy(buf + (offset & 3), data, len);
/* Already aligned: write directly from the caller's buffer. */
2273 		buf = (uint8_t *)(uintptr_t)data;
2275 	err = t3_seeprom_wp(adapter, 0);
2279 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2280 		err = t3_seeprom_write(adapter, aligned_offset, *p);
2281 		aligned_offset += 4;
2285 		err = t3_seeprom_wp(adapter, 1);
2288 		free(buf, M_DEVBUF);
/*
 * in_range - validate that an ioctl parameter lies within [lo, hi].
 *
 * A negative value means "parameter not specified" in this driver's ioctl
 * interface and is always accepted.  Returns nonzero when acceptable.
 *
 * (Reconstructed: the original span carried embedded line-number
 * artifacts and was missing its declaration line and braces.)
 */
static __inline int
in_range(int val, int lo, int hi)
{
	return (val < 0 || (val <= hi && val >= lo));
}
/*
 * cxgb_extension_open / cxgb_extension_close - cdev open/close stubs for
 * the extension ioctl device.  Bodies (presumably `return 0`) are not
 * visible in this decimated view.
 */
2300 cxgb_extension_open(struct cdev *dev, int flags, int fmp, d_thread_t *td)
2306 cxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
/*
 * cxgb_extension_ioctl - privileged Chelsio-specific ioctl dispatcher:
 * MII register access, raw register peek/poke, SGE context/descriptor
 * inspection, qset parameters, firmware/bootcode loading, page-manager
 * and MTU-table configuration, memory/TCAM reads, trace filters, packet
 * scheduler, register dumps, HW scheduler, EEPROM reads and stats clear.
 * Requires PRIV_DRIVER (or suser on pre-priv kernels).
 * NOTE(review): heavily decimated chunk — case labels, break statements,
 * many error checks and closing braces are not visible; comments below
 * annotate only the surviving lines.
 */
2312 cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
2313     int fflag, struct thread *td)
2316 	struct port_info *pi = dev->si_drv1;
2317 	adapter_t *sc = pi->adapter;
2319 #ifdef PRIV_SUPPORTED
2320 	if (priv_check(td, PRIV_DRIVER)) {
2322 		printf("user does not have access to privileged ioctls\n");
2328 		printf("user does not have access to privileged ioctls\n");
2334 	case CHELSIO_GET_MIIREG: {
2336 		struct cphy *phy = &pi->phy;
2337 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2339 		if (!phy->mdio_read)
2340 			return (EOPNOTSUPP);
/* Clause-45 access when an MMD is encoded in the phy_id's upper byte. */
2342 		mmd = mid->phy_id >> 8;
2345 		else if (mmd > MDIO_DEV_XGXS)
2348 			error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
2349 			    mid->reg_num, &val);
2351 			error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
2352 			    mid->reg_num & 0x1f, &val);
2357 	case CHELSIO_SET_MIIREG: {
2358 		struct cphy *phy = &pi->phy;
2359 		struct ch_mii_data *mid = (struct ch_mii_data *)data;
2361 		if (!phy->mdio_write)
2362 			return (EOPNOTSUPP);
2364 		mmd = mid->phy_id >> 8;
2367 		else if (mmd > MDIO_DEV_XGXS)
2370 			error = phy->mdio_write(sc, mid->phy_id & 0x1f,
2371 			    mmd, mid->reg_num, mid->val_in);
2373 			error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
2374 			    mid->reg_num & 0x1f,
2378 	case CHELSIO_SETREG: {
2379 		struct ch_reg *edata = (struct ch_reg *)data;
/* Reject unaligned or out-of-window register addresses. */
2380 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2382 		t3_write_reg(sc, edata->addr, edata->val);
2385 	case CHELSIO_GETREG: {
2386 		struct ch_reg *edata = (struct ch_reg *)data;
2387 		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
2389 		edata->val = t3_read_reg(sc, edata->addr);
2392 	case CHELSIO_GET_SGE_CONTEXT: {
2393 		struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
/* SGE context reads must be serialized by the registers spin lock. */
2394 		mtx_lock_spin(&sc->sge.reg_lock);
2395 		switch (ecntxt->cntxt_type) {
2396 		case CNTXT_TYPE_EGRESS:
2397 			error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
2401 			error = -t3_sge_read_fl(sc, ecntxt->cntxt_id,
2404 		case CNTXT_TYPE_RSP:
2405 			error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id,
2409 			error = -t3_sge_read_cq(sc, ecntxt->cntxt_id,
2416 		mtx_unlock_spin(&sc->sge.reg_lock);
2419 	case CHELSIO_GET_SGE_DESC: {
2420 		struct ch_desc *edesc = (struct ch_desc *)data;
/* 6 rings per qset: 3 TX queues, 2 free lists, 1 response queue. */
2422 		if (edesc->queue_num >= SGE_QSETS * 6)
2424 		ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
2425 		    edesc->queue_num % 6, edesc->idx, edesc->data);
2431 	case CHELSIO_GET_QSET_PARAMS: {
2432 		struct qset_params *q;
2433 		struct ch_qset_params *t = (struct ch_qset_params *)data;
2434 		int q1 = pi->first_qset;
2435 		int nqsets = pi->nqsets;
2438 		if (t->qset_idx >= nqsets)
2441 		i = q1 + t->qset_idx;
2442 		q = &sc->params.sge.qset[i];
2443 		t->rspq_size   = q->rspq_size;
2444 		t->txq_size[0] = q->txq_size[0];
2445 		t->txq_size[1] = q->txq_size[1];
2446 		t->txq_size[2] = q->txq_size[2];
2447 		t->fl_size[0]  = q->fl_size;
2448 		t->fl_size[1]  = q->jumbo_size;
2449 		t->polling     = q->polling;
2451 		t->intr_lat    = q->coalesce_usecs;
2452 		t->cong_thres  = q->cong_thres;
2455 		if (sc->flags & USING_MSIX)
2456 			t->vector = rman_get_start(sc->msix_irq_res[i]);
2458 			t->vector = rman_get_start(sc->irq_res);
2462 	case CHELSIO_GET_QSET_NUM: {
2463 		struct ch_reg *edata = (struct ch_reg *)data;
2464 		edata->val = pi->nqsets;
2467 	case CHELSIO_LOAD_FW: {
2470 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2473 		 * You're allowed to load a firmware only before FULL_INIT_DONE
2475 		 * FW_UPTODATE is also set so the rest of the initialization
2476 		 * will not overwrite what was loaded here. This gives you the
2477 		 * flexibility to load any firmware (and maybe shoot yourself in
2482 		if (sc->open_device_map || sc->flags & FULL_INIT_DONE) {
2487 		fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2491 		error = copyin(t->buf, fw_data, t->len);
2494 			error = -t3_load_fw(sc, fw_data, t->len);
2496 		if (t3_get_fw_version(sc, &vers) == 0) {
2497 			snprintf(&sc->fw_version[0], sizeof(sc->fw_version),
2498 			    "%d.%d.%d", G_FW_VERSION_MAJOR(vers),
2499 			    G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers));
2503 			sc->flags |= FW_UPTODATE;
2505 		free(fw_data, M_DEVBUF);
2509 	case CHELSIO_LOAD_BOOT: {
2511 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2513 		boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT);
2517 		error = copyin(t->buf, boot_data, t->len);
2519 			error = -t3_load_boot(sc, boot_data, t->len);
2521 		free(boot_data, M_DEVBUF);
2524 	case CHELSIO_GET_PM: {
2525 		struct ch_pm *m = (struct ch_pm *)data;
2526 		struct tp_params *p = &sc->params.tp;
2528 		if (!is_offload(sc))
2529 			return (EOPNOTSUPP);
2531 		m->tx_pg_sz = p->tx_pg_size;
2532 		m->tx_num_pg = p->tx_num_pgs;
2533 		m->rx_pg_sz = p->rx_pg_size;
2534 		m->rx_num_pg = p->rx_num_pgs;
2535 		m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2539 	case CHELSIO_SET_PM: {
2540 		struct ch_pm *m = (struct ch_pm *)data;
2541 		struct tp_params *p = &sc->params.tp;
2543 		if (!is_offload(sc))
2544 			return (EOPNOTSUPP);
2545 		if (sc->flags & FULL_INIT_DONE)
2548 		if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) ||
2549 		    !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1)))
2550 			return (EINVAL);	/* not power of 2 */
2551 		if (!(m->rx_pg_sz & 0x14000))
2552 			return (EINVAL);	/* not 16KB or 64KB */
2553 		if (!(m->tx_pg_sz & 0x1554000))
2555 		if (m->tx_num_pg == -1)
2556 			m->tx_num_pg = p->tx_num_pgs;
2557 		if (m->rx_num_pg == -1)
2558 			m->rx_num_pg = p->rx_num_pgs;
2559 		if (m->tx_num_pg % 24 || m->rx_num_pg % 24)
2561 		if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size ||
2562 		    m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size)
2565 		p->rx_pg_size = m->rx_pg_sz;
2566 		p->tx_pg_size = m->tx_pg_sz;
2567 		p->rx_num_pgs = m->rx_num_pg;
2568 		p->tx_num_pgs = m->tx_num_pg;
2571 	case CHELSIO_SETMTUTAB: {
2572 		struct ch_mtus *m = (struct ch_mtus *)data;
2575 		if (!is_offload(sc))
2576 			return (EOPNOTSUPP);
2577 		if (offload_running(sc))
2579 		if (m->nmtus != NMTUS)
2581 		if (m->mtus[0] < 81)		/* accommodate SACK */
2585 		 * MTUs must be in ascending order
2587 		for (i = 1; i < NMTUS; ++i)
2588 			if (m->mtus[i] < m->mtus[i - 1])
2591 		memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus));
2594 	case CHELSIO_GETMTUTAB: {
2595 		struct ch_mtus *m = (struct ch_mtus *)data;
2597 		if (!is_offload(sc))
2598 			return (EOPNOTSUPP);
2600 		memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus));
2604 	case CHELSIO_GET_MEM: {
2605 		struct ch_mem_range *t = (struct ch_mem_range *)data;
2611 		 * Use these to avoid modifying len/addr in the the return
2614 		uint32_t len = t->len, addr = t->addr;
2616 		if (!is_offload(sc))
2617 			return (EOPNOTSUPP);
2618 		if (!(sc->flags & FULL_INIT_DONE))
2619 			return (EIO);		/* need the memory controllers */
2620 		if ((addr & 0x7) || (len & 0x7))
2622 		if (t->mem_id == MEM_CM)
2624 		else if (t->mem_id == MEM_PMRX)
2626 		else if (t->mem_id == MEM_PMTX)
2633 		 * bits 0..9: chip version
2634 		 * bits 10..15: chip revision
2636 		t->version = 3 | (sc->params.rev << 10);
2639 		 * Read 256 bytes at a time as len can be large and we don't
2640 		 * want to use huge intermediate buffers.
2642 		useraddr = (uint8_t *)t->buf;
2644 			unsigned int chunk = min(len, sizeof(buf));
2646 			error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf);
2649 			if (copyout(buf, useraddr, chunk))
2657 	case CHELSIO_READ_TCAM_WORD: {
2658 		struct ch_tcam_word *t = (struct ch_tcam_word *)data;
2660 		if (!is_offload(sc))
2661 			return (EOPNOTSUPP);
2662 		if (!(sc->flags & FULL_INIT_DONE))
2663 			return (EIO);		/* need MC5 */
2664 		return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf);
2667 	case CHELSIO_SET_TRACE_FILTER: {
2668 		struct ch_trace *t = (struct ch_trace *)data;
2669 		const struct trace_params *tp;
2671 		tp = (const struct trace_params *)&t->sip;
2673 			t3_config_trace_filter(sc, tp, 0, t->invert_match,
2676 			t3_config_trace_filter(sc, tp, 1, t->invert_match,
2680 	case CHELSIO_SET_PKTSCHED: {
2681 		struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
2682 		if (sc->open_device_map == 0)
2684 		send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
2688 	case CHELSIO_IFCONF_GETREGS: {
2689 		struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data;
2690 		int reglen = cxgb_get_regs_len();
2691 		uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT);
2695 		if (regs->len > reglen)
2697 		else if (regs->len < reglen)
2701 		cxgb_get_regs(sc, regs, buf);
2702 		error = copyout(buf, regs->data, reglen);
2704 		free(buf, M_DEVBUF);
2708 	case CHELSIO_SET_HW_SCHED: {
2709 		struct ch_hw_sched *t = (struct ch_hw_sched *)data;
2710 		unsigned int ticks_per_usec = core_ticks_per_usec(sc);
2712 		if ((sc->flags & FULL_INIT_DONE) == 0)
2713 			return (EAGAIN);	/* need TP to be initialized */
2714 		if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) ||
2715 		    !in_range(t->channel, 0, 1) ||
2716 		    !in_range(t->kbps, 0, 10000000) ||
2717 		    !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) ||
2718 		    !in_range(t->flow_ipg, 0,
2719 		    dack_ticks_to_usec(sc, 0x7ff)))
2723 			error = t3_config_sched(sc, t->kbps, t->sched);
2727 		if (t->class_ipg >= 0)
2728 			t3_set_sched_ipg(sc, t->sched, t->class_ipg);
2729 		if (t->flow_ipg >= 0) {
2730 			t->flow_ipg *= 1000;	/* us -> ns */
2731 			t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1);
2734 			int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched);
2736 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2737 			    bit, t->mode ? bit : 0);
2739 		if (t->channel >= 0)
2740 			t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP,
2741 			    1 << t->sched, t->channel << t->sched);
2744 	case CHELSIO_GET_EEPROM: {
2746 		struct ch_eeprom *e = (struct ch_eeprom *)data;
2747 		uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT);
2752 		e->magic = EEPROM_MAGIC;
2753 		for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4)
2754 			error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]);
2757 			error = copyout(buf + e->offset, e->data, e->len);
2759 		free(buf, M_DEVBUF);
2762 	case CHELSIO_CLEAR_STATS: {
2763 		if (!(sc->flags & FULL_INIT_DONE))
2767 		t3_mac_update_stats(&pi->mac);
2768 		memset(&pi->mac.stats, 0, sizeof(pi->mac.stats));
2773 		return (EOPNOTSUPP);
/*
 * reg_block_dump - copy the contiguous adapter register range
 * [start, end] (inclusive, byte addresses) into the dump buffer at
 * offset start, one 32-bit register at a time.
 *
 * (Reconstructed: the original span carried embedded line-number
 * artifacts and was missing its closing parameter, braces, and the
 * second parameter line; the name `end` is taken from the loop bound.)
 */
static __inline void
reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	uint32_t *p = (uint32_t *)(buf + start);

	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t3_read_reg(ap, start);
}
/* Total size in bytes of the register dump produced by cxgb_get_regs(). */
#define T3_REGMAP_SIZE (3 * 1024)

/*
 * cxgb_get_regs_len - report the register-dump buffer size required by
 * the CHELSIO_IFCONF_GETREGS ioctl.
 *
 * (Reconstructed: the original span carried embedded line-number
 * artifacts and was missing the declaration line and braces; the
 * `static int` decoration is an assumption — confirm against the full
 * file.)
 */
static int
cxgb_get_regs_len(void)
{
	return (T3_REGMAP_SIZE);
}
/*
 * cxgb_get_regs - dump the interesting adapter register blocks into @buf
 * for the CHELSIO_IFCONF_GETREGS ioctl.  MAC statistics registers are
 * deliberately skipped (clear-on-read; see comment below).
 */
2798 cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf)
2803 	 * bits 0..9: chip version
2804 	 * bits 10..15: chip revision
2805 	 * bit 31: set for PCIe cards
2807 	regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
2810 	 * We skip the MAC statistics registers because they are clear-on-read.
2811 	 * Also reading multi-register stats would need to synchronize with the
2812 	 * periodic mac stats accumulation. Hard to justify the complexity.
2814 	memset(buf, 0, cxgb_get_regs_len());
2815 	reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
2816 	reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
2817 	reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
2818 	reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
2819 	reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
2820 	reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
2821 	    XGM_REG(A_XGM_SERDES_STAT3, 1));
2822 	reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
2823 	    XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
2827 MODULE_DEPEND(if_cxgb, cxgb_t3fw, 1, 1, 1);