2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2019 The FreeBSD Foundation, Inc.
6 * This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver.
36 * Product information:
37 * LAN7430 https://www.microchip.com/wwwproducts/en/LAN7430
38 * - Integrated IEEE 802.3 compliant PHY
39 * LAN7431 https://www.microchip.com/wwwproducts/en/LAN7431
42 * This driver uses the iflib interface and the default 'ukphy' PHY driver.
44 * UNIMPLEMENTED FEATURES
45 * ----------------------
46 * A number of features supported by LAN743X device are not yet implemented in
49 * - Multiple (up to 4) RX queues support
50 * - Just needs to remove asserts and malloc multiple `rx_ring_data`
51 * structs based on ncpus.
52 * - RX/TX Checksum Offloading support
54 * - Receive Packet Filtering (Multicast Perfect/Hash Address) support
55 * - Wake on LAN (WoL) support
57 * - Receive Side Scaling (RSS) support
58 * - Debugging Capabilities:
59 * - Could include MAC statistics and
60 * error status registers in sysctl.
63 #include <sys/param.h>
65 #include <sys/endian.h>
67 #include <sys/kernel.h>
68 #include <sys/module.h>
70 #include <sys/socket.h>
71 #include <sys/sockio.h>
72 #include <machine/bus.h>
73 #include <machine/resource.h>
75 #include <net/ethernet.h>
77 #include <net/if_var.h>
78 #include <net/if_types.h>
79 #include <net/if_media.h>
80 #include <net/iflib.h>
82 #include <dev/mgb/if_mgb.h>
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 #include <dev/pci/pcireg.h>
86 #include <dev/pci/pcivar.h>
89 #include "miibus_if.h"
/*
 * PCI IDs this driver claims; iflib consults this table at probe time
 * (see IFLIB_PNP_INFO below and the device_probe -> iflib_device_probe glue).
 */
static pci_vendor_info_t mgb_vendor_info_array[] = {
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID,
	    "Microchip LAN7430 PCIe Gigabit Ethernet Controller"),
	PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID,
	    "Microchip LAN7431 PCIe Gigabit Ethernet Controller"),
/* Newbus DEVICE method */
static device_register_t mgb_register;

/* IFLIB device-interface (ifdi) methods */
static ifdi_attach_pre_t mgb_attach_pre;
static ifdi_attach_post_t mgb_attach_post;
static ifdi_detach_t mgb_detach;
static ifdi_tx_queues_alloc_t mgb_tx_queues_alloc;
static ifdi_rx_queues_alloc_t mgb_rx_queues_alloc;
static ifdi_queues_free_t mgb_queues_free;
static ifdi_init_t mgb_init;
static ifdi_stop_t mgb_stop;
static ifdi_msix_intr_assign_t mgb_msix_intr_assign;
static ifdi_tx_queue_intr_enable_t mgb_tx_queue_intr_enable;
static ifdi_rx_queue_intr_enable_t mgb_rx_queue_intr_enable;
static ifdi_intr_enable_t mgb_intr_enable_all;
static ifdi_intr_disable_t mgb_intr_disable_all;

/* IFLIB_TXRX methods */
/* NOTE(review): several prototypes below are cut mid-declaration —
 * the extraction appears to have dropped their continuation lines. */
static int mgb_isc_txd_encap(void *,
static void mgb_isc_txd_flush(void *,
static int mgb_isc_txd_credits_update(void *,
static int mgb_isc_rxd_available(void *,
    uint16_t, qidx_t, qidx_t);
static int mgb_isc_rxd_pkt_get(void *,
static void mgb_isc_rxd_refill(void *,
static void mgb_isc_rxd_flush(void *,
    uint16_t, uint8_t, qidx_t);

/* Interrupt filter handlers */
static driver_filter_t mgb_legacy_intr;
static driver_filter_t mgb_admin_intr;
static driver_filter_t mgb_rxq_intr;
static bool mgb_intr_test(struct mgb_softc *);

/* MII bus (PHY) methods */
static miibus_readreg_t mgb_miibus_readreg;
static miibus_writereg_t mgb_miibus_writereg;
static miibus_linkchg_t mgb_miibus_linkchg;
static miibus_statchg_t mgb_miibus_statchg;

static int mgb_media_change(if_t);
static void mgb_media_status(if_t,
    struct ifmediareq *);

/* Helper/Test functions */
static int mgb_test_bar(struct mgb_softc *);
static int mgb_alloc_regs(struct mgb_softc *);
static int mgb_release_regs(struct mgb_softc *);
static void mgb_get_ethaddr(struct mgb_softc *,
    struct ether_addr *);
static int mgb_wait_for_bits(struct mgb_softc *,

/* H/W init, reset and teardown helpers */
static int mgb_hw_init(struct mgb_softc *);
static int mgb_hw_teardown(struct mgb_softc *);
static int mgb_hw_reset(struct mgb_softc *);
static int mgb_mac_init(struct mgb_softc *);
static int mgb_dmac_reset(struct mgb_softc *);
static int mgb_phy_reset(struct mgb_softc *);

static int mgb_dma_init(struct mgb_softc *);
static int mgb_dma_tx_ring_init(struct mgb_softc *,
static int mgb_dma_rx_ring_init(struct mgb_softc *,

static int mgb_dmac_control(struct mgb_softc *,
    int, int, enum mgb_dmac_cmd);
static int mgb_fct_control(struct mgb_softc *,
    int, int, enum mgb_fct_cmd);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/* Newbus methods: probe/attach/etc. are delegated straight to iflib. */
static device_method_t mgb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, mgb_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),

	/* MII interface: PHY register access and link/state callbacks */
	DEVMETHOD(miibus_readreg, mgb_miibus_readreg),
	DEVMETHOD(miibus_writereg, mgb_miibus_writereg),
	DEVMETHOD(miibus_linkchg, mgb_miibus_linkchg),
	DEVMETHOD(miibus_statchg, mgb_miibus_statchg),

static driver_t mgb_driver = {
	"mgb", mgb_methods, sizeof(struct mgb_softc)

devclass_t mgb_devclass;

/* Attach mgb to the PCI bus and export PNP info for devmatch(8). */
DRIVER_MODULE(mgb, pci, mgb_driver, mgb_devclass, NULL, NULL);
IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array);
MODULE_VERSION(mgb, 1);

#if 0 /* MIIBUS_DEBUG */
/* If MIIBUS debug stuff is in attach then order matters. Use below instead. */
DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL,
#endif /* MIIBUS_DEBUG */
DRIVER_MODULE(miibus, mgb, miibus_driver, miibus_devclass, NULL, NULL);

MODULE_DEPEND(mgb, pci, 1, 1, 1);
MODULE_DEPEND(mgb, ether, 1, 1, 1);
MODULE_DEPEND(mgb, miibus, 1, 1, 1);
MODULE_DEPEND(mgb, iflib, 1, 1, 1);
/* iflib driver-interface methods implemented by this driver. */
static device_method_t mgb_iflib_methods[] = {
	DEVMETHOD(ifdi_attach_pre, mgb_attach_pre),
	DEVMETHOD(ifdi_attach_post, mgb_attach_post),
	DEVMETHOD(ifdi_detach, mgb_detach),

	DEVMETHOD(ifdi_init, mgb_init),
	DEVMETHOD(ifdi_stop, mgb_stop),

	DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, mgb_queues_free),

	DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign),
	DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all),

#if 0 /* Not yet implemented IFLIB methods */
	/*
	 * Set multicast addresses, mtu and promiscuous mode
	 */
	DEVMETHOD(ifdi_multi_set, mgb_multi_set),
	DEVMETHOD(ifdi_mtu_set, mgb_mtu_set),
	DEVMETHOD(ifdi_promisc_set, mgb_promisc_set),

	/*
	 * Needed for VLAN support
	 */
	DEVMETHOD(ifdi_vlan_register, mgb_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister),

	/*
	 * Needed for WOL support
	 */
	DEVMETHOD(ifdi_shutdown, mgb_shutdown),
	DEVMETHOD(ifdi_suspend, mgb_suspend),
	DEVMETHOD(ifdi_resume, mgb_resume),
#endif /* UNUSED_IFLIB_METHODS */

static driver_t mgb_iflib_driver = {
	"mgb", mgb_iflib_methods, sizeof(struct mgb_softc)
/* iflib TX/RX fast-path entry points (see mgb_isc_* below). */
struct if_txrx mgb_txrx = {
	.ift_txd_encap = mgb_isc_txd_encap,
	.ift_txd_flush = mgb_isc_txd_flush,
	.ift_txd_credits_update = mgb_isc_txd_credits_update,
	.ift_rxd_available = mgb_isc_rxd_available,
	.ift_rxd_pkt_get = mgb_isc_rxd_pkt_get,
	.ift_rxd_refill = mgb_isc_rxd_refill,
	.ift_rxd_flush = mgb_isc_rxd_flush,

	.ift_legacy_intr = mgb_legacy_intr

/* Shared-context template handed to iflib from mgb_register(). */
struct if_shared_ctx mgb_sctx_init = {
	.isc_magic = IFLIB_MAGIC,

	.isc_q_align = PAGE_SIZE,
	.isc_admin_intrcnt = 1,
	.isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/,

	.isc_vendor_info = mgb_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &mgb_iflib_driver,
	/* 2 queues per set for TX and RX (ring queue, head writeback queue) */

	.isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES,
	/* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */
	.isc_tx_maxsegsize = MCLBYTES,

	.isc_ntxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_ntxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_ntxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_rx_maxsize = MCLBYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MCLBYTES,

	.isc_nrxd_min = {1, 1}, /* Will want to make this bigger */
	.isc_nrxd_max = {MGB_DMA_RING_SIZE, 1},
	.isc_nrxd_default = {MGB_DMA_RING_SIZE, 1},

	.isc_nfl = 1, /*one free list since there is only one queue */
#if 0 /* UNUSED_CTX */
	.isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE,
#endif /* UNUSED_CTX */
/*********************************************************************/

/*
 * device_register: give iflib the shared-context template so it can
 * drive probe/attach for this device.
 */
mgb_register(device_t dev)
	return (&mgb_sctx_init);
/*
 * ifdi_attach_pre: first half of attach, called by iflib before queue
 * allocation.  Fills in the softc-context (ring sizes, capabilities),
 * maps registers, resets the hardware, attaches the PHY via miibus,
 * sets up the MSI-X BARs and programs the station MAC address.
 *
 * NOTE(review): the extraction dropped interior lines here (error
 * branches, braces, multiplicands of the qsize computations); comments
 * describe only the visible code.
 */
mgb_attach_pre(if_ctx_t ctx)
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, phyaddr, rid;
	struct ether_addr hwaddr;
	struct mii_data *miid;

	sc = iflib_get_softc(ctx);
	sc->dev = iflib_get_dev(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* IFLIB required setup */
	scctx->isc_txrx = &mgb_txrx;
	scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS;
	/* Ring desc queues */
	scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) *
	scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) *

	/* Second queue per set is the 32-bit head-writeback word. */
	scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1];
	scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1];

	/* XXX: Must have 1 txqset, but can have up to 4 rxqsets */
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;

	/* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) |
	    (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */
	scctx->isc_tx_csum_flags = 0;
	scctx->isc_capabilities = scctx->isc_capenable = 0;
	 * CSUM, TSO and VLAN support are TBD
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;

	/* Map BAR0 registers; failure aborts attach. */
	error = mgb_alloc_regs(sc);
		device_printf(sc->dev,
		    "Unable to allocate bus resource: registers.\n");

	error = mgb_test_bar(sc);

	error = mgb_hw_init(sc);
		device_printf(sc->dev,
		    "MGB device init failed. (err: %d)\n", error);

	/* LAN7430 has an internal PHY; LAN7431 uses an external one. */
	switch (pci_get_device(sc->dev))
	case MGB_LAN7430_DEVICE_ID:
	case MGB_LAN7431_DEVICE_ID:
		phyaddr = MII_PHY_ANY;

	/* XXX: Would be nice(r) if locked methods were here */
	error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx),
	    mgb_media_change, mgb_media_status,
	    BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE);
		device_printf(sc->dev, "Failed to attach MII interface\n");

	miid = device_get_softc(sc->miibus);
	scctx->isc_media = &miid->mii_media;

	scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
	/** Setup PBA BAR **/
	/* Allocate the PBA BAR only when distinct from the table BAR. */
	rid = pci_msix_pba_bar(sc->dev);
	if (rid != scctx->isc_msix_bar) {
		sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		if (sc->pba == NULL) {
			device_printf(sc->dev, "Failed to setup PBA BAR\n");

	/* Use a generated address if EEPROM/OTP holds an invalid one. */
	mgb_get_ethaddr(sc, &hwaddr);
	if (ETHER_IS_BROADCAST(hwaddr.octet) ||
	    ETHER_IS_MULTICAST(hwaddr.octet) ||
	    ETHER_IS_ZERO(hwaddr.octet))
		ether_gen_addr(iflib_get_ifp(ctx), &hwaddr);

	 * XXX: if the MAC address was generated the linux driver
	 * writes it back to the device.
	iflib_set_mac(ctx, hwaddr.octet);

	/* Map all vectors to vector 0 (admin interrupts) by default. */
	CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0);
/*
 * ifdi_attach_post: runs after iflib finishes queue setup; here it only
 * runs the self-interrupt smoke test and reports the result.
 */
mgb_attach_post(if_ctx_t ctx)
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	device_printf(sc->dev, "Interrupt test: %s\n",
	    (mgb_intr_test(sc) ? "PASS" : "FAIL"));

/*
 * ifdi_detach: tear down hardware, free IRQs, delete the miibus child
 * and release BARs.  NOTE(review): interior error-check lines appear to
 * have been dropped by the extraction.
 */
mgb_detach(if_ctx_t ctx)
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	/* XXX: Should report errors but still detach everything. */
	error = mgb_hw_teardown(sc);

	iflib_irq_free(ctx, &sc->rx_irq);
	iflib_irq_free(ctx, &sc->admin_irq);

	if (sc->miibus != NULL)
		device_delete_child(sc->dev, sc->miibus);

	error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
	    rman_get_rid(sc->pba), sc->pba);

	error = mgb_release_regs(sc);
/*
 * ifmedia change callback: push the selected media to the PHY via
 * mii_mediachg(); returns nonzero if the MAC additionally needs a reset.
 */
mgb_media_change(if_t ifp)
	struct mii_data *miid;
	struct mii_softc *miisc;
	struct mgb_softc *sc;

	ctx = if_getsoftc(ifp);
	sc = iflib_get_softc(ctx);
	miid = device_get_softc(sc->miibus);
	/* Reset autonegotiation state on every attached PHY instance. */
	LIST_FOREACH(miisc, &miid->mii_phys, mii_list)

	needs_reset = mii_mediachg(miid);
	if (needs_reset != 0)
	return (needs_reset);

/*
 * ifmedia status callback: report the PHY's current media/link state.
 * Returns early (status left untouched) when the interface is down.
 */
mgb_media_status(if_t ifp, struct ifmediareq *ifmr)
	struct mgb_softc *sc;
	struct mii_data *miid;

	sc = iflib_get_softc(if_getsoftc(ifp));
	miid = device_get_softc(sc->miibus);
	if ((if_getflags(ifp) & IFF_UP) == 0)

	ifmr->ifm_active = miid->mii_media_active;
	ifmr->ifm_status = miid->mii_media_status;
/*
 * ifdi_tx_queues_alloc: record the iflib-allocated TX DMA areas.
 * Queue 0 is the descriptor ring, queue 1 the head-writeback word
 * (see isc_txqsizes in mgb_attach_pre).  Only one TX qset is supported.
 */
mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs,
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;

	sc = iflib_get_softc(ctx);
	KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets));
	rdata = &sc->tx_ring_data;
	for (q = 0; q < ntxqsets; q++) {
		KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs));
		/* Ring */
		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0];
		rdata->ring_bus_addr = paddrs[q * ntxqs + 0];
		/* Head writeback */
		rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1];
		rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1];

/*
 * ifdi_rx_queues_alloc: mirror of the TX variant for the single RX qset.
 */
mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs,
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;

	sc = iflib_get_softc(ctx);
	KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets));
	rdata = &sc->rx_ring_data;
	for (q = 0; q < nrxqsets; q++) {
		KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs));
		/* Ring */
		rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0];
		rdata->ring_bus_addr = paddrs[q * nrxqs + 0];
		/* Head writeback */
		rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1];
		rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1];

/*
 * ifdi_queues_free: the DMA memory itself is owned and freed by iflib;
 * just forget our cached pointers/addresses.
 */
mgb_queues_free(if_ctx_t ctx)
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data));
	memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data));
598 mgb_init(if_ctx_t ctx)
600 struct mgb_softc *sc;
601 struct mii_data *miid;
604 sc = iflib_get_softc(ctx);
605 miid = device_get_softc(sc->miibus);
606 device_printf(sc->dev, "running init ...\n");
610 /* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */
611 CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER);
612 CSR_UPDATE_REG(sc, MGB_RFE_CTL,
613 MGB_RFE_ALLOW_BROADCAST |
614 MGB_RFE_ALLOW_UNICAST |
615 MGB_RFE_ALLOW_UNICAST);
617 error = mii_mediachg(miid);
618 KASSERT(!error, ("mii_mediachg returned: %d", error));
/*
 * Debug-only helper: dump nonzero MAC statistics registers
 * (0x1200-0x12FC), the TX descriptor ring contents, and the first 128
 * words of TX DMA RAM via the data-port window registers.
 *
 * NOTE(review): the extraction dropped the stat_names/stats table
 * contents and some loop headers; comments describe visible code only.
 */
mgb_dump_some_stats(struct mgb_softc *sc)
	int first_stat = 0x1200;
	int last_stat = 0x12FC;

	/* Print every nonzero register in the statistics block. */
	for (i = first_stat; i <= last_stat; i += 4)
		if (CSR_READ_REG(sc, i) != 0)
			device_printf(sc->dev, "0x%04x: 0x%08x\n", i,
			    CSR_READ_REG(sc, i));

	char *stat_names[] = {

	printf("==============================\n");

		device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n",
		    stat_names[i - 1], stats[i - 1],
		    CSR_READ_REG(sc, stats[i - 1]));

	printf("==== TX RING DESCS ====\n");
	for (i = 0; i < MGB_DMA_RING_SIZE; i++)
		device_printf(sc->dev, "ring[%d].data0=0x%08x\n"
		    "ring[%d].data1=0x%08x\n"
		    "ring[%d].data2=0x%08x\n"
		    "ring[%d].data3=0x%08x\n",
		    i, sc->tx_ring_data.ring[i].ctl,
		    i, sc->tx_ring_data.ring[i].addr.low,
		    i, sc->tx_ring_data.ring[i].addr.high,
		    i, sc->tx_ring_data.ring[i].sts);

	device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n");
	/* Raw data-port access; register offsets are undocumented here. */
	CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0
	for (i = 0; i < 128; i++) {
		CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR

		CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD

		/* Busy-wait for the data port READY bit. */
		while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY

		device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i,
		    CSR_READ_REG(sc, 0x30)); // DP_DATA
/*
 * ifdi_stop: halt the DMA controllers and FIFO controllers for every
 * RX and TX queue set.
 * NOTE(review): channel argument is hardcoded to 0 despite the loop
 * index `i` — harmless while only one qset exists; confirm if multiple
 * queues are ever enabled.
 */
mgb_stop(if_ctx_t ctx)
	struct mgb_softc *sc ;
	if_softc_ctx_t scctx;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	/* XXX: Could potentially timeout */
	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP);
		mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE);
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP);
		mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE);

/*
 * Legacy (INTx) filter: defer all work to the iflib admin task.
 * NOTE(review): the `sc = xsc` assignment appears to have been dropped
 * by the extraction.
 */
mgb_legacy_intr(void *xsc)
	struct mgb_softc *sc;

	iflib_admin_intr_deferred(sc->ctx);
	return (FILTER_HANDLED);

/*
 * Per-RX-queue MSI-X filter: mask and ack the queue's interrupt, then
 * tell iflib to schedule the RX task for that queue.
 */
mgb_rxq_intr(void *xsc)
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	uint32_t intr_sts, intr_en;

	scctx = iflib_get_softc_ctx(sc->ctx);

	intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
	intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);

	for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
		if ((intr_sts & MGB_INTR_STS_RX(qidx))){
			/* Mask then ack so the queue stays quiet until
			 * mgb_rx_queue_intr_enable re-enables it. */
			CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
			    MGB_INTR_STS_RX(qidx));
			CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx));

	return (FILTER_SCHEDULE_THREAD);
767 mgb_admin_intr(void *xsc)
769 struct mgb_softc *sc;
770 if_softc_ctx_t scctx;
771 uint32_t intr_sts, intr_en;
775 scctx = iflib_get_softc_ctx(sc->ctx);
777 intr_sts = CSR_READ_REG(sc, MGB_INTR_STS);
778 intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET);
782 * NOTE: Debugging printfs here
783 * will likely cause interrupt test failure.
786 /* TODO: shouldn't continue if suspended */
787 if ((intr_sts & MGB_INTR_STS_ANY) == 0)
789 device_printf(sc->dev, "non-mgb interrupt triggered.\n");
790 return (FILTER_SCHEDULE_THREAD);
792 if ((intr_sts & MGB_INTR_STS_TEST) != 0)
794 sc->isr_test_flag = true;
795 CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
796 return (FILTER_HANDLED);
798 if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0)
800 for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) {
801 if ((intr_sts & MGB_INTR_STS_RX(qidx))){
802 iflib_rx_intr_deferred(sc->ctx, qidx);
805 return (FILTER_HANDLED);
807 /* XXX: TX interrupts should not occur */
808 if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0)
810 for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) {
811 if ((intr_sts & MGB_INTR_STS_RX(qidx))) {
812 /* clear the interrupt sts and run handler */
813 CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR,
814 MGB_INTR_STS_TX(qidx));
815 CSR_WRITE_REG(sc, MGB_INTR_STS,
816 MGB_INTR_STS_TX(qidx));
817 iflib_tx_intr_deferred(sc->ctx, qidx);
820 return (FILTER_HANDLED);
823 return (FILTER_SCHEDULE_THREAD);
/*
 * ifdi_msix_intr_assign: register the admin interrupt on vector 0 and
 * one RX interrupt per RX qset on the following vectors, then map each
 * RX queue to its vector in hardware.  TX uses software interrupts only.
 * NOTE(review): `vectorid` initialization and increments were dropped
 * by the extraction.
 */
mgb_msix_intr_assign(if_ctx_t ctx, int msix)
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int error, i, vectorid;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);

	KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1,
	    ("num rxqsets/txqsets != 1 "));

	/*
	 * First vector should be admin interrupts, others vectors are TX/RX
	 *
	 * RIDs start at 1, and vector ids start at 0.
	 */
	error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1,
	    IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin");
		device_printf(sc->dev,
		    "Failed to register admin interrupt handler\n");

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1,
		    IFLIB_INTR_RX, mgb_rxq_intr, sc, i, irq_name);
			device_printf(sc->dev,
			    "Failed to register rxq %d interrupt handler\n", i);
		/* Steer this RX queue's interrupt to its MSI-X vector. */
		CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP,
		    MGB_INTR_VEC_MAP(vectorid, i));

	/* Not actually mapping hw TX interrupts ... */
	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);
		/* TX completion is driven by a softirq, not a HW vector. */
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
/*
 * ifdi_intr_enable: enable the admin interrupt plus all per-RX-queue
 * interrupts, at both the top-level interrupt block and the DMAC.
 */
mgb_intr_enable_all(if_ctx_t ctx)
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	int i, dmac_enable = 0, intr_sts = 0, vec_en = 0;

	sc = iflib_get_softc(ctx);
	scctx = iflib_get_softc_ctx(ctx);
	intr_sts |= MGB_INTR_STS_ANY;
	vec_en |= MGB_INTR_STS_ANY;

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		intr_sts |= MGB_INTR_STS_RX(i);
		dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i);
		vec_en |= MGB_INTR_RX_VEC_STS(i);

	/* TX interrupts aren't needed ... */

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en);
	/* Ack any stale DMAC status before enabling. */
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable);

/*
 * ifdi_intr_disable: mask and acknowledge everything, top-level and DMAC.
 */
mgb_intr_disable_all(if_ctx_t ctx)
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX);

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX);
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX);

/*
 * ifdi_rx_queue_intr_enable: re-arm one RX queue's interrupt after its
 * deferred handler has run (it was masked in mgb_rxq_intr).
 */
mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
	/* called after successful rx isr */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid));
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid));

/*
 * ifdi_tx_queue_intr_enable: TX mirror of the above; currently unused
 * since hardware TX interrupts are never mapped.
 */
mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
	/* XXX: not called (since tx interrupts not used) */
	struct mgb_softc *sc;

	sc = iflib_get_softc(ctx);

	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid));

	CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid));
	CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid));

/*
 * Fire the software-test interrupt and poll (up to MGB_TIMEOUT) for the
 * admin filter to set isr_test_flag; returns whether it was observed.
 */
mgb_intr_test(struct mgb_softc *sc)

	sc->isr_test_flag = false;
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY);
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET,
	    MGB_INTR_STS_ANY | MGB_INTR_STS_TEST);
	/* Trigger the test interrupt from software. */
	CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST);
	if (sc->isr_test_flag)

	for (i = 0; i < MGB_TIMEOUT; i++) {

		if (sc->isr_test_flag)

	/* Mask and ack the test interrupt again before returning. */
	CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST);
	CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST);
	return sc->isr_test_flag;
/*
 * iflib ift_txd_encap: fill TX ring descriptors for the segments of one
 * packet starting at ipi_pidx, and report the next producer index via
 * ipi_new_pidx.  Only single-segment packets are supported (KASSERT).
 * NOTE(review): the txd->ctl assignment head and some continuation
 * lines were dropped by the extraction.
 */
mgb_isc_txd_encap(void *xsc , if_pkt_info_t ipi)
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *txd;
	bus_dma_segment_t *segs;

	KASSERT(ipi->ipi_qsidx == 0,
	    ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx));
	scctx = iflib_get_softc_ctx(sc->ctx);
	rdata = &sc->tx_ring_data;

	pidx = ipi->ipi_pidx;
	segs = ipi->ipi_segs;
	nsegs = ipi->ipi_nsegs;

	/* For each seg, create a descriptor */
	for (i = 0; i < nsegs; ++i) {
		KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n"));
		txd = &rdata->ring[pidx];
		    (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK ) |
		    /*
		     * XXX: This will be wrong in the multipacket case
		     * I suspect FS should be for the first packet and
		     * LS should be for the last packet
		     */
		    MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS |
		txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(
		txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(
		    (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK);
		pidx = MGB_NEXT_RING_IDX(pidx);
	ipi->ipi_new_pidx = pidx;

/*
 * iflib ift_txd_flush: publish the new TX tail pointer to hardware,
 * but only if it moved since the last flush.
 */
mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx)
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;

	KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid));
	rdata = &sc->tx_ring_data;

	if (rdata->last_tail != pidx) {
		rdata->last_tail = pidx;
		CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail);

/*
 * iflib ift_txd_credits_update: walk from last_head to the
 * device-written head-writeback value, clearing processed descriptors,
 * and return the count (iflib contract quoted below).
 */
mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear)
	struct mgb_softc *sc;
	struct mgb_ring_desc *txd;
	struct mgb_ring_data *rdata;

	/*
	 * > If clear is true, we need to report the number of TX command ring
	 * > descriptors that have been processed by the device.  If clear is
	 * > false, we just need to report whether or not at least one TX
	 * > command ring descriptor has been processed by the device.
	 */
	KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n",
	rdata = &sc->tx_ring_data;

	while (*(rdata->head_wb) != rdata->last_head) {
		/* Zero the consumed descriptor before advancing. */
		txd = &rdata->ring[rdata->last_head];
		memset(txd, 0, sizeof(struct mgb_ring_desc));
		rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);

/*
 * iflib ift_rxd_available: count descriptors between idx and the
 * device's head-writeback position, capped at budget.
 */
mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
	struct mgb_softc *sc;
	if_softc_ctx_t scctx;
	struct mgb_ring_data *rdata;

	KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n",
	rdata = &sc->rx_ring_data;
	scctx = iflib_get_softc_ctx(sc->ctx);
	for (; idx != *(rdata->head_wb);
	    idx = MGB_NEXT_RING_IDX(idx)) {
		/* XXX: Could verify desc is device owned here */
		if (avail == budget)
1091 mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri)
1093 struct mgb_softc *sc;
1094 struct mgb_ring_data *rdata;
1095 struct mgb_ring_desc rxd;
1098 KASSERT(ri->iri_qsidx == 0,
1099 ("tried to check availability in RX Channel %d\n", ri->iri_qsidx));
1102 rdata = &sc->rx_ring_data;
1104 while (*(rdata->head_wb) != rdata->last_head) {
1105 /* copy ring desc and do swapping */
1106 rxd = rdata->ring[rdata->last_head];
1107 rxd.ctl = le32toh(rxd.ctl);
1108 rxd.addr.low = le32toh(rxd.ctl);
1109 rxd.addr.high = le32toh(rxd.ctl);
1110 rxd.sts = le32toh(rxd.ctl);
1112 if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) {
1113 device_printf(sc->dev,
1114 "Tried to read descriptor ... "
1115 "found that it's owned by the driver\n");
1118 if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) {
1119 device_printf(sc->dev,
1120 "Tried to read descriptor ... "
1121 "found that FS is not set.\n");
1122 device_printf(sc->dev, "Tried to read descriptor ... that it FS is not set.\n");
1125 /* XXX: Multi-packet support */
1126 if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) {
1127 device_printf(sc->dev,
1128 "Tried to read descriptor ... "
1129 "found that LS is not set. (Multi-buffer packets not yet supported)\n");
1132 ri->iri_frags[0].irf_flid = 0;
1133 ri->iri_frags[0].irf_idx = rdata->last_head;
1134 ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd);
1135 total_len += ri->iri_frags[0].irf_len;
1137 rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head);
1141 ri->iri_len = total_len;
/*
 * iflib ift_rxd_refill: hand `count` fresh buffers to the RX ring,
 * writing each physical address and marking the descriptor DMA-owned.
 * Descriptors are filled from the back of the update (count down to 0).
 */
mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru)
	if_softc_ctx_t scctx;
	struct mgb_softc *sc;
	struct mgb_ring_data *rdata;
	struct mgb_ring_desc *rxd;

	count = iru->iru_count;
	len = iru->iru_buf_size;
	idxs = iru->iru_idxs;
	paddrs = iru->iru_paddrs;
	KASSERT(iru->iru_qsidx == 0,
	    ("tried to refill RX Channel %d.\n", iru->iru_qsidx));

	scctx = iflib_get_softc_ctx(sc->ctx);
	rdata = &sc->rx_ring_data;

		idx = idxs[--count];
		rxd = &rdata->ring[idx];

		    htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count]));
		    htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count]));
		/* OWN gives the descriptor back to the hardware. */
		rxd->ctl = htole32(MGB_DESC_CTL_OWN |
		    (len & MGB_DESC_CTL_BUFLEN_MASK));

/*
 * iflib ift_rxd_flush: publish the RX tail pointer.  Per the hardware
 * programming guide (see comment), the tail must point at the last
 * valid descriptor, not one past it — unlike the TX ring.
 */
mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
	struct mgb_softc *sc;

	KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid));
	/*
	 * According to the programming guide, last_tail must be set to
	 * the last valid RX descriptor, rather than to the one past that.
	 * Note that this is not true for the TX ring!
	 */
	sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx);
	CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail);
/*
 * Sanity-check BAR mapping by reading the ID/REV register at offset 0
 * and verifying the device ID half matches a supported part.
 */
mgb_test_bar(struct mgb_softc *sc)
	uint32_t id_rev, dev_id, rev;

	id_rev = CSR_READ_REG(sc, 0);
	dev_id = id_rev >> 16;
	rev = id_rev & 0xFFFF;
	if (dev_id == MGB_LAN7430_DEVICE_ID ||
	    dev_id == MGB_LAN7431_DEVICE_ID) {

	device_printf(sc->dev, "ID check failed.\n");

/*
 * Enable PCI bus mastering and map the register BAR (MGB_BAR).
 */
mgb_alloc_regs(struct mgb_softc *sc)

	rid = PCIR_BAR(MGB_BAR);
	pci_enable_busmaster(sc->dev);
	sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	if (sc->regs == NULL)

/*
 * Release the register BAR (if mapped) and disable bus mastering.
 */
mgb_release_regs(struct mgb_softc *sc)

	if (sc->regs != NULL)
		error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->regs), sc->regs);

	pci_disable_busmaster(sc->dev);
1248 mgb_dma_init(struct mgb_softc *sc)
1250 if_softc_ctx_t scctx;
1253 scctx = iflib_get_softc_ctx(sc->ctx);
1255 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1256 if ((error = mgb_dma_rx_ring_init(sc, ch)))
1259 for (ch = 0; ch < scctx->isc_nrxqsets; ch++)
1260 if ((error = mgb_dma_tx_ring_init(sc, ch)))
/*
 * Program one RX DMA channel: reset the DMAC, write the ring base and
 * head-writeback addresses, set ring length/padding, then reset+enable
 * the RX FCT and start the DMAC.
 * NOTE(review): the `if (error ...)` guards around the fct/dmac calls
 * near the end appear to have been dropped by the extraction.
 */
mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel)
	struct mgb_ring_data *rdata;
	int ring_config, error = 0;

	rdata = &sc->rx_ring_data;
	mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_RESET);
	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel),
	    ("Trying to init channels when not in init state\n"));

	/* write ring address */
	if (rdata->ring_bus_addr == 0) {
		device_printf(sc->dev, "Invalid ring bus addr.\n");

	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));

	/* write head pointer writeback address */
	if (rdata->head_wb_bus_addr == 0) {
		device_printf(sc->dev, "Invalid head wb bus addr.\n");

	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));

	/* Enable head pointer writeback */
	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL);

	ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel));
	/* Preserve other CONFIG1 bits; set ring length. */
	ring_config &= ~MGB_DMA_RING_LEN_MASK;
	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
	/* packet padding  (PAD_2 is better for IP header alignment ...) */
	ring_config &= ~MGB_DMA_RING_PAD_MASK;
	ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK);

	CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config);

	/* Start tracking from the hardware's current head position. */
	rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel));

	mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET);
		device_printf(sc->dev, "Failed to reset RX FCT.\n");

	mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE);
		device_printf(sc->dev, "Failed to enable RX FCT.\n");

	mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START);
		device_printf(sc->dev, "Failed to start RX DMAC.\n");

/*
 * Program one TX DMA channel: reset+enable the TX FCT, reset the DMAC,
 * write the ring base address and length, and enable interrupt-on-
 * completion plus head-pointer writeback.  (Continues past the end of
 * the visible source.)
 */
mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel)
	struct mgb_ring_data *rdata;
	int ring_config, error = 0;

	rdata = &sc->tx_ring_data;
	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) {
		device_printf(sc->dev, "Failed to reset TX FCT.\n");

	if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel,
		device_printf(sc->dev, "Failed to enable TX FCT.\n");

	if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
		device_printf(sc->dev, "Failed to reset TX DMAC.\n");

	KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel),
	    ("Trying to init channels in not init state\n"));

	/* write ring address */
	if (rdata->ring_bus_addr == 0) {
		device_printf(sc->dev, "Invalid ring bus addr.\n");

	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel),
	    CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr));
	CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel),
	    CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr));

	/* write ring size */
	ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel));
	ring_config &= ~MGB_DMA_RING_LEN_MASK;
	ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK);
	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config);

	/* Enable interrupt on completion and head pointer writeback */
	ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL);
	CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config);
1375 /* write head pointer writeback address */
1376 if (rdata->head_wb_bus_addr == 0) {
1377 device_printf(sc->dev, "Invalid head wb bus addr.\n");
1380 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel),
1381 CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr));
1382 CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel),
1383 CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr));
1385 rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel));
1386 KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n"));
1387 rdata->last_tail = 0;
1388 CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail);
1390 if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel,
1392 device_printf(sc->dev, "Failed to start TX DMAC.\n");
1398 mgb_dmac_control(struct mgb_softc *sc, int start, int channel,
1399 enum mgb_dmac_cmd cmd)
1405 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1406 MGB_DMAC_CMD_RESET(start, channel));
1407 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0,
1408 MGB_DMAC_CMD_RESET(start, channel));
1413 * NOTE: this simplifies the logic, since it will never
1414 * try to start in STOP_PENDING, but it also increases work.
1416 error = mgb_dmac_control(sc, start, channel, DMAC_STOP);
1419 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1420 MGB_DMAC_CMD_START(start, channel));
1424 CSR_WRITE_REG(sc, MGB_DMAC_CMD,
1425 MGB_DMAC_CMD_STOP(start, channel));
1426 error = mgb_wait_for_bits(sc, MGB_DMAC_CMD,
1427 MGB_DMAC_CMD_STOP(start, channel),
1428 MGB_DMAC_CMD_START(start, channel));
1435 mgb_fct_control(struct mgb_softc *sc, int reg, int channel,
1436 enum mgb_fct_cmd cmd)
1441 CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel));
1442 return mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel));
1444 CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel));
1447 CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel));
1448 return mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel));
1453 mgb_hw_teardown(struct mgb_softc *sc)
1458 CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1459 CSR_WRITE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1460 if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
1462 if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
/*
 * Bring the controller to a known-good state at attach time: soft-reset
 * the chip, initialize the MAC, reset the PHY, and reset the DMA
 * controller, bailing out at the first failure.
 *
 * Returns 0 on success or the MGB_STS_* error of the step that failed.
 */
static int
mgb_hw_init(struct mgb_softc *sc)
{
	int error = 0;

	error = mgb_hw_reset(sc);
	if (error != 0)
		goto fail;

	mgb_mac_init(sc);

	error = mgb_phy_reset(sc);
	if (error != 0)
		goto fail;

	error = mgb_dmac_reset(sc);
	if (error != 0)
		goto fail;

fail:
	return (error);
}
1491 mgb_hw_reset(struct mgb_softc *sc)
1494 CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET);
1495 return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET));
1499 mgb_mac_init(struct mgb_softc *sc)
1503 * enable automatic duplex detection and
1504 * automatic speed detection
1506 CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL);
1507 CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
1508 CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
1514 mgb_phy_reset(struct mgb_softc *sc)
1517 CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET);
1518 if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) ==
1520 return MGB_STS_TIMEOUT;
1521 return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0));
1525 mgb_dmac_reset(struct mgb_softc *sc)
1528 CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET);
1529 return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET));
1533 mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits)
1540 * XXX: Datasheets states delay should be > 5 microseconds
1544 val = CSR_READ_REG(sc, reg);
1545 if ((val & set_bits) == set_bits &&
1546 (val & clear_bits) == 0)
1548 } while (i++ < MGB_TIMEOUT);
1550 return MGB_STS_TIMEOUT;
1554 mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest)
1557 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4);
1558 CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2);
1562 mgb_miibus_readreg(device_t dev, int phy, int reg)
1564 struct mgb_softc *sc;
1567 sc = iflib_get_softc(device_get_softc(dev));
1569 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1572 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1573 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1574 mii_access |= MGB_MII_BUSY | MGB_MII_READ;
1575 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1576 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1579 return (CSR_READ_2_BYTES(sc, MGB_MII_DATA));
1583 mgb_miibus_writereg(device_t dev, int phy, int reg, int data)
1585 struct mgb_softc *sc;
1588 sc = iflib_get_softc(device_get_softc(dev));
1590 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS,
1591 0, MGB_MII_BUSY) == MGB_STS_TIMEOUT)
1593 mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT;
1594 mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT;
1595 mii_access |= MGB_MII_BUSY | MGB_MII_WRITE;
1596 CSR_WRITE_REG(sc, MGB_MII_DATA, data);
1597 CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access);
1598 if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) ==
1604 /* XXX: May need to lock these up */
1606 mgb_miibus_statchg(device_t dev)
1608 struct mgb_softc *sc;
1609 struct mii_data *miid;
1611 sc = iflib_get_softc(device_get_softc(dev));
1612 miid = device_get_softc(sc->miibus);
1613 /* Update baudrate in iflib */
1614 sc->baudrate = ifmedia_baudrate(miid->mii_media_active);
1615 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);
1619 mgb_miibus_linkchg(device_t dev)
1621 struct mgb_softc *sc;
1622 struct mii_data *miid;
1625 sc = iflib_get_softc(device_get_softc(dev));
1626 miid = device_get_softc(sc->miibus);
1627 /* XXX: copied from miibus_linkchg **/
1628 if (miid->mii_media_status & IFM_AVALID) {
1629 if (miid->mii_media_status & IFM_ACTIVE)
1630 link_state = LINK_STATE_UP;
1632 link_state = LINK_STATE_DOWN;
1634 link_state = LINK_STATE_UNKNOWN;
1635 sc->link_state = link_state;
1636 iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate);