2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 /* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
40 #include <sys/module.h>
42 #include <sys/queue.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 #include <sys/taskqueue.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_llc.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 #include <net/if_vlan_var.h>
58 #include <netinet/in.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
69 #include <machine/bus.h>
70 #include <machine/in_cksum.h>
72 #include <dev/ale/if_alereg.h>
73 #include <dev/ale/if_alevar.h>
75 /* "device miibus" required. See GENERIC if you get errors here. */
76 #include "miibus_if.h"
78 /* For more information about Tx checksum offload issues see ale_encap(). */
79 #define ALE_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
81 MODULE_DEPEND(ale, pci, 1, 1, 1);
82 MODULE_DEPEND(ale, ether, 1, 1, 1);
83 MODULE_DEPEND(ale, miibus, 1, 1, 1);
86 static int msi_disable = 0;
87 static int msix_disable = 0;
88 TUNABLE_INT("hw.ale.msi_disable", &msi_disable);
89 TUNABLE_INT("hw.ale.msix_disable", &msix_disable);
92 * Devices supported by this driver.
94 static const struct ale_dev {
95 uint16_t ale_vendorid;
96 uint16_t ale_deviceid;
99 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR81XX,
100 "Atheros AR8121/AR8113/AR8114 PCIe Ethernet" },
103 static int ale_attach(device_t);
104 static int ale_check_boundary(struct ale_softc *);
105 static int ale_detach(device_t);
106 static int ale_dma_alloc(struct ale_softc *);
107 static void ale_dma_free(struct ale_softc *);
108 static void ale_dmamap_cb(void *, bus_dma_segment_t *, int, int);
109 static int ale_encap(struct ale_softc *, struct mbuf **);
110 static void ale_get_macaddr(struct ale_softc *);
111 static void ale_init(void *);
112 static void ale_init_locked(struct ale_softc *);
113 static void ale_init_rx_pages(struct ale_softc *);
114 static void ale_init_tx_ring(struct ale_softc *);
115 static void ale_int_task(void *, int);
116 static int ale_intr(void *);
117 static int ale_ioctl(struct ifnet *, u_long, caddr_t);
118 static void ale_mac_config(struct ale_softc *);
119 static int ale_miibus_readreg(device_t, int, int);
120 static void ale_miibus_statchg(device_t);
121 static int ale_miibus_writereg(device_t, int, int, int);
122 static int ale_mediachange(struct ifnet *);
123 static void ale_mediastatus(struct ifnet *, struct ifmediareq *);
124 static void ale_phy_reset(struct ale_softc *);
125 static int ale_probe(device_t);
126 static void ale_reset(struct ale_softc *);
127 static int ale_resume(device_t);
128 static void ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
129 uint32_t, uint32_t *);
130 static void ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
131 static int ale_rxeof(struct ale_softc *sc, int);
132 static void ale_rxfilter(struct ale_softc *);
133 static void ale_rxvlan(struct ale_softc *);
134 static void ale_setlinkspeed(struct ale_softc *);
135 static void ale_setwol(struct ale_softc *);
136 static int ale_shutdown(device_t);
137 static void ale_start(struct ifnet *);
138 static void ale_start_locked(struct ifnet *);
139 static void ale_stats_clear(struct ale_softc *);
140 static void ale_stats_update(struct ale_softc *);
141 static void ale_stop(struct ale_softc *);
142 static void ale_stop_mac(struct ale_softc *);
143 static int ale_suspend(device_t);
144 static void ale_sysctl_node(struct ale_softc *);
145 static void ale_tick(void *);
146 static void ale_txeof(struct ale_softc *);
147 static void ale_watchdog(struct ale_softc *);
148 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
149 static int sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS);
150 static int sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS);
152 static device_method_t ale_methods[] = {
153 /* Device interface. */
154 DEVMETHOD(device_probe, ale_probe),
155 DEVMETHOD(device_attach, ale_attach),
156 DEVMETHOD(device_detach, ale_detach),
157 DEVMETHOD(device_shutdown, ale_shutdown),
158 DEVMETHOD(device_suspend, ale_suspend),
159 DEVMETHOD(device_resume, ale_resume),
162 DEVMETHOD(miibus_readreg, ale_miibus_readreg),
163 DEVMETHOD(miibus_writereg, ale_miibus_writereg),
164 DEVMETHOD(miibus_statchg, ale_miibus_statchg),
169 static driver_t ale_driver = {
172 sizeof(struct ale_softc)
175 static devclass_t ale_devclass;
177 DRIVER_MODULE(ale, pci, ale_driver, ale_devclass, NULL, NULL);
178 DRIVER_MODULE(miibus, ale, miibus_driver, miibus_devclass, NULL, NULL);
180 static struct resource_spec ale_res_spec_mem[] = {
181 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
185 static struct resource_spec ale_irq_spec_legacy[] = {
186 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
190 static struct resource_spec ale_irq_spec_msi[] = {
191 { SYS_RES_IRQ, 1, RF_ACTIVE },
195 static struct resource_spec ale_irq_spec_msix[] = {
196 { SYS_RES_IRQ, 1, RF_ACTIVE },
201 ale_miibus_readreg(device_t dev, int phy, int reg)
203 struct ale_softc *sc;
207 sc = device_get_softc(dev);
209 CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
210 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
211 for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
213 v = CSR_READ_4(sc, ALE_MDIO);
214 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
219 device_printf(sc->ale_dev, "phy read timeout : %d\n", reg);
223 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
227 ale_miibus_writereg(device_t dev, int phy, int reg, int val)
229 struct ale_softc *sc;
233 sc = device_get_softc(dev);
235 CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
236 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
237 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
238 for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
240 v = CSR_READ_4(sc, ALE_MDIO);
241 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
246 device_printf(sc->ale_dev, "phy write timeout : %d\n", reg);
252 ale_miibus_statchg(device_t dev)
254 struct ale_softc *sc;
255 struct mii_data *mii;
259 sc = device_get_softc(dev);
260 mii = device_get_softc(sc->ale_miibus);
262 if (mii == NULL || ifp == NULL ||
263 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
266 sc->ale_flags &= ~ALE_FLAG_LINK;
267 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
268 (IFM_ACTIVE | IFM_AVALID)) {
269 switch (IFM_SUBTYPE(mii->mii_media_active)) {
272 sc->ale_flags |= ALE_FLAG_LINK;
275 if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
276 sc->ale_flags |= ALE_FLAG_LINK;
283 /* Stop Rx/Tx MACs. */
286 /* Program MACs with resolved speed/duplex/flow-control. */
287 if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
289 /* Reenable Tx/Rx MACs. */
290 reg = CSR_READ_4(sc, ALE_MAC_CFG);
291 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
292 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
297 ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
299 struct ale_softc *sc;
300 struct mii_data *mii;
304 if ((ifp->if_flags & IFF_UP) == 0) {
308 mii = device_get_softc(sc->ale_miibus);
311 ifmr->ifm_status = mii->mii_media_status;
312 ifmr->ifm_active = mii->mii_media_active;
317 ale_mediachange(struct ifnet *ifp)
319 struct ale_softc *sc;
320 struct mii_data *mii;
321 struct mii_softc *miisc;
326 mii = device_get_softc(sc->ale_miibus);
327 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
329 error = mii_mediachg(mii);
336 ale_probe(device_t dev)
338 const struct ale_dev *sp;
340 uint16_t vendor, devid;
342 vendor = pci_get_vendor(dev);
343 devid = pci_get_device(dev);
345 for (i = 0; i < sizeof(ale_devs) / sizeof(ale_devs[0]); i++) {
346 if (vendor == sp->ale_vendorid &&
347 devid == sp->ale_deviceid) {
348 device_set_desc(dev, sp->ale_name);
349 return (BUS_PROBE_DEFAULT);
358 ale_get_macaddr(struct ale_softc *sc)
363 reg = CSR_READ_4(sc, ALE_SPI_CTRL);
364 if ((reg & SPI_VPD_ENB) != 0) {
366 CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
369 if (pci_find_cap(sc->ale_dev, PCIY_VPD, &vpdc) == 0) {
371 * PCI VPD capability found, let TWSI reload EEPROM.
372 * This will set ethernet address of controller.
374 CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
375 TWSI_CTRL_SW_LD_START);
376 for (i = 100; i > 0; i--) {
378 reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
379 if ((reg & TWSI_CTRL_SW_LD_START) == 0)
383 device_printf(sc->ale_dev,
384 "reloading EEPROM timeout!\n");
387 device_printf(sc->ale_dev,
388 "PCI VPD capability not found!\n");
391 ea[0] = CSR_READ_4(sc, ALE_PAR0);
392 ea[1] = CSR_READ_4(sc, ALE_PAR1);
393 sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
394 sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
395 sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
396 sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
397 sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
398 sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
402 ale_phy_reset(struct ale_softc *sc)
405 /* Reset magic from Linux. */
406 CSR_WRITE_2(sc, ALE_GPHY_CTRL,
407 GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
408 GPHY_CTRL_PHY_PLL_ON);
410 CSR_WRITE_2(sc, ALE_GPHY_CTRL,
411 GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
412 GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
415 #define ATPHY_DBG_ADDR 0x1D
416 #define ATPHY_DBG_DATA 0x1E
418 /* Enable hibernation mode. */
419 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
420 ATPHY_DBG_ADDR, 0x0B);
421 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
422 ATPHY_DBG_DATA, 0xBC00);
423 /* Set Class A/B for all modes. */
424 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
425 ATPHY_DBG_ADDR, 0x00);
426 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
427 ATPHY_DBG_DATA, 0x02EF);
428 /* Enable 10BT power saving. */
429 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
430 ATPHY_DBG_ADDR, 0x12);
431 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
432 ATPHY_DBG_DATA, 0x4C04);
433 /* Adjust 1000T power. */
434 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
435 ATPHY_DBG_ADDR, 0x04);
436 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
437 ATPHY_DBG_ADDR, 0x8BBB);
438 /* 10BT center tap voltage. */
439 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
440 ATPHY_DBG_ADDR, 0x05);
441 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
442 ATPHY_DBG_ADDR, 0x2C46);
444 #undef ATPHY_DBG_ADDR
445 #undef ATPHY_DBG_DATA
450 ale_attach(device_t dev)
452 struct ale_softc *sc;
455 int error, i, msic, msixc, pmc;
456 uint32_t rxf_len, txf_len;
459 sc = device_get_softc(dev);
462 mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
464 callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0);
465 TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc);
467 /* Map the device. */
468 pci_enable_busmaster(dev);
469 sc->ale_res_spec = ale_res_spec_mem;
470 sc->ale_irq_spec = ale_irq_spec_legacy;
471 error = bus_alloc_resources(dev, sc->ale_res_spec, sc->ale_res);
473 device_printf(dev, "cannot allocate memory resources.\n");
477 /* Set PHY address. */
478 sc->ale_phyaddr = ALE_PHY_ADDR;
483 /* Reset the ethernet controller. */
486 /* Get PCI and chip id/revision. */
487 sc->ale_rev = pci_get_revid(dev);
488 if (sc->ale_rev >= 0xF0) {
489 /* L2E Rev. B. AR8114 */
490 sc->ale_flags |= ALE_FLAG_FASTETHER;
492 if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
494 sc->ale_flags |= ALE_FLAG_JUMBO;
496 /* L2E Rev. A. AR8113 */
497 sc->ale_flags |= ALE_FLAG_FASTETHER;
501 * All known controllers seems to require 4 bytes alignment
502 * of Tx buffers to make Tx checksum offload with custom
503 * checksum generation method work.
505 sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;
507 * All known controllers seems to have issues on Rx checksum
508 * offload for fragmented IP datagrams.
510 sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;
512 * Don't use Tx CMB. It is known to cause RRS update failure
513 * under certain circumstances. Typical phenomenon of the
514 * issue would be unexpected sequence number encountered in
517 sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
518 sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
519 MASTER_CHIP_REV_SHIFT;
521 device_printf(dev, "PCI device revision : 0x%04x\n",
523 device_printf(dev, "Chip id/revision : 0x%04x\n",
526 txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
527 rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
529 * Uninitialized hardware returns an invalid chip id/revision
530 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
532 if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
533 rxf_len == 0xFFFFFFF) {
534 device_printf(dev,"chip revision : 0x%04x, %u Tx FIFO "
535 "%u Rx FIFO -- not initialized?\n", sc->ale_chip_rev,
540 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", txf_len, rxf_len);
542 /* Allocate IRQ resources. */
543 msixc = pci_msix_count(dev);
544 msic = pci_msi_count(dev);
546 device_printf(dev, "MSIX count : %d\n", msixc);
547 device_printf(dev, "MSI count : %d\n", msic);
550 /* Prefer MSIX over MSI. */
551 if (msix_disable == 0 || msi_disable == 0) {
552 if (msix_disable == 0 && msixc == ALE_MSIX_MESSAGES &&
553 pci_alloc_msix(dev, &msixc) == 0) {
554 if (msixc == ALE_MSIX_MESSAGES) {
555 device_printf(dev, "Using %d MSIX messages.\n",
557 sc->ale_flags |= ALE_FLAG_MSIX;
558 sc->ale_irq_spec = ale_irq_spec_msix;
560 pci_release_msi(dev);
562 if (msi_disable == 0 && (sc->ale_flags & ALE_FLAG_MSIX) == 0 &&
563 msic == ALE_MSI_MESSAGES &&
564 pci_alloc_msi(dev, &msic) == 0) {
565 if (msic == ALE_MSI_MESSAGES) {
566 device_printf(dev, "Using %d MSI messages.\n",
568 sc->ale_flags |= ALE_FLAG_MSI;
569 sc->ale_irq_spec = ale_irq_spec_msi;
571 pci_release_msi(dev);
575 error = bus_alloc_resources(dev, sc->ale_irq_spec, sc->ale_irq);
577 device_printf(dev, "cannot allocate IRQ resources.\n");
581 /* Get DMA parameters from PCIe device control register. */
582 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
583 sc->ale_flags |= ALE_FLAG_PCIE;
584 burst = pci_read_config(dev, i + 0x08, 2);
585 /* Max read request size. */
586 sc->ale_dma_rd_burst = ((burst >> 12) & 0x07) <<
587 DMA_CFG_RD_BURST_SHIFT;
588 /* Max payload size. */
589 sc->ale_dma_wr_burst = ((burst >> 5) & 0x07) <<
590 DMA_CFG_WR_BURST_SHIFT;
592 device_printf(dev, "Read request size : %d bytes.\n",
593 128 << ((burst >> 12) & 0x07));
594 device_printf(dev, "TLP payload size : %d bytes.\n",
595 128 << ((burst >> 5) & 0x07));
598 sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
599 sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;
602 /* Create device sysctl node. */
605 if ((error = ale_dma_alloc(sc) != 0))
608 /* Load station address. */
611 ifp = sc->ale_ifp = if_alloc(IFT_ETHER);
613 device_printf(dev, "cannot allocate ifnet structure.\n");
619 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
620 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
621 ifp->if_ioctl = ale_ioctl;
622 ifp->if_start = ale_start;
623 ifp->if_init = ale_init;
624 ifp->if_snd.ifq_drv_maxlen = ALE_TX_RING_CNT - 1;
625 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
626 IFQ_SET_READY(&ifp->if_snd);
627 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4;
628 ifp->if_hwassist = ALE_CSUM_FEATURES | CSUM_TSO;
629 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
630 sc->ale_flags |= ALE_FLAG_PMCAP;
631 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
633 ifp->if_capenable = ifp->if_capabilities;
635 /* Set up MII bus. */
636 error = mii_attach(dev, &sc->ale_miibus, ifp, ale_mediachange,
637 ale_mediastatus, BMSR_DEFCAPMASK, sc->ale_phyaddr, MII_OFFSET_ANY,
640 device_printf(dev, "attaching PHYs failed\n");
644 ether_ifattach(ifp, sc->ale_eaddr);
646 /* VLAN capability setup. */
647 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
648 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
649 ifp->if_capenable = ifp->if_capabilities;
651 * Even though controllers supported by ale(3) have Rx checksum
652 * offload bug the workaround for fragmented frames seemed to
653 * work so far. However it seems Rx checksum offload does not
654 * work under certain conditions. So disable Rx checksum offload
655 * until I find more clue about it but allow users to override it.
657 ifp->if_capenable &= ~IFCAP_RXCSUM;
659 /* Tell the upper layer(s) we support long frames. */
660 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
662 /* Create local taskq. */
663 sc->ale_tq = taskqueue_create_fast("ale_taskq", M_WAITOK,
664 taskqueue_thread_enqueue, &sc->ale_tq);
665 if (sc->ale_tq == NULL) {
666 device_printf(dev, "could not create taskqueue.\n");
671 taskqueue_start_threads(&sc->ale_tq, 1, PI_NET, "%s taskq",
672 device_get_nameunit(sc->ale_dev));
674 if ((sc->ale_flags & ALE_FLAG_MSIX) != 0)
675 msic = ALE_MSIX_MESSAGES;
676 else if ((sc->ale_flags & ALE_FLAG_MSI) != 0)
677 msic = ALE_MSI_MESSAGES;
680 for (i = 0; i < msic; i++) {
681 error = bus_setup_intr(dev, sc->ale_irq[i],
682 INTR_TYPE_NET | INTR_MPSAFE, ale_intr, NULL, sc,
683 &sc->ale_intrhand[i]);
688 device_printf(dev, "could not set up interrupt handler.\n");
689 taskqueue_free(sc->ale_tq);
703 ale_detach(device_t dev)
705 struct ale_softc *sc;
709 sc = device_get_softc(dev);
712 if (device_is_attached(dev)) {
717 callout_drain(&sc->ale_tick_ch);
718 taskqueue_drain(sc->ale_tq, &sc->ale_int_task);
721 if (sc->ale_tq != NULL) {
722 taskqueue_drain(sc->ale_tq, &sc->ale_int_task);
723 taskqueue_free(sc->ale_tq);
727 if (sc->ale_miibus != NULL) {
728 device_delete_child(dev, sc->ale_miibus);
729 sc->ale_miibus = NULL;
731 bus_generic_detach(dev);
739 if ((sc->ale_flags & ALE_FLAG_MSIX) != 0)
740 msic = ALE_MSIX_MESSAGES;
741 else if ((sc->ale_flags & ALE_FLAG_MSI) != 0)
742 msic = ALE_MSI_MESSAGES;
745 for (i = 0; i < msic; i++) {
746 if (sc->ale_intrhand[i] != NULL) {
747 bus_teardown_intr(dev, sc->ale_irq[i],
748 sc->ale_intrhand[i]);
749 sc->ale_intrhand[i] = NULL;
753 bus_release_resources(dev, sc->ale_irq_spec, sc->ale_irq);
754 if ((sc->ale_flags & (ALE_FLAG_MSI | ALE_FLAG_MSIX)) != 0)
755 pci_release_msi(dev);
756 bus_release_resources(dev, sc->ale_res_spec, sc->ale_res);
757 mtx_destroy(&sc->ale_mtx);
762 #define ALE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
763 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
765 #if __FreeBSD_version >= 900030
766 #define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
767 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
768 #elif __FreeBSD_version > 800000
769 #define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
770 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
772 #define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
773 SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
777 ale_sysctl_node(struct ale_softc *sc)
779 struct sysctl_ctx_list *ctx;
780 struct sysctl_oid_list *child, *parent;
781 struct sysctl_oid *tree;
782 struct ale_hw_stats *stats;
785 stats = &sc->ale_stats;
786 ctx = device_get_sysctl_ctx(sc->ale_dev);
787 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ale_dev));
789 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
790 CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_rx_mod, 0,
791 sysctl_hw_ale_int_mod, "I", "ale Rx interrupt moderation");
792 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
793 CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_tx_mod, 0,
794 sysctl_hw_ale_int_mod, "I", "ale Tx interrupt moderation");
795 /* Pull in device tunables. */
796 sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
797 error = resource_int_value(device_get_name(sc->ale_dev),
798 device_get_unit(sc->ale_dev), "int_rx_mod", &sc->ale_int_rx_mod);
800 if (sc->ale_int_rx_mod < ALE_IM_TIMER_MIN ||
801 sc->ale_int_rx_mod > ALE_IM_TIMER_MAX) {
802 device_printf(sc->ale_dev, "int_rx_mod value out of "
803 "range; using default: %d\n",
804 ALE_IM_RX_TIMER_DEFAULT);
805 sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
808 sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
809 error = resource_int_value(device_get_name(sc->ale_dev),
810 device_get_unit(sc->ale_dev), "int_tx_mod", &sc->ale_int_tx_mod);
812 if (sc->ale_int_tx_mod < ALE_IM_TIMER_MIN ||
813 sc->ale_int_tx_mod > ALE_IM_TIMER_MAX) {
814 device_printf(sc->ale_dev, "int_tx_mod value out of "
815 "range; using default: %d\n",
816 ALE_IM_TX_TIMER_DEFAULT);
817 sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
820 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
821 CTLTYPE_INT | CTLFLAG_RW, &sc->ale_process_limit, 0,
822 sysctl_hw_ale_proc_limit, "I",
823 "max number of Rx events to process");
824 /* Pull in device tunables. */
825 sc->ale_process_limit = ALE_PROC_DEFAULT;
826 error = resource_int_value(device_get_name(sc->ale_dev),
827 device_get_unit(sc->ale_dev), "process_limit",
828 &sc->ale_process_limit);
830 if (sc->ale_process_limit < ALE_PROC_MIN ||
831 sc->ale_process_limit > ALE_PROC_MAX) {
832 device_printf(sc->ale_dev,
833 "process_limit value out of range; "
834 "using default: %d\n", ALE_PROC_DEFAULT);
835 sc->ale_process_limit = ALE_PROC_DEFAULT;
839 /* Misc statistics. */
840 ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq",
841 &stats->reset_brk_seq,
842 "Controller resets due to broken Rx sequnce number");
844 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
845 NULL, "ATE statistics");
846 parent = SYSCTL_CHILDREN(tree);
849 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
850 NULL, "Rx MAC statistics");
851 child = SYSCTL_CHILDREN(tree);
852 ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
853 &stats->rx_frames, "Good frames");
854 ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
855 &stats->rx_bcast_frames, "Good broadcast frames");
856 ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
857 &stats->rx_mcast_frames, "Good multicast frames");
858 ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
859 &stats->rx_pause_frames, "Pause control frames");
860 ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
861 &stats->rx_control_frames, "Control frames");
862 ALE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
863 &stats->rx_crcerrs, "CRC errors");
864 ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
865 &stats->rx_lenerrs, "Frames with length mismatched");
866 ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
867 &stats->rx_bytes, "Good octets");
868 ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
869 &stats->rx_bcast_bytes, "Good broadcast octets");
870 ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
871 &stats->rx_mcast_bytes, "Good multicast octets");
872 ALE_SYSCTL_STAT_ADD32(ctx, child, "runts",
873 &stats->rx_runts, "Too short frames");
874 ALE_SYSCTL_STAT_ADD32(ctx, child, "fragments",
875 &stats->rx_fragments, "Fragmented frames");
876 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
877 &stats->rx_pkts_64, "64 bytes frames");
878 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
879 &stats->rx_pkts_65_127, "65 to 127 bytes frames");
880 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
881 &stats->rx_pkts_128_255, "128 to 255 bytes frames");
882 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
883 &stats->rx_pkts_256_511, "256 to 511 bytes frames");
884 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
885 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
886 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
887 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
888 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
889 &stats->rx_pkts_1519_max, "1519 to max frames");
890 ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
891 &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
892 ALE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
893 &stats->rx_fifo_oflows, "FIFO overflows");
894 ALE_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
895 &stats->rx_rrs_errs, "Return status write-back errors");
896 ALE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
897 &stats->rx_alignerrs, "Alignment errors");
898 ALE_SYSCTL_STAT_ADD32(ctx, child, "filtered",
899 &stats->rx_pkts_filtered,
900 "Frames dropped due to address filtering");
903 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
904 NULL, "Tx MAC statistics");
905 child = SYSCTL_CHILDREN(tree);
906 ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
907 &stats->tx_frames, "Good frames");
908 ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
909 &stats->tx_bcast_frames, "Good broadcast frames");
910 ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
911 &stats->tx_mcast_frames, "Good multicast frames");
912 ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
913 &stats->tx_pause_frames, "Pause control frames");
914 ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
915 &stats->tx_control_frames, "Control frames");
916 ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
917 &stats->tx_excess_defer, "Frames with excessive derferrals");
918 ALE_SYSCTL_STAT_ADD32(ctx, child, "defers",
919 &stats->tx_excess_defer, "Frames with derferrals");
920 ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
921 &stats->tx_bytes, "Good octets");
922 ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
923 &stats->tx_bcast_bytes, "Good broadcast octets");
924 ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
925 &stats->tx_mcast_bytes, "Good multicast octets");
926 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
927 &stats->tx_pkts_64, "64 bytes frames");
928 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
929 &stats->tx_pkts_65_127, "65 to 127 bytes frames");
930 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
931 &stats->tx_pkts_128_255, "128 to 255 bytes frames");
932 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
933 &stats->tx_pkts_256_511, "256 to 511 bytes frames");
934 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
935 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
936 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
937 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
938 ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
939 &stats->tx_pkts_1519_max, "1519 to max frames");
940 ALE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
941 &stats->tx_single_colls, "Single collisions");
942 ALE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
943 &stats->tx_multi_colls, "Multiple collisions");
944 ALE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
945 &stats->tx_late_colls, "Late collisions");
946 ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
947 &stats->tx_excess_colls, "Excessive collisions");
948 ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
949 &stats->tx_underrun, "FIFO underruns");
950 ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
951 &stats->tx_desc_underrun, "Descriptor write-back errors");
952 ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
953 &stats->tx_lenerrs, "Frames with length mismatched");
954 ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
955 &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
958 #undef ALE_SYSCTL_STAT_ADD32
959 #undef ALE_SYSCTL_STAT_ADD64
961 struct ale_dmamap_arg {
962 bus_addr_t ale_busaddr;
966 ale_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
968 struct ale_dmamap_arg *ctx;
973 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
975 ctx = (struct ale_dmamap_arg *)arg;
976 ctx->ale_busaddr = segs[0].ds_addr;
980 * Tx descriptors/RXF0/CMB DMA blocks share ALE_DESC_ADDR_HI register
981 * which specifies high address region of DMA blocks. Therefore these
982 * blocks should have the same high address of given 4GB address
983 * space(i.e. crossing 4GB boundary is not allowed).
986 ale_check_boundary(struct ale_softc *sc)
988 bus_addr_t rx_cmb_end[ALE_RX_PAGES], tx_cmb_end;
989 bus_addr_t rx_page_end[ALE_RX_PAGES], tx_ring_end;
991 rx_page_end[0] = sc->ale_cdata.ale_rx_page[0].page_paddr +
993 rx_page_end[1] = sc->ale_cdata.ale_rx_page[1].page_paddr +
995 tx_ring_end = sc->ale_cdata.ale_tx_ring_paddr + ALE_TX_RING_SZ;
996 tx_cmb_end = sc->ale_cdata.ale_tx_cmb_paddr + ALE_TX_CMB_SZ;
997 rx_cmb_end[0] = sc->ale_cdata.ale_rx_page[0].cmb_paddr + ALE_RX_CMB_SZ;
998 rx_cmb_end[1] = sc->ale_cdata.ale_rx_page[1].cmb_paddr + ALE_RX_CMB_SZ;
1000 if ((ALE_ADDR_HI(tx_ring_end) !=
1001 ALE_ADDR_HI(sc->ale_cdata.ale_tx_ring_paddr)) ||
1002 (ALE_ADDR_HI(rx_page_end[0]) !=
1003 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].page_paddr)) ||
1004 (ALE_ADDR_HI(rx_page_end[1]) !=
1005 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].page_paddr)) ||
1006 (ALE_ADDR_HI(tx_cmb_end) !=
1007 ALE_ADDR_HI(sc->ale_cdata.ale_tx_cmb_paddr)) ||
1008 (ALE_ADDR_HI(rx_cmb_end[0]) !=
1009 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].cmb_paddr)) ||
1010 (ALE_ADDR_HI(rx_cmb_end[1]) !=
1011 ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].cmb_paddr)))
1014 if ((ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[0])) ||
1015 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[1])) ||
1016 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[0])) ||
1017 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[1])) ||
1018 (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(tx_cmb_end)))
1025 ale_dma_alloc(struct ale_softc *sc)
1027 struct ale_txdesc *txd;
1029 struct ale_dmamap_arg ctx;
1030 int error, guard_size, i;
1032 if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
1033 guard_size = ALE_JUMBO_FRAMELEN;
1035 guard_size = ALE_MAX_FRAMELEN;
1036 sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
1038 lowaddr = BUS_SPACE_MAXADDR;
1040 /* Create parent DMA tag. */
1041 error = bus_dma_tag_create(
1042 bus_get_dma_tag(sc->ale_dev), /* parent */
1043 1, 0, /* alignment, boundary */
1044 lowaddr, /* lowaddr */
1045 BUS_SPACE_MAXADDR, /* highaddr */
1046 NULL, NULL, /* filter, filterarg */
1047 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1049 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1051 NULL, NULL, /* lockfunc, lockarg */
1052 &sc->ale_cdata.ale_parent_tag);
1054 device_printf(sc->ale_dev,
1055 "could not create parent DMA tag.\n");
1059 /* Create DMA tag for Tx descriptor ring. */
1060 error = bus_dma_tag_create(
1061 sc->ale_cdata.ale_parent_tag, /* parent */
1062 ALE_TX_RING_ALIGN, 0, /* alignment, boundary */
1063 BUS_SPACE_MAXADDR, /* lowaddr */
1064 BUS_SPACE_MAXADDR, /* highaddr */
1065 NULL, NULL, /* filter, filterarg */
1066 ALE_TX_RING_SZ, /* maxsize */
1068 ALE_TX_RING_SZ, /* maxsegsize */
1070 NULL, NULL, /* lockfunc, lockarg */
1071 &sc->ale_cdata.ale_tx_ring_tag);
1073 device_printf(sc->ale_dev,
1074 "could not create Tx ring DMA tag.\n");
1078 /* Create DMA tag for Rx pages. */
1079 for (i = 0; i < ALE_RX_PAGES; i++) {
1080 error = bus_dma_tag_create(
1081 sc->ale_cdata.ale_parent_tag, /* parent */
1082 ALE_RX_PAGE_ALIGN, 0, /* alignment, boundary */
1083 BUS_SPACE_MAXADDR, /* lowaddr */
1084 BUS_SPACE_MAXADDR, /* highaddr */
1085 NULL, NULL, /* filter, filterarg */
1086 sc->ale_pagesize, /* maxsize */
1088 sc->ale_pagesize, /* maxsegsize */
1090 NULL, NULL, /* lockfunc, lockarg */
1091 &sc->ale_cdata.ale_rx_page[i].page_tag);
1093 device_printf(sc->ale_dev,
1094 "could not create Rx page %d DMA tag.\n", i);
1099 /* Create DMA tag for Tx coalescing message block. */
1100 error = bus_dma_tag_create(
1101 sc->ale_cdata.ale_parent_tag, /* parent */
1102 ALE_CMB_ALIGN, 0, /* alignment, boundary */
1103 BUS_SPACE_MAXADDR, /* lowaddr */
1104 BUS_SPACE_MAXADDR, /* highaddr */
1105 NULL, NULL, /* filter, filterarg */
1106 ALE_TX_CMB_SZ, /* maxsize */
1108 ALE_TX_CMB_SZ, /* maxsegsize */
1110 NULL, NULL, /* lockfunc, lockarg */
1111 &sc->ale_cdata.ale_tx_cmb_tag);
1113 device_printf(sc->ale_dev,
1114 "could not create Tx CMB DMA tag.\n");
1118 /* Create DMA tag for Rx coalescing message block. */
1119 for (i = 0; i < ALE_RX_PAGES; i++) {
1120 error = bus_dma_tag_create(
1121 sc->ale_cdata.ale_parent_tag, /* parent */
1122 ALE_CMB_ALIGN, 0, /* alignment, boundary */
1123 BUS_SPACE_MAXADDR, /* lowaddr */
1124 BUS_SPACE_MAXADDR, /* highaddr */
1125 NULL, NULL, /* filter, filterarg */
1126 ALE_RX_CMB_SZ, /* maxsize */
1128 ALE_RX_CMB_SZ, /* maxsegsize */
1130 NULL, NULL, /* lockfunc, lockarg */
1131 &sc->ale_cdata.ale_rx_page[i].cmb_tag);
1133 device_printf(sc->ale_dev,
1134 "could not create Rx page %d CMB DMA tag.\n", i);
1139 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1140 error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_ring_tag,
1141 (void **)&sc->ale_cdata.ale_tx_ring,
1142 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1143 &sc->ale_cdata.ale_tx_ring_map);
1145 device_printf(sc->ale_dev,
1146 "could not allocate DMA'able memory for Tx ring.\n");
1149 ctx.ale_busaddr = 0;
1150 error = bus_dmamap_load(sc->ale_cdata.ale_tx_ring_tag,
1151 sc->ale_cdata.ale_tx_ring_map, sc->ale_cdata.ale_tx_ring,
1152 ALE_TX_RING_SZ, ale_dmamap_cb, &ctx, 0);
1153 if (error != 0 || ctx.ale_busaddr == 0) {
1154 device_printf(sc->ale_dev,
1155 "could not load DMA'able memory for Tx ring.\n");
1158 sc->ale_cdata.ale_tx_ring_paddr = ctx.ale_busaddr;
1161 for (i = 0; i < ALE_RX_PAGES; i++) {
1162 error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].page_tag,
1163 (void **)&sc->ale_cdata.ale_rx_page[i].page_addr,
1164 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1165 &sc->ale_cdata.ale_rx_page[i].page_map);
1167 device_printf(sc->ale_dev,
1168 "could not allocate DMA'able memory for "
1169 "Rx page %d.\n", i);
1172 ctx.ale_busaddr = 0;
1173 error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].page_tag,
1174 sc->ale_cdata.ale_rx_page[i].page_map,
1175 sc->ale_cdata.ale_rx_page[i].page_addr,
1176 sc->ale_pagesize, ale_dmamap_cb, &ctx, 0);
1177 if (error != 0 || ctx.ale_busaddr == 0) {
1178 device_printf(sc->ale_dev,
1179 "could not load DMA'able memory for "
1180 "Rx page %d.\n", i);
1183 sc->ale_cdata.ale_rx_page[i].page_paddr = ctx.ale_busaddr;
1187 error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_cmb_tag,
1188 (void **)&sc->ale_cdata.ale_tx_cmb,
1189 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1190 &sc->ale_cdata.ale_tx_cmb_map);
1192 device_printf(sc->ale_dev,
1193 "could not allocate DMA'able memory for Tx CMB.\n");
1196 ctx.ale_busaddr = 0;
1197 error = bus_dmamap_load(sc->ale_cdata.ale_tx_cmb_tag,
1198 sc->ale_cdata.ale_tx_cmb_map, sc->ale_cdata.ale_tx_cmb,
1199 ALE_TX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
1200 if (error != 0 || ctx.ale_busaddr == 0) {
1201 device_printf(sc->ale_dev,
1202 "could not load DMA'able memory for Tx CMB.\n");
1205 sc->ale_cdata.ale_tx_cmb_paddr = ctx.ale_busaddr;
1208 for (i = 0; i < ALE_RX_PAGES; i++) {
1209 error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].cmb_tag,
1210 (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
1211 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1212 &sc->ale_cdata.ale_rx_page[i].cmb_map);
1214 device_printf(sc->ale_dev, "could not allocate "
1215 "DMA'able memory for Rx page %d CMB.\n", i);
1218 ctx.ale_busaddr = 0;
1219 error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].cmb_tag,
1220 sc->ale_cdata.ale_rx_page[i].cmb_map,
1221 sc->ale_cdata.ale_rx_page[i].cmb_addr,
1222 ALE_RX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
1223 if (error != 0 || ctx.ale_busaddr == 0) {
1224 device_printf(sc->ale_dev, "could not load DMA'able "
1225 "memory for Rx page %d CMB.\n", i);
1228 sc->ale_cdata.ale_rx_page[i].cmb_paddr = ctx.ale_busaddr;
1232 * Tx descriptors/RXF0/CMB DMA blocks share the same
1233 * high address region of 64bit DMA address space.
1235 if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
1236 (error = ale_check_boundary(sc)) != 0) {
1237 device_printf(sc->ale_dev, "4GB boundary crossed, "
1238 "switching to 32bit DMA addressing mode.\n");
1241 * Limit max allowable DMA address space to 32bit
1244 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1249 * Create Tx buffer parent tag.
1250 * AR81xx allows 64bit DMA addressing of Tx buffers so it
1251 * needs separate parent DMA tag as parent DMA address space
1252 * could be restricted to be within 32bit address space by
1253 * 4GB boundary crossing.
1255 error = bus_dma_tag_create(
1256 bus_get_dma_tag(sc->ale_dev), /* parent */
1257 1, 0, /* alignment, boundary */
1258 BUS_SPACE_MAXADDR, /* lowaddr */
1259 BUS_SPACE_MAXADDR, /* highaddr */
1260 NULL, NULL, /* filter, filterarg */
1261 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1263 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1265 NULL, NULL, /* lockfunc, lockarg */
1266 &sc->ale_cdata.ale_buffer_tag);
1268 device_printf(sc->ale_dev,
1269 "could not create parent buffer DMA tag.\n");
1273 /* Create DMA tag for Tx buffers. */
1274 error = bus_dma_tag_create(
1275 sc->ale_cdata.ale_buffer_tag, /* parent */
1276 1, 0, /* alignment, boundary */
1277 BUS_SPACE_MAXADDR, /* lowaddr */
1278 BUS_SPACE_MAXADDR, /* highaddr */
1279 NULL, NULL, /* filter, filterarg */
1280 ALE_TSO_MAXSIZE, /* maxsize */
1281 ALE_MAXTXSEGS, /* nsegments */
1282 ALE_TSO_MAXSEGSIZE, /* maxsegsize */
1284 NULL, NULL, /* lockfunc, lockarg */
1285 &sc->ale_cdata.ale_tx_tag);
1287 device_printf(sc->ale_dev, "could not create Tx DMA tag.\n");
1291 /* Create DMA maps for Tx buffers. */
1292 for (i = 0; i < ALE_TX_RING_CNT; i++) {
1293 txd = &sc->ale_cdata.ale_txdesc[i];
1295 txd->tx_dmamap = NULL;
1296 error = bus_dmamap_create(sc->ale_cdata.ale_tx_tag, 0,
1299 device_printf(sc->ale_dev,
1300 "could not create Tx dmamap.\n");
1310 ale_dma_free(struct ale_softc *sc)
1312 struct ale_txdesc *txd;
1316 if (sc->ale_cdata.ale_tx_tag != NULL) {
1317 for (i = 0; i < ALE_TX_RING_CNT; i++) {
1318 txd = &sc->ale_cdata.ale_txdesc[i];
1319 if (txd->tx_dmamap != NULL) {
1320 bus_dmamap_destroy(sc->ale_cdata.ale_tx_tag,
1322 txd->tx_dmamap = NULL;
1325 bus_dma_tag_destroy(sc->ale_cdata.ale_tx_tag);
1326 sc->ale_cdata.ale_tx_tag = NULL;
1328 /* Tx descriptor ring. */
1329 if (sc->ale_cdata.ale_tx_ring_tag != NULL) {
1330 if (sc->ale_cdata.ale_tx_ring_map != NULL)
1331 bus_dmamap_unload(sc->ale_cdata.ale_tx_ring_tag,
1332 sc->ale_cdata.ale_tx_ring_map);
1333 if (sc->ale_cdata.ale_tx_ring_map != NULL &&
1334 sc->ale_cdata.ale_tx_ring != NULL)
1335 bus_dmamem_free(sc->ale_cdata.ale_tx_ring_tag,
1336 sc->ale_cdata.ale_tx_ring,
1337 sc->ale_cdata.ale_tx_ring_map);
1338 sc->ale_cdata.ale_tx_ring = NULL;
1339 sc->ale_cdata.ale_tx_ring_map = NULL;
1340 bus_dma_tag_destroy(sc->ale_cdata.ale_tx_ring_tag);
1341 sc->ale_cdata.ale_tx_ring_tag = NULL;
1343 /* Rx page block. */
1344 for (i = 0; i < ALE_RX_PAGES; i++) {
1345 if (sc->ale_cdata.ale_rx_page[i].page_tag != NULL) {
1346 if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
1348 sc->ale_cdata.ale_rx_page[i].page_tag,
1349 sc->ale_cdata.ale_rx_page[i].page_map);
1350 if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
1351 sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
1353 sc->ale_cdata.ale_rx_page[i].page_tag,
1354 sc->ale_cdata.ale_rx_page[i].page_addr,
1355 sc->ale_cdata.ale_rx_page[i].page_map);
1356 sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
1357 sc->ale_cdata.ale_rx_page[i].page_map = NULL;
1358 bus_dma_tag_destroy(
1359 sc->ale_cdata.ale_rx_page[i].page_tag);
1360 sc->ale_cdata.ale_rx_page[i].page_tag = NULL;
1364 for (i = 0; i < ALE_RX_PAGES; i++) {
1365 if (sc->ale_cdata.ale_rx_page[i].cmb_tag != NULL) {
1366 if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
1368 sc->ale_cdata.ale_rx_page[i].cmb_tag,
1369 sc->ale_cdata.ale_rx_page[i].cmb_map);
1370 if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
1371 sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
1373 sc->ale_cdata.ale_rx_page[i].cmb_tag,
1374 sc->ale_cdata.ale_rx_page[i].cmb_addr,
1375 sc->ale_cdata.ale_rx_page[i].cmb_map);
1376 sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
1377 sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
1378 bus_dma_tag_destroy(
1379 sc->ale_cdata.ale_rx_page[i].cmb_tag);
1380 sc->ale_cdata.ale_rx_page[i].cmb_tag = NULL;
1384 if (sc->ale_cdata.ale_tx_cmb_tag != NULL) {
1385 if (sc->ale_cdata.ale_tx_cmb_map != NULL)
1386 bus_dmamap_unload(sc->ale_cdata.ale_tx_cmb_tag,
1387 sc->ale_cdata.ale_tx_cmb_map);
1388 if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
1389 sc->ale_cdata.ale_tx_cmb != NULL)
1390 bus_dmamem_free(sc->ale_cdata.ale_tx_cmb_tag,
1391 sc->ale_cdata.ale_tx_cmb,
1392 sc->ale_cdata.ale_tx_cmb_map);
1393 sc->ale_cdata.ale_tx_cmb = NULL;
1394 sc->ale_cdata.ale_tx_cmb_map = NULL;
1395 bus_dma_tag_destroy(sc->ale_cdata.ale_tx_cmb_tag);
1396 sc->ale_cdata.ale_tx_cmb_tag = NULL;
1398 if (sc->ale_cdata.ale_buffer_tag != NULL) {
1399 bus_dma_tag_destroy(sc->ale_cdata.ale_buffer_tag);
1400 sc->ale_cdata.ale_buffer_tag = NULL;
1402 if (sc->ale_cdata.ale_parent_tag != NULL) {
1403 bus_dma_tag_destroy(sc->ale_cdata.ale_parent_tag);
1404 sc->ale_cdata.ale_parent_tag = NULL;
1409 ale_shutdown(device_t dev)
1412 return (ale_suspend(dev));
1416 * Note, this driver resets the link speed to 10/100Mbps by
1417 * restarting auto-negotiation in suspend/shutdown phase but we
1418 * don't know whether that auto-negotiation would succeed or not
1419 * as driver has no control after powering off/suspend operation.
1420 * If the renegotiation fail WOL may not work. Running at 1Gbps
1421 * will draw more power than 375mA at 3.3V which is specified in
1422 * PCI specification and that would result in complete
1423 * shutdowning power to ethernet controller.
1426 * Save current negotiated media speed/duplex/flow-control to
1427 * softc and restore the same link again after resuming. PHY
1428 * handling such as power down/resetting to 100Mbps may be better
1429 * handled in suspend method in phy driver.
1432 ale_setlinkspeed(struct ale_softc *sc)
1434 struct mii_data *mii;
1437 mii = device_get_softc(sc->ale_miibus);
1440 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1441 (IFM_ACTIVE | IFM_AVALID)) {
1442 switch IFM_SUBTYPE(mii->mii_media_active) {
1453 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_100T2CR, 0);
1454 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
1455 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1456 ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
1457 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
1461 * Poll link state until ale(4) get a 10/100Mbps link.
1463 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1465 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1466 == (IFM_ACTIVE | IFM_AVALID)) {
1467 switch (IFM_SUBTYPE(
1468 mii->mii_media_active)) {
1478 pause("alelnk", hz);
1481 if (i == MII_ANEGTICKS_GIGE)
1482 device_printf(sc->ale_dev,
1483 "establishing a link failed, WOL may not work!");
1486 * No link, force MAC to have 100Mbps, full-duplex link.
1487 * This is the last resort and may/may not work.
1489 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1490 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1495 ale_setwol(struct ale_softc *sc)
1502 ALE_LOCK_ASSERT(sc);
1504 if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) != 0) {
1506 CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
1507 reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
1508 reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1509 CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
1510 /* Force PHY power down. */
1511 CSR_WRITE_2(sc, ALE_GPHY_CTRL,
1512 GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
1513 GPHY_CTRL_HIB_PULSE | GPHY_CTRL_PHY_PLL_ON |
1514 GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ |
1515 GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW);
1520 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1521 if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
1522 ale_setlinkspeed(sc);
1526 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
1527 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
1528 CSR_WRITE_4(sc, ALE_WOL_CFG, pmcs);
1529 reg = CSR_READ_4(sc, ALE_MAC_CFG);
1530 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
1532 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
1533 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
1534 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1535 reg |= MAC_CFG_RX_ENB;
1536 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1538 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1539 /* WOL disabled, PHY power down. */
1540 reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
1541 reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1542 CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
1543 CSR_WRITE_2(sc, ALE_GPHY_CTRL,
1544 GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
1545 GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
1546 GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS |
1547 GPHY_CTRL_PWDOWN_HW);
1550 pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2);
1551 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1552 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1553 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1554 pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1558 ale_suspend(device_t dev)
1560 struct ale_softc *sc;
1562 sc = device_get_softc(dev);
1573 ale_resume(device_t dev)
1575 struct ale_softc *sc;
1580 sc = device_get_softc(dev);
1583 if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) == 0) {
1584 /* Disable PME and clear PME status. */
1585 pmstat = pci_read_config(sc->ale_dev,
1586 pmc + PCIR_POWER_STATUS, 2);
1587 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1588 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1589 pci_write_config(sc->ale_dev,
1590 pmc + PCIR_POWER_STATUS, pmstat, 2);
1596 if ((ifp->if_flags & IFF_UP) != 0) {
1597 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1598 ale_init_locked(sc);
1606 ale_encap(struct ale_softc *sc, struct mbuf **m_head)
1608 struct ale_txdesc *txd, *txd_last;
1609 struct tx_desc *desc;
1613 bus_dma_segment_t txsegs[ALE_MAXTXSEGS];
1615 uint32_t cflags, hdrlen, ip_off, poff, vtag;
1616 int error, i, nsegs, prod, si;
1618 ALE_LOCK_ASSERT(sc);
1620 M_ASSERTPKTHDR((*m_head));
1627 if ((m->m_pkthdr.csum_flags & (ALE_CSUM_FEATURES | CSUM_TSO)) != 0) {
1629 * AR81xx requires offset of TCP/UDP payload in its Tx
1630 * descriptor to perform hardware Tx checksum offload.
1631 * Additionally, TSO requires IP/TCP header size and
1632 * modification of IP/TCP header in order to make TSO
1633 * engine work. This kind of operation takes many CPU
1634 * cycles on FreeBSD so fast host CPU is required to
1635 * get smooth TSO performance.
1637 struct ether_header *eh;
1639 if (M_WRITABLE(m) == 0) {
1640 /* Get a writable copy. */
1641 m = m_dup(*m_head, M_NOWAIT);
1642 /* Release original mbufs. */
1652 * Buggy-controller requires 4 byte aligned Tx buffer
1653 * to make custom checksum offload work.
1655 if ((sc->ale_flags & ALE_FLAG_TXCSUM_BUG) != 0 &&
1656 (m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0 &&
1657 (mtod(m, intptr_t) & 3) != 0) {
1658 m = m_defrag(*m_head, M_NOWAIT);
1667 ip_off = sizeof(struct ether_header);
1668 m = m_pullup(m, ip_off);
1673 eh = mtod(m, struct ether_header *);
1675 * Check if hardware VLAN insertion is off.
1676 * Additional check for LLC/SNAP frame?
1678 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1679 ip_off = sizeof(struct ether_vlan_header);
1680 m = m_pullup(m, ip_off);
1686 m = m_pullup(m, ip_off + sizeof(struct ip));
1691 ip = (struct ip *)(mtod(m, char *) + ip_off);
1692 poff = ip_off + (ip->ip_hl << 2);
1693 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1696 * AR81xx requires the first descriptor should
1697 * not include any TCP playload for TSO case.
1698 * (i.e. ethernet header + IP + TCP header only)
1699 * m_pullup(9) above will ensure this too.
1700 * However it's not correct if the first mbuf
1701 * of the chain does not use cluster.
1703 m = m_pullup(m, poff + sizeof(struct tcphdr));
1708 ip = (struct ip *)(mtod(m, char *) + ip_off);
1709 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1710 m = m_pullup(m, poff + (tcp->th_off << 2));
1716 * AR81xx requires IP/TCP header size and offset as
1717 * well as TCP pseudo checksum which complicates
1718 * TSO configuration. I guess this comes from the
1719 * adherence to Microsoft NDIS Large Send
1720 * specification which requires insertion of
1721 * pseudo checksum by upper stack. The pseudo
1722 * checksum that NDIS refers to doesn't include
1723 * TCP payload length so ale(4) should recompute
1724 * the pseudo checksum here. Hopefully this wouldn't
1725 * be much burden on modern CPUs.
1726 * Reset IP checksum and recompute TCP pseudo
1727 * checksum as NDIS specification said.
1730 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1731 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1736 si = prod = sc->ale_cdata.ale_tx_prod;
1737 txd = &sc->ale_cdata.ale_txdesc[prod];
1739 map = txd->tx_dmamap;
1741 error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
1742 *m_head, txsegs, &nsegs, 0);
1743 if (error == EFBIG) {
1744 m = m_collapse(*m_head, M_NOWAIT, ALE_MAXTXSEGS);
1751 error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
1752 *m_head, txsegs, &nsegs, 0);
1758 } else if (error != 0)
1766 /* Check descriptor overrun. */
1767 if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 3) {
1768 bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map);
1771 bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, map, BUS_DMASYNC_PREWRITE);
1774 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1775 /* Request TSO and set MSS. */
1776 cflags |= ALE_TD_TSO;
1777 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << ALE_TD_MSS_SHIFT);
1778 /* Set IP/TCP header size. */
1779 cflags |= ip->ip_hl << ALE_TD_IPHDR_LEN_SHIFT;
1780 cflags |= tcp->th_off << ALE_TD_TCPHDR_LEN_SHIFT;
1781 } else if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
1783 * AR81xx supports Tx custom checksum offload feature
1784 * that offloads single 16bit checksum computation.
1785 * So you can choose one among IP, TCP and UDP.
1786 * Normally driver sets checksum start/insertion
1787 * position from the information of TCP/UDP frame as
1788 * TCP/UDP checksum takes more time than that of IP.
1789 * However it seems that custom checksum offload
1790 * requires 4 bytes aligned Tx buffers due to hardware
1792 * AR81xx also supports explicit Tx checksum computation
1793 * if it is told that the size of IP header and TCP
1794 * header(for UDP, the header size does not matter
1795 * because it's fixed length). However with this scheme
1796 * TSO does not work so you have to choose one either
1797 * TSO or explicit Tx checksum offload. I chosen TSO
1798 * plus custom checksum offload with work-around which
1799 * will cover most common usage for this consumer
1800 * ethernet controller. The work-around takes a lot of
1801 * CPU cycles if Tx buffer is not aligned on 4 bytes
1804 cflags |= ALE_TD_CXSUM;
1805 /* Set checksum start offset. */
1806 cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
1807 /* Set checksum insertion position of TCP/UDP. */
1808 cflags |= ((poff + m->m_pkthdr.csum_data) <<
1809 ALE_TD_CSUM_XSUMOFFSET_SHIFT);
1812 /* Configure VLAN hardware tag insertion. */
1813 if ((m->m_flags & M_VLANTAG) != 0) {
1814 vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
1815 vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
1816 cflags |= ALE_TD_INSERT_VLAN_TAG;
1820 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1822 * Make sure the first fragment contains
1823 * only ethernet and IP/TCP header with options.
1825 hdrlen = poff + (tcp->th_off << 2);
1826 desc = &sc->ale_cdata.ale_tx_ring[prod];
1827 desc->addr = htole64(txsegs[i].ds_addr);
1828 desc->len = htole32(ALE_TX_BYTES(hdrlen) | vtag);
1829 desc->flags = htole32(cflags);
1830 sc->ale_cdata.ale_tx_cnt++;
1831 ALE_DESC_INC(prod, ALE_TX_RING_CNT);
1832 if (m->m_len - hdrlen > 0) {
1833 /* Handle remaining payload of the first fragment. */
1834 desc = &sc->ale_cdata.ale_tx_ring[prod];
1835 desc->addr = htole64(txsegs[i].ds_addr + hdrlen);
1836 desc->len = htole32(ALE_TX_BYTES(m->m_len - hdrlen) |
1838 desc->flags = htole32(cflags);
1839 sc->ale_cdata.ale_tx_cnt++;
1840 ALE_DESC_INC(prod, ALE_TX_RING_CNT);
1844 for (; i < nsegs; i++) {
1845 desc = &sc->ale_cdata.ale_tx_ring[prod];
1846 desc->addr = htole64(txsegs[i].ds_addr);
1847 desc->len = htole32(ALE_TX_BYTES(txsegs[i].ds_len) | vtag);
1848 desc->flags = htole32(cflags);
1849 sc->ale_cdata.ale_tx_cnt++;
1850 ALE_DESC_INC(prod, ALE_TX_RING_CNT);
1852 /* Update producer index. */
1853 sc->ale_cdata.ale_tx_prod = prod;
1854 /* Set TSO header on the first descriptor. */
1855 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1856 desc = &sc->ale_cdata.ale_tx_ring[si];
1857 desc->flags |= htole32(ALE_TD_TSO_HDR);
1860 /* Finally set EOP on the last descriptor. */
1861 prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
1862 desc = &sc->ale_cdata.ale_tx_ring[prod];
1863 desc->flags |= htole32(ALE_TD_EOP);
1865 /* Swap dmamap of the first and the last. */
1866 txd = &sc->ale_cdata.ale_txdesc[prod];
1867 map = txd_last->tx_dmamap;
1868 txd_last->tx_dmamap = txd->tx_dmamap;
1869 txd->tx_dmamap = map;
1872 /* Sync descriptors. */
1873 bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
1874 sc->ale_cdata.ale_tx_ring_map,
1875 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1881 ale_start(struct ifnet *ifp)
1883 struct ale_softc *sc;
1887 ale_start_locked(ifp);
1892 ale_start_locked(struct ifnet *ifp)
1894 struct ale_softc *sc;
1895 struct mbuf *m_head;
1900 ALE_LOCK_ASSERT(sc);
1902 /* Reclaim transmitted frames. */
1903 if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
1906 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1907 IFF_DRV_RUNNING || (sc->ale_flags & ALE_FLAG_LINK) == 0)
1910 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1911 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1915 * Pack the data into the transmit ring. If we
1916 * don't have room, set the OACTIVE flag and wait
1917 * for the NIC to drain the ring.
1919 if (ale_encap(sc, &m_head)) {
1922 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1923 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1929 * If there's a BPF listener, bounce a copy of this frame
1932 ETHER_BPF_MTAP(ifp, m_head);
1937 CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
1938 sc->ale_cdata.ale_tx_prod);
1939 /* Set a timeout in case the chip goes out to lunch. */
1940 sc->ale_watchdog_timer = ALE_TX_TIMEOUT;
1945 ale_watchdog(struct ale_softc *sc)
1949 ALE_LOCK_ASSERT(sc);
1951 if (sc->ale_watchdog_timer == 0 || --sc->ale_watchdog_timer)
1955 if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
1956 if_printf(sc->ale_ifp, "watchdog timeout (lost link)\n");
1958 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1959 ale_init_locked(sc);
1962 if_printf(sc->ale_ifp, "watchdog timeout -- resetting\n");
1964 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1965 ale_init_locked(sc);
1966 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1967 ale_start_locked(ifp);
1971 ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1973 struct ale_softc *sc;
1975 struct mii_data *mii;
1979 ifr = (struct ifreq *)data;
1983 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALE_JUMBO_MTU ||
1984 ((sc->ale_flags & ALE_FLAG_JUMBO) == 0 &&
1985 ifr->ifr_mtu > ETHERMTU))
1987 else if (ifp->if_mtu != ifr->ifr_mtu) {
1989 ifp->if_mtu = ifr->ifr_mtu;
1990 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1991 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1992 ale_init_locked(sc);
1999 if ((ifp->if_flags & IFF_UP) != 0) {
2000 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2001 if (((ifp->if_flags ^ sc->ale_if_flags)
2002 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2005 ale_init_locked(sc);
2008 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2011 sc->ale_if_flags = ifp->if_flags;
2017 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2023 mii = device_get_softc(sc->ale_miibus);
2024 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2028 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2029 if ((mask & IFCAP_TXCSUM) != 0 &&
2030 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2031 ifp->if_capenable ^= IFCAP_TXCSUM;
2032 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2033 ifp->if_hwassist |= ALE_CSUM_FEATURES;
2035 ifp->if_hwassist &= ~ALE_CSUM_FEATURES;
2037 if ((mask & IFCAP_RXCSUM) != 0 &&
2038 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
2039 ifp->if_capenable ^= IFCAP_RXCSUM;
2040 if ((mask & IFCAP_TSO4) != 0 &&
2041 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2042 ifp->if_capenable ^= IFCAP_TSO4;
2043 if ((ifp->if_capenable & IFCAP_TSO4) != 0)
2044 ifp->if_hwassist |= CSUM_TSO;
2046 ifp->if_hwassist &= ~CSUM_TSO;
2049 if ((mask & IFCAP_WOL_MCAST) != 0 &&
2050 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2051 ifp->if_capenable ^= IFCAP_WOL_MCAST;
2052 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2053 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2054 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2055 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2056 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2057 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2058 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2059 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2060 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2061 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2062 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2063 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
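/*
 * VLAN hardware TSO depends on tag insertion, so it is
 * force-cleared below whenever hardware tagging is disabled.
 */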
2064 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2065 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
2069 VLAN_CAPABILITIES(ifp);
2072 error = ether_ioctl(ifp, cmd, data);
2080 ale_mac_config(struct ale_softc *sc)
2082 struct mii_data *mii;
2085 ALE_LOCK_ASSERT(sc);
2087 mii = device_get_softc(sc->ale_miibus);
2088 reg = CSR_READ_4(sc, ALE_MAC_CFG);
2089 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2090 MAC_CFG_SPEED_MASK);
2091 /* Reprogram MAC with resolved speed/duplex. */
2092 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2095 reg |= MAC_CFG_SPEED_10_100;
2098 reg |= MAC_CFG_SPEED_1000;
2101 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2102 reg |= MAC_CFG_FULL_DUPLEX;
2103 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2104 reg |= MAC_CFG_TX_FC;
2105 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2106 reg |= MAC_CFG_RX_FC;
2108 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
2112 ale_stats_clear(struct ale_softc *sc)
2118 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
2119 CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
2120 i += sizeof(uint32_t);
2122 /* Read Tx statistics. */
2123 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
2124 CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
2125 i += sizeof(uint32_t);
2130 ale_stats_update(struct ale_softc *sc)
2132 struct ale_hw_stats *stat;
2133 struct smb sb, *smb;
2138 ALE_LOCK_ASSERT(sc);
2141 stat = &sc->ale_stats;
/* Read Rx statistics; the MIB counters clear on read. */
2145 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
2146 *reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
2147 i += sizeof(uint32_t);
2149 /* Read Tx statistics. */
2150 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
2151 *reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
2152 i += sizeof(uint32_t);
2156 stat->rx_frames += smb->rx_frames;
2157 stat->rx_bcast_frames += smb->rx_bcast_frames;
2158 stat->rx_mcast_frames += smb->rx_mcast_frames;
2159 stat->rx_pause_frames += smb->rx_pause_frames;
2160 stat->rx_control_frames += smb->rx_control_frames;
2161 stat->rx_crcerrs += smb->rx_crcerrs;
2162 stat->rx_lenerrs += smb->rx_lenerrs;
2163 stat->rx_bytes += smb->rx_bytes;
2164 stat->rx_runts += smb->rx_runts;
2165 stat->rx_fragments += smb->rx_fragments;
2166 stat->rx_pkts_64 += smb->rx_pkts_64;
2167 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2168 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2169 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2170 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2171 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2172 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2173 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2174 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2175 stat->rx_rrs_errs += smb->rx_rrs_errs;
2176 stat->rx_alignerrs += smb->rx_alignerrs;
2177 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2178 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2179 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2182 stat->tx_frames += smb->tx_frames;
2183 stat->tx_bcast_frames += smb->tx_bcast_frames;
2184 stat->tx_mcast_frames += smb->tx_mcast_frames;
2185 stat->tx_pause_frames += smb->tx_pause_frames;
2186 stat->tx_excess_defer += smb->tx_excess_defer;
2187 stat->tx_control_frames += smb->tx_control_frames;
2188 stat->tx_deferred += smb->tx_deferred;
2189 stat->tx_bytes += smb->tx_bytes;
2190 stat->tx_pkts_64 += smb->tx_pkts_64;
2191 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2192 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2193 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2194 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2195 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2196 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2197 stat->tx_single_colls += smb->tx_single_colls;
2198 stat->tx_multi_colls += smb->tx_multi_colls;
2199 stat->tx_late_colls += smb->tx_late_colls;
2200 stat->tx_excess_colls += smb->tx_excess_colls;
2201 stat->tx_underrun += smb->tx_underrun;
2202 stat->tx_desc_underrun += smb->tx_desc_underrun;
2203 stat->tx_lenerrs += smb->tx_lenerrs;
2204 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2205 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2206 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2208 /* Update counters in ifnet. */
2209 ifp->if_opackets += smb->tx_frames;
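/*
 * Collisions are estimated, not exact: a frame that saw multiple
 * collisions is counted at least twice, and each excessive-collision
 * frame is weighted by the default half-duplex retry limit.
 */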
2211 ifp->if_collisions += smb->tx_single_colls +
2212 smb->tx_multi_colls * 2 + smb->tx_late_colls +
2213 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2215 ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
2216 smb->tx_underrun + smb->tx_pkts_truncated;
2218 ifp->if_ipackets += smb->rx_frames;
2220 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2221 smb->rx_runts + smb->rx_pkts_truncated +
2222 smb->rx_fifo_oflows + smb->rx_rrs_errs +
2229 struct ale_softc *sc;
2232 sc = (struct ale_softc *)arg;
2234 status = CSR_READ_4(sc, ALE_INTR_STATUS);
2235 if ((status & ALE_INTRS) == 0)
2236 return (FILTER_STRAY);
2237 /* Disable interrupts. */
2238 CSR_WRITE_4(sc, ALE_INTR_STATUS, INTR_DIS_INT);
2239 taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task);
2241 return (FILTER_HANDLED);
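/*
 * Deferred interrupt handler. The filter above has already masked
 * further interrupts, so this task can process Tx/Rx events and
 * only unmasks interrupts on its way out.
 */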
2245 ale_int_task(void *arg, int pending)
2247 struct ale_softc *sc;
2252 sc = (struct ale_softc *)arg;
2254 status = CSR_READ_4(sc, ALE_INTR_STATUS);
2256 if (sc->ale_morework != 0)
2257 status |= INTR_RX_PKT;
2258 if ((status & ALE_INTRS) == 0)
/* Acknowledge interrupts but keep them disabled. */
2262 CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);
2266 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2267 more = ale_rxeof(sc, sc->ale_process_limit);
2269 sc->ale_morework = 1;
2270 else if (more == EIO) {
2271 sc->ale_stats.reset_brk_seq++;
2272 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2273 ale_init_locked(sc);
2278 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
2279 if ((status & INTR_DMA_RD_TO_RST) != 0)
2280 device_printf(sc->ale_dev,
2281 "DMA read error! -- resetting\n");
2282 if ((status & INTR_DMA_WR_TO_RST) != 0)
2283 device_printf(sc->ale_dev,
2284 "DMA write error! -- resetting\n");
2285 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2286 ale_init_locked(sc);
2290 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2291 ale_start_locked(ifp);
2294 if (more == EAGAIN ||
2295 (CSR_READ_4(sc, ALE_INTR_STATUS) & ALE_INTRS) != 0) {
2297 taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task);
2304 /* Re-enable interrupts. */
2305 CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
2309 ale_txeof(struct ale_softc *sc)
2312 struct ale_txdesc *txd;
2313 uint32_t cons, prod;
2316 ALE_LOCK_ASSERT(sc);
2320 if (sc->ale_cdata.ale_tx_cnt == 0)
2323 bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
2324 sc->ale_cdata.ale_tx_ring_map,
2325 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
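/*
 * The Tx consumer index is normally taken from the Tx CMB that the
 * hardware DMAs into host memory; on parts with the Tx CMB bug the
 * CMB is left disabled and the index is read from the register
 * instead.
 */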
2326 if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
2327 bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
2328 sc->ale_cdata.ale_tx_cmb_map,
2329 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2330 prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
2332 prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
2333 cons = sc->ale_cdata.ale_tx_cons;
2335 * Go through our Tx list and free mbufs for those
2336 * frames which have been transmitted.
2338 for (prog = 0; cons != prod; prog++,
2339 ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
2340 if (sc->ale_cdata.ale_tx_cnt <= 0)
2343 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2344 sc->ale_cdata.ale_tx_cnt--;
2345 txd = &sc->ale_cdata.ale_txdesc[cons];
2346 if (txd->tx_m != NULL) {
2347 /* Reclaim transmitted mbufs. */
2348 bus_dmamap_sync(sc->ale_cdata.ale_tx_tag,
2349 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2350 bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
2358 sc->ale_cdata.ale_tx_cons = cons;
* Unarm the watchdog timer only when there are no pending
* Tx descriptors in the queue.
2363 if (sc->ale_cdata.ale_tx_cnt == 0)
2364 sc->ale_watchdog_timer = 0;
2369 ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
2370 uint32_t length, uint32_t *prod)
2372 struct ale_rx_page *rx_page;
2375 /* Update consumer position. */
2376 rx_page->cons += roundup(length + sizeof(struct rx_rs),
2378 if (rx_page->cons >= ALE_RX_PAGE_SZ) {
* End of Rx page reached; let the hardware reuse this page.
2384 *rx_page->cmb_addr = 0;
2385 bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
2386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2387 CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
2389 /* Switch to alternate Rx page. */
2390 sc->ale_cdata.ale_rx_curp ^= 1;
2392 &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
2393 /* Page flipped, sync CMB and Rx page. */
2394 bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
2395 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2396 bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
2397 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* Sync completed; cache the updated producer index. */
2399 *prod = *rx_page->cmb_addr;
* It seems the AR81xx controller can compute a partial checksum.
* The partial checksum value could be used to accelerate checksum
* computation for fragmented TCP/UDP packets; the upper network
* stack already takes advantage of partial checksums in the IP
* reassembly stage. However, the correctness of the partial
* hardware checksum assistance is unverified due to the lack of a
* data sheet. In addition, the controller's Rx path, which requires
* copying every received frame, effectively nullifies one of the
* nicest offload capabilities of the controller.
2416 ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
2423 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2424 if ((status & ALE_RD_IPCSUM_NOK) == 0)
2425 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2427 if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
2428 if (((status & ALE_RD_IPV4_FRAG) == 0) &&
2429 ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
2430 ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
2431 m->m_pkthdr.csum_flags |=
2432 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2433 m->m_pkthdr.csum_data = 0xffff;
2436 if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
2437 (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
2438 p = mtod(m, char *);
2440 if ((status & ALE_RD_802_3) != 0)
2441 p += LLC_SNAPFRAMELEN;
2442 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 &&
2443 (status & ALE_RD_VLAN) != 0)
2444 p += ETHER_VLAN_ENCAP_LEN;
2445 ip = (struct ip *)p;
2446 if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
2448 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2450 m->m_pkthdr.csum_data = 0xffff;
* Don't mark the checksum bad for TCP/UDP frames,
* as fragmented frames may always have the
* bad-checksum bit set in their frame status.
2460 /* Process received frames. */
2462 ale_rxeof(struct ale_softc *sc, int count)
2464 struct ale_rx_page *rx_page;
2468 uint32_t length, prod, seqno, status, vtags;
2472 rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
2473 bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
2474 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2475 bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
2476 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
* Don't access the producer index directly, as the hardware
* may update it while the Rx handler is in progress. It would
* be even better if there were a way to let the hardware
* know how far the driver has processed received frames.
* Alternatively, the hardware could provide a way to disable
* CMB updates until the driver acknowledges the end of CMB
* access.
2486 prod = *rx_page->cmb_addr;
2487 for (prog = 0; prog < count; prog++) {
2488 if (rx_page->cons >= prod)
2490 rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
2491 seqno = ALE_RX_SEQNO(le32toh(rs->seqno));
2492 if (sc->ale_cdata.ale_rx_seqno != seqno) {
* Normally this should not happen unless there is a severe
* driver bug or corrupted memory. However, it seems to occur
* under certain conditions triggered by abrupt Rx events,
* such as a remote host initiating a bulk transfer. It is
* not easy to reproduce, and it does not appear to be
* related to a hardware FIFO overflow or to Tx CMB update
* activity. Similar behaviour was also seen on the RealTek
* 8139, which uses a resembling Rx scheme.
2506 device_printf(sc->ale_dev,
2507 "garbled seq: %u, expected: %u -- "
2508 "resetting!\n", seqno,
2509 sc->ale_cdata.ale_rx_seqno);
2512 /* Frame received. */
2513 sc->ale_cdata.ale_rx_seqno++;
2514 length = ALE_RX_BYTES(le32toh(rs->length));
2515 status = le32toh(rs->flags);
2516 if ((status & ALE_RD_ERROR) != 0) {
* We want to pass the following frames to the upper
* layer regardless of the error bits in the Rx return
* status:
*   o IP/TCP/UDP checksum is bad.
*   o frame length and protocol-specific length
*     differ.
2526 if ((status & (ALE_RD_CRC | ALE_RD_CODE |
2527 ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
2528 ALE_RD_TRUNC)) != 0) {
2529 ale_rx_update_page(sc, &rx_page, length, &prod);
* m_devget(9) is the major bottleneck of ale(4); it stems
* from a hardware limitation. For jumbo frames we could get
* slightly better performance if the driver used m_getjcl(9)
* with a proper buffer size argument. However, that would
* make the code more complicated, and users are unlikely to
* expect good Rx performance numbers from these low-end
* consumer Ethernet controllers.
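*
* A hypothetical, untested sketch of that m_getjcl(9) alternative
* (assumes an MJUM9BYTES cluster fits any accepted frame and, for
* brevity, ignores the ETHER_ALIGN padding m_devget(9) applies):
*
*	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
*	if (m != NULL) {
*		m->m_len = m->m_pkthdr.len = length - ETHER_CRC_LEN;
*		bcopy(rs + 1, mtod(m, char *), m->m_len);
*		m->m_pkthdr.rcvif = ifp;
*	}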
2542 m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
2543 ETHER_ALIGN, ifp, NULL);
2546 ale_rx_update_page(sc, &rx_page, length, &prod);
2549 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2550 (status & ALE_RD_IPV4) != 0)
2551 ale_rxcsum(sc, m, status);
2552 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2553 (status & ALE_RD_VLAN) != 0) {
2554 vtags = ALE_RX_VLAN(le32toh(rs->vtags));
2555 m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags);
2556 m->m_flags |= M_VLANTAG;
2559 /* Pass it to upper layer. */
2561 (*ifp->if_input)(ifp, m);
2564 ale_rx_update_page(sc, &rx_page, length, &prod);
2567 return (count > 0 ? 0 : EAGAIN);
2573 struct ale_softc *sc;
2574 struct mii_data *mii;
2576 sc = (struct ale_softc *)arg;
2578 ALE_LOCK_ASSERT(sc);
2580 mii = device_get_softc(sc->ale_miibus);
2582 ale_stats_update(sc);
* Reclaim Tx buffers that have been transferred. This is not
* strictly needed here, but it releases allocated mbuf chains
* sooner and limits the maximum reclaim delay to one hz.
2590 callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);
2594 ale_reset(struct ale_softc *sc)
2599 /* Initialize PCIe module. From Linux. */
2600 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2602 CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
2603 for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
2605 if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
2609 device_printf(sc->ale_dev, "master reset timeout!\n");
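/* Master reset issued; now wait for the state machines to go idle. */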
2611 for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
2612 if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
2618 device_printf(sc->ale_dev, "reset timeout(0x%08x)!\n", reg);
2624 struct ale_softc *sc;
2626 sc = (struct ale_softc *)xsc;
2628 ale_init_locked(sc);
2633 ale_init_locked(struct ale_softc *sc)
2636 struct mii_data *mii;
2637 uint8_t eaddr[ETHER_ADDR_LEN];
2639 uint32_t reg, rxf_hi, rxf_lo;
2641 ALE_LOCK_ASSERT(sc);
2644 mii = device_get_softc(sc->ale_miibus);
2646 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2649 * Cancel any pending I/O.
2653 * Reset the chip to a known state.
2656 /* Initialize Tx descriptors, DMA memory blocks. */
2657 ale_init_rx_pages(sc);
2658 ale_init_tx_ring(sc);
2660 /* Reprogram the station address. */
2661 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
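/*
 * The 6-byte station address is split across two registers: PAR0
 * holds the low four bytes and PAR1 the high two, packed in the
 * byte order the hardware expects.
 */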
2662 CSR_WRITE_4(sc, ALE_PAR0,
2663 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2664 CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);
* Clear WOL status and disable all WOL features, as WOL
* would interfere with Rx operation under normal conditions.
2669 CSR_READ_4(sc, ALE_WOL_CFG);
2670 CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
* Set the Tx descriptor/RXF0/CMB base addresses. They share
* the same high address part of the DMAable region.
2675 paddr = sc->ale_cdata.ale_tx_ring_paddr;
2676 CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
2677 CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
2678 CSR_WRITE_4(sc, ALE_TPD_CNT,
2679 (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);
/* Set the Rx page base addresses; note we use a single queue. */
2681 paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
2682 CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
2683 paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
2684 CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));
2685 /* Set Tx/Rx CMB addresses. */
2686 paddr = sc->ale_cdata.ale_tx_cmb_paddr;
2687 CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
2688 paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
2689 CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
2690 paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
2691 CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));
/* Mark the RXF0 pages as valid. */
2693 CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
2694 CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
* No need to initialize RXF1/RXF2/RXF3; we don't use multiple queues.
2700 /* Set Rx page size, excluding guard frame size. */
2701 CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);
2702 /* Tell hardware that we're ready to load DMA blocks. */
2703 CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);
2705 /* Set Rx/Tx interrupt trigger threshold. */
2706 CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
2707 (4 << INT_TRIG_TX_THRESH_SHIFT));
* Set the interrupt trigger timer; its purpose and its relation
* to the interrupt moderation mechanism are not yet clear.
2713 CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
2714 ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
2715 (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));
2717 /* Configure interrupt moderation timer. */
2718 reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
2719 reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
2720 CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
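/*
 * The moderation timers bound the Rx/Tx interrupt rate; a zero
 * microsecond setting leaves the corresponding timer disabled below.
 */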
2721 reg = CSR_READ_4(sc, ALE_MASTER_CFG);
2722 reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
2723 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2724 if (ALE_USECS(sc->ale_int_rx_mod) != 0)
2725 reg |= MASTER_IM_RX_TIMER_ENB;
2726 if (ALE_USECS(sc->ale_int_tx_mod) != 0)
2727 reg |= MASTER_IM_TX_TIMER_ENB;
2728 CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
2729 CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));
/* Set the maximum frame size of the controller. */
2732 if (ifp->if_mtu < ETHERMTU)
2733 sc->ale_max_frame_size = ETHERMTU;
2735 sc->ale_max_frame_size = ifp->if_mtu;
2736 sc->ale_max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2738 CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);
2739 /* Configure IPG/IFG parameters. */
2740 CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
2741 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
2742 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2743 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2744 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
2745 /* Set parameters for half-duplex media. */
2746 CSR_WRITE_4(sc, ALE_HDPX_CFG,
2747 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2748 HDPX_CFG_LCOL_MASK) |
2749 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2750 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2751 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2752 HDPX_CFG_ABEBT_MASK) |
2753 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2754 HDPX_CFG_JAMIPG_MASK));
2756 /* Configure Tx jumbo frame parameters. */
2757 if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
2758 if (ifp->if_mtu < ETHERMTU)
2759 reg = sc->ale_max_frame_size;
2760 else if (ifp->if_mtu < 6 * 1024)
2761 reg = (sc->ale_max_frame_size * 2) / 3;
2763 reg = sc->ale_max_frame_size / 2;
2764 CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
2765 roundup(reg, TX_JUMBO_THRESH_UNIT) >>
2766 TX_JUMBO_THRESH_UNIT_SHIFT);
2768 /* Configure TxQ. */
2769 reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
2770 << TXQ_CFG_TX_FIFO_BURST_SHIFT;
2771 reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
2772 TXQ_CFG_TPD_BURST_MASK;
2773 CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);
2775 /* Configure Rx jumbo frame & flow control parameters. */
2776 if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
2777 reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
2778 CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
2779 (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
2780 RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
2781 ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
2782 RX_JUMBO_LKAH_MASK));
2783 reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
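/*
 * Flow-control thresholds: pause when the Rx FIFO fills beyond
 * roughly 70% of its length and resume once it drains below 30%.
 */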
2784 rxf_hi = (reg * 7) / 10;
rxf_lo = (reg * 3) / 10;
2786 CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
2787 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2788 RX_FIFO_PAUSE_THRESH_LO_MASK) |
2789 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2790 RX_FIFO_PAUSE_THRESH_HI_MASK));
2794 CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
2795 CSR_WRITE_4(sc, ALE_RSS_CPU, 0);
2797 /* Configure RxQ. */
2798 CSR_WRITE_4(sc, ALE_RXQ_CFG,
2799 RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
2801 /* Configure DMA parameters. */
2803 if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
2804 reg |= DMA_CFG_TXCMB_ENB;
2805 CSR_WRITE_4(sc, ALE_DMA_CFG,
2806 DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
2807 sc->ale_dma_rd_burst | reg |
2808 sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
2809 ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2810 DMA_CFG_RD_DELAY_CNT_MASK) |
2811 ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2812 DMA_CFG_WR_DELAY_CNT_MASK));
* The hardware can be configured to issue an SMB interrupt at a
* programmed interval. Since the driver already has a callout
* invoked every hz, we use that instead of relying on the
* periodic SMB interrupt.
2820 CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));
2821 /* Clear MAC statistics. */
2822 ale_stats_clear(sc);
* Configure Tx/Rx MACs.
*  - Auto-padding for short frames.
*  - Enable CRC generation.
* Actual reconfiguration of the MAC for the resolved
* speed/duplex follows detection of link establishment.
* The AR81xx always performs checksum computation regardless
* of the MAC_CFG_RXCSUM_ENB bit; in fact, setting the bit
* causes Rx handling issues for fragmented IP datagrams due
* to a silicon bug.
2835 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
2836 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2837 MAC_CFG_PREAMBLE_MASK);
2838 if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
2839 reg |= MAC_CFG_SPEED_10_100;
2841 reg |= MAC_CFG_SPEED_1000;
2842 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
2844 /* Set up the receive filter. */
/* Acknowledge all pending interrupts and clear them. */
2849 CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
2850 CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
2851 CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);
2853 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2854 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2856 sc->ale_flags &= ~ALE_FLAG_LINK;
2857 /* Switch to the current media. */
2860 callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);
2864 ale_stop(struct ale_softc *sc)
2867 struct ale_txdesc *txd;
2871 ALE_LOCK_ASSERT(sc);
2873 * Mark the interface down and cancel the watchdog timer.
2876 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2877 sc->ale_flags &= ~ALE_FLAG_LINK;
2878 callout_stop(&sc->ale_tick_ch);
2879 sc->ale_watchdog_timer = 0;
2880 ale_stats_update(sc);
2881 /* Disable interrupts. */
2882 CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
2883 CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
2884 /* Disable queue processing and DMA. */
2885 reg = CSR_READ_4(sc, ALE_TXQ_CFG);
2886 reg &= ~TXQ_CFG_ENB;
2887 CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
2888 reg = CSR_READ_4(sc, ALE_RXQ_CFG);
2889 reg &= ~RXQ_CFG_ENB;
2890 CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
2891 reg = CSR_READ_4(sc, ALE_DMA_CFG);
2892 reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
2893 CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
2895 /* Stop Rx/Tx MACs. */
/* Disable interrupts again; the taskqueue handler may have touched them. */
2898 CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
2901 * Free TX mbufs still in the queues.
2903 for (i = 0; i < ALE_TX_RING_CNT; i++) {
2904 txd = &sc->ale_cdata.ale_txdesc[i];
2905 if (txd->tx_m != NULL) {
2906 bus_dmamap_sync(sc->ale_cdata.ale_tx_tag,
2907 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2908 bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
2917 ale_stop_mac(struct ale_softc *sc)
2922 ALE_LOCK_ASSERT(sc);
2924 reg = CSR_READ_4(sc, ALE_MAC_CFG);
2925 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2926 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2927 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
2930 for (i = ALE_TIMEOUT; i > 0; i--) {
2931 reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
2937 device_printf(sc->ale_dev,
2938 "could not disable Tx/Rx MAC(0x%08x)!\n", reg);
2942 ale_init_tx_ring(struct ale_softc *sc)
2944 struct ale_txdesc *txd;
2947 ALE_LOCK_ASSERT(sc);
2949 sc->ale_cdata.ale_tx_prod = 0;
2950 sc->ale_cdata.ale_tx_cons = 0;
2951 sc->ale_cdata.ale_tx_cnt = 0;
2953 bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
2954 bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
2955 for (i = 0; i < ALE_TX_RING_CNT; i++) {
2956 txd = &sc->ale_cdata.ale_txdesc[i];
2959 *sc->ale_cdata.ale_tx_cmb = 0;
2960 bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
2961 sc->ale_cdata.ale_tx_cmb_map,
2962 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2963 bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
2964 sc->ale_cdata.ale_tx_ring_map,
2965 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2969 ale_init_rx_pages(struct ale_softc *sc)
2971 struct ale_rx_page *rx_page;
2974 ALE_LOCK_ASSERT(sc);
2976 sc->ale_morework = 0;
2977 sc->ale_cdata.ale_rx_seqno = 0;
2978 sc->ale_cdata.ale_rx_curp = 0;
2980 for (i = 0; i < ALE_RX_PAGES; i++) {
2981 rx_page = &sc->ale_cdata.ale_rx_page[i];
2982 bzero(rx_page->page_addr, sc->ale_pagesize);
2983 bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
2985 *rx_page->cmb_addr = 0;
2986 bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
2987 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2988 bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
2989 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2994 ale_rxvlan(struct ale_softc *sc)
2999 ALE_LOCK_ASSERT(sc);
3002 reg = CSR_READ_4(sc, ALE_MAC_CFG);
3003 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3004 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3005 reg |= MAC_CFG_VLAN_TAG_STRIP;
3006 CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
3010 ale_rxfilter(struct ale_softc *sc)
3013 struct ifmultiaddr *ifma;
3018 ALE_LOCK_ASSERT(sc);
3022 rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
3023 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3024 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3025 rxcfg |= MAC_CFG_BCAST;
3026 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3027 if ((ifp->if_flags & IFF_PROMISC) != 0)
3028 rxcfg |= MAC_CFG_PROMISC;
3029 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3030 rxcfg |= MAC_CFG_ALLMULTI;
3031 CSR_WRITE_4(sc, ALE_MAR0, 0xFFFFFFFF);
3032 CSR_WRITE_4(sc, ALE_MAR1, 0xFFFFFFFF);
3033 CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
3037 /* Program new filter. */
3038 bzero(mchash, sizeof(mchash));
3040 if_maddr_rlock(ifp);
3041 TAILQ_FOREACH(ifma, &sc->ale_ifp->if_multiaddrs, ifma_link) {
3042 if (ifma->ifma_addr->sa_family != AF_LINK)
3044 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3045 ifma->ifma_addr), ETHER_ADDR_LEN);
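/*
 * The top bit of the big-endian CRC selects MAR0 or MAR1; the next
 * five bits select the bit within the chosen register.
 */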
3046 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3048 if_maddr_runlock(ifp);
3050 CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
3051 CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
3052 CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
3056 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3062 value = *(int *)arg1;
3063 error = sysctl_handle_int(oidp, &value, 0, req);
3064 if (error || req->newptr == NULL)
3066 if (value < low || value > high)
3068 *(int *)arg1 = value;
3074 sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS)
3076 return (sysctl_int_range(oidp, arg1, arg2, req,
3077 ALE_PROC_MIN, ALE_PROC_MAX));
3081 sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS)
3084 return (sysctl_int_range(oidp, arg1, arg2, req,
3085 ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX));
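/*
 * Hypothetical usage sketch for the range-checked handlers above
 * (the node names are assumptions; the actual sysctl nodes are
 * created in ale_attach(), which is not shown here):
 *
 *	# bound the per-interrupt Rx processing loop
 *	sysctl dev.ale.0.process_limit=64
 *	# interrupt moderation timers, in microseconds
 *	sysctl dev.ale.0.int_rx_mod=50
 *	sysctl dev.ale.0.int_tx_mod=1000
 */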