2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
27 * This driver is heavily based on the age(4) Attansic L1 driver by Pyun YongHyeon.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/queue.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 #include <sys/taskqueue.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 #include <net/if_vlan_var.h>
57 #include <netinet/in.h>
58 #include <netinet/in_systm.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
67 #include <machine/bus.h>
69 #include "miibus_if.h"
75 * Devices supported by this driver.
77 static struct ae_dev {
82 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
83 "Attansic Technology Corp, L2 FastEthernet" },
85 #define AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs))
87 static struct resource_spec ae_res_spec_mem[] = {
88 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
91 static struct resource_spec ae_res_spec_irq[] = {
92 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
95 static struct resource_spec ae_res_spec_msi[] = {
96 { SYS_RES_IRQ, 1, RF_ACTIVE },
100 static int ae_probe(device_t dev);
101 static int ae_attach(device_t dev);
102 static void ae_pcie_init(ae_softc_t *sc);
103 static void ae_phy_reset(ae_softc_t *sc);
104 static void ae_phy_init(ae_softc_t *sc);
105 static int ae_reset(ae_softc_t *sc);
106 static void ae_init(void *arg);
107 static int ae_init_locked(ae_softc_t *sc);
108 static int ae_detach(device_t dev);
109 static int ae_miibus_readreg(device_t dev, int phy, int reg);
110 static int ae_miibus_writereg(device_t dev, int phy, int reg, int val);
111 static void ae_miibus_statchg(device_t dev);
112 static void ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
113 static int ae_mediachange(struct ifnet *ifp);
114 static void ae_retrieve_address(ae_softc_t *sc);
115 static void ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
117 static int ae_alloc_rings(ae_softc_t *sc);
118 static void ae_dma_free(ae_softc_t *sc);
119 static int ae_shutdown(device_t dev);
120 static int ae_suspend(device_t dev);
121 static void ae_powersave_disable(ae_softc_t *sc);
122 static void ae_powersave_enable(ae_softc_t *sc);
123 static int ae_resume(device_t dev);
124 static unsigned int ae_tx_avail_size(ae_softc_t *sc);
125 static int ae_encap(ae_softc_t *sc, struct mbuf **m_head);
126 static void ae_start(struct ifnet *ifp);
127 static void ae_link_task(void *arg, int pending);
128 static void ae_stop_rxmac(ae_softc_t *sc);
129 static void ae_stop_txmac(ae_softc_t *sc);
130 static void ae_tx_task(void *arg, int pending);
131 static void ae_mac_config(ae_softc_t *sc);
132 static int ae_intr(void *arg);
133 static void ae_int_task(void *arg, int pending);
134 static void ae_tx_intr(ae_softc_t *sc);
135 static int ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
136 static void ae_rx_intr(ae_softc_t *sc);
137 static void ae_watchdog(ae_softc_t *sc);
138 static void ae_tick(void *arg);
139 static void ae_rxfilter(ae_softc_t *sc);
140 static void ae_rxvlan(ae_softc_t *sc);
141 static int ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
142 static void ae_stop(ae_softc_t *sc);
143 static int ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
144 static int ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
145 static int ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
146 static int ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
147 static void ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
148 static void ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
149 static void ae_init_tunables(ae_softc_t *sc);
151 static device_method_t ae_methods[] = {
152 /* Device interface. */
153 DEVMETHOD(device_probe, ae_probe),
154 DEVMETHOD(device_attach, ae_attach),
155 DEVMETHOD(device_detach, ae_detach),
156 DEVMETHOD(device_shutdown, ae_shutdown),
157 DEVMETHOD(device_suspend, ae_suspend),
158 DEVMETHOD(device_resume, ae_resume),
161 DEVMETHOD(miibus_readreg, ae_miibus_readreg),
162 DEVMETHOD(miibus_writereg, ae_miibus_writereg),
163 DEVMETHOD(miibus_statchg, ae_miibus_statchg),
167 static driver_t ae_driver = {
172 static devclass_t ae_devclass;
174 DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
175 DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
176 MODULE_DEPEND(ae, pci, 1, 1, 1);
177 MODULE_DEPEND(ae, ether, 1, 1, 1);
178 MODULE_DEPEND(ae, miibus, 1, 1, 1);
183 static int msi_disable = 0;
184 TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
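/*
 * As with other loader tunables, hw.ae.msi_disable is normally set from
 * /boot/loader.conf, e.g.:
 *
 *	hw.ae.msi_disable="1"
 *
 * to force legacy INTx interrupts instead of MSI.
 */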
186 #define AE_READ_4(sc, reg) \
187 bus_read_4((sc)->mem[0], (reg))
188 #define AE_READ_2(sc, reg) \
189 bus_read_2((sc)->mem[0], (reg))
190 #define AE_READ_1(sc, reg) \
191 bus_read_1((sc)->mem[0], (reg))
192 #define AE_WRITE_4(sc, reg, val) \
193 bus_write_4((sc)->mem[0], (reg), (val))
194 #define AE_WRITE_2(sc, reg, val) \
195 bus_write_2((sc)->mem[0], (reg), (val))
196 #define AE_WRITE_1(sc, reg, val) \
197 bus_write_1((sc)->mem[0], (reg), (val))
198 #define AE_PHY_READ(sc, reg) \
199 ae_miibus_readreg(sc->dev, 0, reg)
200 #define AE_PHY_WRITE(sc, reg, val) \
201 ae_miibus_writereg(sc->dev, 0, reg, val)
202 #define AE_CHECK_EADDR_VALID(eaddr) \
203 ((eaddr[0] == 0 && eaddr[1] == 0) || \
204 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
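/*
 * Despite its name, the macro above evaluates to nonzero when the address
 * is *invalid* (all zeros or all ones after the 16-bit mask on eaddr[1]);
 * that is how it is tested in ae_get_vpd_eaddr() and ae_get_reg_eaddr().
 */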
205 #define AE_RXD_VLAN(vtag) \
206 (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
207 #define AE_TXD_VLAN(vtag) \
208 (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
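/*
 * The two macros above convert between the standard 802.1Q TCI layout
 * (PCP in bits 15-13, CFI in bit 12, VID in bits 11-0) and what appears
 * to be the controller's descriptor layout (VID in bits 15-4, CFI in
 * bit 3, PCP in bits 2-0).  AE_RXD_VLAN() is the inverse of AE_TXD_VLAN();
 * the hardware bit assignment is inferred from the shifts, not from a
 * datasheet.
 */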
213 #define STATS_ENTRY(node, desc, field) \
214 { node, desc, offsetof(struct ae_stats, field) }
220 STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
221 STATS_ENTRY("mcast", "multicast frames", tx_mcast),
222 STATS_ENTRY("pause", "PAUSE frames", tx_pause),
223 STATS_ENTRY("control", "control frames", tx_ctrl),
224 STATS_ENTRY("defers", "deferrals occuried", tx_defer),
225 STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer),
226 STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol),
227 STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol),
228 STATS_ENTRY("latecols", "late collisions occuried", tx_latecol),
229 STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol),
230 STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
232 STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
233 STATS_ENTRY("mcast", "multicast frames", rx_mcast),
234 STATS_ENTRY("pause", "PAUSE frames", rx_pause),
235 STATS_ENTRY("control", "control frames", rx_ctrl),
236 STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
237 STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
238 STATS_ENTRY("runt", "runt frames", rx_runt),
239 STATS_ENTRY("frag", "fragmented frames", rx_frag),
240 STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
241 STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
244 #define AE_STATS_RX_LEN (sizeof(ae_stats_rx) / sizeof(*ae_stats_rx))
245 #define AE_STATS_TX_LEN (sizeof(ae_stats_tx) / sizeof(*ae_stats_tx))
248 ae_probe(device_t dev)
250 uint16_t deviceid, vendorid;
253 vendorid = pci_get_vendor(dev);
254 deviceid = pci_get_device(dev);
257 * Search through the list of supported devs for matching one.
259 for (i = 0; i < AE_DEVS_COUNT; i++) {
260 if (vendorid == ae_devs[i].vendorid &&
261 deviceid == ae_devs[i].deviceid) {
262 device_set_desc(dev, ae_devs[i].name);
263 return (BUS_PROBE_DEFAULT);
270 ae_attach(device_t dev)
279 sc = device_get_softc(dev); /* Automatically allocated and zeroed
281 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
285 * Initialize mutexes and tasks.
287 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
288 callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
289 TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
290 TASK_INIT(&sc->link_task, 0, ae_link_task, sc);
292 pci_enable_busmaster(dev); /* Enable bus mastering. */
294 sc->spec_mem = ae_res_spec_mem;
297 * Allocate memory-mapped registers.
299 error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
301 device_printf(dev, "could not allocate memory resources.\n");
307 * Retrieve PCI and chip revisions.
309 pcirev = pci_get_revid(dev);
310 chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
311 AE_MASTER_REVNUM_MASK;
313 device_printf(dev, "pci device revision: %#04x\n", pcirev);
314 device_printf(dev, "chip id: %#02x\n", chiprev);
316 nmsi = pci_msi_count(dev);
318 device_printf(dev, "MSI count: %d.\n", nmsi);
321 * Allocate interrupt resources.
323 if (msi_disable == 0 && nmsi == 1) {
324 error = pci_alloc_msi(dev, &nmsi);
326 device_printf(dev, "Using MSI messages.\n");
327 sc->spec_irq = ae_res_spec_msi;
328 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
330 device_printf(dev, "MSI allocation failed.\n");
332 pci_release_msi(dev);
334 sc->flags |= AE_FLAG_MSI;
338 if (sc->spec_irq == NULL) {
339 sc->spec_irq = ae_res_spec_irq;
340 error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
342 device_printf(dev, "could not allocate IRQ resources.\n");
348 ae_init_tunables(sc);
350 ae_phy_reset(sc); /* Reset PHY. */
351 error = ae_reset(sc); /* Reset the controller itself. */
357 ae_retrieve_address(sc); /* Load MAC address. */
359 error = ae_alloc_rings(sc); /* Allocate ring buffers. */
363 /* Set default PHY address. */
364 sc->phyaddr = AE_PHYADDR_DEFAULT;
366 ifp = sc->ifp = if_alloc(IFT_ETHER);
368 device_printf(dev, "could not allocate ifnet structure.\n");
373 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
374 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
375 ifp->if_ioctl = ae_ioctl;
376 ifp->if_start = ae_start;
377 ifp->if_init = ae_init;
378 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
379 ifp->if_hwassist = 0;
380 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
381 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
382 IFQ_SET_READY(&ifp->if_snd);
383 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
384 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
385 sc->flags |= AE_FLAG_PMG;
387 ifp->if_capenable = ifp->if_capabilities;
390 * Configure and attach MII bus.
392 error = mii_phy_probe(dev, &sc->miibus, ae_mediachange,
395 device_printf(dev, "no PHY found.\n");
399 ether_ifattach(ifp, sc->eaddr);
400 /* Tell the upper layer(s) we support long frames. */
401 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
404 * Create and run all helper tasks.
406 TASK_INIT(&sc->tx_task, 1, ae_tx_task, ifp);
407 sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
408 taskqueue_thread_enqueue, &sc->tq);
409 if (sc->tq == NULL) {
410 device_printf(dev, "could not create taskqueue.\n");
415 taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
416 device_get_nameunit(sc->dev));
419 * Configure interrupt handlers.
421 error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
422 ae_intr, NULL, sc, &sc->intrhand);
424 device_printf(dev, "could not set up interrupt handler.\n");
425 taskqueue_free(sc->tq);
439 ae_init_tunables(ae_softc_t *sc)
441 struct sysctl_ctx_list *ctx;
442 struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
443 struct ae_stats *ae_stats;
446 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
447 ae_stats = &sc->stats;
449 ctx = device_get_sysctl_ctx(sc->dev);
450 root = device_get_sysctl_tree(sc->dev);
451 stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
452 CTLFLAG_RD, NULL, "ae statistics");
455 * Receiver statistics.
457 stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
458 CTLFLAG_RD, NULL, "Rx MAC statistics");
459 for (i = 0; i < AE_STATS_RX_LEN; i++)
460 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx), OID_AUTO,
461 ae_stats_rx[i].node, CTLFLAG_RD, (char *)ae_stats +
462 ae_stats_rx[i].offset, 0, ae_stats_rx[i].desc);
465 * Transmitter statistics.
467 stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
468 CTLFLAG_RD, NULL, "Tx MAC statistics");
469 for (i = 0; i < AE_STATS_TX_LEN; i++)
470 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx), OID_AUTO,
471 ae_stats_tx[i].node, CTLFLAG_RD, (char *)ae_stats +
472 ae_stats_tx[i].offset, 0, ae_stats_tx[i].desc);
476 ae_pcie_init(ae_softc_t *sc)
479 AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
480 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
484 ae_phy_reset(ae_softc_t *sc)
487 AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
488 DELAY(1000); /* XXX: pause(9) ? */
492 ae_reset(ae_softc_t *sc)
497 * Issue a soft reset.
499 AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
500 bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
501 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
504 * Wait for reset to complete.
506 for (i = 0; i < AE_RESET_TIMEOUT; i++) {
507 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
511 if (i == AE_RESET_TIMEOUT) {
512 device_printf(sc->dev, "reset timeout.\n");
517 * Wait for everything to enter idle state.
519 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
520 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
524 if (i == AE_IDLE_TIMEOUT) {
525 device_printf(sc->dev, "could not enter idle state.\n");
536 sc = (ae_softc_t *)arg;
543 ae_phy_init(ae_softc_t *sc)
547 * Enable link status change interrupt.
551 AE_PHY_WRITE(sc, 18, 0xc00);
556 ae_init_locked(ae_softc_t *sc)
559 struct mii_data *mii;
560 uint8_t eaddr[ETHER_ADDR_LEN];
567 mii = device_get_softc(sc->miibus);
571 ae_pcie_init(sc); /* Initialize PCIE stuff. */
573 ae_powersave_disable(sc);
576 * Clear and disable interrupts.
578 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
581 * Set the MAC address.
583 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
584 val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
585 AE_WRITE_4(sc, AE_EADDR0_REG, val);
586 val = eaddr[0] << 8 | eaddr[1];
587 AE_WRITE_4(sc, AE_EADDR1_REG, val);
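/*
 * Register layout implied by the two writes above: AE_EADDR0_REG holds
 * the four least significant bytes of the MAC address (eaddr[2]..eaddr[5])
 * and the low 16 bits of AE_EADDR1_REG hold the two most significant
 * bytes.  ae_retrieve_address() fills sc->eaddr using the same convention.
 */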
590 * Set ring buffers base addresses.
592 addr = sc->dma_rxd_busaddr;
593 AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
594 AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
595 addr = sc->dma_txd_busaddr;
596 AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
597 addr = sc->dma_txs_busaddr;
598 AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
601 * Configure ring buffers sizes.
603 AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
604 AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
605 AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
608 * Configure interframe gap parameters.
610 val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
612 ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
614 ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
616 ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
618 AE_WRITE_4(sc, AE_IFG_REG, val);
621 * Configure half-duplex operation.
623 val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
625 ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
626 AE_HDPX_RETRY_MASK) |
627 ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
628 AE_HDPX_ABEBT_MASK) |
629 ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
630 AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
631 AE_WRITE_4(sc, AE_HDPX_REG, val);
634 * Configure interrupt moderate timer.
636 AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
637 val = AE_READ_4(sc, AE_MASTER_REG);
638 val |= AE_MASTER_IMT_EN;
639 AE_WRITE_4(sc, AE_MASTER_REG, val);
642 * Configure interrupt clearing timer.
644 AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
649 val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
651 AE_WRITE_2(sc, AE_MTU_REG, val);
654 * Configure cut-through threshold.
656 AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
659 * Configure flow control.
661 AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
662 AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
663 (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
664 (AE_RXD_COUNT_DEFAULT / 12));
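/*
 * That is, the flow control high watermark is 7/8 of the default Rx ring
 * size and the low watermark is the larger of AE_RXD_COUNT_MIN / 8 and
 * AE_RXD_COUNT_DEFAULT / 12 descriptors.
 */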
669 sc->txd_cur = sc->rxd_cur = 0;
670 sc->txs_ack = sc->txd_ack = 0;
672 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
673 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
675 sc->tx_inproc = 0; /* Number of packets the chip processes now. */
676 sc->flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */
681 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
682 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
685 * Check if everything is OK.
687 val = AE_READ_4(sc, AE_ISR_REG);
688 if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
689 device_printf(sc->dev, "Initialization failed.\n");
694 * Clear interrupt status.
696 AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
697 AE_WRITE_4(sc, AE_ISR_REG, 0x0);
702 val = AE_READ_4(sc, AE_MASTER_REG);
703 AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
704 AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
709 AE_WRITE_4(sc, AE_WOL_REG, 0);
714 val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
715 AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
716 AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
717 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
718 ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
719 AE_MAC_PREAMBLE_MASK);
720 AE_WRITE_4(sc, AE_MAC_REG, val);
731 val = AE_READ_4(sc, AE_MAC_REG);
732 AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
734 sc->flags &= ~AE_FLAG_LINK;
735 mii_mediachg(mii); /* Switch to the current media. */
737 callout_reset(&sc->tick_ch, hz, ae_tick, sc);
739 ifp->if_drv_flags |= IFF_DRV_RUNNING;
740 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
743 device_printf(sc->dev, "Initialization complete.\n");
750 ae_detach(device_t dev)
755 sc = device_get_softc(dev);
756 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
758 if (device_is_attached(dev)) {
760 sc->flags |= AE_FLAG_DETACH;
763 callout_drain(&sc->tick_ch);
764 taskqueue_drain(sc->tq, &sc->int_task);
765 taskqueue_drain(sc->tq, &sc->tx_task);
766 taskqueue_drain(taskqueue_swi, &sc->link_task);
769 if (sc->tq != NULL) {
770 taskqueue_drain(sc->tq, &sc->int_task);
771 taskqueue_free(sc->tq);
774 if (sc->miibus != NULL) {
775 device_delete_child(dev, sc->miibus);
778 bus_generic_detach(sc->dev);
780 if (sc->intrhand != NULL) {
781 bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
788 if (sc->spec_irq != NULL)
789 bus_release_resources(dev, sc->spec_irq, sc->irq);
790 if (sc->spec_mem != NULL)
791 bus_release_resources(dev, sc->spec_mem, sc->mem);
792 if ((sc->flags & AE_FLAG_MSI) != 0)
793 pci_release_msi(dev);
794 mtx_destroy(&sc->mtx);
800 ae_miibus_readreg(device_t dev, int phy, int reg)
806 sc = device_get_softc(dev);
807 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
810 * Locking is done in upper layers.
813 if (phy != sc->phyaddr)
816 val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
817 AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
818 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
819 AE_WRITE_4(sc, AE_MDIO_REG, val);
822 * Wait for operation to complete.
824 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
826 val = AE_READ_4(sc, AE_MDIO_REG);
827 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
830 if (i == AE_MDIO_TIMEOUT) {
831 device_printf(sc->dev, "phy read timeout: %d.\n", reg);
834 return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
838 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
844 sc = device_get_softc(dev);
845 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
848 * Locking is done in upper layers.
851 if (phy != sc->phyaddr)
854 aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
855 AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
856 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
857 ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
858 AE_WRITE_4(sc, AE_MDIO_REG, aereg);
861 * Wait for operation to complete.
863 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
865 aereg = AE_READ_4(sc, AE_MDIO_REG);
866 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
869 if (i == AE_MDIO_TIMEOUT) {
870 device_printf(sc->dev, "phy write timeout: %d.\n", reg);
876 ae_miibus_statchg(device_t dev)
880 sc = device_get_softc(dev);
881 taskqueue_enqueue(taskqueue_swi, &sc->link_task);
885 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
888 struct mii_data *mii;
891 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
894 mii = device_get_softc(sc->miibus);
896 ifmr->ifm_status = mii->mii_media_status;
897 ifmr->ifm_active = mii->mii_media_active;
902 ae_mediachange(struct ifnet *ifp)
905 struct mii_data *mii;
906 struct mii_softc *mii_sc;
909 /* XXX: check IFF_UP ?? */
911 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
913 mii = device_get_softc(sc->miibus);
914 if (mii->mii_instance != 0) {
915 LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
916 mii_phy_reset(mii_sc);
918 error = mii_mediachg(mii);
925 ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
930 KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
933 * Not sure why, but Linux does this.
935 val = AE_READ_4(sc, AE_SPICTL_REG);
936 if ((val & AE_SPICTL_VPD_EN) != 0) {
937 val &= ~AE_SPICTL_VPD_EN;
938 AE_WRITE_4(sc, AE_SPICTL_REG, val);
940 error = pci_find_extcap(sc->dev, PCIY_VPD, vpdc);
945 ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
950 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
953 * VPD registers start at offset 0x100. Read them.
955 val = 0x100 + reg * 4;
956 AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
957 AE_VPD_CAP_ADDR_MASK);
958 for (i = 0; i < AE_VPD_TIMEOUT; i++) {
960 val = AE_READ_4(sc, AE_VPD_CAP_REG);
961 if ((val & AE_VPD_CAP_DONE) != 0)
964 if (i == AE_VPD_TIMEOUT) {
965 device_printf(sc->dev, "timeout reading VPD register %d.\n",
969 *word = AE_READ_4(sc, AE_VPD_DATA_REG);
974 ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
976 uint32_t word, reg, val;
982 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
983 KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));
988 error = ae_check_eeprom_present(sc, &vpdc);
993 * Read the VPD configuration space.
994 * Each register is prefixed with signature,
995 * so we can check if it is valid.
997 for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
998 error = ae_vpd_read_word(sc, i, &word);
1005 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
1007 reg = word >> AE_VPD_REG_SHIFT;
1008 i++; /* Move to the next word. */
1010 if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
1013 error = ae_vpd_read_word(sc, i, &val);
1016 if (reg == AE_EADDR0_REG)
1026 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1027 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1029 device_printf(sc->dev,
1030 "VPD ethernet address registers are invalid.\n");
1037 ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
1041 * BIOS is supposed to set this.
1043 eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1044 eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1045 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1047 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1049 device_printf(sc->dev,
1050 "Ethetnet address registers are invalid.\n");
1057 ae_retrieve_address(ae_softc_t *sc)
1059 uint32_t eaddr[2] = {0, 0};
1065 error = ae_get_vpd_eaddr(sc, eaddr);
1067 error = ae_get_reg_eaddr(sc, eaddr);
1070 device_printf(sc->dev,
1071 "Generating random ethernet address.\n");
1072 eaddr[0] = arc4random();
1075 * Set OUI to ASUSTek COMPUTER INC.
1077 sc->eaddr[0] = 0x02; /* U/L bit set. */
1078 sc->eaddr[1] = 0x1f;
1079 sc->eaddr[2] = 0xc6;
1080 sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1081 sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1082 sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1084 sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
1085 sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
1086 sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
1087 sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1088 sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1089 sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1094 ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1096 bus_addr_t *addr = arg;
1100 KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
1102 *addr = segs[0].ds_addr;
1106 ae_alloc_rings(ae_softc_t *sc)
1112 * Create parent DMA tag.
1114 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1115 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1116 NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
1117 BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
1118 &sc->dma_parent_tag);
1120 device_printf(sc->dev, "could not creare parent DMA tag.\n");
1125 * Create DMA tag for TxD.
1127 error = bus_dma_tag_create(sc->dma_parent_tag,
1128 4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1129 NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
1130 AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
1133 device_printf(sc->dev, "could not creare TxD DMA tag.\n");
1138 * Create DMA tag for TxS.
1140 error = bus_dma_tag_create(sc->dma_parent_tag,
1141 4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1142 NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
1143 AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
1146 device_printf(sc->dev, "could not creare TxS DMA tag.\n");
1151 * Create DMA tag for RxD.
1153 error = bus_dma_tag_create(sc->dma_parent_tag,
1154 128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1155 NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + 120, 1,
1156 AE_RXD_COUNT_DEFAULT * 1536 + 120, 0, NULL, NULL,
1159 device_printf(sc->dev, "could not creare TxS DMA tag.\n");
1164 * Allocate TxD DMA memory.
1166 error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
1167 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1170 device_printf(sc->dev,
1171 "could not allocate DMA memory for TxD ring.\n");
1174 error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
1175 AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1176 if (error != 0 || busaddr == 0) {
1177 device_printf(sc->dev,
1178 "could not load DMA map for TxD ring.\n");
1181 sc->dma_txd_busaddr = busaddr;
1184 * Allocate TxS DMA memory.
1186 error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
1187 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1190 device_printf(sc->dev,
1191 "could not allocate DMA memory for TxS ring.\n");
1194 error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
1195 AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1196 if (error != 0 || busaddr == 0) {
1197 device_printf(sc->dev,
1198 "could not load DMA map for TxS ring.\n");
1201 sc->dma_txs_busaddr = busaddr;
1204 * Allocate RxD DMA memory.
1206 error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
1207 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1210 device_printf(sc->dev,
1211 "could not allocate DMA memory for RxD ring.\n");
1214 error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
1215 sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + 120, ae_dmamap_cb,
1216 &busaddr, BUS_DMA_NOWAIT);
1217 if (error != 0 || busaddr == 0) {
1218 device_printf(sc->dev,
1219 "could not load DMA map for RxD ring.\n");
1222 sc->dma_rxd_busaddr = busaddr + 120;
1223 sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + 120);
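/*
 * Both the bus address programmed into the chip and the CPU-side
 * descriptor pointer are biased by the same 120 bytes into the 128-byte
 * aligned allocation, so the two views stay consistent; the purpose of
 * the leading 120-byte pad is not documented here.
 */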
1229 ae_dma_free(ae_softc_t *sc)
1232 if (sc->dma_txd_tag != NULL) {
1233 if (sc->dma_txd_map != NULL) {
1234 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1235 if (sc->txd_base != NULL)
1236 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1240 bus_dma_tag_destroy(sc->dma_txd_tag);
1241 sc->dma_txd_map = NULL;
1242 sc->dma_txd_tag = NULL;
1243 sc->txd_base = NULL;
1245 if (sc->dma_txs_tag != NULL) {
1246 if (sc->dma_txs_map != NULL) {
1247 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1248 if (sc->txs_base != NULL)
1249 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1253 bus_dma_tag_destroy(sc->dma_txs_tag);
1254 sc->dma_txs_map = NULL;
1255 sc->dma_txs_tag = NULL;
1256 sc->txs_base = NULL;
1258 if (sc->dma_rxd_tag != NULL) {
1259 if (sc->dma_rxd_map != NULL) {
1260 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1261 if (sc->rxd_base_dma != NULL)
1262 bus_dmamem_free(sc->dma_rxd_tag,
1263 sc->rxd_base_dma, sc->dma_rxd_map);
1266 bus_dma_tag_destroy(sc->dma_rxd_tag);
1267 sc->dma_rxd_map = NULL;
1268 sc->dma_rxd_tag = NULL;
1269 sc->rxd_base_dma = NULL;
1271 if (sc->dma_parent_tag != NULL) {
1272 bus_dma_tag_destroy(sc->dma_parent_tag);
1273 sc->dma_parent_tag = NULL;
1278 ae_shutdown(device_t dev)
1283 sc = device_get_softc(dev);
1284 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
1286 error = ae_suspend(dev);
1288 ae_powersave_enable(sc);
1294 ae_powersave_disable(ae_softc_t *sc)
1300 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1301 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1302 if (val & AE_PHY_DBG_POWERSAVE) {
1303 val &= ~AE_PHY_DBG_POWERSAVE;
1304 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1310 ae_powersave_enable(ae_softc_t *sc)
1317 * XXX magic numbers.
1319 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1320 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1321 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1322 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1323 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1324 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1325 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
1329 ae_pm_init(ae_softc_t *sc)
1334 struct mii_data *mii;
1340 if ((sc->flags & AE_FLAG_PMG) == 0) {
1341 /* Disable WOL entirely. */
1342 AE_WRITE_4(sc, AE_WOL_REG, 0);
1347 * Configure WOL if enabled.
1349 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1350 mii = device_get_softc(sc->miibus);
1352 if ((mii->mii_media_status & IFM_AVALID) != 0 &&
1353 (mii->mii_media_status & IFM_ACTIVE) != 0) {
1354 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
1360 val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
1361 AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
1362 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
1363 AE_HALFBUF_MASK) | \
1364 ((AE_MAC_PREAMBLE_DEFAULT << \
1365 AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
1366 AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
1367 if ((IFM_OPTIONS(mii->mii_media_active) & \
1369 val |= AE_MAC_FULL_DUPLEX;
1370 AE_WRITE_4(sc, AE_MAC_REG, val);
1372 } else { /* No link. */
1373 AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
1375 AE_WRITE_4(sc, AE_MAC_REG, 0);
1378 ae_powersave_enable(sc);
1382 * PCIE hacks. Magic numbers.
1384 val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
1385 val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
1386 AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
1387 val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
1388 val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
1389 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);
1394 pci_find_extcap(sc->dev, PCIY_PMG, &pmc);
1395 pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
1396 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1397 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1398 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1399 pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
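/*
 * To summarize the power-management setup above: with an active link and
 * WOL enabled the chip is armed for magic-packet wakeup and the Rx MAC is
 * left running (full duplex mirrored from the current media); without a
 * link it is armed for link-change wakeup instead.  The PME bits in the
 * PCI power-management capability are then set to match IFCAP_WOL.
 */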
1403 ae_suspend(device_t dev)
1407 sc = device_get_softc(dev);
1418 ae_resume(device_t dev)
1422 sc = device_get_softc(dev);
1423 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1426 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
1427 if ((sc->ifp->if_flags & IFF_UP) != 0)
1435 ae_tx_avail_size(ae_softc_t *sc)
1439 if (sc->txd_cur >= sc->txd_ack)
1440 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1442 avail = sc->txd_ack - sc->txd_cur;
1444 return (avail - 4); /* 4-byte header. */
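/*
 * The Tx path is copy-based rather than scatter/gather: ae_encap() below
 * copies each frame into a single contiguous AE_TXD_BUFSIZE_DEFAULT-byte
 * ring, prefixing it with a 4-byte TxD header and rounding the payload up
 * to a 4-byte boundary, which is why 4 bytes are reserved in the
 * availability calculation above.
 */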
1448 ae_encap(ae_softc_t *sc, struct mbuf **m_head)
1452 unsigned int to_end;
1458 len = m0->m_pkthdr.len;
1460 if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
1461 ae_tx_avail_size(sc) < len) {
1463 if_printf(sc->ifp, "No free Tx available.\n");
1468 hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
1469 bzero(hdr, sizeof(*hdr));
1470 sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT; /* Header
1472 to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur; /* Space available to
1473 * the end of the ring
1475 if (to_end >= len) {
1476 m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
1478 m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
1480 m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
1484 * Set TxD flags and parameters.
1486 if ((m0->m_flags & M_VLANTAG) != 0) {
1487 hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
1488 hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
1490 hdr->len = htole16(len);
1494 * Set current TxD position and round up to a 4-byte boundary.
1496 sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1497 if (sc->txd_cur == sc->txd_ack)
1498 sc->flags &= ~AE_FLAG_TXAVAIL;
1500 if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
1504 * Update TxS position and check if there are empty TxS available.
1506 sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
1507 sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
1508 if (sc->txs_cur == sc->txs_ack)
1509 sc->flags &= ~AE_FLAG_TXAVAIL;
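/*
 * AE_FLAG_TXAVAIL is cleared whenever either the TxD byte ring or the TxS
 * status ring catches up with its acknowledge index (i.e. becomes full);
 * ae_tx_intr() sets it again once the hardware reports a completion.
 */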
1512 * Synchronize DMA memory.
1514 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
1515 BUS_DMASYNC_PREWRITE);
1516 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1517 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1523 ae_start(struct ifnet *ifp)
1531 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1535 if_printf(ifp, "Start called.\n");
1538 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1539 IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0) {
1545 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1546 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
1548 break; /* Nothing to do. */
1550 error = ae_encap(sc, &m0);
1553 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
1554 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1556 if_printf(ifp, "Setting OACTIVE.\n");
1564 /* Bounce a copy of the frame to BPF. */
1565 ETHER_BPF_MTAP(ifp, m0);
1570 if (count > 0) { /* Something was dequeued. */
1571 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1572 sc->wd_timer = AE_TX_TIMEOUT; /* Load watchdog. */
1574 if_printf(ifp, "%d packets dequeued.\n", count);
1575 if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1582 ae_link_task(void *arg, int pending)
1585 struct mii_data *mii;
1589 sc = (ae_softc_t *)arg;
1590 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1594 mii = device_get_softc(sc->miibus);
1595 if (mii == NULL || ifp == NULL ||
1596 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1597 AE_UNLOCK(sc); /* XXX: could happen? */
1601 sc->flags &= ~AE_FLAG_LINK;
1602 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
1603 (IFM_AVALID | IFM_ACTIVE)) {
1604 switch(IFM_SUBTYPE(mii->mii_media_active)) {
1607 sc->flags |= AE_FLAG_LINK;
1620 if ((sc->flags & AE_FLAG_LINK) != 0) {
1624 * Restart DMA engines.
1626 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
1627 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
1630 * Enable Rx and Tx MACs.
1632 val = AE_READ_4(sc, AE_MAC_REG);
1633 val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
1634 AE_WRITE_4(sc, AE_MAC_REG, val);
1640 ae_stop_rxmac(ae_softc_t *sc)
1648 * Stop Rx MAC engine.
1650 val = AE_READ_4(sc, AE_MAC_REG);
1651 if ((val & AE_MAC_RX_EN) != 0) {
1652 val &= ~AE_MAC_RX_EN;
1653 AE_WRITE_4(sc, AE_MAC_REG, val);
1657 * Stop Rx DMA engine.
1659 if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
1660 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
1663 * Wait for IDLE state.
1665 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1666 val = AE_READ_4(sc, AE_IDLE_REG);
1667 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
1671 if (i == AE_IDLE_TIMEOUT)
1672 device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
1676 ae_stop_txmac(ae_softc_t *sc)
1684 * Stop Tx MAC engine.
1686 val = AE_READ_4(sc, AE_MAC_REG);
1687 if ((val & AE_MAC_TX_EN) != 0) {
1688 val &= ~AE_MAC_TX_EN;
1689 AE_WRITE_4(sc, AE_MAC_REG, val);
1693 * Stop Tx DMA engine.
1695 if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
1696 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
1699 * Wait for IDLE state.
1701 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1702 val = AE_READ_4(sc, AE_IDLE_REG);
1703 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
1707 if (i == AE_IDLE_TIMEOUT)
1708 device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
1712 ae_tx_task(void *arg, int pending)
1716 ifp = (struct ifnet *)arg;
1721 ae_mac_config(ae_softc_t *sc)
1723 struct mii_data *mii;
1728 mii = device_get_softc(sc->miibus);
1729 val = AE_READ_4(sc, AE_MAC_REG);
1730 val &= ~AE_MAC_FULL_DUPLEX;
1731 /* XXX disable AE_MAC_TX_FLOW_EN? */
1733 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1734 val |= AE_MAC_FULL_DUPLEX;
1736 AE_WRITE_4(sc, AE_MAC_REG, val);
1745 sc = (ae_softc_t *)arg;
1746 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1748 val = AE_READ_4(sc, AE_ISR_REG);
1749 if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
1750 return (FILTER_STRAY);
1752 /* Disable interrupts. */
1753 AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
1755 /* Schedule interrupt processing. */
1756 taskqueue_enqueue(sc->tq, &sc->int_task);
1758 return (FILTER_HANDLED);
1762 ae_int_task(void *arg, int pending)
1768 sc = (ae_softc_t *)arg;
1774 val = AE_READ_4(sc, AE_ISR_REG); /* Read interrupt status. */
1777 * Clear interrupts and disable them.
1779 AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
1782 if_printf(ifp, "Interrupt received: 0x%08x\n", val);
1785 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1786 if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
1787 AE_ISR_PHY_LINKDOWN)) != 0) {
1790 if ((val & AE_ISR_TX_EVENT) != 0)
1792 if ((val & AE_ISR_RX_EVENT) != 0)
1797 * Re-enable interrupts.
1799 AE_WRITE_4(sc, AE_ISR_REG, 0);
1805 ae_tx_intr(ae_softc_t *sc)
1817 if_printf(ifp, "Tx interrupt occuried.\n");
1821 * Syncronize DMA buffers.
1823 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1824 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1825 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1826 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1829 txs = sc->txs_base + sc->txs_ack;
1830 flags = le16toh(txs->flags);
1831 if ((flags & AE_TXS_UPDATE) == 0)
1833 txs->flags = htole16(flags & ~AE_TXS_UPDATE);
1835 ae_update_stats_tx(flags, &sc->stats);
1838 * Update TxS position.
1840 sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
1841 sc->flags |= AE_FLAG_TXAVAIL;
1843 txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
1844 if (txs->len != txd->len)
1845 device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
1846 le16toh(txs->len), le16toh(txd->len));
1849 * Move txd ack and align on 4-byte boundary.
1851 sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
1852 AE_TXD_BUFSIZE_DEFAULT;
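/*
 * The "+ 4" accounts for the 4-byte TxD header written by ae_encap() and
 * the "+ 3) & ~3" rounds the acknowledged length up to the same 4-byte
 * boundary used when the frame was queued.
 */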
1854 if ((flags & AE_TXS_SUCCESS) != 0)
1861 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1864 if (sc->tx_inproc < 0) {
1865 if_printf(ifp, "Received stray Tx interrupt(s).\n");
1869 if (sc->tx_inproc == 0)
1870 sc->wd_timer = 0; /* Unarm watchdog. */
1872 if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
1873 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1874 taskqueue_enqueue(sc->tq, &sc->tx_task);
1878 * Synchronize DMA buffers.
1880 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1881 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1882 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1883 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1887 ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
1897 flags = le16toh(rxd->flags);
1900 if_printf(ifp, "Rx interrupt occuried.\n");
1902 size = le16toh(rxd->len) - ETHER_CRC_LEN;
1903 if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
1904 if_printf(ifp, "Runt frame received.");
1908 m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
1912 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
1913 (flags & AE_RXD_HAS_VLAN) != 0) {
1914 m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
1915 m->m_flags |= M_VLANTAG;
1922 (*ifp->if_input)(ifp, m);
1929 ae_rx_intr(ae_softc_t *sc)
1936 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1943 * Synchronize DMA buffers.
1945 bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1946 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1949 rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
1950 flags = le16toh(rxd->flags);
1951 if ((flags & AE_RXD_UPDATE) == 0)
1953 rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
1955 ae_update_stats_rx(flags, &sc->stats);
1958 * Update position index.
1960 sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
1962 if ((flags & AE_RXD_SUCCESS) == 0) {
1966 error = ae_rxeof(sc, rxd);
1978 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
1982 ae_watchdog(ae_softc_t *sc)
1986 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1990 if (sc->wd_timer == 0 || --sc->wd_timer != 0)
1991 return; /* Nothing to do. */
1993 if ((sc->flags & AE_FLAG_LINK) == 0)
1994 if_printf(ifp, "watchdog timeout (missed link).\n");
1996 if_printf(ifp, "watchdog timeout - resetting.\n");
2000 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2001 taskqueue_enqueue(sc->tq, &sc->tx_task);
2008 struct mii_data *mii;
2010 sc = (ae_softc_t *)arg;
2011 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2014 mii = device_get_softc(sc->miibus);
2016 ae_watchdog(sc); /* Watchdog check. */
2017 callout_reset(&sc->tick_ch, hz, ae_tick, sc);
2021 ae_rxvlan(ae_softc_t *sc)
2028 val = AE_READ_4(sc, AE_MAC_REG);
2029 val &= ~AE_MAC_RMVLAN_EN;
2030 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2031 val |= AE_MAC_RMVLAN_EN;
2032 AE_WRITE_4(sc, AE_MAC_REG, val);
2036 ae_rxfilter(ae_softc_t *sc)
2039 struct ifmultiaddr *ifma;
2044 KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2050 rxcfg = AE_READ_4(sc, AE_MAC_REG);
2051 rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
2053 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2054 rxcfg |= AE_MAC_BCAST_EN;
2055 if ((ifp->if_flags & IFF_PROMISC) != 0)
2056 rxcfg |= AE_MAC_PROMISC_EN;
2057 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2058 rxcfg |= AE_MAC_MCAST_EN;
2061 * Wipe old settings.
2063 AE_WRITE_4(sc, AE_REG_MHT0, 0);
2064 AE_WRITE_4(sc, AE_REG_MHT1, 0);
2065 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2066 AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
2067 AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
2068 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2073 * Load multicast tables.
2075 bzero(mchash, sizeof(mchash));
2076 if_maddr_rlock(ifp);
2077 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2078 if (ifma->ifma_addr->sa_family != AF_LINK)
2080 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2081 ifma->ifma_addr), ETHER_ADDR_LEN);
2082 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2084 if_maddr_runlock(ifp);
2085 AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
2086 AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
2087 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
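/*
 * The multicast filter is a 64-bit hash table split across AE_REG_MHT0 and
 * AE_REG_MHT1: the most significant bit of the CRC selects the register
 * and bits 30-26 select the bit within it, as computed in the loop above.
 */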
2091 ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2093 struct ae_softc *sc;
2095 struct mii_data *mii;
2099 ifr = (struct ifreq *)data;
2104 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2106 else if (ifp->if_mtu != ifr->ifr_mtu) {
2108 ifp->if_mtu = ifr->ifr_mtu;
2109 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2116 if ((ifp->if_flags & IFF_UP) != 0) {
2117 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2118 if (((ifp->if_flags ^ sc->if_flags)
2119 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2122 if ((sc->flags & AE_FLAG_DETACH) == 0)
2126 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2129 sc->if_flags = ifp->if_flags;
2135 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2141 mii = device_get_softc(sc->miibus);
2142 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2146 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2147 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2148 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2149 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2152 VLAN_CAPABILITIES(ifp);
2156 error = ether_ioctl(ifp, cmd, data);
2163 ae_stop(ae_softc_t *sc)
2171 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2172 sc->flags &= ~AE_FLAG_LINK;
2173 sc->wd_timer = 0; /* Cancel watchdog. */
2174 callout_stop(&sc->tick_ch);
2177 * Clear and disable interrupts.
2179 AE_WRITE_4(sc, AE_IMR_REG, 0);
2180 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
2191 AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
2192 AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
2195 * Wait for everything to enter idle state.
2197 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
2198 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
2202 if (i == AE_IDLE_TIMEOUT)
2203 device_printf(sc->dev, "could not enter idle state in stop.\n");
2207 ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
2210 if ((flags & AE_TXS_BCAST) != 0)
2212 if ((flags & AE_TXS_MCAST) != 0)
2214 if ((flags & AE_TXS_PAUSE) != 0)
2216 if ((flags & AE_TXS_CTRL) != 0)
2218 if ((flags & AE_TXS_DEFER) != 0)
2220 if ((flags & AE_TXS_EXCDEFER) != 0)
2221 stats->tx_excdefer++;
2222 if ((flags & AE_TXS_SINGLECOL) != 0)
2223 stats->tx_singlecol++;
2224 if ((flags & AE_TXS_MULTICOL) != 0)
2225 stats->tx_multicol++;
2226 if ((flags & AE_TXS_LATECOL) != 0)
2227 stats->tx_latecol++;
2228 if ((flags & AE_TXS_ABORTCOL) != 0)
2229 stats->tx_abortcol++;
2230 if ((flags & AE_TXS_UNDERRUN) != 0)
2231 stats->tx_underrun++;
2235 ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
2238 if ((flags & AE_RXD_BCAST) != 0)
2240 if ((flags & AE_RXD_MCAST) != 0)
2242 if ((flags & AE_RXD_PAUSE) != 0)
2244 if ((flags & AE_RXD_CTRL) != 0)
2246 if ((flags & AE_RXD_CRCERR) != 0)
2248 if ((flags & AE_RXD_CODEERR) != 0)
2249 stats->rx_codeerr++;
2250 if ((flags & AE_RXD_RUNT) != 0)
2252 if ((flags & AE_RXD_FRAG) != 0)
2254 if ((flags & AE_RXD_TRUNC) != 0)
2256 if ((flags & AE_RXD_ALIGN) != 0)