/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
 *
 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include "miibus_if.h"
/*
 * Devices supported by this driver.
 */
static struct ae_dev {
        { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
            "Attansic Technology Corp, L2 FastEthernet" },
#define AE_DEVS_COUNT nitems(ae_devs)

static struct resource_spec ae_res_spec_mem[] = {
        { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
static struct resource_spec ae_res_spec_irq[] = {
        { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
static struct resource_spec ae_res_spec_msi[] = {
        { SYS_RES_IRQ, 1, RF_ACTIVE },
static int      ae_probe(device_t dev);
static int      ae_attach(device_t dev);
static void     ae_pcie_init(ae_softc_t *sc);
static void     ae_phy_reset(ae_softc_t *sc);
static void     ae_phy_init(ae_softc_t *sc);
static int      ae_reset(ae_softc_t *sc);
static void     ae_init(void *arg);
static int      ae_init_locked(ae_softc_t *sc);
static int      ae_detach(device_t dev);
static int      ae_miibus_readreg(device_t dev, int phy, int reg);
static int      ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void     ae_miibus_statchg(device_t dev);
static void     ae_mediastatus(if_t ifp, struct ifmediareq *ifmr);
static int      ae_mediachange(if_t ifp);
static void     ae_retrieve_address(ae_softc_t *sc);
static void     ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int      ae_alloc_rings(ae_softc_t *sc);
static void     ae_dma_free(ae_softc_t *sc);
static int      ae_shutdown(device_t dev);
static int      ae_suspend(device_t dev);
static void     ae_powersave_disable(ae_softc_t *sc);
static void     ae_powersave_enable(ae_softc_t *sc);
static int      ae_resume(device_t dev);
static unsigned int     ae_tx_avail_size(ae_softc_t *sc);
static int      ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void     ae_start(if_t ifp);
static void     ae_start_locked(if_t ifp);
static void     ae_link_task(void *arg, int pending);
static void     ae_stop_rxmac(ae_softc_t *sc);
static void     ae_stop_txmac(ae_softc_t *sc);
static void     ae_mac_config(ae_softc_t *sc);
static int      ae_intr(void *arg);
static void     ae_int_task(void *arg, int pending);
static void     ae_tx_intr(ae_softc_t *sc);
static void     ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void     ae_rx_intr(ae_softc_t *sc);
static void     ae_watchdog(ae_softc_t *sc);
static void     ae_tick(void *arg);
static void     ae_rxfilter(ae_softc_t *sc);
static void     ae_rxvlan(ae_softc_t *sc);
static int      ae_ioctl(if_t ifp, u_long cmd, caddr_t data);
static void     ae_stop(ae_softc_t *sc);
static int      ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int      ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int      ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int      ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void     ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void     ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void     ae_init_tunables(ae_softc_t *sc);
static device_method_t ae_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe,         ae_probe),
        DEVMETHOD(device_attach,        ae_attach),
        DEVMETHOD(device_detach,        ae_detach),
        DEVMETHOD(device_shutdown,      ae_shutdown),
        DEVMETHOD(device_suspend,       ae_suspend),
        DEVMETHOD(device_resume,        ae_resume),

        DEVMETHOD(miibus_readreg,       ae_miibus_readreg),
        DEVMETHOD(miibus_writereg,      ae_miibus_writereg),
        DEVMETHOD(miibus_statchg,       ae_miibus_statchg),

static driver_t ae_driver = {

DRIVER_MODULE(ae, pci, ae_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
    nitems(ae_devs));
DRIVER_MODULE(miibus, ae, miibus_driver, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
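/*
 * TUNABLE_INT registers a loader tunable, so MSI support can be turned
 * off at boot by setting hw.ae.msi_disable="1" in loader.conf(5);
 * ae_attach() only calls pci_alloc_msi() when this knob is left at zero.
 */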
#define AE_READ_4(sc, reg) \
        bus_read_4((sc)->mem[0], (reg))
#define AE_READ_2(sc, reg) \
        bus_read_2((sc)->mem[0], (reg))
#define AE_READ_1(sc, reg) \
        bus_read_1((sc)->mem[0], (reg))
#define AE_WRITE_4(sc, reg, val) \
        bus_write_4((sc)->mem[0], (reg), (val))
#define AE_WRITE_2(sc, reg, val) \
        bus_write_2((sc)->mem[0], (reg), (val))
#define AE_WRITE_1(sc, reg, val) \
        bus_write_1((sc)->mem[0], (reg), (val))
#define AE_PHY_READ(sc, reg) \
        ae_miibus_readreg(sc->dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val) \
        ae_miibus_writereg(sc->dev, 0, reg, val)
#define AE_CHECK_EADDR_VALID(eaddr) \
        ((eaddr[0] == 0 && eaddr[1] == 0) || \
         (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define AE_RXD_VLAN(vtag) \
        (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
        (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
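/*
 * The last two macros convert an 802.1Q tag between host order (PCP in
 * bits 15-13, CFI in bit 12, VID in bits 11-0) and the layout the
 * hardware descriptors use (VID in bits 15-4, CFI in bit 3, PCP in bits
 * 2-0). AE_RXD_VLAN() and AE_TXD_VLAN() are exact inverses of each other.
 */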
ae_probe(device_t dev)
        uint16_t deviceid, vendorid;

        vendorid = pci_get_vendor(dev);
        deviceid = pci_get_device(dev);

        /*
         * Search through the list of supported devs for a matching one.
         */
        for (i = 0; i < AE_DEVS_COUNT; i++) {
                if (vendorid == ae_devs[i].vendorid &&
                    deviceid == ae_devs[i].deviceid) {
                        device_set_desc(dev, ae_devs[i].name);
                        return (BUS_PROBE_DEFAULT);
                }
        }

ae_attach(device_t dev)
        sc = device_get_softc(dev);     /* Automatically allocated and zeroed. */
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        /*
         * Initialize mutexes and tasks.
         */
        mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
        callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
        TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
        TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

        pci_enable_busmaster(dev);      /* Enable bus mastering. */

        sc->spec_mem = ae_res_spec_mem;

        /*
         * Allocate memory-mapped registers.
         */
        error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
                device_printf(dev, "could not allocate memory resources.\n");

        /*
         * Retrieve PCI and chip revisions.
         */
        pcirev = pci_get_revid(dev);
        chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
            AE_MASTER_REVNUM_MASK;
        device_printf(dev, "pci device revision: %#04x\n", pcirev);
        device_printf(dev, "chip id: %#02x\n", chiprev);
        nmsi = pci_msi_count(dev);
        device_printf(dev, "MSI count: %d.\n", nmsi);

        /*
         * Allocate interrupt resources.
         */
        if (msi_disable == 0 && nmsi == 1) {
                error = pci_alloc_msi(dev, &nmsi);
                        device_printf(dev, "Using MSI messages.\n");
                        sc->spec_irq = ae_res_spec_msi;
                        error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
                                device_printf(dev, "MSI allocation failed.\n");
                                pci_release_msi(dev);
                        sc->flags |= AE_FLAG_MSI;
        }
        if (sc->spec_irq == NULL) {
                sc->spec_irq = ae_res_spec_irq;
                error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
                        device_printf(dev, "could not allocate IRQ resources.\n");
        }

        ae_init_tunables(sc);

        ae_phy_reset(sc);               /* Reset PHY. */
        error = ae_reset(sc);           /* Reset the controller itself. */

        ae_retrieve_address(sc);        /* Load MAC address. */

        error = ae_alloc_rings(sc);     /* Allocate ring buffers. */

        ifp = sc->ifp = if_alloc(IFT_ETHER);
                device_printf(dev, "could not allocate ifnet structure.\n");

        if_setsoftc(ifp, sc);
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
        if_setioctlfn(ifp, ae_ioctl);
        if_setstartfn(ifp, ae_start);
        if_setinitfn(ifp, ae_init);
        if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
        if_sethwassist(ifp, 0);
        if_setsendqlen(ifp, ifqmaxlen);
        if_setsendqready(ifp);
        if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
                if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
                sc->flags |= AE_FLAG_PMG;
        }
        if_setcapenable(ifp, if_getcapabilities(ifp));

        /*
         * Configure and attach MII bus.
         */
        error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
            ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
            MII_OFFSET_ANY, 0);
                device_printf(dev, "attaching PHYs failed\n");

        ether_ifattach(ifp, sc->eaddr);
        /* Tell the upper layer(s) we support long frames. */
        if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

        /*
         * Create and run all helper tasks.
         */
        sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->tq);
        if (sc->tq == NULL) {
                device_printf(dev, "could not create taskqueue.\n");
        }
        taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->dev));

        /*
         * Configure interrupt handlers.
         */
        error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
            ae_intr, NULL, sc, &sc->intrhand);
                device_printf(dev, "could not set up interrupt handler.\n");
                taskqueue_free(sc->tq);

#define AE_SYSCTL(stx, parent, name, desc, ptr) \
        SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
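/*
 * The helper above trims the SYSCTL_ADD_UINT boilerplate. The nodes
 * created in ae_init_tunables() surface as read-only OIDs such as
 * dev.ae.0.stats.rx.crc_errors (unit 0 assumed for the example).
 */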
ae_init_tunables(ae_softc_t *sc)
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
        struct ae_stats *ae_stats;

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
        ae_stats = &sc->stats;

        ctx = device_get_sysctl_ctx(sc->dev);
        root = device_get_sysctl_tree(sc->dev);
        stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");

        /*
         * Receiver statistics.
         */
        stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
            "broadcast frames", &ae_stats->rx_bcast);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
            "multicast frames", &ae_stats->rx_mcast);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
            "PAUSE frames", &ae_stats->rx_pause);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
            "control frames", &ae_stats->rx_ctrl);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
            "frames with CRC errors", &ae_stats->rx_crcerr);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
            "frames with invalid opcode", &ae_stats->rx_codeerr);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
            "runt frames", &ae_stats->rx_runt);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
            "fragmented frames", &ae_stats->rx_frag);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
            "frames with alignment errors", &ae_stats->rx_align);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
            "frames truncated due to Rx FIFO underrun", &ae_stats->rx_trunc);

        /*
         * Transmitter statistics.
         */
        stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
            "broadcast frames", &ae_stats->tx_bcast);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
            "multicast frames", &ae_stats->tx_mcast);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
            "PAUSE frames", &ae_stats->tx_pause);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
            "control frames", &ae_stats->tx_ctrl);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
            "deferrals occurred", &ae_stats->tx_defer);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
            "excessive deferrals occurred", &ae_stats->tx_excdefer);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
            "single collisions occurred", &ae_stats->tx_singlecol);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
            "multiple collisions occurred", &ae_stats->tx_multicol);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
            "late collisions occurred", &ae_stats->tx_latecol);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
            "transmit aborts due to collisions", &ae_stats->tx_abortcol);
        AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
            "Tx FIFO underruns", &ae_stats->tx_underrun);

ae_pcie_init(ae_softc_t *sc)

        AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
        AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);

ae_phy_reset(ae_softc_t *sc)

        AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
        DELAY(1000);    /* XXX: pause(9) ? */

ae_reset(ae_softc_t *sc)

        /*
         * Issue a soft reset.
         */
        AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
        bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
            BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
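        /*
         * The barrier above forces the posted reset write out to the
         * device before the polling reads below, so the loop cannot
         * observe stale pre-reset state.
         */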
        /*
         * Wait for reset to complete.
         */
        for (i = 0; i < AE_RESET_TIMEOUT; i++) {
                if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
                        break;
        }
        if (i == AE_RESET_TIMEOUT) {
                device_printf(sc->dev, "reset timeout.\n");
        }

        /*
         * Wait for everything to enter idle state.
         */
        for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
                if (AE_READ_4(sc, AE_IDLE_REG) == 0)
                        break;
        }
        if (i == AE_IDLE_TIMEOUT) {
                device_printf(sc->dev, "could not enter idle state.\n");
        }

ae_init(void *arg)

        sc = (ae_softc_t *)arg;

ae_phy_init(ae_softc_t *sc)

        /*
         * Enable link status change interrupt.
         */
        AE_PHY_WRITE(sc, 18, 0xc00);

ae_init_locked(ae_softc_t *sc)
        struct mii_data *mii;
        uint8_t eaddr[ETHER_ADDR_LEN];

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                return (0);
        mii = device_get_softc(sc->miibus);

        ae_pcie_init(sc);               /* Initialize PCIE stuff. */

        ae_powersave_disable(sc);

        /*
         * Clear and disable interrupts.
         */
        AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

        /*
         * Set the MAC address.
         */
        bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
        val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
        AE_WRITE_4(sc, AE_EADDR0_REG, val);
        val = eaddr[0] << 8 | eaddr[1];
        AE_WRITE_4(sc, AE_EADDR1_REG, val);

        bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
        bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
        bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);

        /*
         * Set ring buffers base addresses.
         */
        addr = sc->dma_rxd_busaddr;
        AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
        AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
        addr = sc->dma_txd_busaddr;
        AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
        addr = sc->dma_txs_busaddr;
        AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

        /*
         * Configure ring buffers sizes.
         */
        AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
        AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
        AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

        /*
         * Configure interframe gap parameters.
         */
        val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
            AE_IFG_TXIPG_MASK) |
            ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
            AE_IFG_RXIPG_MASK) |
            ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
            AE_IFG_IPGR1_MASK) |
            ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
            AE_IFG_IPGR2_MASK);
        AE_WRITE_4(sc, AE_IFG_REG, val);

        /*
         * Configure half-duplex operation.
         */
        val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
            AE_HDPX_LCOL_MASK) |
            ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
            AE_HDPX_RETRY_MASK) |
            ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
            AE_HDPX_ABEBT_MASK) |
            ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
            AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
        AE_WRITE_4(sc, AE_HDPX_REG, val);

        /*
         * Configure interrupt moderate timer.
         */
        AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
        val = AE_READ_4(sc, AE_MASTER_REG);
        val |= AE_MASTER_IMT_EN;
        AE_WRITE_4(sc, AE_MASTER_REG, val);

        /*
         * Configure interrupt clearing timer.
         */
        AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

        val = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
            ETHER_CRC_LEN;
        AE_WRITE_2(sc, AE_MTU_REG, val);

        /*
         * Configure cut-through threshold.
         */
        AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

        /*
         * Configure flow control.
         */
        AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
        AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
            (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
            (AE_RXD_COUNT_DEFAULT / 12));
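        /*
         * In other words, the hardware is told to send PAUSE frames once
         * 7/8 of the Rx ring is in use, and to release flow control again
         * at max(AE_RXD_COUNT_MIN / 8, AE_RXD_COUNT_DEFAULT / 12)
         * descriptors.
         */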
        sc->txd_cur = sc->rxd_cur = 0;
        sc->txs_ack = sc->txd_ack = 0;
        AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
        AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

        sc->tx_inproc = 0;      /* Number of packets the chip processes now. */
        sc->flags |= AE_FLAG_TXAVAIL;   /* Free Tx's available. */

        AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
        AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

        /*
         * Check if everything is OK.
         */
        val = AE_READ_4(sc, AE_ISR_REG);
        if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
                device_printf(sc->dev, "Initialization failed.\n");
        }

        /*
         * Clear interrupt status.
         */
        AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
        AE_WRITE_4(sc, AE_ISR_REG, 0x0);
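        /*
         * Two-step acknowledgement: the first write acks every latched
         * status bit, the second clears the AE_ISR_DISABLE bit so the
         * hardware may raise interrupts again; this reading is inferred
         * from how ae_intr() uses AE_ISR_DISABLE, not from a datasheet.
         */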
        val = AE_READ_4(sc, AE_MASTER_REG);
        AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
        AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

        AE_WRITE_4(sc, AE_WOL_REG, 0);

        val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
            AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
            AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
            ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
            ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
            AE_MAC_PREAMBLE_MASK);
        AE_WRITE_4(sc, AE_MAC_REG, val);

        val = AE_READ_4(sc, AE_MAC_REG);
        AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

        sc->flags &= ~AE_FLAG_LINK;
        mii_mediachg(mii);      /* Switch to the current media. */

        callout_reset(&sc->tick_ch, hz, ae_tick, sc);

        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

                device_printf(sc->dev, "Initialization complete.\n");

ae_detach(device_t dev)

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));

        if (device_is_attached(dev)) {
                sc->flags |= AE_FLAG_DETACH;
                callout_drain(&sc->tick_ch);
                taskqueue_drain(sc->tq, &sc->int_task);
                taskqueue_drain(taskqueue_swi, &sc->link_task);
        }
        if (sc->tq != NULL) {
                taskqueue_drain(sc->tq, &sc->int_task);
                taskqueue_free(sc->tq);
        }
        if (sc->miibus != NULL) {
                device_delete_child(dev, sc->miibus);
        }
        bus_generic_detach(sc->dev);
        if (sc->intrhand != NULL) {
                bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
        }
        if (sc->spec_irq != NULL)
                bus_release_resources(dev, sc->spec_irq, sc->irq);
        if (sc->spec_mem != NULL)
                bus_release_resources(dev, sc->spec_mem, sc->mem);
        if ((sc->flags & AE_FLAG_MSI) != 0)
                pci_release_msi(dev);
        mtx_destroy(&sc->mtx);

ae_miibus_readreg(device_t dev, int phy, int reg)

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        /*
         * Locking is done in upper layers.
         */
        val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
            AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
            ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
        AE_WRITE_4(sc, AE_MDIO_REG, val);

        /*
         * Wait for operation to complete.
         */
        for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
                val = AE_READ_4(sc, AE_MDIO_REG);
                if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
                        break;
        }
        if (i == AE_MDIO_TIMEOUT) {
                device_printf(sc->dev, "phy read timeout: %d.\n", reg);
        }
        return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);

ae_miibus_writereg(device_t dev, int phy, int reg, int val)

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        /*
         * Locking is done in upper layers.
         */
        aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
            AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
            ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
            ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
        AE_WRITE_4(sc, AE_MDIO_REG, aereg);

        /*
         * Wait for operation to complete.
         */
        for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
                aereg = AE_READ_4(sc, AE_MDIO_REG);
                if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
                        break;
        }
        if (i == AE_MDIO_TIMEOUT) {
                device_printf(sc->dev, "phy write timeout: %d.\n", reg);
        }

ae_miibus_statchg(device_t dev)

        sc = device_get_softc(dev);
        taskqueue_enqueue(taskqueue_swi, &sc->link_task);

ae_mediastatus(if_t ifp, struct ifmediareq *ifmr)
        struct mii_data *mii;

        sc = if_getsoftc(ifp);
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        mii = device_get_softc(sc->miibus);
        ifmr->ifm_status = mii->mii_media_status;
        ifmr->ifm_active = mii->mii_media_active;

ae_mediachange(if_t ifp)
        struct mii_data *mii;
        struct mii_softc *mii_sc;

        /* XXX: check IFF_UP ?? */
        sc = if_getsoftc(ifp);
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        mii = device_get_softc(sc->miibus);
        LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
        error = mii_mediachg(mii);

ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)

        KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));

        /*
         * Not sure why, but Linux does this.
         */
        val = AE_READ_4(sc, AE_SPICTL_REG);
        if ((val & AE_SPICTL_VPD_EN) != 0) {
                val &= ~AE_SPICTL_VPD_EN;
                AE_WRITE_4(sc, AE_SPICTL_REG, val);
        }
        error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);

ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)

        AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);     /* Clear register value. */

        /*
         * VPD registers start at offset 0x100. Read them.
         */
        val = 0x100 + reg * 4;
        AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
            AE_VPD_CAP_ADDR_MASK);
        for (i = 0; i < AE_VPD_TIMEOUT; i++) {
                val = AE_READ_4(sc, AE_VPD_CAP_REG);
                if ((val & AE_VPD_CAP_DONE) != 0)
                        break;
        }
        if (i == AE_VPD_TIMEOUT) {
                device_printf(sc->dev, "timeout reading VPD register %d.\n",
                    reg);
        }
        *word = AE_READ_4(sc, AE_VPD_DATA_REG);

ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
        uint32_t word, reg, val;

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
        KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

        error = ae_check_eeprom_present(sc, &vpdc);

        /*
         * Read the VPD configuration space.
         * Each register is prefixed with signature,
         * so we can check if it is valid.
         */
        for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
                error = ae_vpd_read_word(sc, i, &word);
                if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
                        break;
                reg = word >> AE_VPD_REG_SHIFT;
                i++;    /* Move to the next word. */

                if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
                        continue;

                error = ae_vpd_read_word(sc, i, &val);
                if (reg == AE_EADDR0_REG)
                        eaddr[0] = val;
                else
                        eaddr[1] = val;
        }

        eaddr[1] &= 0xffff;     /* Only last 2 bytes are used. */
        if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
                device_printf(sc->dev,
                    "VPD ethernet address registers are invalid.\n");
        }

ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)

        /*
         * BIOS is supposed to set this.
         */
        eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
        eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
        eaddr[1] &= 0xffff;     /* Only last 2 bytes are used. */

        if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
                device_printf(sc->dev,
                    "Ethernet address registers are invalid.\n");
        }

ae_retrieve_address(ae_softc_t *sc)
        uint32_t eaddr[2] = {0, 0};

        error = ae_get_vpd_eaddr(sc, eaddr);
                error = ae_get_reg_eaddr(sc, eaddr);
                device_printf(sc->dev,
                    "Generating random ethernet address.\n");
                eaddr[0] = arc4random();

                /*
                 * Set OUI to ASUSTek COMPUTER INC.
                 */
                sc->eaddr[0] = 0x02;    /* U/L bit set. */
                sc->eaddr[1] = 0x1f;
                sc->eaddr[2] = 0xc6;
                sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
                sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
                sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;

        sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
        sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
        sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
        sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
        sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
        sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
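        /*
         * Layout recap: the two assignment blocks above are the arms of an
         * (elided) if/else; the first builds a random locally-administered
         * address, the second decodes the registers. AE_EADDR1_REG carries
         * the two most significant octets and AE_EADDR0_REG the other four,
         * which is why eaddr[1] is masked with 0xffff and why
         * ae_init_locked() writes the address back with the same split.
         */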
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
        bus_addr_t *addr = arg;

        KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
            nsegs));
        *addr = segs[0].ds_addr;

ae_alloc_rings(ae_softc_t *sc)

        /*
         * Create parent DMA tag.
         */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
            1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
            NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
            BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->dma_parent_tag);
                device_printf(sc->dev, "could not create parent DMA tag.\n");

        /*
         * Create DMA tag for TxD.
         */
        error = bus_dma_tag_create(sc->dma_parent_tag,
            8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
            NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
            AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
            &sc->dma_txd_tag);
                device_printf(sc->dev, "could not create TxD DMA tag.\n");

        /*
         * Create DMA tag for TxS.
         */
        error = bus_dma_tag_create(sc->dma_parent_tag,
            8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
            NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
            AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
            &sc->dma_txs_tag);
                device_printf(sc->dev, "could not create TxS DMA tag.\n");

        /*
         * Create DMA tag for RxD.
         */
        error = bus_dma_tag_create(sc->dma_parent_tag,
            128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
            NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
            AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
            &sc->dma_rxd_tag);
                device_printf(sc->dev, "could not create RxD DMA tag.\n");

        /*
         * Allocate TxD DMA memory.
         */
        error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->dma_txd_map);
                device_printf(sc->dev,
                    "could not allocate DMA memory for TxD ring.\n");
        error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
            AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
        if (error != 0 || busaddr == 0) {
                device_printf(sc->dev,
                    "could not load DMA map for TxD ring.\n");
        }
        sc->dma_txd_busaddr = busaddr;

        /*
         * Allocate TxS DMA memory.
         */
        error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->dma_txs_map);
                device_printf(sc->dev,
                    "could not allocate DMA memory for TxS ring.\n");
        error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
            AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
        if (error != 0 || busaddr == 0) {
                device_printf(sc->dev,
                    "could not load DMA map for TxS ring.\n");
        }
        sc->dma_txs_busaddr = busaddr;

        /*
         * Allocate RxD DMA memory.
         */
        error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->dma_rxd_map);
                device_printf(sc->dev,
                    "could not allocate DMA memory for RxD ring.\n");
        error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
            sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
            ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
        if (error != 0 || busaddr == 0) {
                device_printf(sc->dev,
                    "could not load DMA map for RxD ring.\n");
        }
        sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
        sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
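        /*
         * The AE_RXD_PADDING offset applied to both the bus address and
         * the CPU pointer keeps the two views of the ring consistent;
         * presumably it exists so that each descriptor's packet data, not
         * the ae_rxd_t header, lands on the 128-byte alignment requested
         * from the tag above. The constant itself is defined in the
         * driver's header.
         */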
ae_dma_free(ae_softc_t *sc)

        if (sc->dma_txd_tag != NULL) {
                if (sc->dma_txd_busaddr != 0)
                        bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
                if (sc->txd_base != NULL)
                        bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
                            sc->dma_txd_map);
                bus_dma_tag_destroy(sc->dma_txd_tag);
                sc->dma_txd_tag = NULL;
                sc->txd_base = NULL;
                sc->dma_txd_busaddr = 0;
        }
        if (sc->dma_txs_tag != NULL) {
                if (sc->dma_txs_busaddr != 0)
                        bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
                if (sc->txs_base != NULL)
                        bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
                            sc->dma_txs_map);
                bus_dma_tag_destroy(sc->dma_txs_tag);
                sc->dma_txs_tag = NULL;
                sc->txs_base = NULL;
                sc->dma_txs_busaddr = 0;
        }
        if (sc->dma_rxd_tag != NULL) {
                if (sc->dma_rxd_busaddr != 0)
                        bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
                if (sc->rxd_base_dma != NULL)
                        bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
                            sc->dma_rxd_map);
                bus_dma_tag_destroy(sc->dma_rxd_tag);
                sc->dma_rxd_tag = NULL;
                sc->rxd_base_dma = NULL;
                sc->dma_rxd_busaddr = 0;
        }
        if (sc->dma_parent_tag != NULL) {
                bus_dma_tag_destroy(sc->dma_parent_tag);
                sc->dma_parent_tag = NULL;
        }

ae_shutdown(device_t dev)

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));

        error = ae_suspend(dev);
        ae_powersave_enable(sc);

ae_powersave_disable(ae_softc_t *sc)

        AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
        val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
        if (val & AE_PHY_DBG_POWERSAVE) {
                val &= ~AE_PHY_DBG_POWERSAVE;
                AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
        }

ae_powersave_enable(ae_softc_t *sc)

        /*
         * XXX magic numbers.
         */
        AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
        val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
        AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
        AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
        AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
        AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
        AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);

ae_pm_init(ae_softc_t *sc)
        struct mii_data *mii;

        if ((sc->flags & AE_FLAG_PMG) == 0) {
                /* Disable WOL entirely. */
                AE_WRITE_4(sc, AE_WOL_REG, 0);
                return;
        }

        /*
         * Configure WOL if enabled.
         */
        if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
                mii = device_get_softc(sc->miibus);
                if ((mii->mii_media_status & IFM_AVALID) != 0 &&
                    (mii->mii_media_status & IFM_ACTIVE) != 0) {
                        AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \

                        val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
                            AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
                            ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
                            AE_HALFBUF_MASK) | \
                            ((AE_MAC_PREAMBLE_DEFAULT << \
                            AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
                            AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
                        if ((IFM_OPTIONS(mii->mii_media_active) & \
                            IFM_FDX) != 0)
                                val |= AE_MAC_FULL_DUPLEX;
                        AE_WRITE_4(sc, AE_MAC_REG, val);

                } else {        /* No link. */
                        AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
                        AE_WRITE_4(sc, AE_MAC_REG, 0);
                }
        }
        ae_powersave_enable(sc);

        /*
         * PCIE hacks. Magic numbers.
         */
        val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
        val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
        AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
        val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
        val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
        AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

        if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
                pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
                pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
                if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
                        pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
                pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
        }

ae_suspend(device_t dev)

        sc = device_get_softc(dev);

ae_resume(device_t dev)

        sc = device_get_softc(dev);
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        AE_READ_4(sc, AE_WOL_REG);      /* Clear WOL status. */
        if ((if_getflags(sc->ifp) & IFF_UP) != 0)
                ae_init_locked(sc);

ae_tx_avail_size(ae_softc_t *sc)

        if (sc->txd_cur >= sc->txd_ack)
                avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
        else
                avail = sc->txd_ack - sc->txd_cur;
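        /*
         * Design note: unlike most NICs, the L2 transmits from one
         * contiguous DMA byte buffer rather than a scatter/gather
         * descriptor ring. ae_encap() below copies each frame, prefixed
         * by an ae_txd_t header, into that buffer, which is why free
         * space is tracked here at byte granularity between the txd_cur
         * and txd_ack cursors.
         */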
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
        unsigned int to_end;

        len = m0->m_pkthdr.len;

        if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
            len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
                if_printf(sc->ifp, "No free Tx available.\n");
        }

        hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
        bzero(hdr, sizeof(*hdr));
        /* Skip header size. */
        sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
        /* Space available to the end of the ring. */
        to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
        if (to_end >= len) {
                m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
        } else {
                m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
                    sc->txd_cur));
                m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
        }

        /*
         * Set TxD flags and parameters.
         */
        if ((m0->m_flags & M_VLANTAG) != 0) {
                hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
                hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
        } else {
                hdr->len = htole16(len);
        }

        /*
         * Set current TxD position and round up to a 4-byte boundary.
         */
        sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
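        /*
         * Arithmetic note: "(x + 3) & ~3" rounds x up to the next multiple
         * of 4 (e.g. 61 -> 64, 64 -> 64), and the modulo wraps the cursor
         * around the end of the circular Tx buffer.
         */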
        if (sc->txd_cur == sc->txd_ack)
                sc->flags &= ~AE_FLAG_TXAVAIL;

        if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);

        /*
         * Update TxS position and check if there are empty TxS available.
         */
        sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
        sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
        if (sc->txs_cur == sc->txs_ack)
                sc->flags &= ~AE_FLAG_TXAVAIL;

        /*
         * Synchronize DMA memory.
         */
        bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

ae_start(if_t ifp)

        sc = if_getsoftc(ifp);
        ae_start_locked(ifp);

ae_start_locked(if_t ifp)

        sc = if_getsoftc(ifp);
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        if_printf(ifp, "Start called.\n");

        if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
                return;

        while (!if_sendq_empty(ifp)) {
                m0 = if_dequeue(ifp);
                if (m0 == NULL)
                        break;  /* Nothing to do. */

                error = ae_encap(sc, &m0);
                        if_sendq_prepend(ifp, m0);
                        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                        if_printf(ifp, "Setting OACTIVE.\n");

                count++;
                sc->tx_inproc++;

                /* Bounce a copy of the frame to BPF. */
                ETHER_BPF_MTAP(ifp, m0);
        }

        if (count > 0) {        /* Something was dequeued. */
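                /*
                 * The mailbox register apparently takes the producer
                 * position in 4-byte words rather than bytes, consistent
                 * with ae_init_locked() programming AE_TXD_BUFSIZE_REG as
                 * AE_TXD_BUFSIZE_DEFAULT / 4; hence the txd_cur / 4 below.
                 */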
                AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
                sc->wd_timer = AE_TX_TIMEOUT;   /* Load watchdog. */

                if_printf(ifp, "%d packets dequeued.\n", count);
                if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
        }

ae_link_task(void *arg, int pending)
        struct mii_data *mii;

        sc = (ae_softc_t *)arg;
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        mii = device_get_softc(sc->miibus);
        if (mii == NULL || ifp == NULL ||
            (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
                AE_UNLOCK(sc);  /* XXX: could happen? */
                return;
        }

        sc->flags &= ~AE_FLAG_LINK;
        if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
            (IFM_AVALID | IFM_ACTIVE)) {
                switch(IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->flags |= AE_FLAG_LINK;
                        break;
                }
        }

        if ((sc->flags & AE_FLAG_LINK) != 0) {
                /*
                 * Restart DMA engines.
                 */
                AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
                AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

                /*
                 * Enable Rx and Tx MACs.
                 */
                val = AE_READ_4(sc, AE_MAC_REG);
                val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
                AE_WRITE_4(sc, AE_MAC_REG, val);
        }

ae_stop_rxmac(ae_softc_t *sc)

        /*
         * Stop Rx MAC engine.
         */
        val = AE_READ_4(sc, AE_MAC_REG);
        if ((val & AE_MAC_RX_EN) != 0) {
                val &= ~AE_MAC_RX_EN;
                AE_WRITE_4(sc, AE_MAC_REG, val);
        }

        /*
         * Stop Rx DMA engine.
         */
        if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
                AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

        /*
         * Wait for IDLE state.
         */
        for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
                val = AE_READ_4(sc, AE_IDLE_REG);
                if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
                        break;
        }
        if (i == AE_IDLE_TIMEOUT)
                device_printf(sc->dev, "timed out while stopping Rx MAC.\n");

ae_stop_txmac(ae_softc_t *sc)

        /*
         * Stop Tx MAC engine.
         */
        val = AE_READ_4(sc, AE_MAC_REG);
        if ((val & AE_MAC_TX_EN) != 0) {
                val &= ~AE_MAC_TX_EN;
                AE_WRITE_4(sc, AE_MAC_REG, val);
        }

        /*
         * Stop Tx DMA engine.
         */
        if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
                AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

        /*
         * Wait for IDLE state.
         */
        for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
                val = AE_READ_4(sc, AE_IDLE_REG);
                if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
                        break;
        }
        if (i == AE_IDLE_TIMEOUT)
                device_printf(sc->dev, "timed out while stopping Tx MAC.\n");

ae_mac_config(ae_softc_t *sc)
        struct mii_data *mii;

        mii = device_get_softc(sc->miibus);
        val = AE_READ_4(sc, AE_MAC_REG);
        val &= ~AE_MAC_FULL_DUPLEX;
        /* XXX disable AE_MAC_TX_FLOW_EN? */
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
                val |= AE_MAC_FULL_DUPLEX;
        AE_WRITE_4(sc, AE_MAC_REG, val);

ae_intr(void *arg)

        sc = (ae_softc_t *)arg;
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

        val = AE_READ_4(sc, AE_ISR_REG);
        if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
                return (FILTER_STRAY);

        /* Disable interrupts. */
        AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

        /* Schedule interrupt processing. */
        taskqueue_enqueue(sc->tq, &sc->int_task);

        return (FILTER_HANDLED);

ae_int_task(void *arg, int pending)

        sc = (ae_softc_t *)arg;

        val = AE_READ_4(sc, AE_ISR_REG);        /* Read interrupt status. */

        /*
         * Clear interrupts and disable them.
         */
        AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

        if_printf(ifp, "Interrupt received: 0x%08x\n", val);

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
                    AE_ISR_PHY_LINKDOWN)) != 0) {
                        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                }
                if ((val & AE_ISR_TX_EVENT) != 0)
                        ae_tx_intr(sc);
                if ((val & AE_ISR_RX_EVENT) != 0)
                        ae_rx_intr(sc);

                /*
                 * Re-enable interrupts.
                 */
                AE_WRITE_4(sc, AE_ISR_REG, 0);

                if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
                        if (!if_sendq_empty(ifp))
                                ae_start_locked(ifp);
                }
        }

ae_tx_intr(ae_softc_t *sc)

        if_printf(ifp, "Tx interrupt occurred.\n");

        /*
         * Synchronize DMA buffers.
         */
        bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        for (;;) {
                txs = sc->txs_base + sc->txs_ack;
                flags = le16toh(txs->flags);
                if ((flags & AE_TXS_UPDATE) == 0)
                        break;
                txs->flags = htole16(flags & ~AE_TXS_UPDATE);

                ae_update_stats_tx(flags, &sc->stats);

                /*
                 * Update TxS position.
                 */
                sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
                sc->flags |= AE_FLAG_TXAVAIL;

                txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
                if (txs->len != txd->len)
                        device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
                            le16toh(txs->len), le16toh(txd->len));

                /*
                 * Move txd ack and align on a 4-byte boundary.
                 */
                sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
                    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;

                if ((flags & AE_TXS_SUCCESS) != 0)
                        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                else
                        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

                sc->tx_inproc--;
        }

        if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
                if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
        if (sc->tx_inproc < 0) {
                if_printf(ifp, "Received stray Tx interrupt(s).\n");
                sc->tx_inproc = 0;
        }

        if (sc->tx_inproc == 0)
                sc->wd_timer = 0;       /* Unarm watchdog. */

        /*
         * Synchronize DMA buffers.
         */
        bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)

        flags = le16toh(rxd->flags);

        if_printf(ifp, "Rx interrupt occurred.\n");

        size = le16toh(rxd->len) - ETHER_CRC_LEN;
        if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
                if_printf(ifp, "Runt frame received.");
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                return;
        }

        m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
        if (m == NULL) {
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                return;
        }

        if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
            (flags & AE_RXD_HAS_VLAN) != 0) {
                m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
                m->m_flags |= M_VLANTAG;
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

ae_rx_intr(ae_softc_t *sc)

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

        /*
         * Synchronize DMA buffers.
         */
        bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        for (count = 0;; count++) {
                rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
                flags = le16toh(rxd->flags);
                if ((flags & AE_RXD_UPDATE) == 0)
                        break;
                rxd->flags = htole16(flags & ~AE_RXD_UPDATE);

                ae_update_stats_rx(flags, &sc->stats);

                /*
                 * Update position index.
                 */
                sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

                if ((flags & AE_RXD_SUCCESS) != 0)
                        ae_rxeof(sc, rxd);
                else
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
        }

        bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

ae_watchdog(ae_softc_t *sc)

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

        if (sc->wd_timer == 0 || --sc->wd_timer != 0)
                return;         /* Nothing to do. */

        if ((sc->flags & AE_FLAG_LINK) == 0)
                if_printf(ifp, "watchdog timeout (missed link).\n");
        else
                if_printf(ifp, "watchdog timeout - resetting.\n");

        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
        if (!if_sendq_empty(ifp))
                ae_start_locked(ifp);

ae_tick(void *arg)
        struct mii_data *mii;

        sc = (ae_softc_t *)arg;
        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

        mii = device_get_softc(sc->miibus);
        ae_watchdog(sc);        /* Watchdog check. */
        callout_reset(&sc->tick_ch, hz, ae_tick, sc);

ae_rxvlan(ae_softc_t *sc)

        val = AE_READ_4(sc, AE_MAC_REG);
        val &= ~AE_MAC_RMVLAN_EN;
        if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
                val |= AE_MAC_RMVLAN_EN;
        AE_WRITE_4(sc, AE_MAC_REG, val);

ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
        uint32_t crc, *mchash = arg;

        crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
        mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
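/*
 * Worked example for the hash above: ether_crc32_be() of the multicast
 * address yields a 32-bit CRC; bit 31 selects one of the two 32-bit hash
 * registers (MHT0/MHT1) and bits 30-26 select the bit within it, forming
 * the usual 64-bit multicast hash filter.
 */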
ae_rxfilter(ae_softc_t *sc)

        KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

        rxcfg = AE_READ_4(sc, AE_MAC_REG);
        rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

        if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
                rxcfg |= AE_MAC_BCAST_EN;
        if ((if_getflags(ifp) & IFF_PROMISC) != 0)
                rxcfg |= AE_MAC_PROMISC_EN;
        if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
                rxcfg |= AE_MAC_MCAST_EN;

        /*
         * Wipe old settings.
         */
        AE_WRITE_4(sc, AE_REG_MHT0, 0);
        AE_WRITE_4(sc, AE_REG_MHT1, 0);
        if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
                AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
                AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
                return;
        }

        /*
         * Load multicast tables.
         */
        bzero(mchash, sizeof(mchash));
        if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
        AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
        AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
        AE_WRITE_4(sc, AE_MAC_REG, rxcfg);

ae_ioctl(if_t ifp, u_long cmd, caddr_t data)
        struct ae_softc *sc;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error, mask;

        sc = if_getsoftc(ifp);
        ifr = (struct ifreq *)data;

        switch (cmd) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
                        error = EINVAL;
                else if (if_getmtu(ifp) != ifr->ifr_mtu) {
                        if_setmtu(ifp, ifr->ifr_mtu);
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                                if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                        }
                }
                break;
        case SIOCSIFFLAGS:
                if ((if_getflags(ifp) & IFF_UP) != 0) {
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                                if (((if_getflags(ifp) ^ sc->if_flags)
                                    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                        ae_rxfilter(sc);
                        } else {
                                if ((sc->flags & AE_FLAG_DETACH) == 0)
                                        ae_init_locked(sc);
                        }
                } else {
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                                ae_stop(sc);
                }
                sc->if_flags = if_getflags(ifp);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                        ae_rxfilter(sc);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                mii = device_get_softc(sc->miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
                if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
                    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
                        if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
                        ae_rxvlan(sc);
                }
                VLAN_CAPABILITIES(ifp);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
        }

ae_stop(ae_softc_t *sc)

        if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
        sc->flags &= ~AE_FLAG_LINK;
        sc->wd_timer = 0;       /* Cancel watchdog. */
        callout_stop(&sc->tick_ch);

        /*
         * Clear and disable interrupts.
         */
        AE_WRITE_4(sc, AE_IMR_REG, 0);
        AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

        AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
        AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

        /*
         * Wait for everything to enter idle state.
         */
        for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
                if (AE_READ_4(sc, AE_IDLE_REG) == 0)
                        break;
        }
        if (i == AE_IDLE_TIMEOUT)
                device_printf(sc->dev, "could not enter idle state in stop.\n");

ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)

        if ((flags & AE_TXS_BCAST) != 0)
                stats->tx_bcast++;
        if ((flags & AE_TXS_MCAST) != 0)
                stats->tx_mcast++;
        if ((flags & AE_TXS_PAUSE) != 0)
                stats->tx_pause++;
        if ((flags & AE_TXS_CTRL) != 0)
                stats->tx_ctrl++;
        if ((flags & AE_TXS_DEFER) != 0)
                stats->tx_defer++;
        if ((flags & AE_TXS_EXCDEFER) != 0)
                stats->tx_excdefer++;
        if ((flags & AE_TXS_SINGLECOL) != 0)
                stats->tx_singlecol++;
        if ((flags & AE_TXS_MULTICOL) != 0)
                stats->tx_multicol++;
        if ((flags & AE_TXS_LATECOL) != 0)
                stats->tx_latecol++;
        if ((flags & AE_TXS_ABORTCOL) != 0)
                stats->tx_abortcol++;
        if ((flags & AE_TXS_UNDERRUN) != 0)
                stats->tx_underrun++;

ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)

        if ((flags & AE_RXD_BCAST) != 0)
                stats->rx_bcast++;
        if ((flags & AE_RXD_MCAST) != 0)
                stats->rx_mcast++;
        if ((flags & AE_RXD_PAUSE) != 0)
                stats->rx_pause++;
        if ((flags & AE_RXD_CTRL) != 0)
                stats->rx_ctrl++;
        if ((flags & AE_RXD_CRCERR) != 0)
                stats->rx_crcerr++;
        if ((flags & AE_RXD_CODEERR) != 0)
                stats->rx_codeerr++;
        if ((flags & AE_RXD_RUNT) != 0)
                stats->rx_runt++;
        if ((flags & AE_RXD_FRAG) != 0)
                stats->rx_frag++;
        if ((flags & AE_RXD_TRUNC) != 0)
                stats->rx_trunc++;
        if ((flags & AE_RXD_ALIGN) != 0)
                stats->rx_align++;