/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/alc/if_alcreg.h>
#include <dev/alc/if_alcvar.h>
78 /* "device miibus" required. See GENERIC if you get errors here. */
79 #include "miibus_if.h"
80 #undef ALC_USE_CUSTOM_CSUM
82 #ifdef ALC_USE_CUSTOM_CSUM
83 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
85 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
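
/*
 * With ALC_USE_CUSTOM_CSUM defined, the controller inserts a checksum
 * at a caller-supplied offset instead of parsing IP/TCP/UDP headers
 * itself, so the IP header checksum (CSUM_IP) cannot be offloaded in
 * that mode; see the TD_CUSTOM_CSUM path in alc_encap() below.
 */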
MODULE_DEPEND(alc, pci, 1, 1, 1);
MODULE_DEPEND(alc, ether, 1, 1, 1);
MODULE_DEPEND(alc, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
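/*
 * Either tunable can be set from loader.conf(5) to fall back to
 * legacy INTx interrupts, e.g. hw.alc.msi_disable="1".
 */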

/*
 * Devices supported by this driver.
 */
static struct alc_dev {
	uint16_t	alc_vendorid;
	uint16_t	alc_deviceid;
	const char	*alc_name;
} alc_devs[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131,
	    "Atheros AR8131 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132,
	    "Atheros AR8132 PCIe Fast Ethernet" }
};
static void	alc_aspm(struct alc_softc *);
static int	alc_attach(device_t);
static int	alc_check_boundary(struct alc_softc *);
static int	alc_detach(device_t);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		alc_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_init(void *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_locked(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static void	alc_int_task(void *, int);
static int	alc_intr(void *);
static int	alc_ioctl(struct ifnet *, u_long, caddr_t);
static void	alc_mac_config(struct alc_softc *);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static int	alc_miibus_writereg(device_t, int, int, int);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static int	alc_probe(device_t);
static void	alc_reset(struct alc_softc *);
static int	alc_resume(device_t);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *, int);
static void	alc_rxfilter(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_setlinkspeed(struct alc_softc *);
static void	alc_setwol(struct alc_softc *);
static int	alc_shutdown(device_t);
static void	alc_start(struct ifnet *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct alc_softc *);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static int	alc_suspend(device_t);
static void	alc_sysctl_node(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_tx_task(void *, int);
static void	alc_txeof(struct alc_softc *);
static void	alc_watchdog(struct alc_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	{ NULL, NULL }
};

static driver_t alc_driver = {
	"alc",
	alc_methods,
	sizeof(struct alc_softc)
};

static devclass_t alc_devclass;

DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec alc_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};
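
/*
 * DMA burst sizes indexed by the 3-bit max read request/max payload
 * encodings extracted from the PCIe device control register in
 * alc_attach(); the trailing zero covers encodings the hardware
 * does not support.
 */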
static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->alc_phyaddr)
		return (0);

	/*
	 * For AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number of F1 gigabit PHY, the PHY has no
	 * ability to establish 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->alc_phyaddr)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		ALC_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->alc_miibus);

	mii_pollstat(mii);
	ALC_UNLOCK(sc);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
	mii = device_get_softc(sc->alc_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	ALC_UNLOCK(sc);

	return (error);
}

static int
alc_probe(device_t dev)
{
	struct alc_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = alc_devs;
	for (i = 0; i < sizeof(alc_devs) / sizeof(alc_devs[0]); i++) {
		if (vendor == sp->alc_vendorid &&
		    devid == sp->alc_deviceid) {
			device_set_desc(dev, sp->alc_name);
			return (BUS_PROBE_DEFAULT);
		}
		sp++;
	}

	return (ENXIO);
}

static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	int i;

	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		if ((opt & OPT_CFG_CLK_ENB) == 0) {
			opt |= OPT_CFG_CLK_ENB;
			CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
			CSR_READ_4(sc, ALC_OPT_CFG);
			DELAY(1000);
		}
		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->alc_dev, "EEPROM not found!\n");
	}
	if ((opt & OPT_CFG_CLK_ENB) != 0) {
		opt &= ~OPT_CFG_CLK_ENB;
		CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
		CSR_READ_4(sc, ALC_OPT_CFG);
		DELAY(1000);
	}
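
	/*
	 * The station address is read back from the PARx registers:
	 * PAR1 holds the two most significant bytes and PAR0 the
	 * remaining four, so the bytes are extracted high-to-low below.
	 */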
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	/* Another magic from vendor. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
	    PM_CFG_SERDES_PD_EX_L1);
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_L1_ENB;
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_phy_reset(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
}

static void
alc_phy_down(struct alc_softc *sc)
{

	/* Force PHY down. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW);
}
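
/*
 * ASPM policy: with an established link, keep the SerDes PLL running
 * in L1 and do not switch the clock; without a link, switch the clock
 * in L1 instead. ASPM L0s/L1 entry itself stays disabled either way.
 */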
static void
alc_aspm(struct alc_softc *sc)
{
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB;
	pmcfg |= PM_CFG_SERDES_L1_ENB;
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		pmcfg |= PM_CFG_SERDES_PLL_L1_ENB;
		pmcfg &= ~PM_CFG_CLK_SWH_L1;
		pmcfg &= ~PM_CFG_ASPM_L1_ENB;
		pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
	} else {
		pmcfg &= ~PM_CFG_SERDES_PLL_L1_ENB;
		pmcfg |= PM_CFG_CLK_SWH_L1;
		pmcfg &= ~PM_CFG_ASPM_L1_ENB;
		pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static int
alc_attach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint16_t burst;
	int base, error, i, msic, msixc, pmc, state;
	uint32_t cap, ctl, val;

	error = 0;
	sc = device_get_softc(dev);
	sc->alc_dev = dev;

	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->alc_res_spec = alc_res_spec_mem;
	sc->alc_irq_spec = alc_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
		sc->alc_dma_rd_burst =
		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
		if (bootverbose) {
			device_printf(dev, "Read request size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			device_printf(dev, "TLP payload size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		/* Clear data link and flow-control protocol error. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			if (state != 0)
				alc_disable_l0s_l1(sc);
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is that AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as AR8131. So atphy(4) of AR8132 reports
	 * the PHY supports 1000Mbps but that's not true. The PHY
	 * used in AR8132 can't establish gigabit link even if it
	 * shows the same PHY model/revision number as AR8131.
	 */
	if (pci_get_device(dev) == DEVICEID_ATHEROS_AR8132)
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_JUMBO;
	else
		sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
	/*
	 * It seems that AR8131/AR8132 has silicon bug for SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = pci_get_revid(dev);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
	}
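	/*
	 * The SRAM_TX/RX_FIFO_LEN registers appear to report the FIFO
	 * depth in 8-byte units (inferred from the conversion below),
	 * hence the multiplication when printing the sizes in bytes.
	 */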
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}
	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == ALC_MSIX_MESSAGES) {
				device_printf(dev,
				    "Using %d MSIX message(s).\n", msixc);
				sc->alc_flags |= ALC_FLAG_MSIX;
				sc->alc_irq_spec = alc_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
		    msic == ALC_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == ALC_MSI_MESSAGES) {
				device_printf(dev,
				    "Using %d MSI message(s).\n", msic);
				sc->alc_flags |= ALC_FLAG_MSI;
				sc->alc_irq_spec = alc_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
	    alc_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->alc_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * It seems enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when
	 * Tx checksum offloading is enabled. I'm not sure whether this
	 * is a bug in Tx checksum offloading logic or I got broken
	 * sample boards. To be safe, don't enable Tx checksum offloading
	 * by default, but give users the chance to toggle it if they know
	 * their controllers work without problems.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
	sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->alc_tq);
	if (sc->alc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->alc_dev));

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->alc_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
		    &sc->alc_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		alc_detach(dev);

	return (error);
}

static int
alc_detach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->alc_ifp;
	if (device_is_attached(dev)) {
		ALC_LOCK(sc);
		sc->alc_flags |= ALC_FLAG_DETACH;
		alc_stop(sc);
		ALC_UNLOCK(sc);
		callout_drain(&sc->alc_tick_ch);
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
		taskqueue_drain(sc->alc_tq, &sc->alc_tx_task);
		ether_ifdetach(ifp);
	}

	if (sc->alc_tq != NULL) {
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
	}

	if (sc->alc_miibus != NULL) {
		device_delete_child(dev, sc->alc_miibus);
		sc->alc_miibus = NULL;
	}
	bus_generic_detach(dev);
	alc_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->alc_ifp = NULL;
	}

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->alc_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->alc_irq[i],
			    sc->alc_intrhand[i]);
			sc->alc_intrhand[i] = NULL;
		}
	}
	if (sc->alc_res[0] != NULL)
		alc_phy_down(sc);
	bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
	mtx_destroy(&sc->alc_mtx);

	return (0);
}
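
/*
 * Convenience wrappers for registering read-only 32-bit and 64-bit
 * MAC statistics counters under a sysctl node.
 */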
#define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

static void
alc_sysctl_node(struct alc_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct alc_hw_stats *stats;
	int error;

	stats = &sc->alc_stats;
	ctx = device_get_sysctl_ctx(sc->alc_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
	/* Pull in device tunables. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
	if (error == 0) {
		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_RX_TIMER_DEFAULT);
			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
		}
	}
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
	if (error == 0) {
		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_TX_TIMER_DEFAULT);
			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
	    sysctl_hw_alc_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->alc_process_limit = ALC_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "process_limit",
	    &sc->alc_process_limit);
	if (error == 0) {
		if (sc->alc_process_limit < ALC_PROC_MIN ||
		    sc->alc_process_limit > ALC_PROC_MAX) {
			device_printf(sc->alc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALC_PROC_DEFAULT);
			sc->alc_process_limit = ALC_PROC_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ALC statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with deferrals");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALC_SYSCTL_STAT_ADD32
#undef ALC_SYSCTL_STAT_ADD64

struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;
};

static void
alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct alc_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct alc_dmamap_arg *)arg;
	ctx->alc_busaddr = segs[0].ds_addr;
}

/*
 * Normal and high Tx descriptors share a single Tx high address.
 * Four Rx descriptor/return rings and CMB share the same Rx
 * high address.
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed. */
	if ((ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
	    (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
	    (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
	    (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
		return (EFBIG);
	/*
	 * Make sure Rx return descriptor/Rx descriptor/CMB use
	 * the same high address.
	 */
	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
		return (EFBIG);

	return (0);
}

static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	struct alc_dmamap_arg ctx;
	bus_addr_t lowaddr;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
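	/*
	 * Descriptor rings are first allocated with 64-bit addressing;
	 * if any ring turns out to cross a 4GB boundary the allocation
	 * is torn down and retried from the `again' label below with
	 * the parent tag limited to 32-bit addresses.
	 */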
again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->alc_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_parent_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx return descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_CMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_CMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_cmb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for status message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_SMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_SMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_smb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
	    (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_tx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
	    (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
	    (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rr_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for CMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
	    (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for CMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for CMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for SMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
	    (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_smb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for SMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for SMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;

	/* Make sure we've not crossed 4GB boundary. */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = alc_check_boundary(sc)) != 0) {
		device_printf(sc->alc_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		alc_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx buffer parent tag.
	 * AR8131/AR8132 allows 64bit DMA addressing of Tx/Rx buffers
	 * so it needs a separate parent DMA tag, as the parent DMA
	 * address space could be restricted to a 32bit address space
	 * by 4GB boundary crossing.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->alc_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_buffer_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TSO_MAXSIZE,		/* maxsize */
	    ALC_MAXTXSEGS,		/* nsegments */
	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_tx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_rx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
	    &sc->alc_cdata.alc_rx_sparemap)) != 0) {
		device_printf(sc->alc_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers. */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		if (sc->alc_cdata.alc_tx_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_cdata.alc_tx_ring_map != NULL &&
		    sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring = NULL;
		sc->alc_cdata.alc_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_cdata.alc_rx_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_cdata.alc_rx_ring_map != NULL &&
		    sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring = NULL;
		sc->alc_cdata.alc_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_cdata.alc_rr_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_cdata.alc_rr_ring_map != NULL &&
		    sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring = NULL;
		sc->alc_cdata.alc_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block. */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_cdata.alc_cmb_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_cdata.alc_cmb_map != NULL &&
		    sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb = NULL;
		sc->alc_cdata.alc_cmb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block. */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_cdata.alc_smb_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_cdata.alc_smb_map != NULL &&
		    sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb = NULL;
		sc->alc_cdata.alc_smb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}

static int
alc_shutdown(device_t dev)
{

	return (alc_suspend(dev));
}

/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in the suspend/shutdown phase, but we
 * don't know whether that auto-negotiation will succeed or not,
 * as the driver has no control after the power off/suspend operation.
 * If the renegotiation fails, WOL may not work. Running at 1Gbps
 * draws more power than the 375mA at 3.3V specified in the PCI
 * specification, and that could result in power to the ethernet
 * controller being shut down completely.
 *
 * Save the current negotiated media speed/duplex/flow-control in the
 * softc and restore the same link again after resuming. PHY
 * handling such as powering down or resetting to 100Mbps may be
 * better handled in the suspend method of the phy driver.
 */
static void
alc_setlinkspeed(struct alc_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	mii = device_get_softc(sc->alc_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
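	/*
	 * Advertise only 10/100 below so the link partner renegotiates
	 * down to a speed the suspend/WOL power budget can sustain.
	 */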
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until alc(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(
				    mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					alc_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("alclnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->alc_dev,
			    "establishing a link failed, WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	alc_mac_config(sc);
}

static void
alc_setwol(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t cap, reg, pmcs;
	uint16_t pmstat;
	int base, pmc;

	ALC_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->alc_dev, PCIY_EXPRESS, &base) == 0) {
		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
			cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
			alc_disable_l0s_l1(sc);
		}
	}

	ifp = sc->alc_ifp;
	if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) != 0) {
		/* PME is not supported; disable WOL completely. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		alc_phy_down(sc);
		return;
	}

	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			alc_setlinkspeed(sc);
		reg = CSR_READ_4(sc, ALC_MASTER_CFG);
		reg &= ~MASTER_CLK_SEL_DIS;
		CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	}

	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		alc_phy_down(sc);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static int
alc_suspend(device_t dev)
{
	struct alc_softc *sc;

	sc = device_get_softc(dev);

	ALC_LOCK(sc);
	alc_stop(sc);
	alc_setwol(sc);
	ALC_UNLOCK(sc);

	return (0);
}

static int
alc_resume(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;
	int pmc;

	sc = device_get_softc(dev);

	ALC_LOCK(sc);
	if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->alc_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->alc_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}

	ifp = sc->alc_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		alc_init_locked(sc);
	}
	ALC_UNLOCK(sc);

	return (0);
}

static int
alc_encap(struct alc_softc *sc, struct mbuf **m_head)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, hdrlen, poff, vtag;
	int error, idx, nsegs, prod;

	ALC_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	poff = 0;
	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * AR8131/AR8132 requires offset of TCP/UDP header in its
		 * Tx descriptor to perform Tx checksum offloading. TSO
		 * also requires TCP header offset and modification of
		 * IP/TCP header. This kind of operation takes many CPU
		 * cycles on FreeBSD so fast host CPU is required to get
		 * smooth TSO performance.
		 */
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		m = m_pullup(m, sizeof(struct ether_header) +
		    sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) +
		    sizeof(struct ether_header));
		poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			m = m_pullup(m, poff + (tcp->th_off << 2));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			/*
			 * Due to strict adherence of Microsoft NDIS
			 * Large Send specification, hardware expects
			 * a pseudo TCP checksum inserted by upper
			 * stack. Unfortunately the pseudo TCP
			 * checksum that NDIS refers to does not include
			 * TCP payload length so driver should recompute
			 * the pseudo checksum here. Hopefully this
			 * wouldn't be much burden on modern CPUs.
			 *
			 * Reset IP checksum and recompute TCP pseudo
			 * checksum as NDIS specification said.
			 */
			ip->ip_sum = 0;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1863 prod = sc->alc_cdata.alc_tx_prod;
1864 txd = &sc->alc_cdata.alc_txdesc[prod];
1866 map = txd->tx_dmamap;
1868 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1869 *m_head, txsegs, &nsegs, 0);
1870 if (error == EFBIG) {
1871 m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
1878 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1879 *m_head, txsegs, &nsegs, 0);
1885 } else if (error != 0)
1893 /* Check descriptor overrun. */
1894 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1895 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
1898 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
1901 cflags = TD_ETHERNET;
1905 /* Configure VLAN hardware tag insertion. */
1906 if ((m->m_flags & M_VLANTAG) != 0) {
1907 vtag = htons(m->m_pkthdr.ether_vtag);
1908 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1909 cflags |= TD_INS_VLAN_TAG;
1911 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1912 /* Request TSO and set MSS. */
1913 cflags |= TD_TSO | TD_TSO_DESCV1;
1914 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
1916 /* Set TCP header offset. */
1917 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
1918 TD_TCPHDR_OFFSET_MASK;
1920 * AR8131/AR8132 requires the first buffer should
1921 * only hold IP/TCP header data. Payload should
1922 * be handled in other descriptors.
1924 hdrlen = poff + (tcp->th_off << 2);
1925 desc = &sc->alc_rdata.alc_tx_ring[prod];
1926 desc->len = htole32(TX_BYTES(hdrlen | vtag));
1927 desc->flags = htole32(cflags);
1928 desc->addr = htole64(txsegs[0].ds_addr);
1929 sc->alc_cdata.alc_tx_cnt++;
1930 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1931 if (m->m_len - hdrlen > 0) {
1932 /* Handle remaining payload of the first fragment. */
1933 desc = &sc->alc_rdata.alc_tx_ring[prod];
1934 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
1936 desc->flags = htole32(cflags);
1937 desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
1938 sc->alc_cdata.alc_tx_cnt++;
1939 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1941 /* Handle remaining fragments. */
1943 } else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1944 /* Configure Tx checksum offload. */
1945 #ifdef ALC_USE_CUSTOM_CSUM
1946 cflags |= TD_CUSTOM_CSUM;
1947 /* Set checksum start offset. */
1948 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1949 TD_PLOAD_OFFSET_MASK;
1950 /* Set checksum insertion position of TCP/UDP. */
1951 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
1952 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
1954 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1955 cflags |= TD_IPCSUM;
1956 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1957 cflags |= TD_TCPCSUM;
1958 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1959 cflags |= TD_UDPCSUM;
1960 /* Set TCP/UDP header offset. */
1961 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
1962 TD_L4HDR_OFFSET_MASK;
1965 for (; idx < nsegs; idx++) {
1966 desc = &sc->alc_rdata.alc_tx_ring[prod];
1967 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
1968 desc->flags = htole32(cflags);
1969 desc->addr = htole64(txsegs[idx].ds_addr);
1970 sc->alc_cdata.alc_tx_cnt++;
1971 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1973 /* Update producer index. */
1974 sc->alc_cdata.alc_tx_prod = prod;
1976 /* Finally set EOP on the last descriptor. */
1977 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1978 desc = &sc->alc_rdata.alc_tx_ring[prod];
1979 desc->flags |= htole32(TD_EOP);
1981 /* Swap dmamap of the first and the last. */
1982 txd = &sc->alc_cdata.alc_txdesc[prod];
1983 map = txd_last->tx_dmamap;
1984 txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}
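#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the ring-index
 * arithmetic used by alc_encap() above, assuming ALC_DESC_INC(x, y)
 * expands to "(x) = ((x) + 1) % (y)".  After the last ALC_DESC_INC,
 * prod points one past the final used slot, so stepping back to set
 * TD_EOP adds (count - 1) before the modulo, which avoids a negative
 * intermediate value.
 */
static __inline int
alc_example_last_used_slot(int prod, int count)
{
	return ((prod + count - 1) % count);
}
#endif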
static void
alc_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	alc_start(ifp);
}
static void
alc_start(struct ifnet *ifp)
{
	struct alc_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
2011 /* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);
2015 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) {
		ALC_UNLOCK(sc);
		return;
	}
2021 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
2026 * Pack the data into the transmit ring. If we
2027 * don't have room, set the OACTIVE flag and wait
2028 * for the NIC to drain the ring.
		if (alc_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
2047 /* Sync descriptors. */
2048 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2049 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2050 /* Kick. Assume we're using normal Tx priority queue. */
2051 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2052 (sc->alc_cdata.alc_tx_prod <<
2053 MBOX_TD_PROD_LO_IDX_SHIFT) &
2054 MBOX_TD_PROD_LO_IDX_MASK);
2055 /* Set a timeout in case the chip goes out to lunch. */
		sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
	}
	ALC_UNLOCK(sc);
}
static void
alc_watchdog(struct alc_softc *sc)
{
	struct ifnet *ifp;
2067 ALC_LOCK_ASSERT(sc);
	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
		return;

	ifp = sc->alc_ifp;
2073 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		alc_init_locked(sc);
		return;
	}
	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	alc_init_locked(sc);
2084 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
}
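#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the countdown idiom
 * used by alc_watchdog().  A single integer serves both as an
 * "armed" flag and as a tick counter; the timeout path runs only on
 * the 1 -> 0 transition.
 */
static __inline int
alc_example_watchdog_expired(int *timer)
{
	if (*timer == 0 || --(*timer) != 0)
		return (0);	/* disarmed, or still counting down */
	return (1);		/* just hit zero: handle the timeout */
}
#endif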
static int
alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct alc_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;
	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
2101 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALC_JUMBO_MTU ||
2102 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
2105 else if (ifp->if_mtu != ifr->ifr_mtu) {
			ALC_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/* AR8131/AR8132 has a 13-bit MSS field. */
2109 if (ifp->if_mtu > ALC_TSO_MTU &&
2110 (ifp->if_capenable & IFCAP_TSO4) != 0) {
2111 ifp->if_capenable &= ~IFCAP_TSO4;
2112 ifp->if_hwassist &= ~CSUM_TSO;
				VLAN_CAPABILITIES(ifp);
			}
			ALC_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		ALC_LOCK(sc);
2120 if ((ifp->if_flags & IFF_UP) != 0) {
2121 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2122 ((ifp->if_flags ^ sc->alc_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				alc_rxfilter(sc);
2125 else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0)
2126 alc_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			alc_stop(sc);
		sc->alc_if_flags = ifp->if_flags;
		ALC_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ALC_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			alc_rxfilter(sc);
		ALC_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
2141 mii = device_get_softc(sc->alc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		ALC_LOCK(sc);
2146 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2147 if ((mask & IFCAP_TXCSUM) != 0 &&
2148 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2149 ifp->if_capenable ^= IFCAP_TXCSUM;
2150 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2151 ifp->if_hwassist |= ALC_CSUM_FEATURES;
2153 ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2155 if ((mask & IFCAP_TSO4) != 0 &&
2156 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2157 ifp->if_capenable ^= IFCAP_TSO4;
2158 if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
				/* AR8131/AR8132 has a 13-bit MSS field. */
2160 if (ifp->if_mtu > ALC_TSO_MTU) {
2161 ifp->if_capenable &= ~IFCAP_TSO4;
					ifp->if_hwassist &= ~CSUM_TSO;
				} else
					ifp->if_hwassist |= CSUM_TSO;
			} else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
2168 if ((mask & IFCAP_WOL_MCAST) != 0 &&
2169 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2170 ifp->if_capenable ^= IFCAP_WOL_MCAST;
2171 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2172 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2173 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2174 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2175 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			alc_rxvlan(sc);
		}
2179 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2180 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2181 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2182 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2183 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2184 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2185 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2186 ifp->if_capenable &=
2187 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
		ALC_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
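#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: why TSO is tied to
 * the MTU in alc_ioctl().  The Tx descriptor stores the MSS in a
 * 13-bit field, so segment sizes above 8191 bytes cannot be encoded;
 * the driver's actual cutoff, ALC_TSO_MTU, additionally accounts for
 * protocol headers.
 */
static __inline int
alc_example_mss_encodable(uint32_t mss)
{
	return (mss <= ((1U << 13) - 1));
}
#endif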
static void
alc_mac_config(struct alc_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;
2205 ALC_LOCK_ASSERT(sc);
2207 mii = device_get_softc(sc->alc_miibus);
2208 reg = CSR_READ_4(sc, ALC_MAC_CFG);
2209 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2210 MAC_CFG_SPEED_MASK);
2211 /* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
2221 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2222 reg |= MAC_CFG_FULL_DUPLEX;
2224 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2225 reg |= MAC_CFG_TX_FC;
2226 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}
static void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;
2240 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2241 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2242 sc->alc_cdata.alc_smb_map,
2243 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2244 smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
2247 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2248 sc->alc_cdata.alc_smb_map,
2249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}
static void
alc_stats_update(struct alc_softc *sc)
{
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;
2274 ALC_LOCK_ASSERT(sc);
	ifp = sc->alc_ifp;
	stat = &sc->alc_stats;
2278 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2279 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2280 sc->alc_cdata.alc_smb_map,
2281 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2282 smb = sc->alc_rdata.alc_smb;
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
2287 /* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
2293 /* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
2302 stat->rx_frames += smb->rx_frames;
2303 stat->rx_bcast_frames += smb->rx_bcast_frames;
2304 stat->rx_mcast_frames += smb->rx_mcast_frames;
2305 stat->rx_pause_frames += smb->rx_pause_frames;
2306 stat->rx_control_frames += smb->rx_control_frames;
2307 stat->rx_crcerrs += smb->rx_crcerrs;
2308 stat->rx_lenerrs += smb->rx_lenerrs;
2309 stat->rx_bytes += smb->rx_bytes;
2310 stat->rx_runts += smb->rx_runts;
2311 stat->rx_fragments += smb->rx_fragments;
2312 stat->rx_pkts_64 += smb->rx_pkts_64;
2313 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2314 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2315 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2316 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2317 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2318 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2319 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2320 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2321 stat->rx_rrs_errs += smb->rx_rrs_errs;
2322 stat->rx_alignerrs += smb->rx_alignerrs;
2323 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2324 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2325 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2328 stat->tx_frames += smb->tx_frames;
2329 stat->tx_bcast_frames += smb->tx_bcast_frames;
2330 stat->tx_mcast_frames += smb->tx_mcast_frames;
2331 stat->tx_pause_frames += smb->tx_pause_frames;
2332 stat->tx_excess_defer += smb->tx_excess_defer;
2333 stat->tx_control_frames += smb->tx_control_frames;
2334 stat->tx_deferred += smb->tx_deferred;
2335 stat->tx_bytes += smb->tx_bytes;
2336 stat->tx_pkts_64 += smb->tx_pkts_64;
2337 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2338 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2339 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2340 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2341 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2342 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2343 stat->tx_single_colls += smb->tx_single_colls;
2344 stat->tx_multi_colls += smb->tx_multi_colls;
2345 stat->tx_late_colls += smb->tx_late_colls;
2346 stat->tx_excess_colls += smb->tx_excess_colls;
2347 stat->tx_abort += smb->tx_abort;
2348 stat->tx_underrun += smb->tx_underrun;
2349 stat->tx_desc_underrun += smb->tx_desc_underrun;
2350 stat->tx_lenerrs += smb->tx_lenerrs;
2351 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2352 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2353 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2355 /* Update counters in ifnet. */
2356 ifp->if_opackets += smb->tx_frames;
2358 ifp->if_collisions += smb->tx_single_colls +
2359 smb->tx_multi_colls * 2 + smb->tx_late_colls +
2360 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
	/*
	 * The tx_pkts_truncated counter looks suspicious: it constantly
	 * increments with no sign of Tx errors. This may indicate that
	 * the counter is misnamed, so I've removed it from the output
	 * error count.
	 */
	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun;
2372 ifp->if_ipackets += smb->rx_frames;
	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;
2379 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
2382 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2383 sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
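#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the register-walk
 * technique used by alc_stats_clear()/alc_stats_update().  The MIB
 * counters are a contiguous block of 32-bit registers, so a struct
 * whose members mirror that layout can be filled by advancing a
 * uint32_t pointer and the register offset in lockstep.  The
 * three-member struct here is illustrative only.
 */
struct alc_example_mib {
	uint32_t	rx_frames;
	uint32_t	rx_bcast_frames;
	uint32_t	rx_mcast_frames;
};

static void
alc_example_read_mib(struct alc_softc *sc, struct alc_example_mib *mib)
{
	uint32_t *reg;
	int i;

	for (reg = &mib->rx_frames, i = 0; reg <= &mib->rx_mcast_frames;
	    reg++) {
		*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
}
#endif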
static int
alc_intr(void *arg)
{
	struct alc_softc *sc;
	uint32_t status;
2394 sc = (struct alc_softc *)arg;
2396 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2397 if ((status & ALC_INTRS) == 0)
2398 return (FILTER_STRAY);
2399 /* Disable interrupts. */
2400 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2401 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
	return (FILTER_HANDLED);
}
static void
alc_int_task(void *arg, int pending)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int more;
	sc = (struct alc_softc *)arg;
	ifp = sc->alc_ifp;
2417 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2418 more = atomic_readandclear_int(&sc->alc_morework);
	if (more != 0)
		status |= INTR_RX_PKT;
	if ((status & ALC_INTRS) == 0)
		goto done;
2424 /* Acknowledge interrupts but still disable interrupts. */
2425 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2428 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2429 if ((status & INTR_RX_PKT) != 0) {
2430 more = alc_rxintr(sc, sc->alc_process_limit);
			if (more == EAGAIN)
				atomic_set_int(&sc->alc_morework, 1);
			else if (more == EIO) {
				ALC_LOCK(sc);
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				alc_init_locked(sc);
				ALC_UNLOCK(sc);
				return;
			}
		}
2441 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2442 INTR_TXQ_TO_RST)) != 0) {
2443 if ((status & INTR_DMA_RD_TO_RST) != 0)
2444 device_printf(sc->alc_dev,
2445 "DMA read error! -- resetting\n");
2446 if ((status & INTR_DMA_WR_TO_RST) != 0)
2447 device_printf(sc->alc_dev,
2448 "DMA write error! -- resetting\n");
2449 if ((status & INTR_TXQ_TO_RST) != 0)
2450 device_printf(sc->alc_dev,
2451 "TxQ reset! -- resetting\n");
			ALC_LOCK(sc);
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			alc_init_locked(sc);
			ALC_UNLOCK(sc);
			return;
		}
2458 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2459 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
	}
2463 if (more == EAGAIN ||
2464 (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
		taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
		return;
	}

done:
2470 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2471 /* Re-enable interrupts if we're running. */
		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	}
}
static void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;
2484 ALC_LOCK_ASSERT(sc);
	ifp = sc->alc_ifp;
	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
2490 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2491 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
2492 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2493 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2494 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else
		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2498 /* Assume we're using normal Tx priority queue. */
2499 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2500 MBOX_TD_CONS_LO_IDX_SHIFT;
2501 cons = sc->alc_cdata.alc_tx_cons;
2503 * Go through our Tx list and free mbufs for those
2504 * frames which have been transmitted.
2506 for (prog = 0; cons != prod; prog++,
2507 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
2511 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2512 sc->alc_cdata.alc_tx_cnt--;
2513 txd = &sc->alc_cdata.alc_txdesc[cons];
2514 if (txd->tx_m != NULL) {
2515 /* Reclaim transmitted mbufs. */
2516 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2517 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
2525 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2526 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2527 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2528 sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm the watchdog timer only when there are no pending
	 * frames in the Tx queue.
	 */
2533 if (sc->alc_cdata.alc_tx_cnt == 0)
		sc->alc_watchdog_timer = 0;
}
static int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
2548 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2549 #ifndef __NO_STRICT_ALIGNMENT
	m_adj(m, sizeof(uint64_t));
#endif
2553 if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
	    sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
2558 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2560 if (rxd->rx_m != NULL) {
2561 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2562 BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
	}
2565 map = rxd->rx_dmamap;
2566 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2567 sc->alc_cdata.alc_rx_sparemap = map;
2568 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2569 BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->addr = htole64(segs[0].ds_addr);

	return (0);
}
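#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the spare-dmamap
 * technique used by alc_newbuf().  The replacement mbuf is always
 * loaded into a dedicated spare map first, so a failed load leaves
 * the ring slot's old mapping (and mbuf) intact; only after a
 * successful load are the two maps swapped.
 */
static __inline void
alc_example_swap_dmamaps(bus_dmamap_t *slot, bus_dmamap_t *spare)
{
	bus_dmamap_t tmp;

	tmp = *slot;
	*slot = *spare;
	*spare = tmp;
}
#endif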
static int
alc_rxintr(struct alc_softc *sc, int count)
{
	struct ifnet *ifp;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;
2583 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2584 sc->alc_cdata.alc_rr_ring_map,
2585 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2586 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2587 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2588 rr_cons = sc->alc_cdata.alc_rr_cons;
	ifp = sc->alc_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) {
		if (count-- <= 0)
			break;
2593 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2594 status = le32toh(rrd->status);
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			device_printf(sc->alc_dev,
			    "unexpected segment count -- resetting\n");
			break;
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
2607 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2608 sc->alc_cdata.alc_rx_cons += nsegs;
2609 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
	}

	if (prog > 0) {
		/* Update the consumer index. */
2615 sc->alc_cdata.alc_rr_cons = rr_cons;
2616 /* Sync Rx return descriptors. */
2617 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2618 sc->alc_cdata.alc_rr_ring_map,
2619 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors so that the controller
		 * sees the modified buffer addresses.
		 */
2624 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2625 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
		/*
		 * Let the controller know about the availability of new
		 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
		 * descriptors, it may be possible to update
		 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
		 * is required. In addition, we already set
		 * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
		 * descriptors. However, it still seems that pre-fetching
		 * needs more experimentation.
		 */
2636 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}

	return (count > 0 ? 0 : EAGAIN);
}
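#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the budget scheme
 * used by alc_rxintr().  A caller-supplied frame budget is counted
 * down per received frame; returning EAGAIN when the budget runs
 * out lets alc_int_task() re-queue itself instead of looping in
 * interrupt context, which helps avoid Rx livelock.
 */
static __inline int
alc_example_budget_result(int count)
{
	return (count > 0 ? 0 : EAGAIN);
}
#endif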
2643 #ifndef __NO_STRICT_ALIGNMENT
2644 static struct mbuf *
alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf *n;
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 3;

2654 if (m->m_next == NULL) {
		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
			*dst++ = *src++;
		m->m_data -= 6;
		return (m);
	}
	/*
	 * Append a new mbuf to the received mbuf chain and copy the
	 * Ethernet header from the old chain. This can save a lot of
	 * CPU cycles for jumbo frames.
	 */
	MGETHDR(n, M_DONTWAIT, MT_DATA);
	if (n == NULL) {
		m_freem(m);
		return (NULL);
	}
2671 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2672 m->m_data += ETHER_HDR_LEN;
2673 m->m_len -= ETHER_HDR_LEN;
2674 n->m_len = ETHER_HDR_LEN;
	M_MOVE_PKTHDR(n, m);
	n->m_next = m;

	return (n);
}
#endif
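#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the arithmetic behind
 * alc_fixup_rx() on strict-alignment machines.  alc_newbuf() advances
 * the DMA target 8 bytes into the cluster; the fixup then copies the
 * frame 6 bytes back (3 uint16_t words), so the IP header following
 * the 14-byte Ethernet header lands on a 4-byte boundary
 * (8 - 6 + 14 = 16).
 */
static __inline int
alc_example_ip_hdr_aligned(uintptr_t frame_start)
{
	return (((frame_start + ETHER_HDR_LEN) & 3) == 0);
}
#endif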
2681 /* Receive a frame. */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	struct ifnet *ifp;
2687 struct mbuf *mp, *m;
2688 uint32_t rdinfo, status, vtag;
2689 int count, nsegs, rx_cons;
	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
2693 rdinfo = le32toh(rrd->rdinfo);
2694 rx_cons = RRD_RD_IDX(rdinfo);
2695 nsegs = RRD_RD_CNT(rdinfo);
2697 sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2698 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status in the Rx
		 * return descriptor:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length does not match the protocol-specific
		 *    length.
		 *
		 * Force the network stack to compute checksums for
		 * these frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if (((RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC |
		    RRD_ERR_RUNT) & status) != 0)
			return;
	}
2717 for (count = 0; count < nsegs; count++,
2718 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
2721 /* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
2724 /* Reuse Rx buffers. */
2725 if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}
		/*
		 * Assume we've received a full-sized frame. The actual
		 * size is fixed up when we encounter the end of a
		 * multi-segment frame.
		 */
2735 mp->m_len = sc->alc_buf_size;
2737 /* Chain received mbufs. */
2738 if (sc->alc_cdata.alc_rxhead == NULL) {
2739 sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
2742 mp->m_flags &= ~M_PKTHDR;
2743 sc->alc_cdata.alc_rxprev_tail =
2744 sc->alc_cdata.alc_rxtail;
2745 sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

2749 if (count == nsegs - 1) {
2750 /* Last desc. for this frame. */
2751 m = sc->alc_cdata.alc_rxhead;
2752 m->m_flags |= M_PKTHDR;
			/*
			 * It seems the L1C/L2C controller has no way to
			 * tell the hardware to strip the CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
2760 /* Set last mbuf size. */
2761 mp->m_len = sc->alc_cdata.alc_rxlen -
2762 (nsegs - 1) * sc->alc_buf_size;
2763 /* Remove the CRC bytes in chained mbufs. */
2764 if (mp->m_len <= ETHER_CRC_LEN) {
2765 sc->alc_cdata.alc_rxtail =
2766 sc->alc_cdata.alc_rxprev_tail;
2767 sc->alc_cdata.alc_rxtail->m_len -=
2768 (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else
					mp->m_len -= ETHER_CRC_LEN;
			} else
				m->m_len = m->m_pkthdr.len;
2776 m->m_pkthdr.rcvif = ifp;
2778 * Due to hardware bugs, Rx checksum offloading
2779 * was intentionally disabled.
2781 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2782 (status & RRD_VLAN_TAG) != 0) {
2783 vtag = RRD_VLAN(le32toh(rrd->vtag));
2784 m->m_pkthdr.ether_vtag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}
2787 #ifndef __NO_STRICT_ALIGNMENT
			m = alc_fixup_rx(ifp, m);
			if (m != NULL)
#endif
2793 (*ifp->if_input)(ifp, m);
			/* Reset mbuf chains. */
			ALC_RXCHAIN_RESET(sc);
		}
	}
}

static void
alc_tick(void *arg)
{
2804 struct alc_softc *sc;
2805 struct mii_data *mii;
2807 sc = (struct alc_softc *)arg;
2809 ALC_LOCK_ASSERT(sc);
2811 mii = device_get_softc(sc->alc_miibus);
	mii_tick(mii);
	alc_stats_update(sc);
	/*
	 * alc(4) does not rely on Tx completion interrupts to reclaim
	 * transferred buffers. Instead, Tx completion interrupts are
	 * used as a hint to schedule the Tx task. So it's necessary to
	 * release transmitted buffers by kicking the Tx completion
	 * handler here. This limits the maximum reclamation delay to
	 * one second (one callout tick).
	 */
	alc_txeof(sc);
	alc_watchdog(sc);
	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
}
static void
alc_reset(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

2832 CSR_WRITE_4(sc, ALC_MASTER_CFG, MASTER_RESET);
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "master reset timeout!\n");
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
}
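#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the bounded register
 * poll used by alc_reset(), alc_stop_mac() and alc_stop_queue().
 * The register is re-read with a short delay until the given bits
 * clear or the retry budget runs out; returning the final value lets
 * the caller log it on timeout.
 */
static uint32_t
alc_example_poll_clear(struct alc_softc *sc, int reg_off, uint32_t bits,
    int tries)
{
	uint32_t reg;

	for (reg = CSR_READ_4(sc, reg_off); tries > 0; tries--) {
		if ((reg & bits) == 0)
			break;
		DELAY(10);
		reg = CSR_READ_4(sc, reg_off);
	}
	return (reg);
}
#endif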
static void
alc_init(void *xsc)
{
	struct alc_softc *sc;
2856 sc = (struct alc_softc *)xsc;
	ALC_LOCK(sc);
	alc_init_locked(sc);
	ALC_UNLOCK(sc);
}
static void
alc_init_locked(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;
2871 ALC_LOCK_ASSERT(sc);
	ifp = sc->alc_ifp;
	mii = device_get_softc(sc->alc_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);
2887 /* Initialize Rx descriptors. */
2888 if (alc_init_rx_ring(sc) != 0) {
		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
		alc_stop(sc);
		return;
	}
2893 alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);
2898 /* Reprogram the station address. */
2899 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2900 CSR_WRITE_4(sc, ALC_PAR0,
2901 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2902 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear the WOL status and disable all WOL features, since WOL
	 * can interfere with Rx operation under normal conditions.
	 */
2907 CSR_READ_4(sc, ALC_WOL_CFG);
2908 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2909 /* Set Tx descriptor base addresses. */
2910 paddr = sc->alc_rdata.alc_tx_ring_paddr;
2911 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2912 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2913 /* We don't use high priority ring. */
2914 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2915 /* Set Tx descriptor counter. */
2916 CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2917 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2918 /* Set Rx descriptor base addresses. */
2919 paddr = sc->alc_rdata.alc_rx_ring_paddr;
2920 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2921 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2922 /* We use one Rx ring. */
2923 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2924 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2925 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2926 /* Set Rx descriptor counter. */
2927 CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2928 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let the hardware split jumbo frames into alc_buf_size chunks
	 * when a frame does not fit in a single buffer. The Rx return
	 * descriptor holds a counter that indicates how many fragments
	 * the hardware made. The buffer size must be a multiple of 8
	 * bytes, and since the hardware limits the maximum buffer size,
	 * always use the largest value.
	 * On strict-alignment architectures, reduce the buffer size by
	 * 8 bytes to make room for the alignment fixup.
	 */
2940 #ifndef __NO_STRICT_ALIGNMENT
2941 sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
2943 sc->alc_buf_size = RX_BUF_SIZE_MAX;
2945 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2947 paddr = sc->alc_rdata.alc_rr_ring_paddr;
2948 /* Set Rx return descriptor base addresses. */
2949 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2950 /* We use one Rx return ring. */
2951 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2952 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2953 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2954 /* Set Rx return descriptor counter. */
2955 CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2956 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2957 paddr = sc->alc_rdata.alc_cmb_paddr;
2958 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2959 paddr = sc->alc_rdata.alc_smb_paddr;
2960 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2961 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2963 /* Tell hardware that we're ready to load DMA blocks. */
2964 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2966 /* Configure interrupt moderation timer. */
2967 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2968 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2969 CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2970 reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2971 reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	/*
	 * We don't want automatic interrupt clearing, since the
	 * interrupt taskqueue needs to see the interrupt status.
	 */
2976 reg &= ~MASTER_INTR_RD_CLR;
2977 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2978 if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2979 reg |= MASTER_IM_RX_TIMER_ENB;
2980 if (ALC_USECS(sc->alc_int_tx_mod) != 0)
2981 reg |= MASTER_IM_TX_TIMER_ENB;
2982 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2984 * Disable interrupt re-trigger timer. We don't want automatic
2985 * re-triggering of un-ACKed interrupts.
2987 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2988 /* Configure CMB. */
2989 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2990 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2991 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2993 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	/*
	 * The hardware can be configured to issue an SMB interrupt at
	 * a programmed interval. Since the driver already has a callout
	 * that runs every second, use that instead of relying on the
	 * periodic SMB interrupt.
	 */
3000 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3001 /* Clear MAC statistics. */
3002 alc_stats_clear(sc);

	/*
	 * Always use the maximum frame size the controller can support.
	 * Otherwise, received frames longer than the configured MTU
	 * would be silently dropped by the hardware, which makes
	 * path-MTU discovery hard because the sender would never get
	 * any response from the receiver. alc(4) supports
	 * multi-fragment frames on the Rx path, so it has no problem
	 * reassembling fragmented frames. Using the maximum frame size
	 * also removes the need to reinitialize the hardware when the
	 * interface MTU changes.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
3018 CSR_WRITE_4(sc, ALC_FRAME_SIZE, ALC_JUMBO_FRAMELEN);
3020 /* Disable header split(?) */
3021 CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3023 /* Configure IPG/IFG parameters. */
3024 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3025 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3026 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3027 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3028 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3029 /* Set parameters for half-duplex media. */
3030 CSR_WRITE_4(sc, ALC_HDPX_CFG,
3031 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3032 HDPX_CFG_LCOL_MASK) |
3033 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3034 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3035 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3036 HDPX_CFG_ABEBT_MASK) |
3037 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3038 HDPX_CFG_JAMIPG_MASK));
	/*
	 * Set the TSO/checksum offload threshold. For frames larger
	 * than this threshold, the hardware does not perform
	 * TSO/checksum offloading.
	 */
3044 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3045 (ALC_JUMBO_FRAMELEN >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3046 TSO_OFFLOAD_THRESH_MASK);
3047 /* Configure TxQ. */
3048 reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3049 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3050 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3051 TXQ_CFG_TD_BURST_MASK;
3052 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3054 /* Configure Rx free descriptor pre-fetching. */
3055 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3056 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
3057 RX_RD_FREE_THRESH_HI_MASK) |
3058 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
3059 RX_RD_FREE_THRESH_LO_MASK));
3062 * Configure flow control parameters.
3063 * XON : 80% of Rx FIFO
3064 * XOFF : 30% of Rx FIFO
3066 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3067 rxf_hi = (reg * 8) / 10;
	rxf_lo = (reg * 3) / 10;
3069 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3070 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3071 RX_FIFO_PAUSE_THRESH_LO_MASK) |
3072 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3073 RX_FIFO_PAUSE_THRESH_HI_MASK));
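	/*
	 * Worked example (illustrative only, assuming the register
	 * reports the Rx FIFO length in bytes): if ALC_SRAM_RX_FIFO_LEN
	 * read back 8192, the thresholds above would come out to
	 * rxf_hi = 8192 * 8 / 10 = 6553 and rxf_lo = 8192 * 3 / 10 =
	 * 2457 (integer division).
	 */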
3075 /* Disable RSS until I understand L1C/L2C's RSS logic. */
3076 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3077 CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3079 /* Configure RxQ. */
3080 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3081 RXQ_CFG_RD_BURST_MASK;
3082 reg |= RXQ_CFG_RSS_MODE_DIS;
3083 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
3084 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
3085 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
	/* Configure Rx DMAW request threshold. */
3088 CSR_WRITE_4(sc, ALC_RD_DMA_CFG,
3089 ((RD_DMA_CFG_THRESH_DEFAULT << RD_DMA_CFG_THRESH_SHIFT) &
3090 RD_DMA_CFG_THRESH_MASK) |
3091 ((ALC_RD_DMA_CFG_USECS(0) << RD_DMA_CFG_TIMER_SHIFT) &
3092 RD_DMA_CFG_TIMER_MASK));
3093 /* Configure DMA parameters. */
3094 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3096 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3097 reg |= DMA_CFG_CMB_ENB;
3098 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
3102 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3103 DMA_CFG_RD_BURST_SHIFT;
3104 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3105 DMA_CFG_WR_BURST_SHIFT;
3106 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3107 DMA_CFG_RD_DELAY_CNT_MASK;
3108 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3109 DMA_CFG_WR_DELAY_CNT_MASK;
3110 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3113 * Configure Tx/Rx MACs.
3114 * - Auto-padding for short frames.
3115 * - Enable CRC generation.
	 * The MAC is reconfigured for the resolved speed/duplex once
	 * link establishment is detected.
	 * AR8131/AR8132 always computes checksums regardless of the
	 * MAC_CFG_RXCSUM_ENB bit. The controller is also known to have
	 * a bug in the protocol field of the Rx return structure, so
	 * it cannot handle fragmented frames. Disable Rx checksum
	 * offloading until a newer controller with a sane
	 * implementation appears.
	 */
3125 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3126 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3127 MAC_CFG_PREAMBLE_MASK);
3128 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
3132 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	/* Set up the receive filter. */
	alc_rxfilter(sc);
	alc_rxvlan(sc);
	/* Acknowledge all pending interrupts and clear them. */
3139 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3140 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3141 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3143 sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);
3147 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3149 ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;
3162 ALC_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->alc_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3168 sc->alc_flags &= ~ALC_FLAG_LINK;
3169 callout_stop(&sc->alc_tick_ch);
3170 sc->alc_watchdog_timer = 0;
3171 alc_stats_update(sc);
3172 /* Disable interrupts. */
3173 CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3174 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3177 reg = CSR_READ_4(sc, ALC_DMA_CFG);
3178 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3179 reg |= DMA_CFG_SMB_DIS;
3180 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
3184 /* Disable interrupts which might be touched in taskq handler. */
3185 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3187 /* Reclaim Rx buffers that have been processed. */
3188 if (sc->alc_cdata.alc_rxhead != NULL)
3189 m_freem(sc->alc_cdata.alc_rxhead);
3190 ALC_RXCHAIN_RESET(sc);
3192 * Free Tx/Rx mbufs still in the queues.
3194 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3195 rxd = &sc->alc_cdata.alc_rxdesc[i];
3196 if (rxd->rx_m != NULL) {
3197 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
3198 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
3205 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3206 txd = &sc->alc_cdata.alc_txdesc[i];
3207 if (txd->tx_m != NULL) {
3208 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3209 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;
3224 ALC_LOCK_ASSERT(sc);
3226 /* Disable Rx/Tx MAC. */
3227 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3228 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
3232 for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
}
static void
alc_start_queue(struct alc_softc *sc)
{
	static const uint32_t qcfg[] = {
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB |
		    RXQ_CFG_QUEUE3_ENB
	};
	uint32_t cfg;
3255 ALC_LOCK_ASSERT(sc);
3258 cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3259 cfg &= ~RXQ_CFG_ENB;
3261 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}
static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;
3274 ALC_LOCK_ASSERT(sc);
3277 reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3278 if ((reg & RXQ_CFG_ENB) != 0) {
3279 reg &= ~RXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
	}
3283 reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
3285 reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
3288 for (i = ALC_TIMEOUT; i > 0; i--) {
3289 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
}
static void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;
3306 ALC_LOCK_ASSERT(sc);
3308 sc->alc_cdata.alc_tx_prod = 0;
3309 sc->alc_cdata.alc_tx_cons = 0;
3310 sc->alc_cdata.alc_tx_cnt = 0;
3312 rd = &sc->alc_rdata;
3313 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3314 for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}
3319 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;
3330 ALC_LOCK_ASSERT(sc);
3332 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3333 sc->alc_morework = 0;
3334 rd = &sc->alc_rdata;
3335 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3336 for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read them back; BUS_DMASYNC_PREWRITE
	 * is enough to ensure coherence.
	 */
3349 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
3350 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
3351 /* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}
static void
alc_init_rr_ring(struct alc_softc *sc)
{
3360 struct alc_ring_data *rd;
3362 ALC_LOCK_ASSERT(sc);
3364 sc->alc_cdata.alc_rr_cons = 0;
3365 ALC_RXCHAIN_RESET(sc);
3367 rd = &sc->alc_rdata;
3368 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3369 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
3370 sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
alc_init_cmb(struct alc_softc *sc)
{
3377 struct alc_ring_data *rd;
3379 ALC_LOCK_ASSERT(sc);
3381 rd = &sc->alc_rdata;
3382 bzero(rd->alc_cmb, ALC_CMB_SZ);
3383 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
alc_init_smb(struct alc_softc *sc)
{
3390 struct alc_ring_data *rd;
3392 ALC_LOCK_ASSERT(sc);
3394 rd = &sc->alc_rdata;
3395 bzero(rd->alc_smb, ALC_SMB_SZ);
3396 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
alc_rxvlan(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
3406 ALC_LOCK_ASSERT(sc);
	ifp = sc->alc_ifp;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3410 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3411 reg |= MAC_CFG_VLAN_TAG_STRIP;
3413 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}
static void
alc_rxfilter(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;
3426 ALC_LOCK_ASSERT(sc);
	ifp = sc->alc_ifp;
	bzero(mchash, sizeof(mchash));
3431 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3432 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3433 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3434 rxcfg |= MAC_CFG_BCAST;
3435 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3436 if ((ifp->if_flags & IFF_PROMISC) != 0)
3437 rxcfg |= MAC_CFG_PROMISC;
3438 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3439 rxcfg |= MAC_CFG_ALLMULTI;
3440 mchash[0] = 0xFFFFFFFF;
		mchash[1] = 0xFFFFFFFF;
		goto chipit;
	}
3445 if_maddr_rlock(ifp);
3446 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
3447 if (ifma->ifma_addr->sa_family != AF_LINK)
3449 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3450 ifma->ifma_addr), ETHER_ADDR_LEN);
3451 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	if_maddr_runlock(ifp);

chipit:
3456 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3457 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}
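#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: the 64-bit multicast
 * hash used by alc_rxfilter().  The big-endian CRC32 of the MAC
 * address selects one of 64 filter bits: bit 31 of the CRC picks the
 * MAR0/MAR1 word and bits 30-26 pick the bit within that word.  The
 * crc argument stands in for the ether_crc32_be() result.
 */
static __inline void
alc_example_hash_maddr(uint32_t crc, uint32_t mchash[2])
{
	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
}
#endif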
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
3469 error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}
static int
sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_PROC_MIN, ALC_PROC_MAX));
}
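#ifdef ALC_EXAMPLE_SKETCHES
/*
 * Illustrative sketch, not part of the driver: how the range-checked
 * handler above is typically attached, assuming the standard
 * SYSCTL_ADD_PROC(9) interface.  The ctx/children parameters, oid
 * name, and description are illustrative, not the driver's actual
 * attachment code.
 */
static void
alc_example_add_proc_limit(struct alc_softc *sc,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *children)
{
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
	    sysctl_hw_alc_proc_limit, "I", "max Rx frames per interrupt");
}
#endif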
static int
sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
}