2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
39 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_llc.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/ip.h>
63 #include <netinet/tcp.h>
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
68 #include <dev/pci/pcireg.h>
69 #include <dev/pci/pcivar.h>
71 #include <machine/atomic.h>
72 #include <machine/bus.h>
73 #include <machine/in_cksum.h>
75 #include <dev/alc/if_alcreg.h>
76 #include <dev/alc/if_alcvar.h>
78 /* "device miibus" required. See GENERIC if you get errors here. */
79 #include "miibus_if.h"
80 #undef ALC_USE_CUSTOM_CSUM
82 #ifdef ALC_USE_CUSTOM_CSUM
83 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
85 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
87 #ifndef IFCAP_VLAN_HWTSO
88 #define IFCAP_VLAN_HWTSO 0
91 MODULE_DEPEND(alc, pci, 1, 1, 1);
92 MODULE_DEPEND(alc, ether, 1, 1, 1);
93 MODULE_DEPEND(alc, miibus, 1, 1, 1);
96 static int msi_disable = 0;
97 static int msix_disable = 0;
98 TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
99 TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
102 * Devices supported by this driver.
104 static struct alc_dev {
105 uint16_t alc_vendorid;
106 uint16_t alc_deviceid;
107 const char *alc_name;
109 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131,
110 "Atheros AR8131 PCIe Gigabit Ethernet" },
111 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132,
112 "Atheros AR8132 PCIe Fast Ethernet" }
115 static void alc_aspm(struct alc_softc *);
116 static int alc_attach(device_t);
117 static int alc_check_boundary(struct alc_softc *);
118 static int alc_detach(device_t);
119 static void alc_disable_l0s_l1(struct alc_softc *);
120 static int alc_dma_alloc(struct alc_softc *);
121 static void alc_dma_free(struct alc_softc *);
122 static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
123 static int alc_encap(struct alc_softc *, struct mbuf **);
124 #ifndef __NO_STRICT_ALIGNMENT
126 alc_fixup_rx(struct ifnet *, struct mbuf *);
128 static void alc_get_macaddr(struct alc_softc *);
129 static void alc_init(void *);
130 static void alc_init_cmb(struct alc_softc *);
131 static void alc_init_locked(struct alc_softc *);
132 static void alc_init_rr_ring(struct alc_softc *);
133 static int alc_init_rx_ring(struct alc_softc *);
134 static void alc_init_smb(struct alc_softc *);
135 static void alc_init_tx_ring(struct alc_softc *);
136 static void alc_int_task(void *, int);
137 static int alc_intr(void *);
138 static int alc_ioctl(struct ifnet *, u_long, caddr_t);
139 static void alc_mac_config(struct alc_softc *);
140 static int alc_miibus_readreg(device_t, int, int);
141 static void alc_miibus_statchg(device_t);
142 static int alc_miibus_writereg(device_t, int, int, int);
143 static int alc_mediachange(struct ifnet *);
144 static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
145 static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
146 static void alc_phy_down(struct alc_softc *);
147 static void alc_phy_reset(struct alc_softc *);
148 static int alc_probe(device_t);
149 static void alc_reset(struct alc_softc *);
150 static int alc_resume(device_t);
151 static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
152 static int alc_rxintr(struct alc_softc *, int);
153 static void alc_rxfilter(struct alc_softc *);
154 static void alc_rxvlan(struct alc_softc *);
155 static void alc_setlinkspeed(struct alc_softc *);
156 static void alc_setwol(struct alc_softc *);
157 static int alc_shutdown(device_t);
158 static void alc_start(struct ifnet *);
159 static void alc_start_queue(struct alc_softc *);
160 static void alc_stats_clear(struct alc_softc *);
161 static void alc_stats_update(struct alc_softc *);
162 static void alc_stop(struct alc_softc *);
163 static void alc_stop_mac(struct alc_softc *);
164 static void alc_stop_queue(struct alc_softc *);
165 static int alc_suspend(device_t);
166 static void alc_sysctl_node(struct alc_softc *);
167 static void alc_tick(void *);
168 static void alc_tx_task(void *, int);
169 static void alc_txeof(struct alc_softc *);
170 static void alc_watchdog(struct alc_softc *);
171 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
172 static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
173 static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
175 static device_method_t alc_methods[] = {
176 /* Device interface. */
177 DEVMETHOD(device_probe, alc_probe),
178 DEVMETHOD(device_attach, alc_attach),
179 DEVMETHOD(device_detach, alc_detach),
180 DEVMETHOD(device_shutdown, alc_shutdown),
181 DEVMETHOD(device_suspend, alc_suspend),
182 DEVMETHOD(device_resume, alc_resume),
185 DEVMETHOD(miibus_readreg, alc_miibus_readreg),
186 DEVMETHOD(miibus_writereg, alc_miibus_writereg),
187 DEVMETHOD(miibus_statchg, alc_miibus_statchg),
192 static driver_t alc_driver = {
195 sizeof(struct alc_softc)
198 static devclass_t alc_devclass;
200 DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
201 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);
203 static struct resource_spec alc_res_spec_mem[] = {
204 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
208 static struct resource_spec alc_irq_spec_legacy[] = {
209 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
213 static struct resource_spec alc_irq_spec_msi[] = {
214 { SYS_RES_IRQ, 1, RF_ACTIVE },
218 static struct resource_spec alc_irq_spec_msix[] = {
219 { SYS_RES_IRQ, 1, RF_ACTIVE },
223 static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
226 alc_miibus_readreg(device_t dev, int phy, int reg)
228 struct alc_softc *sc;
232 sc = device_get_softc(dev);
234 if (phy != sc->alc_phyaddr)
237 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
238 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
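/* Poll until the MDIO read completes or times out. */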
239 for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
241 v = CSR_READ_4(sc, ALC_MDIO);
242 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
247 device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
251 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
255 alc_miibus_writereg(device_t dev, int phy, int reg, int val)
257 struct alc_softc *sc;
261 sc = device_get_softc(dev);
263 if (phy != sc->alc_phyaddr)
266 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
267 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
268 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
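/* Poll until the MDIO write completes or times out. */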
269 for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
271 v = CSR_READ_4(sc, ALC_MDIO);
272 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
277 device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
283 alc_miibus_statchg(device_t dev)
285 struct alc_softc *sc;
286 struct mii_data *mii;
290 sc = device_get_softc(dev);
292 mii = device_get_softc(sc->alc_miibus);
294 if (mii == NULL || ifp == NULL ||
295 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
298 sc->alc_flags &= ~ALC_FLAG_LINK;
299 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
300 (IFM_ACTIVE | IFM_AVALID)) {
301 switch (IFM_SUBTYPE(mii->mii_media_active)) {
304 sc->alc_flags |= ALC_FLAG_LINK;
307 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
308 sc->alc_flags |= ALC_FLAG_LINK;
315 /* Stop Rx/Tx MACs. */
318 /* Program MACs with resolved speed/duplex/flow-control. */
319 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
322 /* Re-enable Tx/Rx MACs. */
323 reg = CSR_READ_4(sc, ALC_MAC_CFG);
324 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
325 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
331 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
333 struct alc_softc *sc;
334 struct mii_data *mii;
338 if ((ifp->if_flags & IFF_UP) == 0) {
342 mii = device_get_softc(sc->alc_miibus);
346 ifmr->ifm_status = mii->mii_media_status;
347 ifmr->ifm_active = mii->mii_media_active;
351 alc_mediachange(struct ifnet *ifp)
353 struct alc_softc *sc;
354 struct mii_data *mii;
355 struct mii_softc *miisc;
360 mii = device_get_softc(sc->alc_miibus);
361 if (mii->mii_instance != 0) {
362 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
363 mii_phy_reset(miisc);
365 error = mii_mediachg(mii);
372 alc_probe(device_t dev)
376 uint16_t vendor, devid;
378 vendor = pci_get_vendor(dev);
379 devid = pci_get_device(dev);
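/* Scan the supported-device table for a matching vendor/device ID. */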
381 for (i = 0; i < sizeof(alc_devs) / sizeof(alc_devs[0]); i++) {
382 if (vendor == sp->alc_vendorid &&
383 devid == sp->alc_deviceid) {
384 device_set_desc(dev, sp->alc_name);
385 return (BUS_PROBE_DEFAULT);
394 alc_get_macaddr(struct alc_softc *sc)
399 opt = CSR_READ_4(sc, ALC_OPT_CFG);
400 if ((CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
402 * EEPROM found, let TWSI reload EEPROM configuration.
403 * This will set the Ethernet address of the controller.
405 if ((opt & OPT_CFG_CLK_ENB) == 0) {
406 opt |= OPT_CFG_CLK_ENB;
407 CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
408 CSR_READ_4(sc, ALC_OPT_CFG);
411 CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
412 TWSI_CFG_SW_LD_START);
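/* Wait for the TWSI EEPROM reload to complete. */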
413 for (i = 100; i > 0; i--) {
415 if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
416 TWSI_CFG_SW_LD_START) == 0)
420 device_printf(sc->alc_dev,
421 "EEPROM reload timed out!\n");
424 device_printf(sc->alc_dev, "EEPROM not found!\n");
426 if ((opt & OPT_CFG_CLK_ENB) != 0) {
427 opt &= ~OPT_CFG_CLK_ENB;
428 CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
429 CSR_READ_4(sc, ALC_OPT_CFG);
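/* The station address lives in PAR0/PAR1; PAR1 holds the two most significant bytes. */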
433 ea[0] = CSR_READ_4(sc, ALC_PAR0);
434 ea[1] = CSR_READ_4(sc, ALC_PAR1);
435 sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
436 sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
437 sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
438 sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
439 sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
440 sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
444 alc_disable_l0s_l1(struct alc_softc *sc)
448 /* Another magic from vendor. */
449 pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
450 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
451 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
452 PM_CFG_SERDES_PD_EX_L1);
453 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
454 PM_CFG_SERDES_L1_ENB;
455 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
459 alc_phy_reset(struct alc_softc *sc)
463 /* Reset magic from Linux. */
464 CSR_WRITE_2(sc, ALC_GPHY_CFG,
465 GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
466 CSR_READ_2(sc, ALC_GPHY_CFG);
469 CSR_WRITE_2(sc, ALC_GPHY_CFG,
470 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
471 GPHY_CFG_SEL_ANA_RESET);
472 CSR_READ_2(sc, ALC_GPHY_CFG);
475 /* Load DSP codes, vendor magic. */
476 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
477 ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
478 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
479 ALC_MII_DBG_ADDR, MII_ANA_CFG18);
480 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
481 ALC_MII_DBG_DATA, data);
483 data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
484 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
486 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
487 ALC_MII_DBG_ADDR, MII_ANA_CFG5);
488 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
489 ALC_MII_DBG_DATA, data);
491 data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
492 ANA_LONG_CABLE_TH_100_MASK) |
493 ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
494 ANA_SHORT_CABLE_TH_100_MASK) |
495 ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
496 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
497 ALC_MII_DBG_ADDR, MII_ANA_CFG54);
498 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
499 ALC_MII_DBG_DATA, data);
501 data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
502 ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
503 ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
504 ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
505 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
506 ALC_MII_DBG_ADDR, MII_ANA_CFG4);
507 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
508 ALC_MII_DBG_DATA, data);
510 data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
511 ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
513 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
514 ALC_MII_DBG_ADDR, MII_ANA_CFG0);
515 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
516 ALC_MII_DBG_DATA, data);
521 alc_phy_down(struct alc_softc *sc)
524 /* Force PHY down. */
525 CSR_WRITE_2(sc, ALC_GPHY_CFG,
526 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
527 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW);
532 alc_aspm(struct alc_softc *sc)
538 pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
539 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
540 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB;
541 pmcfg |= PM_CFG_SERDES_L1_ENB;
542 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
543 pmcfg |= PM_CFG_MAC_ASPM_CHK;
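/* Keep the SerDes PLL running in L1 while a link is up; otherwise switch to the slower L1 clock. */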
544 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
545 pmcfg |= PM_CFG_SERDES_PLL_L1_ENB;
546 pmcfg &= ~PM_CFG_CLK_SWH_L1;
547 pmcfg &= ~PM_CFG_ASPM_L1_ENB;
548 pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
550 pmcfg &= ~PM_CFG_SERDES_PLL_L1_ENB;
551 pmcfg |= PM_CFG_CLK_SWH_L1;
552 pmcfg &= ~PM_CFG_ASPM_L1_ENB;
553 pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
555 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
559 alc_attach(device_t dev)
561 struct alc_softc *sc;
563 char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
565 int base, error, i, msic, msixc, pmc, state;
566 uint32_t cap, ctl, val;
569 sc = device_get_softc(dev);
572 mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
574 callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
575 TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
577 /* Map the device. */
578 pci_enable_busmaster(dev);
579 sc->alc_res_spec = alc_res_spec_mem;
580 sc->alc_irq_spec = alc_irq_spec_legacy;
581 error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
583 device_printf(dev, "cannot allocate memory resources.\n");
587 /* Set PHY address. */
588 sc->alc_phyaddr = ALC_PHY_ADDR;
590 /* Initialize DMA parameters. */
591 sc->alc_dma_rd_burst = 0;
592 sc->alc_dma_wr_burst = 0;
593 sc->alc_rcb = DMA_CFG_RCB_64;
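/* On PCIe parts, derive the DMA read/write burst sizes from the Device Control register and the RCB from Link Control. */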
594 if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
595 sc->alc_flags |= ALC_FLAG_PCIE;
596 burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
597 sc->alc_dma_rd_burst =
598 (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
599 sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
601 device_printf(dev, "Read request size : %u bytes.\n",
602 alc_dma_burst[sc->alc_dma_rd_burst]);
603 device_printf(dev, "TLP payload size : %u bytes.\n",
604 alc_dma_burst[sc->alc_dma_wr_burst]);
606 /* Clear data link and flow-control protocol error. */
607 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
608 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
609 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
610 /* Disable ASPM L0S and L1. */
611 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
612 if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
613 ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
614 if ((ctl & 0x08) != 0)
615 sc->alc_rcb = DMA_CFG_RCB_128;
617 device_printf(dev, "RCB %u bytes\n",
618 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
621 device_printf(sc->alc_dev, "ASPM %s %s\n",
623 state == 0 ? "disabled" : "enabled");
625 alc_disable_l0s_l1(sc);
632 /* Reset the ethernet controller. */
636 * One odd thing is that the AR8132 uses the same PHY hardware (F1
637 * gigabit PHY) as the AR8131, so atphy(4) reports that the AR8132's
638 * PHY supports 1000Mbps, but that is not true. The PHY used in the
639 * AR8132 cannot establish a gigabit link even though it reports the
640 * same PHY model/revision number as the AR8131.
642 if (pci_get_device(dev) == DEVICEID_ATHEROS_AR8132)
643 sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_JUMBO;
645 sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
647 * It seems that the AR8131/AR8132 has a silicon bug in the SMB. In
648 * addition, Atheros said that enabling the SMB wouldn't improve
649 * performance. Still, it is unfortunate to have to access lots of
650 * registers just to extract MAC statistics.
652 sc->alc_flags |= ALC_FLAG_SMB_BUG;
654 * Don't use the Tx CMB. It is known to have a silicon bug.
656 sc->alc_flags |= ALC_FLAG_CMB_BUG;
657 sc->alc_rev = pci_get_revid(dev);
658 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
659 MASTER_CHIP_REV_SHIFT;
661 device_printf(dev, "PCI device revision : 0x%04x\n",
663 device_printf(dev, "Chip id/revision : 0x%04x\n",
666 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
667 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
668 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
670 /* Allocate IRQ resources. */
671 msixc = pci_msix_count(dev);
672 msic = pci_msi_count(dev);
674 device_printf(dev, "MSIX count : %d\n", msixc);
675 device_printf(dev, "MSI count : %d\n", msic);
677 /* Prefer MSIX over MSI. */
678 if (msix_disable == 0 || msi_disable == 0) {
679 if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
680 pci_alloc_msix(dev, &msixc) == 0) {
681 if (msic == ALC_MSIX_MESSAGES) {
683 "Using %d MSIX message(s).\n", msixc);
684 sc->alc_flags |= ALC_FLAG_MSIX;
685 sc->alc_irq_spec = alc_irq_spec_msix;
687 pci_release_msi(dev);
689 if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
690 msic == ALC_MSI_MESSAGES &&
691 pci_alloc_msi(dev, &msic) == 0) {
692 if (msic == ALC_MSI_MESSAGES) {
694 "Using %d MSI message(s).\n", msic);
695 sc->alc_flags |= ALC_FLAG_MSI;
696 sc->alc_irq_spec = alc_irq_spec_msi;
698 pci_release_msi(dev);
702 error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
704 device_printf(dev, "cannot allocate IRQ resources.\n");
708 /* Create device sysctl node. */
711 if ((error = alc_dma_alloc(sc) != 0))
714 /* Load station address. */
717 ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
719 device_printf(dev, "cannot allocate ifnet structure.\n");
725 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
726 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
727 ifp->if_ioctl = alc_ioctl;
728 ifp->if_start = alc_start;
729 ifp->if_init = alc_init;
730 ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
731 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
732 IFQ_SET_READY(&ifp->if_snd);
733 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
734 ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
735 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
736 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
737 ifp->if_capenable = ifp->if_capabilities;
739 /* Set up MII bus. */
740 if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
741 alc_mediastatus)) != 0) {
742 device_printf(dev, "no PHY found!\n");
746 ether_ifattach(ifp, sc->alc_eaddr);
748 /* VLAN capability setup. */
749 ifp->if_capabilities |= IFCAP_VLAN_MTU;
750 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
751 ifp->if_capenable = ifp->if_capabilities;
754 * It seems that enabling Tx checksum offloading causes more trouble.
755 * Sometimes the controller does not receive any frames when Tx
756 * checksum offloading is enabled. I'm not sure whether this is a bug
757 * in the Tx checksum offloading logic or whether I got broken sample
758 * boards. To be safe, don't enable Tx checksum offloading by
759 * default, but give users a chance to toggle it if they know their
760 * controllers work without problems.
762 ifp->if_capenable &= ~IFCAP_TXCSUM;
763 ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
765 /* Tell the upper layer(s) we support long frames. */
766 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
768 /* Create local taskq. */
769 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
770 sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
771 taskqueue_thread_enqueue, &sc->alc_tq);
772 if (sc->alc_tq == NULL) {
773 device_printf(dev, "could not create taskqueue.\n");
778 taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
779 device_get_nameunit(sc->alc_dev));
781 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
782 msic = ALC_MSIX_MESSAGES;
783 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
784 msic = ALC_MSI_MESSAGES;
787 for (i = 0; i < msic; i++) {
788 error = bus_setup_intr(dev, sc->alc_irq[i],
789 INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
790 &sc->alc_intrhand[i]);
795 device_printf(dev, "could not set up interrupt handler.\n");
796 taskqueue_free(sc->alc_tq);
810 alc_detach(device_t dev)
812 struct alc_softc *sc;
816 sc = device_get_softc(dev);
819 if (device_is_attached(dev)) {
821 sc->alc_flags |= ALC_FLAG_DETACH;
824 callout_drain(&sc->alc_tick_ch);
825 taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
826 taskqueue_drain(sc->alc_tq, &sc->alc_tx_task);
830 if (sc->alc_tq != NULL) {
831 taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
832 taskqueue_free(sc->alc_tq);
836 if (sc->alc_miibus != NULL) {
837 device_delete_child(dev, sc->alc_miibus);
838 sc->alc_miibus = NULL;
840 bus_generic_detach(dev);
848 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
849 msic = ALC_MSIX_MESSAGES;
850 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
851 msic = ALC_MSI_MESSAGES;
854 for (i = 0; i < msic; i++) {
855 if (sc->alc_intrhand[i] != NULL) {
856 bus_teardown_intr(dev, sc->alc_irq[i],
857 sc->alc_intrhand[i]);
858 sc->alc_intrhand[i] = NULL;
861 if (sc->alc_res[0] != NULL)
863 bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
864 if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
865 pci_release_msi(dev);
866 bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
867 mtx_destroy(&sc->alc_mtx);
872 #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \
873 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
874 #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \
875 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
878 alc_sysctl_node(struct alc_softc *sc)
880 struct sysctl_ctx_list *ctx;
881 struct sysctl_oid_list *child, *parent;
882 struct sysctl_oid *tree;
883 struct alc_hw_stats *stats;
886 stats = &sc->alc_stats;
887 ctx = device_get_sysctl_ctx(sc->alc_dev);
888 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));
890 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
891 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
892 sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
893 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
894 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
895 sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
896 /* Pull in device tunables. */
897 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
898 error = resource_int_value(device_get_name(sc->alc_dev),
899 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
901 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
902 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
903 device_printf(sc->alc_dev, "int_rx_mod value out of "
904 "range; using default: %d\n",
905 ALC_IM_RX_TIMER_DEFAULT);
906 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
909 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
910 error = resource_int_value(device_get_name(sc->alc_dev),
911 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
913 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
914 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
915 device_printf(sc->alc_dev, "int_tx_mod value out of "
916 "range; using default: %d\n",
917 ALC_IM_TX_TIMER_DEFAULT);
918 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
921 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
922 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
923 sysctl_hw_alc_proc_limit, "I",
924 "max number of Rx events to process");
925 /* Pull in device tunables. */
926 sc->alc_process_limit = ALC_PROC_DEFAULT;
927 error = resource_int_value(device_get_name(sc->alc_dev),
928 device_get_unit(sc->alc_dev), "process_limit",
929 &sc->alc_process_limit);
931 if (sc->alc_process_limit < ALC_PROC_MIN ||
932 sc->alc_process_limit > ALC_PROC_MAX) {
933 device_printf(sc->alc_dev,
934 "process_limit value out of range; "
935 "using default: %d\n", ALC_PROC_DEFAULT);
936 sc->alc_process_limit = ALC_PROC_DEFAULT;
940 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
941 NULL, "ALC statistics");
942 parent = SYSCTL_CHILDREN(tree);
945 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
946 NULL, "Rx MAC statistics");
947 child = SYSCTL_CHILDREN(tree);
948 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
949 &stats->rx_frames, "Good frames");
950 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
951 &stats->rx_bcast_frames, "Good broadcast frames");
952 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
953 &stats->rx_mcast_frames, "Good multicast frames");
954 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
955 &stats->rx_pause_frames, "Pause control frames");
956 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
957 &stats->rx_control_frames, "Control frames");
958 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
959 &stats->rx_crcerrs, "CRC errors");
960 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
961 &stats->rx_lenerrs, "Frames with mismatched length");
962 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
963 &stats->rx_bytes, "Good octets");
964 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
965 &stats->rx_bcast_bytes, "Good broadcast octets");
966 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
967 &stats->rx_mcast_bytes, "Good multicast octets");
968 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
969 &stats->rx_runts, "Too short frames");
970 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
971 &stats->rx_fragments, "Fragmented frames");
972 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
973 &stats->rx_pkts_64, "64 bytes frames");
974 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
975 &stats->rx_pkts_65_127, "65 to 127 bytes frames");
976 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
977 &stats->rx_pkts_128_255, "128 to 255 bytes frames");
978 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
979 &stats->rx_pkts_256_511, "256 to 511 bytes frames");
980 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
981 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
982 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
983 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
984 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
985 &stats->rx_pkts_1519_max, "1519 to max frames");
986 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
987 &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
988 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
989 &stats->rx_fifo_oflows, "FIFO overflows");
990 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
991 &stats->rx_rrs_errs, "Return status write-back errors");
992 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
993 &stats->rx_alignerrs, "Alignment errors");
994 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
995 &stats->rx_pkts_filtered,
996 "Frames dropped due to address filtering");
999 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1000 NULL, "Tx MAC statistics");
1001 child = SYSCTL_CHILDREN(tree);
1002 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1003 &stats->tx_frames, "Good frames");
1004 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1005 &stats->tx_bcast_frames, "Good broadcast frames");
1006 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1007 &stats->tx_mcast_frames, "Good multicast frames");
1008 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1009 &stats->tx_pause_frames, "Pause control frames");
1010 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1011 &stats->tx_control_frames, "Control frames");
1012 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
1013 &stats->tx_excess_defer, "Frames with excessive deferrals");
1014 ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
1015 &stats->tx_excess_defer, "Frames with deferrals");
1016 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1017 &stats->tx_bytes, "Good octets");
1018 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1019 &stats->tx_bcast_bytes, "Good broadcast octets");
1020 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1021 &stats->tx_mcast_bytes, "Good multicast octets");
1022 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1023 &stats->tx_pkts_64, "64 bytes frames");
1024 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1025 &stats->tx_pkts_65_127, "65 to 127 bytes frames");
1026 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1027 &stats->tx_pkts_128_255, "128 to 255 bytes frames");
1028 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1029 &stats->tx_pkts_256_511, "256 to 511 bytes frames");
1030 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1031 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
1032 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1033 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
1034 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1035 &stats->tx_pkts_1519_max, "1519 to max frames");
1036 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
1037 &stats->tx_single_colls, "Single collisions");
1038 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
1039 &stats->tx_multi_colls, "Multiple collisions");
1040 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
1041 &stats->tx_late_colls, "Late collisions");
1042 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
1043 &stats->tx_excess_colls, "Excessive collisions");
1044 ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
1045 &stats->tx_abort, "Aborted frames due to excessive collisions");
1046 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
1047 &stats->tx_underrun, "FIFO underruns");
1048 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
1049 &stats->tx_desc_underrun, "Descriptor write-back errors");
1050 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1051 &stats->tx_lenerrs, "Frames with mismatched length");
1052 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1053 &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
1056 #undef ALC_SYSCTL_STAT_ADD32
1057 #undef ALC_SYSCTL_STAT_ADD64
1059 struct alc_dmamap_arg {
1060 bus_addr_t alc_busaddr;
1064 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1066 struct alc_dmamap_arg *ctx;
1071 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1073 ctx = (struct alc_dmamap_arg *)arg;
1074 ctx->alc_busaddr = segs[0].ds_addr;
1078 * Normal and high priority Tx descriptors share a single Tx high address.
1079 * The four Rx descriptor/return rings and the CMB share the same Rx high address.
1083 alc_check_boundary(struct alc_softc *sc)
1085 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;
1087 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
1088 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
1089 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
1090 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;
1092 /* 4GB boundary crossing is not allowed. */
1093 if ((ALC_ADDR_HI(rx_ring_end) !=
1094 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
1095 (ALC_ADDR_HI(rr_ring_end) !=
1096 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
1097 (ALC_ADDR_HI(cmb_end) !=
1098 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
1099 (ALC_ADDR_HI(tx_ring_end) !=
1100 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
1103 * Make sure Rx return descriptor/Rx descriptor/CMB use
1104 * the same high address.
1106 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
1107 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
1114 alc_dma_alloc(struct alc_softc *sc)
1116 struct alc_txdesc *txd;
1117 struct alc_rxdesc *rxd;
1119 struct alc_dmamap_arg ctx;
1122 lowaddr = BUS_SPACE_MAXADDR;
1124 /* Create parent DMA tag. */
1125 error = bus_dma_tag_create(
1126 bus_get_dma_tag(sc->alc_dev), /* parent */
1127 1, 0, /* alignment, boundary */
1128 lowaddr, /* lowaddr */
1129 BUS_SPACE_MAXADDR, /* highaddr */
1130 NULL, NULL, /* filter, filterarg */
1131 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1133 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1135 NULL, NULL, /* lockfunc, lockarg */
1136 &sc->alc_cdata.alc_parent_tag);
1138 device_printf(sc->alc_dev,
1139 "could not create parent DMA tag.\n");
1143 /* Create DMA tag for Tx descriptor ring. */
1144 error = bus_dma_tag_create(
1145 sc->alc_cdata.alc_parent_tag, /* parent */
1146 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */
1147 BUS_SPACE_MAXADDR, /* lowaddr */
1148 BUS_SPACE_MAXADDR, /* highaddr */
1149 NULL, NULL, /* filter, filterarg */
1150 ALC_TX_RING_SZ, /* maxsize */
1152 ALC_TX_RING_SZ, /* maxsegsize */
1154 NULL, NULL, /* lockfunc, lockarg */
1155 &sc->alc_cdata.alc_tx_ring_tag);
1157 device_printf(sc->alc_dev,
1158 "could not create Tx ring DMA tag.\n");
1162 /* Create DMA tag for Rx free descriptor ring. */
1163 error = bus_dma_tag_create(
1164 sc->alc_cdata.alc_parent_tag, /* parent */
1165 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */
1166 BUS_SPACE_MAXADDR, /* lowaddr */
1167 BUS_SPACE_MAXADDR, /* highaddr */
1168 NULL, NULL, /* filter, filterarg */
1169 ALC_RX_RING_SZ, /* maxsize */
1171 ALC_RX_RING_SZ, /* maxsegsize */
1173 NULL, NULL, /* lockfunc, lockarg */
1174 &sc->alc_cdata.alc_rx_ring_tag);
1176 device_printf(sc->alc_dev,
1177 "could not create Rx ring DMA tag.\n");
1180 /* Create DMA tag for Rx return descriptor ring. */
1181 error = bus_dma_tag_create(
1182 sc->alc_cdata.alc_parent_tag, /* parent */
1183 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */
1184 BUS_SPACE_MAXADDR, /* lowaddr */
1185 BUS_SPACE_MAXADDR, /* highaddr */
1186 NULL, NULL, /* filter, filterarg */
1187 ALC_RR_RING_SZ, /* maxsize */
1189 ALC_RR_RING_SZ, /* maxsegsize */
1191 NULL, NULL, /* lockfunc, lockarg */
1192 &sc->alc_cdata.alc_rr_ring_tag);
1194 device_printf(sc->alc_dev,
1195 "could not create Rx return ring DMA tag.\n");
1199 /* Create DMA tag for coalescing message block. */
1200 error = bus_dma_tag_create(
1201 sc->alc_cdata.alc_parent_tag, /* parent */
1202 ALC_CMB_ALIGN, 0, /* alignment, boundary */
1203 BUS_SPACE_MAXADDR, /* lowaddr */
1204 BUS_SPACE_MAXADDR, /* highaddr */
1205 NULL, NULL, /* filter, filterarg */
1206 ALC_CMB_SZ, /* maxsize */
1208 ALC_CMB_SZ, /* maxsegsize */
1210 NULL, NULL, /* lockfunc, lockarg */
1211 &sc->alc_cdata.alc_cmb_tag);
1213 device_printf(sc->alc_dev,
1214 "could not create CMB DMA tag.\n");
1217 /* Create DMA tag for status message block. */
1218 error = bus_dma_tag_create(
1219 sc->alc_cdata.alc_parent_tag, /* parent */
1220 ALC_SMB_ALIGN, 0, /* alignment, boundary */
1221 BUS_SPACE_MAXADDR, /* lowaddr */
1222 BUS_SPACE_MAXADDR, /* highaddr */
1223 NULL, NULL, /* filter, filterarg */
1224 ALC_SMB_SZ, /* maxsize */
1226 ALC_SMB_SZ, /* maxsegsize */
1228 NULL, NULL, /* lockfunc, lockarg */
1229 &sc->alc_cdata.alc_smb_tag);
1231 device_printf(sc->alc_dev,
1232 "could not create SMB DMA tag.\n");
1236 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1237 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
1238 (void **)&sc->alc_rdata.alc_tx_ring,
1239 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1240 &sc->alc_cdata.alc_tx_ring_map);
1242 device_printf(sc->alc_dev,
1243 "could not allocate DMA'able memory for Tx ring.\n");
1246 ctx.alc_busaddr = 0;
1247 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
1248 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
1249 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1250 if (error != 0 || ctx.alc_busaddr == 0) {
1251 device_printf(sc->alc_dev,
1252 "could not load DMA'able memory for Tx ring.\n");
1255 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;
1257 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1258 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
1259 (void **)&sc->alc_rdata.alc_rx_ring,
1260 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1261 &sc->alc_cdata.alc_rx_ring_map);
1263 device_printf(sc->alc_dev,
1264 "could not allocate DMA'able memory for Rx ring.\n");
1267 ctx.alc_busaddr = 0;
1268 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
1269 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
1270 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1271 if (error != 0 || ctx.alc_busaddr == 0) {
1272 device_printf(sc->alc_dev,
1273 "could not load DMA'able memory for Rx ring.\n");
1276 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;
1278 /* Allocate DMA'able memory and load the DMA map for Rx return ring. */
1279 error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
1280 (void **)&sc->alc_rdata.alc_rr_ring,
1281 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1282 &sc->alc_cdata.alc_rr_ring_map);
1284 device_printf(sc->alc_dev,
1285 "could not allocate DMA'able memory for Rx return ring.\n");
1288 ctx.alc_busaddr = 0;
1289 error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
1290 sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
1291 ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
1292 if (error != 0 || ctx.alc_busaddr == 0) {
1293 device_printf(sc->alc_dev,
1294 "could not load DMA'able memory for Rx return ring.\n");
1297 sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;
1299 /* Allocate DMA'able memory and load the DMA map for CMB. */
1300 error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
1301 (void **)&sc->alc_rdata.alc_cmb,
1302 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1303 &sc->alc_cdata.alc_cmb_map);
1305 device_printf(sc->alc_dev,
1306 "could not allocate DMA'able memory for CMB.\n");
1309 ctx.alc_busaddr = 0;
1310 error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
1311 sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
1312 ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
1313 if (error != 0 || ctx.alc_busaddr == 0) {
1314 device_printf(sc->alc_dev,
1315 "could not load DMA'able memory for CMB.\n");
1318 sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;
1320 /* Allocate DMA'able memory and load the DMA map for SMB. */
1321 error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
1322 (void **)&sc->alc_rdata.alc_smb,
1323 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1324 &sc->alc_cdata.alc_smb_map);
1326 device_printf(sc->alc_dev,
1327 "could not allocate DMA'able memory for SMB.\n");
1330 ctx.alc_busaddr = 0;
1331 error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
1332 sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
1333 ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
1334 if (error != 0 || ctx.alc_busaddr == 0) {
1335 device_printf(sc->alc_dev,
1336 "could not load DMA'able memory for SMB.\n");
1339 sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
1341 /* Make sure we've not crossed 4GB boundary. */
1342 if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
1343 (error = alc_check_boundary(sc)) != 0) {
1344 device_printf(sc->alc_dev, "4GB boundary crossed, "
1345 "switching to 32bit DMA addressing mode.\n");
1348 * Limit max allowable DMA address space to 32bit
1351 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1357 * Create Tx/Rx buffer parent tag.
1358 * The AR8131/AR8132 allows 64bit DMA addressing of Tx/Rx buffers,
1359 * so the buffers need a separate parent DMA tag, as the ring parent
1360 * tag's DMA address space may have been restricted to 32bit
1361 * addressing above because of a 4GB boundary crossing.
1362 error = bus_dma_tag_create(
1363 bus_get_dma_tag(sc->alc_dev), /* parent */
1364 1, 0, /* alignment, boundary */
1365 BUS_SPACE_MAXADDR, /* lowaddr */
1366 BUS_SPACE_MAXADDR, /* highaddr */
1367 NULL, NULL, /* filter, filterarg */
1368 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1370 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1372 NULL, NULL, /* lockfunc, lockarg */
1373 &sc->alc_cdata.alc_buffer_tag);
1375 device_printf(sc->alc_dev,
1376 "could not create parent buffer DMA tag.\n");
1380 /* Create DMA tag for Tx buffers. */
1381 error = bus_dma_tag_create(
1382 sc->alc_cdata.alc_buffer_tag, /* parent */
1383 1, 0, /* alignment, boundary */
1384 BUS_SPACE_MAXADDR, /* lowaddr */
1385 BUS_SPACE_MAXADDR, /* highaddr */
1386 NULL, NULL, /* filter, filterarg */
1387 ALC_TSO_MAXSIZE, /* maxsize */
1388 ALC_MAXTXSEGS, /* nsegments */
1389 ALC_TSO_MAXSEGSIZE, /* maxsegsize */
1391 NULL, NULL, /* lockfunc, lockarg */
1392 &sc->alc_cdata.alc_tx_tag);
1394 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
1398 /* Create DMA tag for Rx buffers. */
1399 error = bus_dma_tag_create(
1400 sc->alc_cdata.alc_buffer_tag, /* parent */
1401 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */
1402 BUS_SPACE_MAXADDR, /* lowaddr */
1403 BUS_SPACE_MAXADDR, /* highaddr */
1404 NULL, NULL, /* filter, filterarg */
1405 MCLBYTES, /* maxsize */
1407 MCLBYTES, /* maxsegsize */
1409 NULL, NULL, /* lockfunc, lockarg */
1410 &sc->alc_cdata.alc_rx_tag);
1412 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
1415 /* Create DMA maps for Tx buffers. */
1416 for (i = 0; i < ALC_TX_RING_CNT; i++) {
1417 txd = &sc->alc_cdata.alc_txdesc[i];
1419 txd->tx_dmamap = NULL;
1420 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
1423 device_printf(sc->alc_dev,
1424 "could not create Tx dmamap.\n");
1428 /* Create DMA maps for Rx buffers. */
1429 if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1430 &sc->alc_cdata.alc_rx_sparemap)) != 0) {
1431 device_printf(sc->alc_dev,
1432 "could not create spare Rx dmamap.\n");
1435 for (i = 0; i < ALC_RX_RING_CNT; i++) {
1436 rxd = &sc->alc_cdata.alc_rxdesc[i];
1438 rxd->rx_dmamap = NULL;
1439 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1442 device_printf(sc->alc_dev,
1443 "could not create Rx dmamap.\n");
1453 alc_dma_free(struct alc_softc *sc)
1455 struct alc_txdesc *txd;
1456 struct alc_rxdesc *rxd;
1460 if (sc->alc_cdata.alc_tx_tag != NULL) {
1461 for (i = 0; i < ALC_TX_RING_CNT; i++) {
1462 txd = &sc->alc_cdata.alc_txdesc[i];
1463 if (txd->tx_dmamap != NULL) {
1464 bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
1466 txd->tx_dmamap = NULL;
1469 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
1470 sc->alc_cdata.alc_tx_tag = NULL;
1473 if (sc->alc_cdata.alc_rx_tag != NULL) {
1474 for (i = 0; i < ALC_RX_RING_CNT; i++) {
1475 rxd = &sc->alc_cdata.alc_rxdesc[i];
1476 if (rxd->rx_dmamap != NULL) {
1477 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1479 rxd->rx_dmamap = NULL;
1482 if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1483 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1484 sc->alc_cdata.alc_rx_sparemap);
1485 sc->alc_cdata.alc_rx_sparemap = NULL;
1487 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
1488 sc->alc_cdata.alc_rx_tag = NULL;
1490 /* Tx descriptor ring. */
1491 if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
1492 if (sc->alc_cdata.alc_tx_ring_map != NULL)
1493 bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
1494 sc->alc_cdata.alc_tx_ring_map);
1495 if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1496 sc->alc_rdata.alc_tx_ring != NULL)
1497 bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
1498 sc->alc_rdata.alc_tx_ring,
1499 sc->alc_cdata.alc_tx_ring_map);
1500 sc->alc_rdata.alc_tx_ring = NULL;
1501 sc->alc_cdata.alc_tx_ring_map = NULL;
1502 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
1503 sc->alc_cdata.alc_tx_ring_tag = NULL;
1506 if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
1507 if (sc->alc_cdata.alc_rx_ring_map != NULL)
1508 bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
1509 sc->alc_cdata.alc_rx_ring_map);
1510 if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1511 sc->alc_rdata.alc_rx_ring != NULL)
1512 bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
1513 sc->alc_rdata.alc_rx_ring,
1514 sc->alc_cdata.alc_rx_ring_map);
1515 sc->alc_rdata.alc_rx_ring = NULL;
1516 sc->alc_cdata.alc_rx_ring_map = NULL;
1517 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
1518 sc->alc_cdata.alc_rx_ring_tag = NULL;
1520 /* Rx return ring. */
1521 if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
1522 if (sc->alc_cdata.alc_rr_ring_map != NULL)
1523 bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
1524 sc->alc_cdata.alc_rr_ring_map);
1525 if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1526 sc->alc_rdata.alc_rr_ring != NULL)
1527 bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
1528 sc->alc_rdata.alc_rr_ring,
1529 sc->alc_cdata.alc_rr_ring_map);
1530 sc->alc_rdata.alc_rr_ring = NULL;
1531 sc->alc_cdata.alc_rr_ring_map = NULL;
1532 bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
1533 sc->alc_cdata.alc_rr_ring_tag = NULL;
1536 if (sc->alc_cdata.alc_cmb_tag != NULL) {
1537 if (sc->alc_cdata.alc_cmb_map != NULL)
1538 bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
1539 sc->alc_cdata.alc_cmb_map);
1540 if (sc->alc_cdata.alc_cmb_map != NULL &&
1541 sc->alc_rdata.alc_cmb != NULL)
1542 bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
1543 sc->alc_rdata.alc_cmb,
1544 sc->alc_cdata.alc_cmb_map);
1545 sc->alc_rdata.alc_cmb = NULL;
1546 sc->alc_cdata.alc_cmb_map = NULL;
1547 bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
1548 sc->alc_cdata.alc_cmb_tag = NULL;
1551 if (sc->alc_cdata.alc_smb_tag != NULL) {
1552 if (sc->alc_cdata.alc_smb_map != NULL)
1553 bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
1554 sc->alc_cdata.alc_smb_map);
1555 if (sc->alc_cdata.alc_smb_map != NULL &&
1556 sc->alc_rdata.alc_smb != NULL)
1557 bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
1558 sc->alc_rdata.alc_smb,
1559 sc->alc_cdata.alc_smb_map);
1560 sc->alc_rdata.alc_smb = NULL;
1561 sc->alc_cdata.alc_smb_map = NULL;
1562 bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
1563 sc->alc_cdata.alc_smb_tag = NULL;
1565 if (sc->alc_cdata.alc_buffer_tag != NULL) {
1566 bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
1567 sc->alc_cdata.alc_buffer_tag = NULL;
1569 if (sc->alc_cdata.alc_parent_tag != NULL) {
1570 bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
1571 sc->alc_cdata.alc_parent_tag = NULL;
1576 alc_shutdown(device_t dev)
1579 return (alc_suspend(dev));
1583 * Note, this driver resets the link speed to 10/100Mbps by
1584 * restarting auto-negotiation in the suspend/shutdown phase, but we
1585 * don't know whether that auto-negotiation will succeed or not,
1586 * as the driver has no control after the power-off/suspend operation.
1587 * If the renegotiation fails, WOL may not work. Running at 1Gbps
1588 * would draw more power than the 375mA at 3.3V specified by the
1589 * PCI specification, and that could result in power to the Ethernet
1590 * controller being shut down completely.
1593 * Save the currently negotiated media speed/duplex/flow-control in
1594 * the softc and restore the same link again after resuming. PHY
1595 * handling such as powering down or resetting to 100Mbps may be
1596 * better handled in the PHY driver's suspend method.
1599 alc_setlinkspeed(struct alc_softc *sc)
1601 struct mii_data *mii;
1604 mii = device_get_softc(sc->alc_miibus);
1607 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1608 (IFM_ACTIVE | IFM_AVALID)) {
1609 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1620 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
1621 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1622 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1623 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1624 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
1628 * Poll link state until alc(4) gets a 10/100Mbps link.
1630 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1632 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1633 == (IFM_ACTIVE | IFM_AVALID)) {
1634 switch (IFM_SUBTYPE(
1635 mii->mii_media_active)) {
1645 pause("alclnk", hz);
1648 if (i == MII_ANEGTICKS_GIGE)
1649 device_printf(sc->alc_dev,
1650 "establishing a link failed, WOL may not work!\n");
1653 * No link, force the MAC to a 100Mbps, full-duplex link.
1654 * This is the last resort and may or may not work.
1656 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1657 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1662 alc_setwol(struct alc_softc *sc)
1665 uint32_t cap, reg, pmcs;
1669 ALC_LOCK_ASSERT(sc);
1671 if (pci_find_extcap(sc->alc_dev, PCIY_EXPRESS, &base) == 0) {
1672 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
1673 if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
1674 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
1675 alc_disable_l0s_l1(sc);
1678 if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) != 0) {
1680 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
1681 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1682 reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1683 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1684 /* Force PHY power down. */
1690 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1691 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
1692 alc_setlinkspeed(sc);
1693 reg = CSR_READ_4(sc, ALC_MASTER_CFG);
1694 reg &= ~MASTER_CLK_SEL_DIS;
1695 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
1699 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
1700 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
1701 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
1702 reg = CSR_READ_4(sc, ALC_MAC_CFG);
1703 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
1705 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
1706 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
1707 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1708 reg |= MAC_CFG_RX_ENB;
1709 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1711 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1712 reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1713 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1714 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1715 /* WOL disabled, PHY power down. */
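/* Request PME# assertion on wakeup only when WOL is enabled. */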
1719 pmstat = pci_read_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, 2);
1720 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1721 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1722 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1723 pci_write_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1727 alc_suspend(device_t dev)
1729 struct alc_softc *sc;
1731 sc = device_get_softc(dev);
1742 alc_resume(device_t dev)
1744 struct alc_softc *sc;
1749 sc = device_get_softc(dev);
1752 if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) == 0) {
1753 /* Disable PME and clear PME status. */
1754 pmstat = pci_read_config(sc->alc_dev,
1755 pmc + PCIR_POWER_STATUS, 2);
1756 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1757 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1758 pci_write_config(sc->alc_dev,
1759 pmc + PCIR_POWER_STATUS, pmstat, 2);
1765 if ((ifp->if_flags & IFF_UP) != 0) {
1766 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1767 alc_init_locked(sc);
1775 alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1777 struct alc_txdesc *txd, *txd_last;
1778 struct tx_desc *desc;
1782 bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
1784 uint32_t cflags, hdrlen, ip_off, poff, vtag;
1785 int error, idx, nsegs, prod;
1787 ALC_LOCK_ASSERT(sc);
1789 M_ASSERTPKTHDR((*m_head));
1795 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
1797 * The AR8131/AR8132 requires the offset of the TCP/UDP header in
1798 * its Tx descriptor to perform Tx checksum offloading. TSO also
1799 * requires the TCP header offset and modification of the IP/TCP
1800 * headers. These operations take many CPU cycles on FreeBSD, so a
1801 * fast host CPU is required to get
1802 * smooth TSO performance.
1804 struct ether_header *eh;
1806 if (M_WRITABLE(m) == 0) {
1807 /* Get a writable copy. */
1808 m = m_dup(*m_head, M_DONTWAIT);
1809 /* Release original mbufs. */
1818 ip_off = sizeof(struct ether_header);
1819 m = m_pullup(m, ip_off);
1824 eh = mtod(m, struct ether_header *);
1826 * Check if hardware VLAN insertion is off.
1827 * Additional check for LLC/SNAP frame?
1829 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1830 ip_off = sizeof(struct ether_vlan_header);
1831 m = m_pullup(m, ip_off);
1837 m = m_pullup(m, ip_off + sizeof(struct ip));
1842 ip = (struct ip *)(mtod(m, char *) + ip_off);
1843 poff = ip_off + (ip->ip_hl << 2);
1844 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1845 m = m_pullup(m, poff + sizeof(struct tcphdr));
1850 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1851 m = m_pullup(m, poff + (tcp->th_off << 2));
1857 * Due to strict adherence to the Microsoft NDIS
1858 * Large Send specification, the hardware expects
1859 * a pseudo TCP checksum inserted by the upper
1860 * stack. Unfortunately, the pseudo TCP checksum
1861 * that NDIS refers to does not include the TCP
1862 * payload length, so the driver must recompute
1863 * the pseudo checksum here. Hopefully this is
1864 * not much of a burden on modern CPUs.
1866 * Reset the IP checksum and recompute the TCP
1867 * pseudo checksum as the NDIS specification requires.
1870 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1871 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1876 prod = sc->alc_cdata.alc_tx_prod;
1877 txd = &sc->alc_cdata.alc_txdesc[prod];
1879 map = txd->tx_dmamap;
1881 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1882 *m_head, txsegs, &nsegs, 0);
1883 if (error == EFBIG) {
1884 m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
1891 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1892 *m_head, txsegs, &nsegs, 0);
1898 } else if (error != 0)
1906 /* Check descriptor overrun. */
1907 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1908 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
1911 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
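/* Build the per-frame Tx descriptor flags. */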
1914 cflags = TD_ETHERNET;
1918 /* Configure VLAN hardware tag insertion. */
1919 if ((m->m_flags & M_VLANTAG) != 0) {
1920 vtag = htons(m->m_pkthdr.ether_vtag);
1921 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1922 cflags |= TD_INS_VLAN_TAG;
1924 /* Configure Tx checksum offload. */
1925 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1926 #ifdef ALC_USE_CUSTOM_CSUM
1927 cflags |= TD_CUSTOM_CSUM;
1928 /* Set checksum start offset. */
1929 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1930 TD_PLOAD_OFFSET_MASK;
1931 /* Set checksum insertion position of TCP/UDP. */
1932 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
1933 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
1935 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1936 cflags |= TD_IPCSUM;
1937 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1938 cflags |= TD_TCPCSUM;
1939 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1940 cflags |= TD_UDPCSUM;
1941 /* Set TCP/UDP header offset. */
1942 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
1943 TD_L4HDR_OFFSET_MASK;
1945 } else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1946 /* Request TSO and set MSS. */
1947 cflags |= TD_TSO | TD_TSO_DESCV1;
1948 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
1950 /* Set TCP header offset. */
1951 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
1952 TD_TCPHDR_OFFSET_MASK;
1954 * The AR8131/AR8132 requires that the first buffer hold only
1955 * the IP/TCP header data. The payload must be handled by the
1956 * following descriptors.
1958 hdrlen = poff + (tcp->th_off << 2);
1959 desc = &sc->alc_rdata.alc_tx_ring[prod];
1960 desc->len = htole32(TX_BYTES(hdrlen | vtag));
1961 desc->flags = htole32(cflags);
1962 desc->addr = htole64(txsegs[0].ds_addr);
1963 sc->alc_cdata.alc_tx_cnt++;
1964 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1965 if (m->m_len - hdrlen > 0) {
1966 /* Handle remaining payload of the first fragment. */
1967 desc = &sc->alc_rdata.alc_tx_ring[prod];
1968 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
1970 desc->flags = htole32(cflags);
1971 desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
1972 sc->alc_cdata.alc_tx_cnt++;
1973 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1975 /* Handle remaining fragments. */
1978 for (; idx < nsegs; idx++) {
1979 desc = &sc->alc_rdata.alc_tx_ring[prod];
1980 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
1981 desc->flags = htole32(cflags);
1982 desc->addr = htole64(txsegs[idx].ds_addr);
1983 sc->alc_cdata.alc_tx_cnt++;
1984 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1986 /* Update producer index. */
1987 sc->alc_cdata.alc_tx_prod = prod;
1989 /* Finally set EOP on the last descriptor. */
1990 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1991 desc = &sc->alc_rdata.alc_tx_ring[prod];
1992 desc->flags |= htole32(TD_EOP);
1994 /* Swap dmamap of the first and the last. */
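/*
 * The dmamap that was actually loaded belongs to the first
 * descriptor's txdesc (txd_last), while the mbuf is recorded in the
 * last descriptor's txdesc. Swapping the two maps keeps the loaded
 * map together with the stored mbuf so alc_txeof() can unload and
 * free them as a pair once the frame has been transmitted.
 */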
1995 txd = &sc->alc_cdata.alc_txdesc[prod];
1996 map = txd_last->tx_dmamap;
1997 txd_last->tx_dmamap = txd->tx_dmamap;
1998 txd->tx_dmamap = map;
2005 alc_tx_task(void *arg, int pending)
2009 ifp = (struct ifnet *)arg;
2014 alc_start(struct ifnet *ifp)
2016 struct alc_softc *sc;
2017 struct mbuf *m_head;
2024 /* Reclaim transmitted frames. */
2025 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2028 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2029 IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) {
2034 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2035 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2039 * Pack the data into the transmit ring. If we
2040 * don't have room, set the OACTIVE flag and wait
2041 * for the NIC to drain the ring.
2043 if (alc_encap(sc, &m_head)) {
2046 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2047 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2053 * If there's a BPF listener, bounce a copy of this frame
2056 ETHER_BPF_MTAP(ifp, m_head);
2060 /* Sync descriptors. */
2061 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2062 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2063 /* Kick. Assume we're using the normal Tx priority queue. */
2064 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2065 (sc->alc_cdata.alc_tx_prod <<
2066 MBOX_TD_PROD_LO_IDX_SHIFT) &
2067 MBOX_TD_PROD_LO_IDX_MASK);
2068 /* Set a timeout in case the chip goes out to lunch. */
2069 sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
2076 alc_watchdog(struct alc_softc *sc)
2080 ALC_LOCK_ASSERT(sc);
2082 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2086 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2087 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2089 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2090 alc_init_locked(sc);
2093 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2095 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2096 alc_init_locked(sc);
2097 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2098 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2102 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2104 struct alc_softc *sc;
2106 struct mii_data *mii;
2110 ifr = (struct ifreq *)data;
2114 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALC_JUMBO_MTU ||
2115 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
2116 ifr->ifr_mtu > ETHERMTU))
2118 else if (ifp->if_mtu != ifr->ifr_mtu) {
2120 ifp->if_mtu = ifr->ifr_mtu;
2121 /* AR8131/AR8132 has a 13-bit MSS field. */
2122 if (ifp->if_mtu > ALC_TSO_MTU &&
2123 (ifp->if_capenable & IFCAP_TSO4) != 0) {
2124 ifp->if_capenable &= ~IFCAP_TSO4;
2125 ifp->if_hwassist &= ~CSUM_TSO;
2132 if ((ifp->if_flags & IFF_UP) != 0) {
2133 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2134 ((ifp->if_flags ^ sc->alc_if_flags) &
2135 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2137 else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0)
2138 alc_init_locked(sc);
2139 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2141 sc->alc_if_flags = ifp->if_flags;
2147 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2153 mii = device_get_softc(sc->alc_miibus);
2154 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2158 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2159 if ((mask & IFCAP_TXCSUM) != 0 &&
2160 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2161 ifp->if_capenable ^= IFCAP_TXCSUM;
2162 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2163 ifp->if_hwassist |= ALC_CSUM_FEATURES;
2165 ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2167 if ((mask & IFCAP_TSO4) != 0 &&
2168 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2169 ifp->if_capenable ^= IFCAP_TSO4;
2170 if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
2171 /* AR8131/AR8132 has a 13-bit MSS field. */
2172 if (ifp->if_mtu > ALC_TSO_MTU) {
2173 ifp->if_capenable &= ~IFCAP_TSO4;
2174 ifp->if_hwassist &= ~CSUM_TSO;
2176 ifp->if_hwassist |= CSUM_TSO;
2178 ifp->if_hwassist &= ~CSUM_TSO;
2180 if ((mask & IFCAP_WOL_MCAST) != 0 &&
2181 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2182 ifp->if_capenable ^= IFCAP_WOL_MCAST;
2183 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2184 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2185 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2186 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2187 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2188 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2191 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2192 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2193 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2194 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2195 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2196 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2198 * VLAN hardware tagging is required to do checksum
2199 * offload or TSO on VLAN interface. Checksum offload
2200 * on VLAN interface also requires hardware checksum
2201 * offload of parent interface.
2203 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
2204 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2205 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2206 ifp->if_capenable &=
2207 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
2209 VLAN_CAPABILITIES(ifp);
2212 error = ether_ioctl(ifp, cmd, data);
2220 alc_mac_config(struct alc_softc *sc)
2222 struct mii_data *mii;
2225 ALC_LOCK_ASSERT(sc);
2227 mii = device_get_softc(sc->alc_miibus);
2228 reg = CSR_READ_4(sc, ALC_MAC_CFG);
2229 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2230 MAC_CFG_SPEED_MASK);
2231 /* Reprogram MAC with resolved speed/duplex. */
2232 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2235 reg |= MAC_CFG_SPEED_10_100;
2238 reg |= MAC_CFG_SPEED_1000;
2241 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2242 reg |= MAC_CFG_FULL_DUPLEX;
2244 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2245 reg |= MAC_CFG_TX_FC;
2246 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2247 reg |= MAC_CFG_RX_FC;
2250 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2254 alc_stats_clear(struct alc_softc *sc)
2256 struct smb sb, *smb;
2260 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2261 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2262 sc->alc_cdata.alc_smb_map,
2263 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2264 smb = sc->alc_rdata.alc_smb;
2265 /* Update done, clear. */
2267 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2268 sc->alc_cdata.alc_smb_map,
2269 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2271 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2273 CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2274 i += sizeof(uint32_t);
2276 /* Read Tx statistics. */
2277 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2279 CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2280 i += sizeof(uint32_t);
2286 alc_stats_update(struct alc_softc *sc)
2288 struct alc_hw_stats *stat;
2289 struct smb sb, *smb;
2294 ALC_LOCK_ASSERT(sc);
2297 stat = &sc->alc_stats;
2298 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2299 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2300 sc->alc_cdata.alc_smb_map,
2301 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2302 smb = sc->alc_rdata.alc_smb;
2303 if (smb->updated == 0)
2307 /* Read Rx statistics. */
2308 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2310 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2311 i += sizeof(uint32_t);
2313 /* Read Tx statistics. */
2314 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2316 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2317 i += sizeof(uint32_t);
2322 stat->rx_frames += smb->rx_frames;
2323 stat->rx_bcast_frames += smb->rx_bcast_frames;
2324 stat->rx_mcast_frames += smb->rx_mcast_frames;
2325 stat->rx_pause_frames += smb->rx_pause_frames;
2326 stat->rx_control_frames += smb->rx_control_frames;
2327 stat->rx_crcerrs += smb->rx_crcerrs;
2328 stat->rx_lenerrs += smb->rx_lenerrs;
2329 stat->rx_bytes += smb->rx_bytes;
2330 stat->rx_runts += smb->rx_runts;
2331 stat->rx_fragments += smb->rx_fragments;
2332 stat->rx_pkts_64 += smb->rx_pkts_64;
2333 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2334 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2335 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2336 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2337 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2338 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2339 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2340 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2341 stat->rx_rrs_errs += smb->rx_rrs_errs;
2342 stat->rx_alignerrs += smb->rx_alignerrs;
2343 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2344 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2345 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2348 stat->tx_frames += smb->tx_frames;
2349 stat->tx_bcast_frames += smb->tx_bcast_frames;
2350 stat->tx_mcast_frames += smb->tx_mcast_frames;
2351 stat->tx_pause_frames += smb->tx_pause_frames;
2352 stat->tx_excess_defer += smb->tx_excess_defer;
2353 stat->tx_control_frames += smb->tx_control_frames;
2354 stat->tx_deferred += smb->tx_deferred;
2355 stat->tx_bytes += smb->tx_bytes;
2356 stat->tx_pkts_64 += smb->tx_pkts_64;
2357 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2358 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2359 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2360 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2361 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2362 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2363 stat->tx_single_colls += smb->tx_single_colls;
2364 stat->tx_multi_colls += smb->tx_multi_colls;
2365 stat->tx_late_colls += smb->tx_late_colls;
2366 stat->tx_excess_colls += smb->tx_excess_colls;
2367 stat->tx_abort += smb->tx_abort;
2368 stat->tx_underrun += smb->tx_underrun;
2369 stat->tx_desc_underrun += smb->tx_desc_underrun;
2370 stat->tx_lenerrs += smb->tx_lenerrs;
2371 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2372 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2373 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2375 /* Update counters in ifnet. */
2376 ifp->if_opackets += smb->tx_frames;
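/*
 * Rough collision estimate: frames that saw multiple collisions are
 * counted twice, and each aborted frame is assumed to have used up
 * the full retry budget (HDPX_CFG_RETRY_DEFAULT attempts).
 */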
2378 ifp->if_collisions += smb->tx_single_colls +
2379 smb->tx_multi_colls * 2 + smb->tx_late_colls +
2380 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
2384 * The tx_pkts_truncated counter looks suspicious. It constantly
2385 * increments with no sign of Tx errors. This may indicate that
2386 * the counter name is not the correct one, so it is not
2387 * included in the output error count.
2389 ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
2392 ifp->if_ipackets += smb->rx_frames;
2394 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2395 smb->rx_runts + smb->rx_pkts_truncated +
2396 smb->rx_fifo_oflows + smb->rx_rrs_errs +
2399 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2400 /* Update done, clear. */
2402 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2403 sc->alc_cdata.alc_smb_map,
2404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2411 struct alc_softc *sc;
2414 sc = (struct alc_softc *)arg;
2416 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2417 if ((status & ALC_INTRS) == 0)
2418 return (FILTER_STRAY);
2419 /* Disable interrupts. */
2420 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2421 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2423 return (FILTER_HANDLED);
2427 alc_int_task(void *arg, int pending)
2429 struct alc_softc *sc;
2434 sc = (struct alc_softc *)arg;
2437 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2438 more = atomic_readandclear_int(&sc->alc_morework);
2440 status |= INTR_RX_PKT;
2441 if ((status & ALC_INTRS) == 0)
2444 /* Acknowledge interrupts but still disable interrupts. */
2445 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2448 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2449 if ((status & INTR_RX_PKT) != 0) {
2450 more = alc_rxintr(sc, sc->alc_process_limit);
2452 atomic_set_int(&sc->alc_morework, 1);
2453 else if (more == EIO) {
2455 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2456 alc_init_locked(sc);
2461 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2462 INTR_TXQ_TO_RST)) != 0) {
2463 if ((status & INTR_DMA_RD_TO_RST) != 0)
2464 device_printf(sc->alc_dev,
2465 "DMA read error! -- resetting\n");
2466 if ((status & INTR_DMA_WR_TO_RST) != 0)
2467 device_printf(sc->alc_dev,
2468 "DMA write error! -- resetting\n");
2469 if ((status & INTR_TXQ_TO_RST) != 0)
2470 device_printf(sc->alc_dev,
2471 "TxQ reset! -- resetting\n");
2473 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2474 alc_init_locked(sc);
2478 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2479 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2480 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2483 if (more == EAGAIN ||
2484 (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
2485 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2490 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2491 /* Re-enable interrupts if we're running. */
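/*
 * Writing the status register with every bit except the top
 * (interrupt-disable) bit set presumably acknowledges any remaining
 * status while leaving interrupt generation enabled.
 */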
2492 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2497 alc_txeof(struct alc_softc *sc)
2500 struct alc_txdesc *txd;
2501 uint32_t cons, prod;
2504 ALC_LOCK_ASSERT(sc);
2508 if (sc->alc_cdata.alc_tx_cnt == 0)
2510 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2511 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
2512 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2513 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2514 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
2515 prod = sc->alc_rdata.alc_cmb->cons;
2517 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2518 /* Assume we're using the normal Tx priority queue. */
2519 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2520 MBOX_TD_CONS_LO_IDX_SHIFT;
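/*
 * Despite the variable name, 'prod' now holds the hardware's Tx
 * consumer index; every descriptor between the driver's alc_tx_cons
 * and this value has been transmitted and can be reclaimed.
 */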
2521 cons = sc->alc_cdata.alc_tx_cons;
2523 * Go through our Tx list and free mbufs for those
2524 * frames which have been transmitted.
2526 for (prog = 0; cons != prod; prog++,
2527 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2528 if (sc->alc_cdata.alc_tx_cnt <= 0)
2531 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2532 sc->alc_cdata.alc_tx_cnt--;
2533 txd = &sc->alc_cdata.alc_txdesc[cons];
2534 if (txd->tx_m != NULL) {
2535 /* Reclaim transmitted mbufs. */
2536 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2537 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2538 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
2545 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2546 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2547 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2548 sc->alc_cdata.alc_tx_cons = cons;
2550 * Unarm the watchdog timer only when there are no pending
2551 * frames in the Tx queue.
2553 if (sc->alc_cdata.alc_tx_cnt == 0)
2554 sc->alc_watchdog_timer = 0;
2558 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2561 bus_dma_segment_t segs[1];
2565 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2568 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2569 #ifndef __NO_STRICT_ALIGNMENT
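/*
 * Reserve a few bytes of headroom so that alc_fixup_rx() can later
 * shift the received frame backwards to align the IP header on
 * strict-alignment architectures.
 */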
2570 m_adj(m, sizeof(uint64_t));
2573 if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
2574 sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2578 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2580 if (rxd->rx_m != NULL) {
2581 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2582 BUS_DMASYNC_POSTREAD);
2583 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
2585 map = rxd->rx_dmamap;
2586 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2587 sc->alc_cdata.alc_rx_sparemap = map;
2588 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2589 BUS_DMASYNC_PREREAD);
2591 rxd->rx_desc->addr = htole64(segs[0].ds_addr);
2596 alc_rxintr(struct alc_softc *sc, int count)
2599 struct rx_rdesc *rrd;
2600 uint32_t nsegs, status;
2603 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2604 sc->alc_cdata.alc_rr_ring_map,
2605 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2606 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2607 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2608 rr_cons = sc->alc_cdata.alc_rr_cons;
2610 for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) {
2613 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2614 status = le32toh(rrd->status);
2615 if ((status & RRD_VALID) == 0)
2617 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2619 /* This should not happen! */
2620 device_printf(sc->alc_dev,
2621 "unexpected segment count -- resetting\n");
2625 /* Clear Rx return status. */
2627 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2628 sc->alc_cdata.alc_rx_cons += nsegs;
2629 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2634 /* Update the consumer index. */
2635 sc->alc_cdata.alc_rr_cons = rr_cons;
2636 /* Sync Rx return descriptors. */
2637 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2638 sc->alc_cdata.alc_rr_ring_map,
2639 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2641 * Sync the updated Rx descriptors so that the controller sees
2642 * the modified buffer addresses.
2644 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2645 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
2647 * Let the controller know about the availability of new Rx buffers.
2648 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors,
2649 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
2650 * only when Rx buffer pre-fetching is required. In
2651 * addition we already set ALC_RX_RD_FREE_THRESH to
2652 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However,
2653 * it still seems that pre-fetching needs more experimentation.
2656 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2657 sc->alc_cdata.alc_rx_cons);
2660 return (count > 0 ? 0 : EAGAIN);
2663 #ifndef __NO_STRICT_ALIGNMENT
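/*
 * On strict-alignment architectures the DMA buffer is word aligned,
 * so the IP header that follows the 14-byte Ethernet header ends up
 * on a 2-byte boundary. alc_fixup_rx() shifts the received data so
 * that the upper layers see a properly aligned IP header.
 */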
2664 static struct mbuf *
2665 alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2669 uint16_t *src, *dst;
2671 src = mtod(m, uint16_t *);
2674 if (m->m_next == NULL) {
2675 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2681 * Append a new mbuf to the received mbuf chain and copy the
2682 * ethernet header from the original chain. This can save a lot
2683 * of CPU cycles for jumbo frames.
2685 MGETHDR(n, M_DONTWAIT, MT_DATA);
2691 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2692 m->m_data += ETHER_HDR_LEN;
2693 m->m_len -= ETHER_HDR_LEN;
2694 n->m_len = ETHER_HDR_LEN;
2695 M_MOVE_PKTHDR(n, m);
2701 /* Receive a frame. */
2703 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2705 struct alc_rxdesc *rxd;
2707 struct mbuf *mp, *m;
2708 uint32_t rdinfo, status, vtag;
2709 int count, nsegs, rx_cons;
2712 status = le32toh(rrd->status);
2713 rdinfo = le32toh(rrd->rdinfo);
2714 rx_cons = RRD_RD_IDX(rdinfo);
2715 nsegs = RRD_RD_CNT(rdinfo);
2717 sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2718 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
2720 * We want to pass the following frames to the upper
2721 * layer regardless of the error status of the Rx return
2724 * o IP/TCP/UDP checksum is bad.
2725 * o frame length and protocol specific length
2728 * Force the network stack to compute the checksum for
2731 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2732 if ((RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC |
2737 for (count = 0; count < nsegs; count++,
2738 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2739 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2741 /* Add a new receive buffer to the ring. */
2742 if (alc_newbuf(sc, rxd) != 0) {
2744 /* Reuse Rx buffers. */
2745 if (sc->alc_cdata.alc_rxhead != NULL)
2746 m_freem(sc->alc_cdata.alc_rxhead);
2751 * Assume we've received a full-sized frame.
2752 * The actual size is fixed up when we encounter the end
2753 * of a multi-segmented frame.
2755 mp->m_len = sc->alc_buf_size;
2757 /* Chain received mbufs. */
2758 if (sc->alc_cdata.alc_rxhead == NULL) {
2759 sc->alc_cdata.alc_rxhead = mp;
2760 sc->alc_cdata.alc_rxtail = mp;
2762 mp->m_flags &= ~M_PKTHDR;
2763 sc->alc_cdata.alc_rxprev_tail =
2764 sc->alc_cdata.alc_rxtail;
2765 sc->alc_cdata.alc_rxtail->m_next = mp;
2766 sc->alc_cdata.alc_rxtail = mp;
2769 if (count == nsegs - 1) {
2770 /* Last desc. for this frame. */
2771 m = sc->alc_cdata.alc_rxhead;
2772 m->m_flags |= M_PKTHDR;
2774 * It seems that the L1C/L2C controller has no way
2775 * to tell the hardware to strip the CRC bytes.
2778 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2780 /* Set last mbuf size. */
2781 mp->m_len = sc->alc_cdata.alc_rxlen -
2782 (nsegs - 1) * sc->alc_buf_size;
2783 /* Remove the CRC bytes in chained mbufs. */
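/*
 * If the last segment holds nothing but (part of) the CRC, drop it
 * from the chain and trim the remaining CRC bytes from the previous
 * mbuf; otherwise just shorten the last segment.
 */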
2784 if (mp->m_len <= ETHER_CRC_LEN) {
2785 sc->alc_cdata.alc_rxtail =
2786 sc->alc_cdata.alc_rxprev_tail;
2787 sc->alc_cdata.alc_rxtail->m_len -=
2788 (ETHER_CRC_LEN - mp->m_len);
2789 sc->alc_cdata.alc_rxtail->m_next = NULL;
2792 mp->m_len -= ETHER_CRC_LEN;
2795 m->m_len = m->m_pkthdr.len;
2796 m->m_pkthdr.rcvif = ifp;
2798 * Due to hardware bugs, Rx checksum offloading
2799 * was intentionally disabled.
2801 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2802 (status & RRD_VLAN_TAG) != 0) {
2803 vtag = RRD_VLAN(le32toh(rrd->vtag));
2804 m->m_pkthdr.ether_vtag = ntohs(vtag);
2805 m->m_flags |= M_VLANTAG;
2807 #ifndef __NO_STRICT_ALIGNMENT
2808 m = alc_fixup_rx(ifp, m);
2813 (*ifp->if_input)(ifp, m);
2817 /* Reset mbuf chains. */
2818 ALC_RXCHAIN_RESET(sc);
2824 struct alc_softc *sc;
2825 struct mii_data *mii;
2827 sc = (struct alc_softc *)arg;
2829 ALC_LOCK_ASSERT(sc);
2831 mii = device_get_softc(sc->alc_miibus);
2833 alc_stats_update(sc);
2835 * alc(4) does not rely on Tx completion interrupts to reclaim
2836 * transferred buffers. Instead, Tx completion interrupts are
2837 * used as a hint for scheduling the Tx task, so it's necessary to
2838 * release transmitted buffers by kicking the Tx completion
2839 * handler here. This limits the maximum reclamation delay to one hz tick.
2843 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
2847 alc_reset(struct alc_softc *sc)
2852 CSR_WRITE_4(sc, ALC_MASTER_CFG, MASTER_RESET);
2853 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2855 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2859 device_printf(sc->alc_dev, "master reset timeout!\n");
2861 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2862 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2868 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
2874 struct alc_softc *sc;
2876 sc = (struct alc_softc *)xsc;
2878 alc_init_locked(sc);
2883 alc_init_locked(struct alc_softc *sc)
2886 struct mii_data *mii;
2887 uint8_t eaddr[ETHER_ADDR_LEN];
2889 uint32_t reg, rxf_hi, rxf_lo;
2891 ALC_LOCK_ASSERT(sc);
2894 mii = device_get_softc(sc->alc_miibus);
2896 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2899 * Cancel any pending I/O.
2903 * Reset the chip to a known state.
2907 /* Initialize Rx descriptors. */
2908 if (alc_init_rx_ring(sc) != 0) {
2909 device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
2913 alc_init_rr_ring(sc);
2914 alc_init_tx_ring(sc);
2918 /* Reprogram the station address. */
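/*
 * PAR0 takes the low four bytes of the MAC address (eaddr[2..5]) and
 * PAR1 the remaining two (eaddr[0..1]), so the address is laid out
 * across the two registers exactly as written below.
 */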
2919 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2920 CSR_WRITE_4(sc, ALC_PAR0,
2921 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2922 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2924 * Clear the WOL status and disable all WOL features, as WOL
2925 * would interfere with Rx operation under normal conditions.
2927 CSR_READ_4(sc, ALC_WOL_CFG);
2928 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2929 /* Set Tx descriptor base addresses. */
2930 paddr = sc->alc_rdata.alc_tx_ring_paddr;
2931 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2932 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2933 /* We don't use high priority ring. */
2934 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2935 /* Set Tx descriptor counter. */
2936 CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2937 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2938 /* Set Rx descriptor base addresses. */
2939 paddr = sc->alc_rdata.alc_rx_ring_paddr;
2940 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2941 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2942 /* We use one Rx ring. */
2943 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2944 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2945 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2946 /* Set Rx descriptor counter. */
2947 CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2948 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2951 * Let the hardware split jumbo frames into alc_buf_size-sized chunks
2952 * if they do not fit in a single buffer. The Rx return descriptor
2953 * holds a counter that indicates how many fragments were made by
2954 * the hardware. The buffer size should be a multiple of 8 bytes.
2955 * Since the hardware limits the maximum buffer size, always
2956 * use the maximum value.
2957 * For strict-alignment architectures, reduce the buffer
2958 * size by 8 bytes to make room for the alignment fixup.
2960 #ifndef __NO_STRICT_ALIGNMENT
2961 sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
2963 sc->alc_buf_size = RX_BUF_SIZE_MAX;
2965 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2967 paddr = sc->alc_rdata.alc_rr_ring_paddr;
2968 /* Set Rx return descriptor base addresses. */
2969 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2970 /* We use one Rx return ring. */
2971 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2972 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2973 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2974 /* Set Rx return descriptor counter. */
2975 CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2976 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2977 paddr = sc->alc_rdata.alc_cmb_paddr;
2978 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2979 paddr = sc->alc_rdata.alc_smb_paddr;
2980 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2981 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2983 /* Tell hardware that we're ready to load DMA blocks. */
2984 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2986 /* Configure interrupt moderation timer. */
2987 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2988 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2989 CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2990 reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2991 reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
2993 * We don't want automatic interrupt clearing, as the interrupt
2994 * task queue handler needs to see the interrupt status.
2996 reg &= ~MASTER_INTR_RD_CLR;
2997 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2998 if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2999 reg |= MASTER_IM_RX_TIMER_ENB;
3000 if (ALC_USECS(sc->alc_int_tx_mod) != 0)
3001 reg |= MASTER_IM_TX_TIMER_ENB;
3002 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
3004 * Disable interrupt re-trigger timer. We don't want automatic
3005 * re-triggering of un-ACKed interrupts.
3007 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
3008 /* Configure CMB. */
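/*
 * Request a coalesced mailbox (CMB) update after every 4 transmit
 * descriptors; when the CMB is usable, also bound the update latency
 * with a 5ms timer, otherwise disable the timer entirely.
 */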
3009 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
3010 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3011 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
3013 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
3015 * The hardware can be configured to issue an SMB interrupt at a
3016 * programmed interval. Since the driver has a callout that is
3017 * invoked every hz tick, we use that instead of
3018 * relying on the periodic SMB interrupt.
3020 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3021 /* Clear MAC statistics. */
3022 alc_stats_clear(sc);
3025 * Always use the maximum frame size the controller can support.
3026 * Otherwise, received frames with a larger frame length
3027 * than the alc(4) MTU would be silently dropped in hardware. This
3028 * would make path-MTU discovery hard, as the sender wouldn't get
3029 * any responses from the receiver. alc(4) supports
3030 * multi-fragmented frames on the Rx path, so it has no issue
3031 * assembling fragmented frames. Using the maximum frame size also
3032 * removes the need to reinitialize the hardware when the interface
3033 * MTU configuration is changed.
3035 * Be conservative in what you do, be liberal in what you
3036 * accept from others - RFC 793.
3038 CSR_WRITE_4(sc, ALC_FRAME_SIZE, ALC_JUMBO_FRAMELEN);
3040 /* Disable header split(?) */
3041 CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3043 /* Configure IPG/IFG parameters. */
3044 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3045 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3046 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3047 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3048 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3049 /* Set parameters for half-duplex media. */
3050 CSR_WRITE_4(sc, ALC_HDPX_CFG,
3051 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3052 HDPX_CFG_LCOL_MASK) |
3053 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3054 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3055 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3056 HDPX_CFG_ABEBT_MASK) |
3057 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3058 HDPX_CFG_JAMIPG_MASK));
3060 * Set the TSO/checksum offload threshold. For frames larger
3061 * than this threshold, the hardware does not perform
3062 * TSO/checksum offloading.
3064 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3065 (ALC_JUMBO_FRAMELEN >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3066 TSO_OFFLOAD_THRESH_MASK);
3067 /* Configure TxQ. */
3068 reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3069 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3070 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3071 TXQ_CFG_TD_BURST_MASK;
3072 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3074 /* Configure Rx free descriptor pre-fetching. */
3075 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3076 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
3077 RX_RD_FREE_THRESH_HI_MASK) |
3078 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
3079 RX_RD_FREE_THRESH_LO_MASK));
3082 * Configure flow control parameters.
3083 * XON : 80% of Rx FIFO
3084 * XOFF : 30% of Rx FIFO
3086 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3087 rxf_hi = (reg * 8) / 10;
3088 rxf_lo = (reg * 3) / 10;
3089 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3090 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3091 RX_FIFO_PAUSE_THRESH_LO_MASK) |
3092 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3093 RX_FIFO_PAUSE_THRESH_HI_MASK));
3095 /* Disable RSS until I understand L1C/L2C's RSS logic. */
3096 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3097 CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3099 /* Configure RxQ. */
3100 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3101 RXQ_CFG_RD_BURST_MASK;
3102 reg |= RXQ_CFG_RSS_MODE_DIS;
3103 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
3104 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
3105 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3107 /* Configure Rx DMAW request threshold. */
3108 CSR_WRITE_4(sc, ALC_RD_DMA_CFG,
3109 ((RD_DMA_CFG_THRESH_DEFAULT << RD_DMA_CFG_THRESH_SHIFT) &
3110 RD_DMA_CFG_THRESH_MASK) |
3111 ((ALC_RD_DMA_CFG_USECS(0) << RD_DMA_CFG_TIMER_SHIFT) &
3112 RD_DMA_CFG_TIMER_MASK));
3113 /* Configure DMA parameters. */
3114 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3116 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3117 reg |= DMA_CFG_CMB_ENB;
3118 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3119 reg |= DMA_CFG_SMB_ENB;
3121 reg |= DMA_CFG_SMB_DIS;
3122 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3123 DMA_CFG_RD_BURST_SHIFT;
3124 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3125 DMA_CFG_WR_BURST_SHIFT;
3126 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3127 DMA_CFG_RD_DELAY_CNT_MASK;
3128 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3129 DMA_CFG_WR_DELAY_CNT_MASK;
3130 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3133 * Configure Tx/Rx MACs.
3134 * - Auto-padding for short frames.
3135 * - Enable CRC generation.
3136 * Actual reconfiguration of the MAC for the resolved speed/duplex
3137 * follows once link establishment is detected.
3138 * AR8131/AR8132 always performs checksum computation regardless
3139 * of the MAC_CFG_RXCSUM_ENB bit. The controller is also known to
3140 * have a bug in the protocol field of the Rx return structure, so
3141 * these controllers can't handle fragmented frames. Disable
3142 * Rx checksum offloading until there is a newer controller
3143 * that has a sane implementation.
3145 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3146 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3147 MAC_CFG_PREAMBLE_MASK);
3148 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3149 reg |= MAC_CFG_SPEED_10_100;
3151 reg |= MAC_CFG_SPEED_1000;
3152 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3154 /* Set up the receive filter. */
3158 /* Acknowledge all pending interrupts and clear them. */
3159 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3160 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3161 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3163 sc->alc_flags &= ~ALC_FLAG_LINK;
3164 /* Switch to the current media. */
3167 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3169 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3170 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3174 alc_stop(struct alc_softc *sc)
3177 struct alc_txdesc *txd;
3178 struct alc_rxdesc *rxd;
3182 ALC_LOCK_ASSERT(sc);
3184 * Mark the interface down and cancel the watchdog timer.
3187 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3188 sc->alc_flags &= ~ALC_FLAG_LINK;
3189 callout_stop(&sc->alc_tick_ch);
3190 sc->alc_watchdog_timer = 0;
3191 alc_stats_update(sc);
3192 /* Disable interrupts. */
3193 CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3194 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3197 reg = CSR_READ_4(sc, ALC_DMA_CFG);
3198 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3199 reg |= DMA_CFG_SMB_DIS;
3200 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3202 /* Stop Rx/Tx MACs. */
3204 /* Disable interrupts which might be touched in taskq handler. */
3205 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3207 /* Reclaim Rx buffers that have been processed. */
3208 if (sc->alc_cdata.alc_rxhead != NULL)
3209 m_freem(sc->alc_cdata.alc_rxhead);
3210 ALC_RXCHAIN_RESET(sc);
3212 * Free Tx/Rx mbufs still in the queues.
3214 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3215 rxd = &sc->alc_cdata.alc_rxdesc[i];
3216 if (rxd->rx_m != NULL) {
3217 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
3218 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3219 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
3225 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3226 txd = &sc->alc_cdata.alc_txdesc[i];
3227 if (txd->tx_m != NULL) {
3228 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3229 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3230 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
3239 alc_stop_mac(struct alc_softc *sc)
3244 ALC_LOCK_ASSERT(sc);
3246 /* Disable Rx/Tx MAC. */
3247 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3248 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
3249 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3250 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3252 for (i = ALC_TIMEOUT; i > 0; i--) {
3253 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3259 device_printf(sc->alc_dev,
3260 "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
3264 alc_start_queue(struct alc_softc *sc)
3269 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3270 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3275 ALC_LOCK_ASSERT(sc);
3278 cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3279 cfg &= ~RXQ_CFG_ENB;
3281 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3283 cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3285 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3289 alc_stop_queue(struct alc_softc *sc)
3294 ALC_LOCK_ASSERT(sc);
3297 reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3298 if ((reg & RXQ_CFG_ENB) != 0) {
3299 reg &= ~RXQ_CFG_ENB;
3300 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3303 reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3304 if ((reg & TXQ_CFG_ENB) != 0) {
3305 reg &= ~TXQ_CFG_ENB;
3306 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3308 for (i = ALC_TIMEOUT; i > 0; i--) {
3309 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3310 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3315 device_printf(sc->alc_dev,
3316 "could not disable RxQ/TxQ (0x%08x)!\n", reg);
3320 alc_init_tx_ring(struct alc_softc *sc)
3322 struct alc_ring_data *rd;
3323 struct alc_txdesc *txd;
3326 ALC_LOCK_ASSERT(sc);
3328 sc->alc_cdata.alc_tx_prod = 0;
3329 sc->alc_cdata.alc_tx_cons = 0;
3330 sc->alc_cdata.alc_tx_cnt = 0;
3332 rd = &sc->alc_rdata;
3333 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3334 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3335 txd = &sc->alc_cdata.alc_txdesc[i];
3339 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
3340 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
3344 alc_init_rx_ring(struct alc_softc *sc)
3346 struct alc_ring_data *rd;
3347 struct alc_rxdesc *rxd;
3350 ALC_LOCK_ASSERT(sc);
3352 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3353 sc->alc_morework = 0;
3354 rd = &sc->alc_rdata;
3355 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3356 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3357 rxd = &sc->alc_cdata.alc_rxdesc[i];
3359 rxd->rx_desc = &rd->alc_rx_ring[i];
3360 if (alc_newbuf(sc, rxd) != 0)
3365 * Since the controller does not update Rx descriptors, the driver
3366 * does not have to read the Rx descriptors back, so BUS_DMASYNC_PREWRITE
3367 * is enough to ensure coherence.
3369 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
3370 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
3371 /* Let controller know availability of new Rx buffers. */
3372 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3378 alc_init_rr_ring(struct alc_softc *sc)
3380 struct alc_ring_data *rd;
3382 ALC_LOCK_ASSERT(sc);
3384 sc->alc_cdata.alc_rr_cons = 0;
3385 ALC_RXCHAIN_RESET(sc);
3387 rd = &sc->alc_rdata;
3388 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3389 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
3390 sc->alc_cdata.alc_rr_ring_map,
3391 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3395 alc_init_cmb(struct alc_softc *sc)
3397 struct alc_ring_data *rd;
3399 ALC_LOCK_ASSERT(sc);
3401 rd = &sc->alc_rdata;
3402 bzero(rd->alc_cmb, ALC_CMB_SZ);
3403 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
3404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3408 alc_init_smb(struct alc_softc *sc)
3410 struct alc_ring_data *rd;
3412 ALC_LOCK_ASSERT(sc);
3414 rd = &sc->alc_rdata;
3415 bzero(rd->alc_smb, ALC_SMB_SZ);
3416 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
3417 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3421 alc_rxvlan(struct alc_softc *sc)
3426 ALC_LOCK_ASSERT(sc);
3429 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3430 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3431 reg |= MAC_CFG_VLAN_TAG_STRIP;
3433 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3434 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3438 alc_rxfilter(struct alc_softc *sc)
3441 struct ifmultiaddr *ifma;
3446 ALC_LOCK_ASSERT(sc);
3450 bzero(mchash, sizeof(mchash));
3451 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3452 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3453 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3454 rxcfg |= MAC_CFG_BCAST;
3455 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3456 if ((ifp->if_flags & IFF_PROMISC) != 0)
3457 rxcfg |= MAC_CFG_PROMISC;
3458 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3459 rxcfg |= MAC_CFG_ALLMULTI;
3460 mchash[0] = 0xFFFFFFFF;
3461 mchash[1] = 0xFFFFFFFF;
3465 if_maddr_rlock(ifp);
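/*
 * The hash uses the top six bits of the little-endian CRC32 of each
 * multicast address: bit 31 selects MAR0 or MAR1 and bits 30-26
 * select the bit within that register.
 */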
3466 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
3467 if (ifma->ifma_addr->sa_family != AF_LINK)
3469 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
3470 ifma->ifma_addr), ETHER_ADDR_LEN);
3471 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3473 if_maddr_runlock(ifp);
3476 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3477 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3478 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
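/*
 * Generic bounds-checking sysctl handler: the new value is accepted
 * only if it lies within [low, high]; out-of-range values are rejected.
 */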
3482 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3488 value = *(int *)arg1;
3489 error = sysctl_handle_int(oidp, &value, 0, req);
3490 if (error || req->newptr == NULL)
3492 if (value < low || value > high)
3494 *(int *)arg1 = value;
3500 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
3502 return (sysctl_int_range(oidp, arg1, arg2, req,
3503 ALC_PROC_MIN, ALC_PROC_MAX));
3507 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
3510 return (sysctl_int_range(oidp, arg1, arg2, req,
3511 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));