2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 /* Driver for Atheros AR813x/AR815x PCIe Ethernet. */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
39 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_llc.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/ip.h>
63 #include <netinet/tcp.h>
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
68 #include <dev/pci/pcireg.h>
69 #include <dev/pci/pcivar.h>
71 #include <machine/atomic.h>
72 #include <machine/bus.h>
73 #include <machine/in_cksum.h>
75 #include <dev/alc/if_alcreg.h>
76 #include <dev/alc/if_alcvar.h>
78 /* "device miibus" required. See GENERIC if you get errors here. */
79 #include "miibus_if.h"
80 #undef ALC_USE_CUSTOM_CSUM
#ifdef ALC_USE_CUSTOM_CSUM
#define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
#else
#define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif
88 MODULE_DEPEND(alc, pci, 1, 1, 1);
89 MODULE_DEPEND(alc, ether, 1, 1, 1);
90 MODULE_DEPEND(alc, miibus, 1, 1, 1);
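/* Loader tunables: allow MSI and MSI-X interrupt allocation to be disabled. */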
93 static int msi_disable = 0;
94 static int msix_disable = 0;
95 TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
96 TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
/*
 * Devices supported by this driver.
 */
101 static struct alc_ident alc_ident_table[] = {
102 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
103 "Atheros AR8131 PCIe Gigabit Ethernet" },
104 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
105 "Atheros AR8132 PCIe Fast Ethernet" },
106 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
107 "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
108 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
109 "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
110 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
111 "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
112 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
113 "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
117 static void alc_aspm(struct alc_softc *, int);
118 static int alc_attach(device_t);
119 static int alc_check_boundary(struct alc_softc *);
120 static int alc_detach(device_t);
121 static void alc_disable_l0s_l1(struct alc_softc *);
122 static int alc_dma_alloc(struct alc_softc *);
123 static void alc_dma_free(struct alc_softc *);
124 static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
125 static int alc_encap(struct alc_softc *, struct mbuf **);
126 static struct alc_ident *
127 alc_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		alc_fixup_rx(struct ifnet *, struct mbuf *);
#endif
132 static void alc_get_macaddr(struct alc_softc *);
133 static void alc_init(void *);
134 static void alc_init_cmb(struct alc_softc *);
135 static void alc_init_locked(struct alc_softc *);
136 static void alc_init_rr_ring(struct alc_softc *);
137 static int alc_init_rx_ring(struct alc_softc *);
138 static void alc_init_smb(struct alc_softc *);
139 static void alc_init_tx_ring(struct alc_softc *);
140 static void alc_int_task(void *, int);
141 static int alc_intr(void *);
142 static int alc_ioctl(struct ifnet *, u_long, caddr_t);
143 static void alc_mac_config(struct alc_softc *);
144 static int alc_miibus_readreg(device_t, int, int);
145 static void alc_miibus_statchg(device_t);
146 static int alc_miibus_writereg(device_t, int, int, int);
147 static int alc_mediachange(struct ifnet *);
148 static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
149 static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
150 static void alc_phy_down(struct alc_softc *);
151 static void alc_phy_reset(struct alc_softc *);
152 static int alc_probe(device_t);
153 static void alc_reset(struct alc_softc *);
154 static int alc_resume(device_t);
155 static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
156 static int alc_rxintr(struct alc_softc *, int);
157 static void alc_rxfilter(struct alc_softc *);
158 static void alc_rxvlan(struct alc_softc *);
159 static void alc_setlinkspeed(struct alc_softc *);
160 static void alc_setwol(struct alc_softc *);
161 static int alc_shutdown(device_t);
162 static void alc_start(struct ifnet *);
163 static void alc_start_queue(struct alc_softc *);
164 static void alc_stats_clear(struct alc_softc *);
165 static void alc_stats_update(struct alc_softc *);
166 static void alc_stop(struct alc_softc *);
167 static void alc_stop_mac(struct alc_softc *);
168 static void alc_stop_queue(struct alc_softc *);
169 static int alc_suspend(device_t);
170 static void alc_sysctl_node(struct alc_softc *);
171 static void alc_tick(void *);
172 static void alc_tx_task(void *, int);
173 static void alc_txeof(struct alc_softc *);
174 static void alc_watchdog(struct alc_softc *);
175 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
176 static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
177 static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
179 static device_method_t alc_methods[] = {
180 /* Device interface. */
181 DEVMETHOD(device_probe, alc_probe),
182 DEVMETHOD(device_attach, alc_attach),
183 DEVMETHOD(device_detach, alc_detach),
184 DEVMETHOD(device_shutdown, alc_shutdown),
185 DEVMETHOD(device_suspend, alc_suspend),
186 DEVMETHOD(device_resume, alc_resume),
189 DEVMETHOD(miibus_readreg, alc_miibus_readreg),
190 DEVMETHOD(miibus_writereg, alc_miibus_writereg),
191 DEVMETHOD(miibus_statchg, alc_miibus_statchg),
static driver_t alc_driver = {
	"alc",
	alc_methods,
	sizeof(struct alc_softc)
};

static devclass_t alc_devclass;
204 DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
205 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);
207 static struct resource_spec alc_res_spec_mem[] = {
208 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
212 static struct resource_spec alc_irq_spec_legacy[] = {
213 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
217 static struct resource_spec alc_irq_spec_msi[] = {
218 { SYS_RES_IRQ, 1, RF_ACTIVE },
222 static struct resource_spec alc_irq_spec_msix[] = {
223 { SYS_RES_IRQ, 1, RF_ACTIVE },
227 static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
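/* miibus interface: read a PHY register through the MAC's MDIO mailbox. */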
230 alc_miibus_readreg(device_t dev, int phy, int reg)
232 struct alc_softc *sc;
236 sc = device_get_softc(dev);
	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though the AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY cannot
	 * establish a 1000baseT link.
	 */
244 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
248 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
249 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
250 for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
252 v = CSR_READ_4(sc, ALC_MDIO);
253 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
258 device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
262 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
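/* miibus interface: write a PHY register through the MAC's MDIO mailbox. */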
266 alc_miibus_writereg(device_t dev, int phy, int reg, int val)
268 struct alc_softc *sc;
272 sc = device_get_softc(dev);
274 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
275 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
276 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
277 for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
279 v = CSR_READ_4(sc, ALC_MDIO);
280 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
285 device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
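/* miibus status-change callback: track link state and reprogram the MAC for the resolved speed/duplex. */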
291 alc_miibus_statchg(device_t dev)
293 struct alc_softc *sc;
294 struct mii_data *mii;
298 sc = device_get_softc(dev);
300 mii = device_get_softc(sc->alc_miibus);
302 if (mii == NULL || ifp == NULL ||
303 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
306 sc->alc_flags &= ~ALC_FLAG_LINK;
307 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
308 (IFM_ACTIVE | IFM_AVALID)) {
309 switch (IFM_SUBTYPE(mii->mii_media_active)) {
312 sc->alc_flags |= ALC_FLAG_LINK;
315 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
316 sc->alc_flags |= ALC_FLAG_LINK;
323 /* Stop Rx/Tx MACs. */
326 /* Program MACs with resolved speed/duplex/flow-control. */
327 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
330 /* Re-enable Tx/Rx MACs. */
331 reg = CSR_READ_4(sc, ALC_MAC_CFG);
332 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
333 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
334 alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
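/* ifmedia status callback: report the current link state and active media. */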
339 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
341 struct alc_softc *sc;
342 struct mii_data *mii;
346 if ((ifp->if_flags & IFF_UP) == 0) {
350 mii = device_get_softc(sc->alc_miibus);
354 ifmr->ifm_status = mii->mii_media_status;
355 ifmr->ifm_active = mii->mii_media_active;
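/* ifmedia change callback: reset the PHYs if needed and restart negotiation. */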
359 alc_mediachange(struct ifnet *ifp)
361 struct alc_softc *sc;
362 struct mii_data *mii;
363 struct mii_softc *miisc;
368 mii = device_get_softc(sc->alc_miibus);
369 if (mii->mii_instance != 0) {
370 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
371 mii_phy_reset(miisc);
373 error = mii_mediachg(mii);
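/* Look up the PCI vendor/device ID in the supported-device table. */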
379 static struct alc_ident *
380 alc_find_ident(device_t dev)
382 struct alc_ident *ident;
383 uint16_t vendor, devid;
385 vendor = pci_get_vendor(dev);
386 devid = pci_get_device(dev);
387 for (ident = alc_ident_table; ident->name != NULL; ident++) {
388 if (vendor == ident->vendorid && devid == ident->deviceid)
396 alc_probe(device_t dev)
398 struct alc_ident *ident;
400 ident = alc_find_ident(dev);
402 device_set_desc(dev, ident->name);
403 return (BUS_PROBE_DEFAULT);
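/*
 * Read the station address from the PAR0/PAR1 registers, optionally letting
 * TWSI reload it from EEPROM/OTP first.
 */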
410 alc_get_macaddr(struct alc_softc *sc)
417 opt = CSR_READ_4(sc, ALC_OPT_CFG);
418 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
419 (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found; let TWSI reload the EEPROM configuration.
		 * This sets the Ethernet address of the controller.
		 */
425 switch (sc->alc_ident->deviceid) {
426 case DEVICEID_ATHEROS_AR8131:
427 case DEVICEID_ATHEROS_AR8132:
428 if ((opt & OPT_CFG_CLK_ENB) == 0) {
429 opt |= OPT_CFG_CLK_ENB;
430 CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
431 CSR_READ_4(sc, ALC_OPT_CFG);
435 case DEVICEID_ATHEROS_AR8151:
436 case DEVICEID_ATHEROS_AR8151_V2:
437 case DEVICEID_ATHEROS_AR8152_B:
438 case DEVICEID_ATHEROS_AR8152_B2:
439 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
440 ALC_MII_DBG_ADDR, 0x00);
441 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
443 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
444 ALC_MII_DBG_DATA, val & 0xFF7F);
445 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
446 ALC_MII_DBG_ADDR, 0x3B);
447 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
449 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
450 ALC_MII_DBG_DATA, val | 0x0008);
455 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
456 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
457 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
458 CSR_READ_4(sc, ALC_WOL_CFG);
460 CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
461 TWSI_CFG_SW_LD_START);
462 for (i = 100; i > 0; i--) {
464 if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
465 TWSI_CFG_SW_LD_START) == 0)
469 device_printf(sc->alc_dev,
470 "reloading EEPROM timeout!\n");
473 device_printf(sc->alc_dev, "EEPROM not found!\n");
476 switch (sc->alc_ident->deviceid) {
477 case DEVICEID_ATHEROS_AR8131:
478 case DEVICEID_ATHEROS_AR8132:
479 if ((opt & OPT_CFG_CLK_ENB) != 0) {
480 opt &= ~OPT_CFG_CLK_ENB;
481 CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
482 CSR_READ_4(sc, ALC_OPT_CFG);
486 case DEVICEID_ATHEROS_AR8151:
487 case DEVICEID_ATHEROS_AR8151_V2:
488 case DEVICEID_ATHEROS_AR8152_B:
489 case DEVICEID_ATHEROS_AR8152_B2:
490 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
491 ALC_MII_DBG_ADDR, 0x00);
492 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
494 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
495 ALC_MII_DBG_DATA, val | 0x0080);
496 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
497 ALC_MII_DBG_ADDR, 0x3B);
498 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
500 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
501 ALC_MII_DBG_DATA, val & 0xFFF7);
507 ea[0] = CSR_READ_4(sc, ALC_PAR0);
508 ea[1] = CSR_READ_4(sc, ALC_PAR1);
509 sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
510 sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
511 sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
512 sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
513 sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
514 sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
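/* Disable ASPM L0s/L1 entry using the vendor-recommended PM_CFG settings. */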
518 alc_disable_l0s_l1(struct alc_softc *sc)
522 /* Another magic from vendor. */
523 pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
524 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
525 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
526 PM_CFG_SERDES_PD_EX_L1);
527 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
528 PM_CFG_SERDES_L1_ENB;
529 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
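/* Reset the PHY and apply the vendor's DSP fix-ups. */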
533 alc_phy_reset(struct alc_softc *sc)
537 /* Reset magic from Linux. */
538 CSR_WRITE_2(sc, ALC_GPHY_CFG,
539 GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
540 CSR_READ_2(sc, ALC_GPHY_CFG);
543 CSR_WRITE_2(sc, ALC_GPHY_CFG,
544 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
545 GPHY_CFG_SEL_ANA_RESET);
546 CSR_READ_2(sc, ALC_GPHY_CFG);
549 /* DSP fixup, Vendor magic. */
550 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
551 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
552 ALC_MII_DBG_ADDR, 0x000A);
553 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
555 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
556 ALC_MII_DBG_DATA, data & 0xDFFF);
558 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
559 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
560 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
561 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
562 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
563 ALC_MII_DBG_ADDR, 0x003B);
564 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
566 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
567 ALC_MII_DBG_DATA, data & 0xFFF7);
570 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
571 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
572 ALC_MII_DBG_ADDR, 0x0029);
573 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
574 ALC_MII_DBG_DATA, 0x929D);
576 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
577 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
578 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
579 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
580 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
581 ALC_MII_DBG_ADDR, 0x0029);
582 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
583 ALC_MII_DBG_DATA, 0xB6DD);
586 /* Load DSP codes, vendor magic. */
587 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
588 ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
589 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
590 ALC_MII_DBG_ADDR, MII_ANA_CFG18);
591 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
592 ALC_MII_DBG_DATA, data);
594 data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
595 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
597 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
598 ALC_MII_DBG_ADDR, MII_ANA_CFG5);
599 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
600 ALC_MII_DBG_DATA, data);
602 data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
603 ANA_LONG_CABLE_TH_100_MASK) |
604 ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
606 ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
607 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
608 ALC_MII_DBG_ADDR, MII_ANA_CFG54);
609 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
610 ALC_MII_DBG_DATA, data);
612 data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
613 ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
614 ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
615 ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
616 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
617 ALC_MII_DBG_ADDR, MII_ANA_CFG4);
618 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
619 ALC_MII_DBG_DATA, data);
	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
622 ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
624 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
625 ALC_MII_DBG_ADDR, MII_ANA_CFG0);
626 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
627 ALC_MII_DBG_DATA, data);
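/* Put the PHY into a low-power state appropriate for the chip. */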
632 alc_phy_down(struct alc_softc *sc)
635 switch (sc->alc_ident->deviceid) {
636 case DEVICEID_ATHEROS_AR8151:
637 case DEVICEID_ATHEROS_AR8151_V2:
		/*
		 * GPHY power down caused problems on AR8151 v2.0: after the
		 * driver was reloaded, accesses to PHY/MAC registers hung
		 * the system and only a cold boot recovered it. It is not
		 * known whether AR8151 v1.0 needs the same workaround, as
		 * no such controller was available for testing.
		 * The remaining option is to isolate the PHY and initiate a
		 * PHY power down, which still saves power while the driver
		 * is unloaded.
		 */
649 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
650 MII_BMCR, BMCR_ISO | BMCR_PDOWN);
653 /* Force PHY down. */
654 CSR_WRITE_2(sc, ALC_GPHY_CFG,
655 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
656 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
664 alc_aspm(struct alc_softc *sc, int media)
671 pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
672 if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
673 (ALC_FLAG_APS | ALC_FLAG_PCIE))
674 linkcfg = CSR_READ_2(sc, sc->alc_expcap +
675 PCIR_EXPRESS_LINK_CTL);
678 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
679 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
680 pmcfg |= PM_CFG_MAC_ASPM_CHK;
681 pmcfg |= PM_CFG_SERDES_ENB | PM_CFG_RBER_ENB;
682 pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
684 if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync, except on AR8152 B v1.0. */
687 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
688 sc->alc_rev == ATHEROS_AR8152_B_V10)
690 CSR_WRITE_2(sc, sc->alc_expcap + PCIR_EXPRESS_LINK_CTL,
692 pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
694 pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
695 PM_CFG_L1_ENTRY_TIMER_SHIFT);
696 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
697 pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
698 PM_CFG_PM_REQ_TIMER_SHIFT);
699 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
702 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
703 if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
704 pmcfg |= PM_CFG_ASPM_L0S_ENB;
705 if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
706 pmcfg |= PM_CFG_ASPM_L1_ENB;
707 if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
708 if (sc->alc_ident->deviceid ==
709 DEVICEID_ATHEROS_AR8152_B)
710 pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
711 pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
712 PM_CFG_SERDES_PLL_L1_ENB |
713 PM_CFG_SERDES_BUDS_RX_L1_ENB);
714 pmcfg |= PM_CFG_CLK_SWH_L1;
715 if (media == IFM_100_TX || media == IFM_1000_T) {
716 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
717 switch (sc->alc_ident->deviceid) {
718 case DEVICEID_ATHEROS_AR8152_B:
720 PM_CFG_L1_ENTRY_TIMER_SHIFT);
722 case DEVICEID_ATHEROS_AR8152_B2:
723 case DEVICEID_ATHEROS_AR8151_V2:
725 PM_CFG_L1_ENTRY_TIMER_SHIFT);
729 PM_CFG_L1_ENTRY_TIMER_SHIFT);
734 pmcfg |= PM_CFG_SERDES_L1_ENB |
735 PM_CFG_SERDES_PLL_L1_ENB |
736 PM_CFG_SERDES_BUDS_RX_L1_ENB;
737 pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
738 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
741 pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
742 PM_CFG_SERDES_PLL_L1_ENB);
743 pmcfg |= PM_CFG_CLK_SWH_L1;
744 if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
745 pmcfg |= PM_CFG_ASPM_L1_ENB;
747 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
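/*
 * Attach: map registers, identify the chip, set up DMA, interrupts, the MII
 * bus, and the network interface.
 */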
751 alc_attach(device_t dev)
753 struct alc_softc *sc;
755 char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
757 int base, error, i, msic, msixc, state;
758 uint32_t cap, ctl, val;
761 sc = device_get_softc(dev);
764 mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
766 callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
767 TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
768 sc->alc_ident = alc_find_ident(dev);
770 /* Map the device. */
771 pci_enable_busmaster(dev);
772 sc->alc_res_spec = alc_res_spec_mem;
773 sc->alc_irq_spec = alc_irq_spec_legacy;
774 error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
776 device_printf(dev, "cannot allocate memory resources.\n");
780 /* Set PHY address. */
781 sc->alc_phyaddr = ALC_PHY_ADDR;
783 /* Initialize DMA parameters. */
784 sc->alc_dma_rd_burst = 0;
785 sc->alc_dma_wr_burst = 0;
786 sc->alc_rcb = DMA_CFG_RCB_64;
787 if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
788 sc->alc_flags |= ALC_FLAG_PCIE;
789 sc->alc_expcap = base;
790 burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
791 sc->alc_dma_rd_burst =
792 (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
793 sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
795 device_printf(dev, "Read request size : %u bytes.\n",
796 alc_dma_burst[sc->alc_dma_rd_burst]);
797 device_printf(dev, "TLP payload size : %u bytes.\n",
798 alc_dma_burst[sc->alc_dma_wr_burst]);
800 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
801 sc->alc_dma_rd_burst = 3;
802 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
803 sc->alc_dma_wr_burst = 3;
804 /* Clear data link and flow-control protocol error. */
805 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
806 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
807 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
808 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
809 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
810 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
811 CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
812 PCIE_PHYMISC_FORCE_RCV_DET);
813 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
814 sc->alc_rev == ATHEROS_AR8152_B_V10) {
815 val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
816 val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
817 PCIE_PHYMISC2_SERDES_TH_MASK);
818 val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
819 val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
820 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
822 /* Disable ASPM L0S and L1. */
823 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
824 if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
825 ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
826 if ((ctl & 0x08) != 0)
827 sc->alc_rcb = DMA_CFG_RCB_128;
829 device_printf(dev, "RCB %u bytes\n",
830 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
833 sc->alc_flags |= ALC_FLAG_L0S;
835 sc->alc_flags |= ALC_FLAG_L1S;
837 device_printf(sc->alc_dev, "ASPM %s %s\n",
839 state == 0 ? "disabled" : "enabled");
840 alc_disable_l0s_l1(sc);
843 device_printf(sc->alc_dev,
844 "no ASPM support\n");
851 /* Reset the ethernet controller. */
	/*
	 * One oddity: the AR8132 uses the same PHY hardware (the F1
	 * gigabit PHY) as the AR8131, so atphy(4) reports that the PHY
	 * supports 1000Mbps even though that is not true. The PHY used
	 * in the AR8132 cannot establish a gigabit link even though it
	 * carries the same PHY model/revision number as the AR8131.
	 */
861 switch (sc->alc_ident->deviceid) {
862 case DEVICEID_ATHEROS_AR8152_B:
863 case DEVICEID_ATHEROS_AR8152_B2:
864 sc->alc_flags |= ALC_FLAG_APS;
866 case DEVICEID_ATHEROS_AR8132:
867 sc->alc_flags |= ALC_FLAG_FASTETHER;
869 case DEVICEID_ATHEROS_AR8151:
870 case DEVICEID_ATHEROS_AR8151_V2:
871 sc->alc_flags |= ALC_FLAG_APS;
876 sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;
	/*
	 * The AR813x/AR815x appears to have a silicon bug affecting the
	 * SMB. In addition, Atheros says that enabling the SMB would not
	 * improve performance. Still, it is unfortunate to have to read
	 * many registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/* Don't use the Tx CMB; it is known to have a silicon bug. */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
889 sc->alc_rev = pci_get_revid(dev);
890 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
891 MASTER_CHIP_REV_SHIFT;
893 device_printf(dev, "PCI device revision : 0x%04x\n",
895 device_printf(dev, "Chip id/revision : 0x%04x\n",
898 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
899 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
900 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
902 /* Allocate IRQ resources. */
903 msixc = pci_msix_count(dev);
904 msic = pci_msi_count(dev);
906 device_printf(dev, "MSIX count : %d\n", msixc);
907 device_printf(dev, "MSI count : %d\n", msic);
909 /* Prefer MSIX over MSI. */
910 if (msix_disable == 0 || msi_disable == 0) {
911 if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
912 pci_alloc_msix(dev, &msixc) == 0) {
913 if (msic == ALC_MSIX_MESSAGES) {
915 "Using %d MSIX message(s).\n", msixc);
916 sc->alc_flags |= ALC_FLAG_MSIX;
917 sc->alc_irq_spec = alc_irq_spec_msix;
919 pci_release_msi(dev);
921 if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
922 msic == ALC_MSI_MESSAGES &&
923 pci_alloc_msi(dev, &msic) == 0) {
924 if (msic == ALC_MSI_MESSAGES) {
926 "Using %d MSI message(s).\n", msic);
927 sc->alc_flags |= ALC_FLAG_MSI;
928 sc->alc_irq_spec = alc_irq_spec_msi;
930 pci_release_msi(dev);
934 error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
936 device_printf(dev, "cannot allocate IRQ resources.\n");
940 /* Create device sysctl node. */
943 if ((error = alc_dma_alloc(sc) != 0))
946 /* Load station address. */
949 ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
951 device_printf(dev, "cannot allocate ifnet structure.\n");
957 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
958 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
959 ifp->if_ioctl = alc_ioctl;
960 ifp->if_start = alc_start;
961 ifp->if_init = alc_init;
962 ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
963 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
964 IFQ_SET_READY(&ifp->if_snd);
965 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
966 ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
967 if (pci_find_extcap(dev, PCIY_PMG, &base) == 0) {
968 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
969 sc->alc_flags |= ALC_FLAG_PM;
970 sc->alc_pmcap = base;
972 ifp->if_capenable = ifp->if_capabilities;
974 /* Set up MII bus. */
975 error = mii_attach(dev, &sc->alc_miibus, ifp, alc_mediachange,
976 alc_mediastatus, BMSR_DEFCAPMASK, sc->alc_phyaddr, MII_OFFSET_ANY,
979 device_printf(dev, "attaching PHYs failed\n");
983 ether_ifattach(ifp, sc->alc_eaddr);
985 /* VLAN capability setup. */
986 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
987 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
988 ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Enabling Tx checksum offloading seems to cause trouble:
	 * sometimes the controller does not receive any frames while it
	 * is enabled. It is unclear whether this is a bug in the Tx
	 * checksum offloading logic or just broken sample boards. To be
	 * safe, leave Tx checksum offloading disabled by default, but
	 * give users the chance to enable it if they know their
	 * controllers work without problems.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
1000 ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
1002 /* Tell the upper layer(s) we support long frames. */
1003 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1005 /* Create local taskq. */
1006 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
1007 sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
1008 taskqueue_thread_enqueue, &sc->alc_tq);
1009 if (sc->alc_tq == NULL) {
1010 device_printf(dev, "could not create taskqueue.\n");
1011 ether_ifdetach(ifp);
1015 taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
1016 device_get_nameunit(sc->alc_dev));
1018 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
1019 msic = ALC_MSIX_MESSAGES;
1020 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
1021 msic = ALC_MSI_MESSAGES;
1024 for (i = 0; i < msic; i++) {
1025 error = bus_setup_intr(dev, sc->alc_irq[i],
1026 INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
1027 &sc->alc_intrhand[i]);
1032 device_printf(dev, "could not set up interrupt handler.\n");
1033 taskqueue_free(sc->alc_tq);
1035 ether_ifdetach(ifp);
1047 alc_detach(device_t dev)
1049 struct alc_softc *sc;
1053 sc = device_get_softc(dev);
1056 if (device_is_attached(dev)) {
1058 sc->alc_flags |= ALC_FLAG_DETACH;
1061 callout_drain(&sc->alc_tick_ch);
1062 taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
1063 taskqueue_drain(sc->alc_tq, &sc->alc_tx_task);
1064 ether_ifdetach(ifp);
1067 if (sc->alc_tq != NULL) {
1068 taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
1069 taskqueue_free(sc->alc_tq);
1073 if (sc->alc_miibus != NULL) {
1074 device_delete_child(dev, sc->alc_miibus);
1075 sc->alc_miibus = NULL;
1077 bus_generic_detach(dev);
1085 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
1086 msic = ALC_MSIX_MESSAGES;
1087 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
1088 msic = ALC_MSI_MESSAGES;
1091 for (i = 0; i < msic; i++) {
1092 if (sc->alc_intrhand[i] != NULL) {
1093 bus_teardown_intr(dev, sc->alc_irq[i],
1094 sc->alc_intrhand[i]);
1095 sc->alc_intrhand[i] = NULL;
1098 if (sc->alc_res[0] != NULL)
1100 bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
1101 if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
1102 pci_release_msi(dev);
1103 bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
1104 mtx_destroy(&sc->alc_mtx);
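/* Helpers for registering read-only 32/64-bit statistics sysctl nodes. */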
1109 #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \
1110 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
1111 #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \
1112 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
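/*
 * Create the per-device sysctl tree: interrupt moderation knobs, the Rx
 * process limit, and the MAC statistics nodes.
 */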
1115 alc_sysctl_node(struct alc_softc *sc)
1117 struct sysctl_ctx_list *ctx;
1118 struct sysctl_oid_list *child, *parent;
1119 struct sysctl_oid *tree;
1120 struct alc_hw_stats *stats;
1123 stats = &sc->alc_stats;
1124 ctx = device_get_sysctl_ctx(sc->alc_dev);
1125 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));
1127 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
1128 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
1129 sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
1130 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
1131 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
1132 sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
1133 /* Pull in device tunables. */
1134 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
1135 error = resource_int_value(device_get_name(sc->alc_dev),
1136 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
1138 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
1139 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
1140 device_printf(sc->alc_dev, "int_rx_mod value out of "
1141 "range; using default: %d\n",
1142 ALC_IM_RX_TIMER_DEFAULT);
1143 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
1146 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
1147 error = resource_int_value(device_get_name(sc->alc_dev),
1148 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
1150 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
1151 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
1152 device_printf(sc->alc_dev, "int_tx_mod value out of "
1153 "range; using default: %d\n",
1154 ALC_IM_TX_TIMER_DEFAULT);
1155 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
1158 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
1159 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
1160 sysctl_hw_alc_proc_limit, "I",
1161 "max number of Rx events to process");
1162 /* Pull in device tunables. */
1163 sc->alc_process_limit = ALC_PROC_DEFAULT;
1164 error = resource_int_value(device_get_name(sc->alc_dev),
1165 device_get_unit(sc->alc_dev), "process_limit",
1166 &sc->alc_process_limit);
1168 if (sc->alc_process_limit < ALC_PROC_MIN ||
1169 sc->alc_process_limit > ALC_PROC_MAX) {
1170 device_printf(sc->alc_dev,
1171 "process_limit value out of range; "
1172 "using default: %d\n", ALC_PROC_DEFAULT);
1173 sc->alc_process_limit = ALC_PROC_DEFAULT;
1177 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
1178 NULL, "ALC statistics");
1179 parent = SYSCTL_CHILDREN(tree);
1181 /* Rx statistics. */
1182 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
1183 NULL, "Rx MAC statistics");
1184 child = SYSCTL_CHILDREN(tree);
1185 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1186 &stats->rx_frames, "Good frames");
1187 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1188 &stats->rx_bcast_frames, "Good broadcast frames");
1189 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1190 &stats->rx_mcast_frames, "Good multicast frames");
1191 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1192 &stats->rx_pause_frames, "Pause control frames");
1193 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1194 &stats->rx_control_frames, "Control frames");
1195 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1196 &stats->rx_crcerrs, "CRC errors");
1197 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1198 &stats->rx_lenerrs, "Frames with length mismatched");
1199 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1200 &stats->rx_bytes, "Good octets");
1201 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1202 &stats->rx_bcast_bytes, "Good broadcast octets");
1203 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1204 &stats->rx_mcast_bytes, "Good multicast octets");
1205 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
1206 &stats->rx_runts, "Too short frames");
1207 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
1208 &stats->rx_fragments, "Fragmented frames");
1209 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1210 &stats->rx_pkts_64, "64 bytes frames");
1211 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1212 &stats->rx_pkts_65_127, "65 to 127 bytes frames");
1213 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1214 &stats->rx_pkts_128_255, "128 to 255 bytes frames");
1215 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1216 &stats->rx_pkts_256_511, "256 to 511 bytes frames");
1217 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1218 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
1219 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1220 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
1221 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1222 &stats->rx_pkts_1519_max, "1519 to max frames");
1223 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1224 &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
1225 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1226 &stats->rx_fifo_oflows, "FIFO overflows");
1227 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
1228 &stats->rx_rrs_errs, "Return status write-back errors");
1229 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
1230 &stats->rx_alignerrs, "Alignment errors");
1231 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
1232 &stats->rx_pkts_filtered,
1233 "Frames dropped due to address filtering");
1235 /* Tx statistics. */
1236 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1237 NULL, "Tx MAC statistics");
1238 child = SYSCTL_CHILDREN(tree);
1239 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1240 &stats->tx_frames, "Good frames");
1241 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1242 &stats->tx_bcast_frames, "Good broadcast frames");
1243 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1244 &stats->tx_mcast_frames, "Good multicast frames");
1245 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1246 &stats->tx_pause_frames, "Pause control frames");
1247 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1248 &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with deferrals");
1253 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1254 &stats->tx_bytes, "Good octets");
1255 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1256 &stats->tx_bcast_bytes, "Good broadcast octets");
1257 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1258 &stats->tx_mcast_bytes, "Good multicast octets");
1259 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1260 &stats->tx_pkts_64, "64 bytes frames");
1261 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1262 &stats->tx_pkts_65_127, "65 to 127 bytes frames");
1263 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1264 &stats->tx_pkts_128_255, "128 to 255 bytes frames");
1265 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1266 &stats->tx_pkts_256_511, "256 to 511 bytes frames");
1267 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1268 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
1269 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1270 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
1271 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1272 &stats->tx_pkts_1519_max, "1519 to max frames");
1273 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
1274 &stats->tx_single_colls, "Single collisions");
1275 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
1276 &stats->tx_multi_colls, "Multiple collisions");
1277 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
1278 &stats->tx_late_colls, "Late collisions");
1279 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
1280 &stats->tx_excess_colls, "Excessive collisions");
1281 ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
1283 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
1284 &stats->tx_underrun, "FIFO underruns");
1285 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
1286 &stats->tx_desc_underrun, "Descriptor write-back errors");
1287 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1288 &stats->tx_lenerrs, "Frames with length mismatched");
1289 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1290 &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
1293 #undef ALC_SYSCTL_STAT_ADD32
1294 #undef ALC_SYSCTL_STAT_ADD64
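/* bus_dmamap_load() callback argument; captures the bus address of a single segment. */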
1296 struct alc_dmamap_arg {
1297 bus_addr_t alc_busaddr;
1301 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1303 struct alc_dmamap_arg *ctx;
1308 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1310 ctx = (struct alc_dmamap_arg *)arg;
1311 ctx->alc_busaddr = segs[0].ds_addr;
/*
 * Normal and high-priority Tx descriptors share a single Tx high address.
 * The four Rx descriptor/return rings and the CMB share the same Rx high
 * address.
 */
1320 alc_check_boundary(struct alc_softc *sc)
1322 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;
1324 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
1325 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
1326 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
1327 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;
1329 /* 4GB boundary crossing is not allowed. */
1330 if ((ALC_ADDR_HI(rx_ring_end) !=
1331 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
1332 (ALC_ADDR_HI(rr_ring_end) !=
1333 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
1334 (ALC_ADDR_HI(cmb_end) !=
1335 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
1336 (ALC_ADDR_HI(tx_ring_end) !=
1337 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
1340 * Make sure Rx return descriptor/Rx descriptor/CMB use
1341 * the same high address.
1343 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
1344 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
1351 alc_dma_alloc(struct alc_softc *sc)
1353 struct alc_txdesc *txd;
1354 struct alc_rxdesc *rxd;
1356 struct alc_dmamap_arg ctx;
1359 lowaddr = BUS_SPACE_MAXADDR;
1361 /* Create parent DMA tag. */
1362 error = bus_dma_tag_create(
1363 bus_get_dma_tag(sc->alc_dev), /* parent */
1364 1, 0, /* alignment, boundary */
1365 lowaddr, /* lowaddr */
1366 BUS_SPACE_MAXADDR, /* highaddr */
1367 NULL, NULL, /* filter, filterarg */
1368 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1370 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1372 NULL, NULL, /* lockfunc, lockarg */
1373 &sc->alc_cdata.alc_parent_tag);
1375 device_printf(sc->alc_dev,
1376 "could not create parent DMA tag.\n");
1380 /* Create DMA tag for Tx descriptor ring. */
1381 error = bus_dma_tag_create(
1382 sc->alc_cdata.alc_parent_tag, /* parent */
1383 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */
1384 BUS_SPACE_MAXADDR, /* lowaddr */
1385 BUS_SPACE_MAXADDR, /* highaddr */
1386 NULL, NULL, /* filter, filterarg */
1387 ALC_TX_RING_SZ, /* maxsize */
1389 ALC_TX_RING_SZ, /* maxsegsize */
1391 NULL, NULL, /* lockfunc, lockarg */
1392 &sc->alc_cdata.alc_tx_ring_tag);
1394 device_printf(sc->alc_dev,
1395 "could not create Tx ring DMA tag.\n");
1399 /* Create DMA tag for Rx free descriptor ring. */
1400 error = bus_dma_tag_create(
1401 sc->alc_cdata.alc_parent_tag, /* parent */
1402 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */
1403 BUS_SPACE_MAXADDR, /* lowaddr */
1404 BUS_SPACE_MAXADDR, /* highaddr */
1405 NULL, NULL, /* filter, filterarg */
1406 ALC_RX_RING_SZ, /* maxsize */
1408 ALC_RX_RING_SZ, /* maxsegsize */
1410 NULL, NULL, /* lockfunc, lockarg */
1411 &sc->alc_cdata.alc_rx_ring_tag);
1413 device_printf(sc->alc_dev,
1414 "could not create Rx ring DMA tag.\n");
1417 /* Create DMA tag for Rx return descriptor ring. */
1418 error = bus_dma_tag_create(
1419 sc->alc_cdata.alc_parent_tag, /* parent */
1420 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */
1421 BUS_SPACE_MAXADDR, /* lowaddr */
1422 BUS_SPACE_MAXADDR, /* highaddr */
1423 NULL, NULL, /* filter, filterarg */
1424 ALC_RR_RING_SZ, /* maxsize */
1426 ALC_RR_RING_SZ, /* maxsegsize */
1428 NULL, NULL, /* lockfunc, lockarg */
1429 &sc->alc_cdata.alc_rr_ring_tag);
1431 device_printf(sc->alc_dev,
1432 "could not create Rx return ring DMA tag.\n");
1436 /* Create DMA tag for coalescing message block. */
1437 error = bus_dma_tag_create(
1438 sc->alc_cdata.alc_parent_tag, /* parent */
1439 ALC_CMB_ALIGN, 0, /* alignment, boundary */
1440 BUS_SPACE_MAXADDR, /* lowaddr */
1441 BUS_SPACE_MAXADDR, /* highaddr */
1442 NULL, NULL, /* filter, filterarg */
1443 ALC_CMB_SZ, /* maxsize */
1445 ALC_CMB_SZ, /* maxsegsize */
1447 NULL, NULL, /* lockfunc, lockarg */
1448 &sc->alc_cdata.alc_cmb_tag);
1450 device_printf(sc->alc_dev,
1451 "could not create CMB DMA tag.\n");
1454 /* Create DMA tag for status message block. */
1455 error = bus_dma_tag_create(
1456 sc->alc_cdata.alc_parent_tag, /* parent */
1457 ALC_SMB_ALIGN, 0, /* alignment, boundary */
1458 BUS_SPACE_MAXADDR, /* lowaddr */
1459 BUS_SPACE_MAXADDR, /* highaddr */
1460 NULL, NULL, /* filter, filterarg */
1461 ALC_SMB_SZ, /* maxsize */
1463 ALC_SMB_SZ, /* maxsegsize */
1465 NULL, NULL, /* lockfunc, lockarg */
1466 &sc->alc_cdata.alc_smb_tag);
1468 device_printf(sc->alc_dev,
1469 "could not create SMB DMA tag.\n");
1473 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1474 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
1475 (void **)&sc->alc_rdata.alc_tx_ring,
1476 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1477 &sc->alc_cdata.alc_tx_ring_map);
1479 device_printf(sc->alc_dev,
1480 "could not allocate DMA'able memory for Tx ring.\n");
1483 ctx.alc_busaddr = 0;
1484 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
1485 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
1486 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1487 if (error != 0 || ctx.alc_busaddr == 0) {
1488 device_printf(sc->alc_dev,
1489 "could not load DMA'able memory for Tx ring.\n");
1492 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;
1494 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1495 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
1496 (void **)&sc->alc_rdata.alc_rx_ring,
1497 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1498 &sc->alc_cdata.alc_rx_ring_map);
1500 device_printf(sc->alc_dev,
1501 "could not allocate DMA'able memory for Rx ring.\n");
1504 ctx.alc_busaddr = 0;
1505 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
1506 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
1507 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1508 if (error != 0 || ctx.alc_busaddr == 0) {
1509 device_printf(sc->alc_dev,
1510 "could not load DMA'able memory for Rx ring.\n");
1513 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;
1515 /* Allocate DMA'able memory and load the DMA map for Rx return ring. */
1516 error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
1517 (void **)&sc->alc_rdata.alc_rr_ring,
1518 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1519 &sc->alc_cdata.alc_rr_ring_map);
1521 device_printf(sc->alc_dev,
1522 "could not allocate DMA'able memory for Rx return ring.\n");
1525 ctx.alc_busaddr = 0;
1526 error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
1527 sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
1528 ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
1529 if (error != 0 || ctx.alc_busaddr == 0) {
1530 device_printf(sc->alc_dev,
1531 "could not load DMA'able memory for Tx ring.\n");
1534 sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;
1536 /* Allocate DMA'able memory and load the DMA map for CMB. */
1537 error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
1538 (void **)&sc->alc_rdata.alc_cmb,
1539 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1540 &sc->alc_cdata.alc_cmb_map);
1542 device_printf(sc->alc_dev,
1543 "could not allocate DMA'able memory for CMB.\n");
1546 ctx.alc_busaddr = 0;
1547 error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
1548 sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
1549 ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
1550 if (error != 0 || ctx.alc_busaddr == 0) {
1551 device_printf(sc->alc_dev,
1552 "could not load DMA'able memory for CMB.\n");
1555 sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;
1557 /* Allocate DMA'able memory and load the DMA map for SMB. */
1558 error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
1559 (void **)&sc->alc_rdata.alc_smb,
1560 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1561 &sc->alc_cdata.alc_smb_map);
1563 device_printf(sc->alc_dev,
1564 "could not allocate DMA'able memory for SMB.\n");
1567 ctx.alc_busaddr = 0;
1568 error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
1569 sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
1570 ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
1571 if (error != 0 || ctx.alc_busaddr == 0) {
1572 device_printf(sc->alc_dev,
1573 "could not load DMA'able memory for CMB.\n");
1576 sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
1578 /* Make sure we've not crossed 4GB boundary. */
1579 if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
1580 (error = alc_check_boundary(sc)) != 0) {
1581 device_printf(sc->alc_dev, "4GB boundary crossed, "
1582 "switching to 32bit DMA addressing mode.\n");
		/* Limit the allowable DMA address space to a 32-bit region. */
1588 lowaddr = BUS_SPACE_MAXADDR_32BIT;
	/*
	 * Create the Tx/Rx buffer parent tag.
	 * The AR813x/AR815x allows 64-bit DMA addressing of Tx/Rx
	 * buffers, so it needs its own parent DMA tag; the ring parent
	 * DMA address space may have been restricted to 32-bit
	 * addressing because of a 4GB boundary crossing.
	 */
1599 error = bus_dma_tag_create(
1600 bus_get_dma_tag(sc->alc_dev), /* parent */
1601 1, 0, /* alignment, boundary */
1602 BUS_SPACE_MAXADDR, /* lowaddr */
1603 BUS_SPACE_MAXADDR, /* highaddr */
1604 NULL, NULL, /* filter, filterarg */
1605 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1607 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1609 NULL, NULL, /* lockfunc, lockarg */
1610 &sc->alc_cdata.alc_buffer_tag);
1612 device_printf(sc->alc_dev,
1613 "could not create parent buffer DMA tag.\n");
1617 /* Create DMA tag for Tx buffers. */
1618 error = bus_dma_tag_create(
1619 sc->alc_cdata.alc_buffer_tag, /* parent */
1620 1, 0, /* alignment, boundary */
1621 BUS_SPACE_MAXADDR, /* lowaddr */
1622 BUS_SPACE_MAXADDR, /* highaddr */
1623 NULL, NULL, /* filter, filterarg */
1624 ALC_TSO_MAXSIZE, /* maxsize */
1625 ALC_MAXTXSEGS, /* nsegments */
1626 ALC_TSO_MAXSEGSIZE, /* maxsegsize */
1628 NULL, NULL, /* lockfunc, lockarg */
1629 &sc->alc_cdata.alc_tx_tag);
1631 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
1635 /* Create DMA tag for Rx buffers. */
1636 error = bus_dma_tag_create(
1637 sc->alc_cdata.alc_buffer_tag, /* parent */
1638 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */
1639 BUS_SPACE_MAXADDR, /* lowaddr */
1640 BUS_SPACE_MAXADDR, /* highaddr */
1641 NULL, NULL, /* filter, filterarg */
1642 MCLBYTES, /* maxsize */
1644 MCLBYTES, /* maxsegsize */
1646 NULL, NULL, /* lockfunc, lockarg */
1647 &sc->alc_cdata.alc_rx_tag);
1649 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
1652 /* Create DMA maps for Tx buffers. */
1653 for (i = 0; i < ALC_TX_RING_CNT; i++) {
1654 txd = &sc->alc_cdata.alc_txdesc[i];
1656 txd->tx_dmamap = NULL;
1657 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
1660 device_printf(sc->alc_dev,
1661 "could not create Tx dmamap.\n");
1665 /* Create DMA maps for Rx buffers. */
1666 if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1667 &sc->alc_cdata.alc_rx_sparemap)) != 0) {
1668 device_printf(sc->alc_dev,
1669 "could not create spare Rx dmamap.\n");
1672 for (i = 0; i < ALC_RX_RING_CNT; i++) {
1673 rxd = &sc->alc_cdata.alc_rxdesc[i];
1675 rxd->rx_dmamap = NULL;
1676 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1679 device_printf(sc->alc_dev,
1680 "could not create Rx dmamap.\n");
1690 alc_dma_free(struct alc_softc *sc)
1692 struct alc_txdesc *txd;
1693 struct alc_rxdesc *rxd;
1697 if (sc->alc_cdata.alc_tx_tag != NULL) {
1698 for (i = 0; i < ALC_TX_RING_CNT; i++) {
1699 txd = &sc->alc_cdata.alc_txdesc[i];
1700 if (txd->tx_dmamap != NULL) {
1701 bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
1703 txd->tx_dmamap = NULL;
1706 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
1707 sc->alc_cdata.alc_tx_tag = NULL;
1710 if (sc->alc_cdata.alc_rx_tag != NULL) {
1711 for (i = 0; i < ALC_RX_RING_CNT; i++) {
1712 rxd = &sc->alc_cdata.alc_rxdesc[i];
1713 if (rxd->rx_dmamap != NULL) {
1714 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1716 rxd->rx_dmamap = NULL;
1719 if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1720 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1721 sc->alc_cdata.alc_rx_sparemap);
1722 sc->alc_cdata.alc_rx_sparemap = NULL;
1724 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
1725 sc->alc_cdata.alc_rx_tag = NULL;
1727 /* Tx descriptor ring. */
1728 if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
1729 if (sc->alc_cdata.alc_tx_ring_map != NULL)
1730 bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
1731 sc->alc_cdata.alc_tx_ring_map);
1732 if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1733 sc->alc_rdata.alc_tx_ring != NULL)
1734 bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
1735 sc->alc_rdata.alc_tx_ring,
1736 sc->alc_cdata.alc_tx_ring_map);
1737 sc->alc_rdata.alc_tx_ring = NULL;
1738 sc->alc_cdata.alc_tx_ring_map = NULL;
1739 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
1740 sc->alc_cdata.alc_tx_ring_tag = NULL;
1743 if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
1744 if (sc->alc_cdata.alc_rx_ring_map != NULL)
1745 bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
1746 sc->alc_cdata.alc_rx_ring_map);
1747 if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1748 sc->alc_rdata.alc_rx_ring != NULL)
1749 bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
1750 sc->alc_rdata.alc_rx_ring,
1751 sc->alc_cdata.alc_rx_ring_map);
1752 sc->alc_rdata.alc_rx_ring = NULL;
1753 sc->alc_cdata.alc_rx_ring_map = NULL;
1754 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
1755 sc->alc_cdata.alc_rx_ring_tag = NULL;
1757 /* Rx return ring. */
1758 if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
1759 if (sc->alc_cdata.alc_rr_ring_map != NULL)
1760 bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
1761 sc->alc_cdata.alc_rr_ring_map);
1762 if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1763 sc->alc_rdata.alc_rr_ring != NULL)
1764 bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
1765 sc->alc_rdata.alc_rr_ring,
1766 sc->alc_cdata.alc_rr_ring_map);
1767 sc->alc_rdata.alc_rr_ring = NULL;
1768 sc->alc_cdata.alc_rr_ring_map = NULL;
1769 bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
1770 sc->alc_cdata.alc_rr_ring_tag = NULL;
1773 if (sc->alc_cdata.alc_cmb_tag != NULL) {
1774 if (sc->alc_cdata.alc_cmb_map != NULL)
1775 bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
1776 sc->alc_cdata.alc_cmb_map);
1777 if (sc->alc_cdata.alc_cmb_map != NULL &&
1778 sc->alc_rdata.alc_cmb != NULL)
1779 bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
1780 sc->alc_rdata.alc_cmb,
1781 sc->alc_cdata.alc_cmb_map);
1782 sc->alc_rdata.alc_cmb = NULL;
1783 sc->alc_cdata.alc_cmb_map = NULL;
1784 bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
1785 sc->alc_cdata.alc_cmb_tag = NULL;
1788 if (sc->alc_cdata.alc_smb_tag != NULL) {
1789 if (sc->alc_cdata.alc_smb_map != NULL)
1790 bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
1791 sc->alc_cdata.alc_smb_map);
1792 if (sc->alc_cdata.alc_smb_map != NULL &&
1793 sc->alc_rdata.alc_smb != NULL)
1794 bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
1795 sc->alc_rdata.alc_smb,
1796 sc->alc_cdata.alc_smb_map);
1797 sc->alc_rdata.alc_smb = NULL;
1798 sc->alc_cdata.alc_smb_map = NULL;
1799 bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
1800 sc->alc_cdata.alc_smb_tag = NULL;
1802 if (sc->alc_cdata.alc_buffer_tag != NULL) {
1803 bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
1804 sc->alc_cdata.alc_buffer_tag = NULL;
1806 if (sc->alc_cdata.alc_parent_tag != NULL) {
1807 bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
1808 sc->alc_cdata.alc_parent_tag = NULL;
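/* Shutdown is handled like suspend so that WOL is programmed if enabled. */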
1813 alc_shutdown(device_t dev)
1816 return (alc_suspend(dev));
/*
 * Note: this driver resets the link speed to 10/100Mbps by restarting
 * auto-negotiation during the suspend/shutdown phase, but there is no way
 * to know whether that negotiation will succeed, since the driver has no
 * control after the power-off/suspend operation. If the renegotiation
 * fails, WOL may not work. Running at 1Gbps would draw more power than the
 * 375mA at 3.3V allowed by the PCI specification, and that could result in
 * power to the Ethernet controller being cut off completely.
 * It would be better to save the currently negotiated media
 * speed/duplex/flow-control in the softc and restore the same link after
 * resuming; PHY handling such as powering down or resetting to 100Mbps may
 * be better handled in the PHY driver's suspend method.
 */
1836 alc_setlinkspeed(struct alc_softc *sc)
1838 struct mii_data *mii;
1841 mii = device_get_softc(sc->alc_miibus);
1844 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1845 (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1857 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
1858 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1859 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1860 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1861 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	/* Poll the link state until alc(4) gets a 10/100Mbps link. */
1867 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1869 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1870 == (IFM_ACTIVE | IFM_AVALID)) {
1871 switch (IFM_SUBTYPE(
1872 mii->mii_media_active)) {
1882 pause("alclnk", hz);
1885 if (i == MII_ANEGTICKS_GIGE)
1886 device_printf(sc->alc_dev,
1887 "establishing a link failed, WOL may not work!");
	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
1893 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1894 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
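/* Program wake-on-LAN (magic packet matching and PME) according to if_capenable. */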
1899 alc_setwol(struct alc_softc *sc)
1905 ALC_LOCK_ASSERT(sc);
1907 alc_disable_l0s_l1(sc);
1909 if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
1911 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
1912 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1913 reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1914 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1915 /* Force PHY power down. */
1917 CSR_WRITE_4(sc, ALC_MASTER_CFG,
1918 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
1922 if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1923 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
1924 alc_setlinkspeed(sc);
1925 CSR_WRITE_4(sc, ALC_MASTER_CFG,
1926 CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS);
1930 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
1931 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
1932 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
1933 reg = CSR_READ_4(sc, ALC_MAC_CFG);
1934 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
1936 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
1937 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
1938 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1939 reg |= MAC_CFG_RX_ENB;
1940 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1942 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1943 reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1944 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1945 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1946 /* WOL disabled, PHY power down. */
1948 CSR_WRITE_4(sc, ALC_MASTER_CFG,
1949 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
1952 pmstat = pci_read_config(sc->alc_dev,
1953 sc->alc_pmcap + PCIR_POWER_STATUS, 2);
1954 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1955 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1956 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1957 pci_write_config(sc->alc_dev,
1958 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
1962 alc_suspend(device_t dev)
1964 struct alc_softc *sc;
1966 sc = device_get_softc(dev);
1977 alc_resume(device_t dev)
1979 struct alc_softc *sc;
1983 sc = device_get_softc(dev);
1986 if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
1987 /* Disable PME and clear PME status. */
1988 pmstat = pci_read_config(sc->alc_dev,
1989 sc->alc_pmcap + PCIR_POWER_STATUS, 2);
1990 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1991 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1992 pci_write_config(sc->alc_dev,
1993 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
1999 if ((ifp->if_flags & IFF_UP) != 0) {
2000 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2001 alc_init_locked(sc);
2009 alc_encap(struct alc_softc *sc, struct mbuf **m_head)
2011 struct alc_txdesc *txd, *txd_last;
2012 struct tx_desc *desc;
2016 bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
2018 uint32_t cflags, hdrlen, ip_off, poff, vtag;
2019 int error, idx, nsegs, prod;
2021 ALC_LOCK_ASSERT(sc);
2023 M_ASSERTPKTHDR((*m_head));
2029 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
 * AR813x/AR815x requires the offset of the TCP/UDP header in its
 * Tx descriptor in order to perform Tx checksum offloading. TSO
 * also requires the TCP header offset and modification of the
 * IP/TCP header. This kind of operation takes many CPU
 * cycles on FreeBSD, so a fast host CPU is required for
 * smooth TSO performance.
2038 struct ether_header *eh;
2040 if (M_WRITABLE(m) == 0) {
2041 /* Get a writable copy. */
2042 m = m_dup(*m_head, M_DONTWAIT);
2043 /* Release original mbufs. */
2052 ip_off = sizeof(struct ether_header);
2053 m = m_pullup(m, ip_off);
2058 eh = mtod(m, struct ether_header *);
2060 * Check if hardware VLAN insertion is off.
2061 * Additional check for LLC/SNAP frame?
2063 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2064 ip_off = sizeof(struct ether_vlan_header);
2065 m = m_pullup(m, ip_off);
2071 m = m_pullup(m, ip_off + sizeof(struct ip));
2076 ip = (struct ip *)(mtod(m, char *) + ip_off);
2077 poff = ip_off + (ip->ip_hl << 2);
2078 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2079 m = m_pullup(m, poff + sizeof(struct tcphdr));
2084 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2085 m = m_pullup(m, poff + (tcp->th_off << 2));
 * Due to strict adherence to the Microsoft NDIS
 * Large Send specification, the hardware expects
 * a pseudo TCP checksum inserted by the upper
 * stack. Unfortunately the pseudo TCP
 * checksum that NDIS refers to does not include
 * the TCP payload length, so the driver should
 * recompute the pseudo checksum here. Hopefully
 * this isn't much of a burden on modern CPUs.
 *
 * Reset the IP checksum and recompute the TCP pseudo
 * checksum as the NDIS specification says.
2103 ip = (struct ip *)(mtod(m, char *) + ip_off);
2104 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2106 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
2107 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
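/*
 * Note (illustrative): the pseudo-header sum computed by in_pseudo()
 * above deliberately omits the TCP length (only the addresses and the
 * protocol are covered), matching the NDIS-style pseudo checksum
 * described above; the per-segment length is accounted for when the
 * hardware segments the frame.
 */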
2112 prod = sc->alc_cdata.alc_tx_prod;
2113 txd = &sc->alc_cdata.alc_txdesc[prod];
2115 map = txd->tx_dmamap;
2117 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
2118 *m_head, txsegs, &nsegs, 0);
2119 if (error == EFBIG) {
2120 m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
2127 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
2128 *m_head, txsegs, &nsegs, 0);
2134 } else if (error != 0)
2142 /* Check descriptor overrun. */
2143 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
2144 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
2147 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2150 cflags = TD_ETHERNET;
2154 /* Configure VLAN hardware tag insertion. */
2155 if ((m->m_flags & M_VLANTAG) != 0) {
2156 vtag = htons(m->m_pkthdr.ether_vtag);
2157 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
2158 cflags |= TD_INS_VLAN_TAG;
2160 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2161 /* Request TSO and set MSS. */
2162 cflags |= TD_TSO | TD_TSO_DESCV1;
2163 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
2165 /* Set TCP header offset. */
2166 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
2167 TD_TCPHDR_OFFSET_MASK;
 * AR813x/AR815x requires that the first buffer
 * hold only the IP/TCP header data. The payload should
 * be handled in the other descriptors.
2173 hdrlen = poff + (tcp->th_off << 2);
2174 desc = &sc->alc_rdata.alc_tx_ring[prod];
2175 desc->len = htole32(TX_BYTES(hdrlen | vtag));
2176 desc->flags = htole32(cflags);
2177 desc->addr = htole64(txsegs[0].ds_addr);
2178 sc->alc_cdata.alc_tx_cnt++;
2179 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2180 if (m->m_len - hdrlen > 0) {
2181 /* Handle remaining payload of the first fragment. */
2182 desc = &sc->alc_rdata.alc_tx_ring[prod];
2183 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
2185 desc->flags = htole32(cflags);
2186 desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
2187 sc->alc_cdata.alc_tx_cnt++;
2188 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
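/*
 * Illustrative example: with a 14-byte ethernet header, a 20-byte IP
 * header and a 20-byte TCP header, hdrlen is 54, so the first
 * descriptor covers bytes 0-53 of the first DMA segment and the
 * descriptor just built covers the remaining (m->m_len - hdrlen)
 * bytes of that first buffer.
 */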
2190 /* Handle remaining fragments. */
2192 } else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
2193 /* Configure Tx checksum offload. */
2194 #ifdef ALC_USE_CUSTOM_CSUM
2195 cflags |= TD_CUSTOM_CSUM;
2196 /* Set checksum start offset. */
2197 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
2198 TD_PLOAD_OFFSET_MASK;
2199 /* Set checksum insertion position of TCP/UDP. */
2200 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
2201 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
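/*
 * Both offsets above appear to be programmed in 2-byte units, hence
 * the right shift by one; csum_data holds the offset of the checksum
 * field within the TCP/UDP header.
 */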
2203 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2204 cflags |= TD_IPCSUM;
2205 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2206 cflags |= TD_TCPCSUM;
2207 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2208 cflags |= TD_UDPCSUM;
2209 /* Set TCP/UDP header offset. */
2210 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
2211 TD_L4HDR_OFFSET_MASK;
2214 for (; idx < nsegs; idx++) {
2215 desc = &sc->alc_rdata.alc_tx_ring[prod];
2216 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
2217 desc->flags = htole32(cflags);
2218 desc->addr = htole64(txsegs[idx].ds_addr);
2219 sc->alc_cdata.alc_tx_cnt++;
2220 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2222 /* Update producer index. */
2223 sc->alc_cdata.alc_tx_prod = prod;
2225 /* Finally set EOP on the last descriptor. */
2226 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
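/*
 * prod already points one past the last descriptor used for this
 * frame, so stepping back by one modulo the ring size selects the
 * final descriptor, which should carry TD_EOP.
 */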
2227 desc = &sc->alc_rdata.alc_tx_ring[prod];
2228 desc->flags |= htole32(TD_EOP);
2230 /* Swap dmamap of the first and the last. */
2231 txd = &sc->alc_cdata.alc_txdesc[prod];
2232 map = txd_last->tx_dmamap;
2233 txd_last->tx_dmamap = txd->tx_dmamap;
2234 txd->tx_dmamap = map;
2241 alc_tx_task(void *arg, int pending)
2245 ifp = (struct ifnet *)arg;
2250 alc_start(struct ifnet *ifp)
2252 struct alc_softc *sc;
2253 struct mbuf *m_head;
2260 /* Reclaim transmitted frames. */
2261 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2264 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2265 IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) {
2270 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2271 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2275 * Pack the data into the transmit ring. If we
2276 * don't have room, set the OACTIVE flag and wait
2277 * for the NIC to drain the ring.
2279 if (alc_encap(sc, &m_head)) {
2282 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2283 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2289 * If there's a BPF listener, bounce a copy of this frame
2292 ETHER_BPF_MTAP(ifp, m_head);
2296 /* Sync descriptors. */
2297 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2298 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2299 /* Kick. Assume we're using normal Tx priority queue. */
2300 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2301 (sc->alc_cdata.alc_tx_prod <<
2302 MBOX_TD_PROD_LO_IDX_SHIFT) &
2303 MBOX_TD_PROD_LO_IDX_MASK);
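/*
 * Writing the updated producer index to the mailbox register is what
 * tells the controller that new Tx descriptors are ready; it will
 * process descriptors up to, but not including, this index.
 */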
2304 /* Set a timeout in case the chip goes out to lunch. */
2305 sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
2312 alc_watchdog(struct alc_softc *sc)
2316 ALC_LOCK_ASSERT(sc);
2318 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2322 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2323 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2325 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2326 alc_init_locked(sc);
2329 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2331 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2332 alc_init_locked(sc);
2333 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2334 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2338 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2340 struct alc_softc *sc;
2342 struct mii_data *mii;
2346 ifr = (struct ifreq *)data;
2350 if (ifr->ifr_mtu < ETHERMIN ||
2351 ifr->ifr_mtu > (sc->alc_ident->max_framelen -
2352 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
2353 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
2354 ifr->ifr_mtu > ETHERMTU))
2356 else if (ifp->if_mtu != ifr->ifr_mtu) {
2358 ifp->if_mtu = ifr->ifr_mtu;
/* AR813x/AR815x has a 13-bit MSS field. */
2360 if (ifp->if_mtu > ALC_TSO_MTU &&
2361 (ifp->if_capenable & IFCAP_TSO4) != 0) {
2362 ifp->if_capenable &= ~IFCAP_TSO4;
2363 ifp->if_hwassist &= ~CSUM_TSO;
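/*
 * A 13-bit MSS field can express at most 8191 bytes, so TSO has to be
 * turned off once the MTU exceeds ALC_TSO_MTU.
 */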
2364 VLAN_CAPABILITIES(ifp);
2371 if ((ifp->if_flags & IFF_UP) != 0) {
2372 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2373 ((ifp->if_flags ^ sc->alc_if_flags) &
2374 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2376 else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0)
2377 alc_init_locked(sc);
2378 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2380 sc->alc_if_flags = ifp->if_flags;
2386 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2392 mii = device_get_softc(sc->alc_miibus);
2393 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2397 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2398 if ((mask & IFCAP_TXCSUM) != 0 &&
2399 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2400 ifp->if_capenable ^= IFCAP_TXCSUM;
2401 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2402 ifp->if_hwassist |= ALC_CSUM_FEATURES;
2404 ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2406 if ((mask & IFCAP_TSO4) != 0 &&
2407 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2408 ifp->if_capenable ^= IFCAP_TSO4;
2409 if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
/* AR813x/AR815x has a 13-bit MSS field. */
2411 if (ifp->if_mtu > ALC_TSO_MTU) {
2412 ifp->if_capenable &= ~IFCAP_TSO4;
2413 ifp->if_hwassist &= ~CSUM_TSO;
2415 ifp->if_hwassist |= CSUM_TSO;
2417 ifp->if_hwassist &= ~CSUM_TSO;
2419 if ((mask & IFCAP_WOL_MCAST) != 0 &&
2420 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2421 ifp->if_capenable ^= IFCAP_WOL_MCAST;
2422 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2423 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2424 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2425 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2426 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2427 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2430 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2431 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2432 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2433 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2434 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2435 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2436 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2437 ifp->if_capenable &=
2438 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
2440 VLAN_CAPABILITIES(ifp);
2443 error = ether_ioctl(ifp, cmd, data);
2451 alc_mac_config(struct alc_softc *sc)
2453 struct mii_data *mii;
2456 ALC_LOCK_ASSERT(sc);
2458 mii = device_get_softc(sc->alc_miibus);
2459 reg = CSR_READ_4(sc, ALC_MAC_CFG);
2460 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2461 MAC_CFG_SPEED_MASK);
2462 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
2463 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
2464 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
2465 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2466 /* Reprogram MAC with resolved speed/duplex. */
2467 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2470 reg |= MAC_CFG_SPEED_10_100;
2473 reg |= MAC_CFG_SPEED_1000;
2476 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2477 reg |= MAC_CFG_FULL_DUPLEX;
2478 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2479 reg |= MAC_CFG_TX_FC;
2480 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2481 reg |= MAC_CFG_RX_FC;
2483 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2487 alc_stats_clear(struct alc_softc *sc)
2489 struct smb sb, *smb;
2493 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2494 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2495 sc->alc_cdata.alc_smb_map,
2496 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2497 smb = sc->alc_rdata.alc_smb;
2498 /* Update done, clear. */
2500 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2501 sc->alc_cdata.alc_smb_map,
2502 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2504 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2506 CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2507 i += sizeof(uint32_t);
2509 /* Read Tx statistics. */
2510 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2512 CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2513 i += sizeof(uint32_t);
2519 alc_stats_update(struct alc_softc *sc)
2521 struct alc_hw_stats *stat;
2522 struct smb sb, *smb;
2527 ALC_LOCK_ASSERT(sc);
2530 stat = &sc->alc_stats;
2531 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2532 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2533 sc->alc_cdata.alc_smb_map,
2534 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2535 smb = sc->alc_rdata.alc_smb;
2536 if (smb->updated == 0)
2540 /* Read Rx statistics. */
2541 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2543 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2544 i += sizeof(uint32_t);
2546 /* Read Tx statistics. */
2547 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2549 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2550 i += sizeof(uint32_t);
2555 stat->rx_frames += smb->rx_frames;
2556 stat->rx_bcast_frames += smb->rx_bcast_frames;
2557 stat->rx_mcast_frames += smb->rx_mcast_frames;
2558 stat->rx_pause_frames += smb->rx_pause_frames;
2559 stat->rx_control_frames += smb->rx_control_frames;
2560 stat->rx_crcerrs += smb->rx_crcerrs;
2561 stat->rx_lenerrs += smb->rx_lenerrs;
2562 stat->rx_bytes += smb->rx_bytes;
2563 stat->rx_runts += smb->rx_runts;
2564 stat->rx_fragments += smb->rx_fragments;
2565 stat->rx_pkts_64 += smb->rx_pkts_64;
2566 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2567 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2568 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2569 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2570 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2571 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2572 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2573 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2574 stat->rx_rrs_errs += smb->rx_rrs_errs;
2575 stat->rx_alignerrs += smb->rx_alignerrs;
2576 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2577 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2578 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2581 stat->tx_frames += smb->tx_frames;
2582 stat->tx_bcast_frames += smb->tx_bcast_frames;
2583 stat->tx_mcast_frames += smb->tx_mcast_frames;
2584 stat->tx_pause_frames += smb->tx_pause_frames;
2585 stat->tx_excess_defer += smb->tx_excess_defer;
2586 stat->tx_control_frames += smb->tx_control_frames;
2587 stat->tx_deferred += smb->tx_deferred;
2588 stat->tx_bytes += smb->tx_bytes;
2589 stat->tx_pkts_64 += smb->tx_pkts_64;
2590 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2591 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2592 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2593 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2594 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2595 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2596 stat->tx_single_colls += smb->tx_single_colls;
2597 stat->tx_multi_colls += smb->tx_multi_colls;
2598 stat->tx_late_colls += smb->tx_late_colls;
2599 stat->tx_excess_colls += smb->tx_excess_colls;
2600 stat->tx_abort += smb->tx_abort;
2601 stat->tx_underrun += smb->tx_underrun;
2602 stat->tx_desc_underrun += smb->tx_desc_underrun;
2603 stat->tx_lenerrs += smb->tx_lenerrs;
2604 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2605 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2606 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2608 /* Update counters in ifnet. */
2609 ifp->if_opackets += smb->tx_frames;
2611 ifp->if_collisions += smb->tx_single_colls +
2612 smb->tx_multi_colls * 2 + smb->tx_late_colls +
2613 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
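/*
 * Frames with multiple collisions are counted twice and each aborted
 * frame is assumed to have used up the full retry budget of
 * HDPX_CFG_RETRY_DEFAULT collisions.
 */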
 * The tx_pkts_truncated counter looks suspicious. It constantly
 * increments with no sign of Tx errors. This may indicate that
 * the counter name is not the correct one, so it is excluded from
 * the output error count.
2622 ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
2625 ifp->if_ipackets += smb->rx_frames;
2627 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2628 smb->rx_runts + smb->rx_pkts_truncated +
2629 smb->rx_fifo_oflows + smb->rx_rrs_errs +
2632 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2633 /* Update done, clear. */
2635 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2636 sc->alc_cdata.alc_smb_map,
2637 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2644 struct alc_softc *sc;
2647 sc = (struct alc_softc *)arg;
2649 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2650 if ((status & ALC_INTRS) == 0)
2651 return (FILTER_STRAY);
2652 /* Disable interrupts. */
2653 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
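/*
 * Writing INTR_DIS_INT keeps further interrupts disabled;
 * alc_int_task() re-enables them once it has processed the status.
 */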
2654 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2656 return (FILTER_HANDLED);
2660 alc_int_task(void *arg, int pending)
2662 struct alc_softc *sc;
2667 sc = (struct alc_softc *)arg;
2670 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2671 more = atomic_readandclear_int(&sc->alc_morework);
2673 status |= INTR_RX_PKT;
2674 if ((status & ALC_INTRS) == 0)
/* Acknowledge the interrupts but keep further interrupts disabled. */
2678 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2681 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2682 if ((status & INTR_RX_PKT) != 0) {
2683 more = alc_rxintr(sc, sc->alc_process_limit);
2685 atomic_set_int(&sc->alc_morework, 1);
2686 else if (more == EIO) {
2688 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2689 alc_init_locked(sc);
2694 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2695 INTR_TXQ_TO_RST)) != 0) {
2696 if ((status & INTR_DMA_RD_TO_RST) != 0)
2697 device_printf(sc->alc_dev,
2698 "DMA read error! -- resetting\n");
2699 if ((status & INTR_DMA_WR_TO_RST) != 0)
2700 device_printf(sc->alc_dev,
2701 "DMA write error! -- resetting\n");
2702 if ((status & INTR_TXQ_TO_RST) != 0)
2703 device_printf(sc->alc_dev,
2704 "TxQ reset! -- resetting\n");
2706 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2707 alc_init_locked(sc);
2711 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2712 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2713 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2716 if (more == EAGAIN ||
2717 (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
2718 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2723 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2724 /* Re-enable interrupts if we're running. */
2725 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2730 alc_txeof(struct alc_softc *sc)
2733 struct alc_txdesc *txd;
2734 uint32_t cons, prod;
2737 ALC_LOCK_ASSERT(sc);
2741 if (sc->alc_cdata.alc_tx_cnt == 0)
2743 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2744 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
2745 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2746 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2747 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
2748 prod = sc->alc_rdata.alc_cmb->cons;
2750 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2751 /* Assume we're using normal Tx priority queue. */
2752 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2753 MBOX_TD_CONS_LO_IDX_SHIFT;
2754 cons = sc->alc_cdata.alc_tx_cons;
2756 * Go through our Tx list and free mbufs for those
2757 * frames which have been transmitted.
2759 for (prog = 0; cons != prod; prog++,
2760 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2761 if (sc->alc_cdata.alc_tx_cnt <= 0)
2764 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2765 sc->alc_cdata.alc_tx_cnt--;
2766 txd = &sc->alc_cdata.alc_txdesc[cons];
2767 if (txd->tx_m != NULL) {
2768 /* Reclaim transmitted mbufs. */
2769 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2770 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2771 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
2778 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2779 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2780 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2781 sc->alc_cdata.alc_tx_cons = cons;
 * Unarm the watchdog timer only when there are no pending
 * frames in the Tx queue.
2786 if (sc->alc_cdata.alc_tx_cnt == 0)
2787 sc->alc_watchdog_timer = 0;
2791 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2794 bus_dma_segment_t segs[1];
2798 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2801 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2802 #ifndef __NO_STRICT_ALIGNMENT
2803 m_adj(m, sizeof(uint64_t));
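/*
 * On strict-alignment machines, trimming sizeof(uint64_t) bytes here
 * leaves headroom so that alc_fixup_rx() can later move the received
 * frame to realign the IP header following the 14-byte ethernet header.
 */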
2806 if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
2807 sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2811 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2813 if (rxd->rx_m != NULL) {
2814 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2815 BUS_DMASYNC_POSTREAD);
2816 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
2818 map = rxd->rx_dmamap;
2819 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2820 sc->alc_cdata.alc_rx_sparemap = map;
2821 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2822 BUS_DMASYNC_PREREAD);
2824 rxd->rx_desc->addr = htole64(segs[0].ds_addr);
2829 alc_rxintr(struct alc_softc *sc, int count)
2832 struct rx_rdesc *rrd;
2833 uint32_t nsegs, status;
2836 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2837 sc->alc_cdata.alc_rr_ring_map,
2838 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2839 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2840 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2841 rr_cons = sc->alc_cdata.alc_rr_cons;
2843 for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) {
2846 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2847 status = le32toh(rrd->status);
2848 if ((status & RRD_VALID) == 0)
2850 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2852 /* This should not happen! */
2853 device_printf(sc->alc_dev,
2854 "unexpected segment count -- resetting\n");
2858 /* Clear Rx return status. */
2860 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2861 sc->alc_cdata.alc_rx_cons += nsegs;
2862 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2867 /* Update the consumer index. */
2868 sc->alc_cdata.alc_rr_cons = rr_cons;
2869 /* Sync Rx return descriptors. */
2870 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2871 sc->alc_cdata.alc_rr_ring_map,
2872 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 * Sync the updated Rx descriptors so that the controller sees the
 * modified buffer addresses.
2877 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2878 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
 * Let the controller know about the availability of new Rx buffers.
 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors,
 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
 * only when Rx buffer pre-fetching is required. In
 * addition we already set ALC_RX_RD_FREE_THRESH to
 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However,
 * it still seems that pre-fetching needs more tuning.
2889 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2890 sc->alc_cdata.alc_rx_cons);
2893 return (count > 0 ? 0 : EAGAIN);
2896 #ifndef __NO_STRICT_ALIGNMENT
2897 static struct mbuf *
2898 alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2902 uint16_t *src, *dst;
2904 src = mtod(m, uint16_t *);
2907 if (m->m_next == NULL) {
2908 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
 * Append a new mbuf to the received mbuf chain and copy the ethernet
 * header from the original chain. This can save lots of CPU
 * cycles for jumbo frames.
2918 MGETHDR(n, M_DONTWAIT, MT_DATA);
2924 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2925 m->m_data += ETHER_HDR_LEN;
2926 m->m_len -= ETHER_HDR_LEN;
2927 n->m_len = ETHER_HDR_LEN;
2928 M_MOVE_PKTHDR(n, m);
2934 /* Receive a frame. */
2936 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2938 struct alc_rxdesc *rxd;
2940 struct mbuf *mp, *m;
2941 uint32_t rdinfo, status, vtag;
2942 int count, nsegs, rx_cons;
2945 status = le32toh(rrd->status);
2946 rdinfo = le32toh(rrd->rdinfo);
2947 rx_cons = RRD_RD_IDX(rdinfo);
2948 nsegs = RRD_RD_CNT(rdinfo);
2950 sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2951 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
 * We want to pass the following frames to the upper
 * layer regardless of the error status of the Rx return descriptor:
 *
 *  o IP/TCP/UDP checksum is bad.
 *  o frame length and protocol-specific length do not match.
 *
 * Force the network stack to compute the checksum for
 * such errored frames.
2964 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2965 if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2966 RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2970 for (count = 0; count < nsegs; count++,
2971 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2972 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2974 /* Add a new receive buffer to the ring. */
2975 if (alc_newbuf(sc, rxd) != 0) {
2977 /* Reuse Rx buffers. */
2978 if (sc->alc_cdata.alc_rxhead != NULL)
2979 m_freem(sc->alc_cdata.alc_rxhead);
 * Assume we've received a full-sized frame.
 * The actual size is fixed when we encounter the end of a
 * multi-segmented frame.
2988 mp->m_len = sc->alc_buf_size;
2990 /* Chain received mbufs. */
2991 if (sc->alc_cdata.alc_rxhead == NULL) {
2992 sc->alc_cdata.alc_rxhead = mp;
2993 sc->alc_cdata.alc_rxtail = mp;
2995 mp->m_flags &= ~M_PKTHDR;
2996 sc->alc_cdata.alc_rxprev_tail =
2997 sc->alc_cdata.alc_rxtail;
2998 sc->alc_cdata.alc_rxtail->m_next = mp;
2999 sc->alc_cdata.alc_rxtail = mp;
3002 if (count == nsegs - 1) {
3003 /* Last desc. for this frame. */
3004 m = sc->alc_cdata.alc_rxhead;
3005 m->m_flags |= M_PKTHDR;
 * It seems that the L1C/L2C controller has no way
 * to tell the hardware to strip the CRC bytes.
3011 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
3013 /* Set last mbuf size. */
3014 mp->m_len = sc->alc_cdata.alc_rxlen -
3015 (nsegs - 1) * sc->alc_buf_size;
3016 /* Remove the CRC bytes in chained mbufs. */
3017 if (mp->m_len <= ETHER_CRC_LEN) {
3018 sc->alc_cdata.alc_rxtail =
3019 sc->alc_cdata.alc_rxprev_tail;
3020 sc->alc_cdata.alc_rxtail->m_len -=
3021 (ETHER_CRC_LEN - mp->m_len);
3022 sc->alc_cdata.alc_rxtail->m_next = NULL;
3025 mp->m_len -= ETHER_CRC_LEN;
3028 m->m_len = m->m_pkthdr.len;
3029 m->m_pkthdr.rcvif = ifp;
3031 * Due to hardware bugs, Rx checksum offloading
3032 * was intentionally disabled.
3034 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3035 (status & RRD_VLAN_TAG) != 0) {
3036 vtag = RRD_VLAN(le32toh(rrd->vtag));
3037 m->m_pkthdr.ether_vtag = ntohs(vtag);
3038 m->m_flags |= M_VLANTAG;
3040 #ifndef __NO_STRICT_ALIGNMENT
3041 m = alc_fixup_rx(ifp, m);
3046 (*ifp->if_input)(ifp, m);
3050 /* Reset mbuf chains. */
3051 ALC_RXCHAIN_RESET(sc);
3057 struct alc_softc *sc;
3058 struct mii_data *mii;
3060 sc = (struct alc_softc *)arg;
3062 ALC_LOCK_ASSERT(sc);
3064 mii = device_get_softc(sc->alc_miibus);
3066 alc_stats_update(sc);
 * alc(4) does not rely on Tx completion interrupts to reclaim
 * transferred buffers. Instead, Tx completion interrupts are
 * used as a hint for scheduling the Tx task. So it's necessary to
 * release transmitted buffers by kicking the Tx completion
 * handler here. This limits the maximum reclamation delay to one hz tick.
3076 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3080 alc_reset(struct alc_softc *sc)
3085 reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
3086 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
3087 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
3088 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
3090 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
3094 device_printf(sc->alc_dev, "master reset timeout!\n");
3096 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
3097 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
3103 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
3109 struct alc_softc *sc;
3111 sc = (struct alc_softc *)xsc;
3113 alc_init_locked(sc);
3118 alc_init_locked(struct alc_softc *sc)
3121 struct mii_data *mii;
3122 uint8_t eaddr[ETHER_ADDR_LEN];
3124 uint32_t reg, rxf_hi, rxf_lo;
3126 ALC_LOCK_ASSERT(sc);
3129 mii = device_get_softc(sc->alc_miibus);
3131 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3134 * Cancel any pending I/O.
3138 * Reset the chip to a known state.
3142 /* Initialize Rx descriptors. */
3143 if (alc_init_rx_ring(sc) != 0) {
3144 device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
3148 alc_init_rr_ring(sc);
3149 alc_init_tx_ring(sc);
3153 /* Reprogram the station address. */
3154 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3155 CSR_WRITE_4(sc, ALC_PAR0,
3156 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
3157 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
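/*
 * The station address is split across two registers: ALC_PAR0 holds
 * the last four bytes of the MAC address and ALC_PAR1 the first two,
 * as packed above.
 */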
 * Clear the WOL status and disable all WOL features, as WOL
 * would interfere with Rx operation under normal conditions.
3162 CSR_READ_4(sc, ALC_WOL_CFG);
3163 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
3164 /* Set Tx descriptor base addresses. */
3165 paddr = sc->alc_rdata.alc_tx_ring_paddr;
3166 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3167 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3168 /* We don't use high priority ring. */
3169 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
3170 /* Set Tx descriptor counter. */
3171 CSR_WRITE_4(sc, ALC_TD_RING_CNT,
3172 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
3173 /* Set Rx descriptor base addresses. */
3174 paddr = sc->alc_rdata.alc_rx_ring_paddr;
3175 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3176 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3177 /* We use one Rx ring. */
3178 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
3179 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
3180 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
3181 /* Set Rx descriptor counter. */
3182 CSR_WRITE_4(sc, ALC_RD_RING_CNT,
3183 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
 * Let the hardware split jumbo frames that do not fit the buffer
 * size into alc_buf_size-sized chunks. The Rx return descriptor holds
 * a counter that indicates how many fragments were made by the
 * hardware. The buffer size should be a multiple of 8 bytes.
 * Since the hardware has a limit on the buffer size, always
 * use the maximum value.
 * For strict-alignment architectures make sure to reduce the buffer
 * size by 8 bytes to make room for the alignment fixup.
3195 #ifndef __NO_STRICT_ALIGNMENT
3196 sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
3198 sc->alc_buf_size = RX_BUF_SIZE_MAX;
3200 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
3202 paddr = sc->alc_rdata.alc_rr_ring_paddr;
3203 /* Set Rx return descriptor base addresses. */
3204 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3205 /* We use one Rx return ring. */
3206 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
3207 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
3208 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
3209 /* Set Rx return descriptor counter. */
3210 CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
3211 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
3212 paddr = sc->alc_rdata.alc_cmb_paddr;
3213 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
3214 paddr = sc->alc_rdata.alc_smb_paddr;
3215 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3216 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
3218 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
3219 /* Reconfigure SRAM - Vendor magic. */
3220 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
3221 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
3222 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
3223 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
3224 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
3225 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
3226 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
3227 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
3230 /* Tell hardware that we're ready to load DMA blocks. */
3231 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
3233 /* Configure interrupt moderation timer. */
3234 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
3235 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
3236 CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
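/*
 * Both moderation timers share one register: the Rx interval goes in
 * the IM_TIMER_RX field and the Tx interval in the IM_TIMER_TX field,
 * each converted from microseconds by ALC_USECS().
 */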
 * We don't want automatic interrupt clearing, as the taskqueue
 * handler for the interrupt should see the interrupt status.
3241 reg = MASTER_SA_TIMER_ENB;
3242 if (ALC_USECS(sc->alc_int_rx_mod) != 0)
3243 reg |= MASTER_IM_RX_TIMER_ENB;
3244 if (ALC_USECS(sc->alc_int_tx_mod) != 0)
3245 reg |= MASTER_IM_TX_TIMER_ENB;
3246 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
3248 * Disable interrupt re-trigger timer. We don't want automatic
3249 * re-triggering of un-ACKed interrupts.
3251 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
3252 /* Configure CMB. */
3253 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
3254 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
3255 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
3257 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
 * Hardware can be configured to issue SMB interrupts at a
 * programmed interval. Since the driver already has a callout
 * that is invoked every hz, use that instead of
 * relying on the periodic SMB interrupt.
3264 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3265 /* Clear MAC statistics. */
3266 alc_stats_clear(sc);
 * Always use the maximum frame size that the controller can support.
 * Otherwise received frames with a frame length larger
 * than the alc(4) MTU would be silently dropped in hardware. This
 * would make path-MTU discovery hard as the sender wouldn't get
 * any responses from the receiver. alc(4) supports
 * multi-fragmented frames on the Rx path so it has no issue
 * assembling fragmented frames. Using the maximum frame size also
 * removes the need to reinitialize the hardware when the interface
 * MTU configuration is changed.
3279 * Be conservative in what you do, be liberal in what you
3280 * accept from others - RFC 793.
3282 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
3284 /* Disable header split(?) */
3285 CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3287 /* Configure IPG/IFG parameters. */
3288 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3289 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3290 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3291 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3292 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3293 /* Set parameters for half-duplex media. */
3294 CSR_WRITE_4(sc, ALC_HDPX_CFG,
3295 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3296 HDPX_CFG_LCOL_MASK) |
3297 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3298 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3299 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3300 HDPX_CFG_ABEBT_MASK) |
3301 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3302 HDPX_CFG_JAMIPG_MASK));
 * Set the TSO/checksum offload threshold. For frames
 * larger than this threshold, the hardware won't do
 * TSO/checksum offloading.
3308 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3309 (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3310 TSO_OFFLOAD_THRESH_MASK);
3311 /* Configure TxQ. */
3312 reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3313 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3314 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
3315 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
3317 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3318 TXQ_CFG_TD_BURST_MASK;
3319 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3321 /* Configure Rx free descriptor pre-fetching. */
3322 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3323 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
3324 RX_RD_FREE_THRESH_HI_MASK) |
3325 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
3326 RX_RD_FREE_THRESH_LO_MASK));
3329 * Configure flow control parameters.
3330 * XON : 80% of Rx FIFO
3331 * XOFF : 30% of Rx FIFO
3333 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
3334 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
3335 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3336 rxf_hi = (reg * 8) / 10;
3337 rxf_lo = (reg * 3) / 10;
3338 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3339 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3340 RX_FIFO_PAUSE_THRESH_LO_MASK) |
3341 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3342 RX_FIFO_PAUSE_THRESH_HI_MASK));
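/*
 * Illustrative example: if ALC_SRAM_RX_FIFO_LEN reads 0x2A0 (672),
 * the thresholds become rxf_hi = (672 * 8) / 10 = 537 and
 * rxf_lo = (672 * 3) / 10 = 201.
 */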
3345 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
3346 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
3347 CSR_WRITE_4(sc, ALC_SERDES_LOCK,
3348 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
3349 SERDES_PHY_CLK_SLOWDOWN);
3351 /* Disable RSS until I understand L1C/L2C's RSS logic. */
3352 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3353 CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3355 /* Configure RxQ. */
3356 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3357 RXQ_CFG_RD_BURST_MASK;
3358 reg |= RXQ_CFG_RSS_MODE_DIS;
3359 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
3360 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
3361 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3363 /* Configure DMA parameters. */
3364 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3366 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3367 reg |= DMA_CFG_CMB_ENB;
3368 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3369 reg |= DMA_CFG_SMB_ENB;
3371 reg |= DMA_CFG_SMB_DIS;
3372 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3373 DMA_CFG_RD_BURST_SHIFT;
3374 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3375 DMA_CFG_WR_BURST_SHIFT;
3376 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3377 DMA_CFG_RD_DELAY_CNT_MASK;
3378 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3379 DMA_CFG_WR_DELAY_CNT_MASK;
3380 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3383 * Configure Tx/Rx MACs.
3384 * - Auto-padding for short frames.
3385 * - Enable CRC generation.
 * Actual reconfiguration of the MAC for the resolved speed/duplex
 * follows once link establishment has been detected.
 * AR813x/AR815x always does checksum computation regardless
 * of the MAC_CFG_RXCSUM_ENB bit. Also, the controller is known to
 * have a bug in the protocol field of the Rx return structure, so
 * these controllers can't handle fragmented frames. Disable
 * Rx checksum offloading until there is a newer controller
 * with a sane implementation.
3395 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3396 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3397 MAC_CFG_PREAMBLE_MASK);
3398 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
3399 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
3400 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
3401 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3402 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3403 reg |= MAC_CFG_SPEED_10_100;
3405 reg |= MAC_CFG_SPEED_1000;
3406 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3408 /* Set up the receive filter. */
/* Acknowledge all pending interrupts and clear them. */
3413 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3414 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3415 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3417 sc->alc_flags &= ~ALC_FLAG_LINK;
3418 /* Switch to the current media. */
3421 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3423 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3424 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3428 alc_stop(struct alc_softc *sc)
3431 struct alc_txdesc *txd;
3432 struct alc_rxdesc *rxd;
3436 ALC_LOCK_ASSERT(sc);
3438 * Mark the interface down and cancel the watchdog timer.
3441 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3442 sc->alc_flags &= ~ALC_FLAG_LINK;
3443 callout_stop(&sc->alc_tick_ch);
3444 sc->alc_watchdog_timer = 0;
3445 alc_stats_update(sc);
3446 /* Disable interrupts. */
3447 CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3448 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3451 reg = CSR_READ_4(sc, ALC_DMA_CFG);
3452 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3453 reg |= DMA_CFG_SMB_DIS;
3454 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3456 /* Stop Rx/Tx MACs. */
3458 /* Disable interrupts which might be touched in taskq handler. */
3459 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3461 /* Reclaim Rx buffers that have been processed. */
3462 if (sc->alc_cdata.alc_rxhead != NULL)
3463 m_freem(sc->alc_cdata.alc_rxhead);
3464 ALC_RXCHAIN_RESET(sc);
3466 * Free Tx/Rx mbufs still in the queues.
3468 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3469 rxd = &sc->alc_cdata.alc_rxdesc[i];
3470 if (rxd->rx_m != NULL) {
3471 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
3472 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3473 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
3479 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3480 txd = &sc->alc_cdata.alc_txdesc[i];
3481 if (txd->tx_m != NULL) {
3482 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3483 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3484 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
3493 alc_stop_mac(struct alc_softc *sc)
3498 ALC_LOCK_ASSERT(sc);
3500 /* Disable Rx/Tx MAC. */
3501 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3502 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
3503 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3504 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3506 for (i = ALC_TIMEOUT; i > 0; i--) {
3507 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3513 device_printf(sc->alc_dev,
3514 "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
3518 alc_start_queue(struct alc_softc *sc)
3523 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3524 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3529 ALC_LOCK_ASSERT(sc);
3532 cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3533 cfg &= ~RXQ_CFG_ENB;
3535 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3537 cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3539 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3543 alc_stop_queue(struct alc_softc *sc)
3548 ALC_LOCK_ASSERT(sc);
3551 reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3552 if ((reg & RXQ_CFG_ENB) != 0) {
3553 reg &= ~RXQ_CFG_ENB;
3554 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3557 reg = CSR_READ_4(sc, ALC_TXQ_CFG);
if ((reg & TXQ_CFG_ENB) != 0) {
3559 reg &= ~TXQ_CFG_ENB;
3560 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3562 for (i = ALC_TIMEOUT; i > 0; i--) {
3563 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3564 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3569 device_printf(sc->alc_dev,
3570 "could not disable RxQ/TxQ (0x%08x)!\n", reg);
3574 alc_init_tx_ring(struct alc_softc *sc)
3576 struct alc_ring_data *rd;
3577 struct alc_txdesc *txd;
3580 ALC_LOCK_ASSERT(sc);
3582 sc->alc_cdata.alc_tx_prod = 0;
3583 sc->alc_cdata.alc_tx_cons = 0;
3584 sc->alc_cdata.alc_tx_cnt = 0;
3586 rd = &sc->alc_rdata;
3587 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3588 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3589 txd = &sc->alc_cdata.alc_txdesc[i];
3593 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
3594 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
3598 alc_init_rx_ring(struct alc_softc *sc)
3600 struct alc_ring_data *rd;
3601 struct alc_rxdesc *rxd;
3604 ALC_LOCK_ASSERT(sc);
3606 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3607 sc->alc_morework = 0;
3608 rd = &sc->alc_rdata;
3609 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3610 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3611 rxd = &sc->alc_cdata.alc_rxdesc[i];
3613 rxd->rx_desc = &rd->alc_rx_ring[i];
3614 if (alc_newbuf(sc, rxd) != 0)
 * Since the controller does not update the Rx descriptors, the driver
 * does not have to read the Rx descriptors back, so BUS_DMASYNC_PREWRITE
 * is enough to ensure coherence.
3623 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
3624 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
3625 /* Let controller know availability of new Rx buffers. */
3626 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3632 alc_init_rr_ring(struct alc_softc *sc)
3634 struct alc_ring_data *rd;
3636 ALC_LOCK_ASSERT(sc);
3638 sc->alc_cdata.alc_rr_cons = 0;
3639 ALC_RXCHAIN_RESET(sc);
3641 rd = &sc->alc_rdata;
3642 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3643 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
3644 sc->alc_cdata.alc_rr_ring_map,
3645 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3649 alc_init_cmb(struct alc_softc *sc)
3651 struct alc_ring_data *rd;
3653 ALC_LOCK_ASSERT(sc);
3655 rd = &sc->alc_rdata;
3656 bzero(rd->alc_cmb, ALC_CMB_SZ);
3657 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
3658 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3662 alc_init_smb(struct alc_softc *sc)
3664 struct alc_ring_data *rd;
3666 ALC_LOCK_ASSERT(sc);
3668 rd = &sc->alc_rdata;
3669 bzero(rd->alc_smb, ALC_SMB_SZ);
3670 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
3671 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3675 alc_rxvlan(struct alc_softc *sc)
3680 ALC_LOCK_ASSERT(sc);
3683 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3684 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3685 reg |= MAC_CFG_VLAN_TAG_STRIP;
3687 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3688 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3692 alc_rxfilter(struct alc_softc *sc)
3695 struct ifmultiaddr *ifma;
3700 ALC_LOCK_ASSERT(sc);
3704 bzero(mchash, sizeof(mchash));
3705 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3706 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3707 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3708 rxcfg |= MAC_CFG_BCAST;
3709 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3710 if ((ifp->if_flags & IFF_PROMISC) != 0)
3711 rxcfg |= MAC_CFG_PROMISC;
3712 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3713 rxcfg |= MAC_CFG_ALLMULTI;
3714 mchash[0] = 0xFFFFFFFF;
3715 mchash[1] = 0xFFFFFFFF;
3719 if_maddr_rlock(ifp);
3720 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
3721 if (ifma->ifma_addr->sa_family != AF_LINK)
3723 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3724 ifma->ifma_addr), ETHER_ADDR_LEN);
3725 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
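/*
 * Bit 31 of the big-endian CRC picks which of the two 32-bit hash
 * registers is used and bits 30-26 pick the bit within it, i.e.:
 *   word = crc >> 31;           -> 0 or 1
 *   bit  = (crc >> 26) & 0x1f;  -> 0..31
 */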
3727 if_maddr_runlock(ifp);
3730 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3731 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3732 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3736 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3742 value = *(int *)arg1;
3743 error = sysctl_handle_int(oidp, &value, 0, req);
3744 if (error || req->newptr == NULL)
3746 if (value < low || value > high)
3748 *(int *)arg1 = value;
3754 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
3756 return (sysctl_int_range(oidp, arg1, arg2, req,
3757 ALC_PROC_MIN, ALC_PROC_MAX));
3761 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
3764 return (sysctl_int_range(oidp, arg1, arg2, req,
3765 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));