2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $Id: if_ax.c,v 1.6 1999/01/08 19:40:59 wpaul Exp $
36 * ASIX AX88140A fast ethernet PCI NIC driver.
38 * Written by Bill Paul <wpaul@ctr.columbia.edu>
39 * Electrical Engineering Department
40 * Columbia University, New York City
44 * The ASIX Electronics AX88140A is still another DEC 21x4x clone. It's
45 * a reasonably close copy of the tulip, except for the receiver filter
46 * programming. Where the DEC chip has a special setup frame that
47 * needs to be downloaded into the transmit DMA engine, the ASIX chip
48 * has a less complicated setup frame which is written into one of
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/sockio.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
63 #include <net/if_arp.h>
64 #include <net/ethernet.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
72 #include <vm/vm.h> /* for vtophys */
73 #include <vm/pmap.h> /* for vtophys */
74 #include <machine/clock.h> /* for DELAY */
75 #include <machine/bus_pio.h>
76 #include <machine/bus_memio.h>
77 #include <machine/bus.h>
79 #include <pci/pcireg.h>
80 #include <pci/pcivar.h>
84 /* #define AX_BACKGROUND_AUTONEG */
86 #include <pci/if_axreg.h>
/* RCS version identifier embedded in the object file. */
89 static const char rcsid[] =
90 "$Id: if_ax.c,v 1.6 1999/01/08 19:40:59 wpaul Exp $";
94 * Various supported device vendors/types and their names.
96 static struct ax_type ax_devs[] = {
97 { AX_VENDORID, AX_DEVICEID_AX88140A,
98 "ASIX AX88140A 10/100BaseTX" },
103 * Various supported PHY vendors/types and their names. Note that
104 * this driver will work with pretty much any MII-compliant PHY,
105 * so failure to positively identify the chip is not a fatal error.
108 static struct ax_type ax_phys[] = {
109 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
110 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
111 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
112 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
113 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
114 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
115 { 0, 0, "<MII-compliant physical interface>" }
/* Number of ax units attached so far; used to assign unit numbers. */
118 static unsigned long ax_count = 0;
/* PCI probe/attach entry points. */
119 static const char *ax_probe	__P((pcici_t, pcidi_t));
120 static void ax_attach		__P((pcici_t, int));
/* Descriptor/mbuf management and data path. */
122 static int ax_newbuf		__P((struct ax_softc *,
123 struct ax_chain_onefrag *));
124 static int ax_encap		__P((struct ax_softc *, struct ax_chain *,
127 static void ax_rxeof		__P((struct ax_softc *));
128 static void ax_rxeoc		__P((struct ax_softc *));
129 static void ax_txeof		__P((struct ax_softc *));
130 static void ax_txeoc		__P((struct ax_softc *));
131 static void ax_intr		__P((void *));
132 static void ax_start		__P((struct ifnet *));
133 static int ax_ioctl		__P((struct ifnet *, u_long, caddr_t));
134 static void ax_init		__P((void *));
135 static void ax_stop		__P((struct ax_softc *));
136 static void ax_watchdog		__P((struct ifnet *));
137 static void ax_shutdown		__P((int, void *));
138 static int ax_ifmedia_upd	__P((struct ifnet *));
139 static void ax_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
/* Bit-banged serial EEPROM access. */
141 static void ax_delay		__P((struct ax_softc *));
142 static void ax_eeprom_idle	__P((struct ax_softc *));
143 static void ax_eeprom_putbyte	__P((struct ax_softc *, int));
144 static void ax_eeprom_getword	__P((struct ax_softc *, int, u_int16_t *));
145 static void ax_read_eeprom	__P((struct ax_softc *, caddr_t, int,
/* Bit-banged MII (PHY management) access. */
148 static void ax_mii_writebit	__P((struct ax_softc *, int));
149 static int ax_mii_readbit	__P((struct ax_softc *));
150 static void ax_mii_sync		__P((struct ax_softc *));
151 static void ax_mii_send		__P((struct ax_softc *, u_int32_t, int));
152 static int ax_mii_readreg	__P((struct ax_softc *, struct ax_mii_frame *));
153 static int ax_mii_writereg	__P((struct ax_softc *, struct ax_mii_frame *));
154 static u_int16_t ax_phy_readreg	__P((struct ax_softc *, int));
155 static void ax_phy_writereg	__P((struct ax_softc *, int, int));
/* Media selection / autonegotiation helpers. */
157 static void ax_autoneg_xmit	__P((struct ax_softc *));
158 static void ax_autoneg_mii	__P((struct ax_softc *, int, int));
159 static void ax_setmode_mii	__P((struct ax_softc *, int));
160 static void ax_setmode		__P((struct ax_softc *, int, int));
161 static void ax_getmode_mii	__P((struct ax_softc *));
162 static void ax_setcfg		__P((struct ax_softc *, int));
/* Receive filter and chip init helpers. */
163 static u_int32_t ax_calchash	__P((caddr_t));
164 static void ax_setmulti		__P((struct ax_softc *));
165 static void ax_reset		__P((struct ax_softc *));
166 static int ax_list_rx_init	__P((struct ax_softc *));
167 static int ax_list_tx_init	__P((struct ax_softc *));
/*
 * Convenience macros for read-modify-write access to 32-bit CSRs.
 * AX_SETBIT/AX_CLRBIT operate on an arbitrary register; SIO_SET/SIO_CLR
 * are shorthands for the serial I/O register (AX_SIO) used by the
 * bit-banged EEPROM and MII routines (they reference a local `sc').
 */
#define AX_SETBIT(sc, reg, x)                           \
    CSR_WRITE_4(sc, reg,                                \
        CSR_READ_4(sc, reg) | x)

#define AX_CLRBIT(sc, reg, x)                           \
    CSR_WRITE_4(sc, reg,                                \
        CSR_READ_4(sc, reg) & ~x)

#define SIO_SET(x)                                      \
    CSR_WRITE_4(sc, AX_SIO,                             \
        CSR_READ_4(sc, AX_SIO) | x)

#define SIO_CLR(x)                                      \
    CSR_WRITE_4(sc, AX_SIO,                             \
        CSR_READ_4(sc, AX_SIO) & ~x)
185 static void ax_delay(sc)
190 for (idx = (300 / 33) + 1; idx > 0; idx--)
191 CSR_READ_4(sc, AX_BUSCTL);
194 static void ax_eeprom_idle(sc)
199 CSR_WRITE_4(sc, AX_SIO, AX_SIO_EESEL);
201 AX_SETBIT(sc, AX_SIO, AX_SIO_ROMCTL_READ);
203 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CS);
205 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
208 for (i = 0; i < 25; i++) {
209 AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CLK);
211 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
215 AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CLK);
217 AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CS);
219 CSR_WRITE_4(sc, AX_SIO, 0x00000000);
225 * Send a read command and address to the EEPROM, check for ACK.
227 static void ax_eeprom_putbyte(sc, addr)
233 d = addr | AX_EECMD_READ;
236 * Feed in each bit and stobe the clock.
238 for (i = 0x400; i; i >>= 1) {
240 SIO_SET(AX_SIO_EE_DATAIN);
242 SIO_CLR(AX_SIO_EE_DATAIN);
245 SIO_SET(AX_SIO_EE_CLK);
247 SIO_CLR(AX_SIO_EE_CLK);
255 * Read a word of data stored in the EEPROM at address 'addr.'
257 static void ax_eeprom_getword(sc, addr, dest)
265 /* Force EEPROM to idle state. */
268 /* Enter EEPROM access mode. */
269 CSR_WRITE_4(sc, AX_SIO, AX_SIO_EESEL);
271 AX_SETBIT(sc, AX_SIO, AX_SIO_ROMCTL_READ);
273 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CS);
275 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
279 * Send address of word we want to read.
281 ax_eeprom_putbyte(sc, addr);
284 * Start reading bits from EEPROM.
286 for (i = 0x8000; i; i >>= 1) {
287 SIO_SET(AX_SIO_EE_CLK);
289 if (CSR_READ_4(sc, AX_SIO) & AX_SIO_EE_DATAOUT)
292 SIO_CLR(AX_SIO_EE_CLK);
296 /* Turn off EEPROM access mode. */
305 * Read a sequence of words from the EEPROM.
307 static void ax_read_eeprom(sc, dest, off, cnt, swap)
315 u_int16_t word = 0, *ptr;
317 for (i = 0; i < cnt; i++) {
318 ax_eeprom_getword(sc, off + i, &word);
319 ptr = (u_int16_t *)(dest + (i * 2));
330 * Write a bit to the MII bus.
332 static void ax_mii_writebit(sc, bit)
337 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE|AX_SIO_MII_DATAOUT);
339 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE);
341 AX_SETBIT(sc, AX_SIO, AX_SIO_MII_CLK);
342 AX_CLRBIT(sc, AX_SIO, AX_SIO_MII_CLK);
348 * Read a bit from the MII bus.
350 static int ax_mii_readbit(sc)
353 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_READ|AX_SIO_MII_DIR);
354 CSR_READ_4(sc, AX_SIO);
355 AX_SETBIT(sc, AX_SIO, AX_SIO_MII_CLK);
356 AX_CLRBIT(sc, AX_SIO, AX_SIO_MII_CLK);
357 if (CSR_READ_4(sc, AX_SIO) & AX_SIO_MII_DATAIN)
364 * Sync the PHYs by setting data bit and strobing the clock 32 times.
366 static void ax_mii_sync(sc)
371 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE);
373 for (i = 0; i < 32; i++)
374 ax_mii_writebit(sc, 1);
380 * Clock a series of bits through the MII.
382 static void ax_mii_send(sc, bits, cnt)
389 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
390 ax_mii_writebit(sc, bits & i);
394 * Read an PHY register through the MII.
396 static int ax_mii_readreg(sc, frame)
398 struct ax_mii_frame *frame;
406 * Set up frame for RX.
408 frame->mii_stdelim = AX_MII_STARTDELIM;
409 frame->mii_opcode = AX_MII_READOP;
410 frame->mii_turnaround = 0;
419 * Send command/address info.
421 ax_mii_send(sc, frame->mii_stdelim, 2);
422 ax_mii_send(sc, frame->mii_opcode, 2);
423 ax_mii_send(sc, frame->mii_phyaddr, 5);
424 ax_mii_send(sc, frame->mii_regaddr, 5);
428 ax_mii_writebit(sc, 1);
429 ax_mii_writebit(sc, 0);
433 ack = ax_mii_readbit(sc);
436 * Now try reading data bits. If the ack failed, we still
437 * need to clock through 16 cycles to keep the PHY(s) in sync.
440 for(i = 0; i < 16; i++) {
446 for (i = 0x8000; i; i >>= 1) {
448 if (ax_mii_readbit(sc))
449 frame->mii_data |= i;
455 ax_mii_writebit(sc, 0);
456 ax_mii_writebit(sc, 0);
466 * Write to a PHY register through the MII.
468 static int ax_mii_writereg(sc, frame)
470 struct ax_mii_frame *frame;
477 * Set up frame for TX.
480 frame->mii_stdelim = AX_MII_STARTDELIM;
481 frame->mii_opcode = AX_MII_WRITEOP;
482 frame->mii_turnaround = AX_MII_TURNAROUND;
489 ax_mii_send(sc, frame->mii_stdelim, 2);
490 ax_mii_send(sc, frame->mii_opcode, 2);
491 ax_mii_send(sc, frame->mii_phyaddr, 5);
492 ax_mii_send(sc, frame->mii_regaddr, 5);
493 ax_mii_send(sc, frame->mii_turnaround, 2);
494 ax_mii_send(sc, frame->mii_data, 16);
497 ax_mii_writebit(sc, 0);
498 ax_mii_writebit(sc, 0);
505 static u_int16_t ax_phy_readreg(sc, reg)
509 struct ax_mii_frame frame;
511 bzero((char *)&frame, sizeof(frame));
513 frame.mii_phyaddr = sc->ax_phy_addr;
514 frame.mii_regaddr = reg;
515 ax_mii_readreg(sc, &frame);
517 return(frame.mii_data);
520 static void ax_phy_writereg(sc, reg, data)
525 struct ax_mii_frame frame;
527 bzero((char *)&frame, sizeof(frame));
529 frame.mii_phyaddr = sc->ax_phy_addr;
530 frame.mii_regaddr = reg;
531 frame.mii_data = data;
533 ax_mii_writereg(sc, &frame);
539 * Calculate CRC of a multicast group address, return the lower 6 bits.
541 static u_int32_t ax_calchash(addr)
544 u_int32_t crc, carry;
548 /* Compute CRC for the address value. */
549 crc = 0xFFFFFFFF; /* initial value */
551 for (i = 0; i < 6; i++) {
553 for (j = 0; j < 8; j++) {
554 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
558 crc = (crc ^ 0x04c11db6) | carry;
562 /* return the filter bit position */
563 return((crc >> 26) & 0x0000003F);
566 static void ax_setmulti(sc)
571 u_int32_t hashes[2] = { 0, 0 };
572 struct ifmultiaddr *ifma;
575 ifp = &sc->arpcom.ac_if;
577 rxfilt = CSR_READ_4(sc, AX_NETCFG);
579 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
580 rxfilt |= AX_NETCFG_RX_ALLMULTI;
581 CSR_WRITE_4(sc, AX_NETCFG, rxfilt);
584 rxfilt &= ~AX_NETCFG_RX_ALLMULTI;
586 /* first, zot all the existing hash bits */
587 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR0);
588 CSR_WRITE_4(sc, AX_FILTDATA, 0);
589 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR1);
590 CSR_WRITE_4(sc, AX_FILTDATA, 0);
592 /* now program new ones */
593 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
594 ifma = ifma->ifma_link.le_next) {
595 if (ifma->ifma_addr->sa_family != AF_LINK)
597 h = ax_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
599 hashes[0] |= (1 << h);
601 hashes[1] |= (1 << (h - 32));
604 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR0);
605 CSR_WRITE_4(sc, AX_FILTDATA, hashes[0]);
606 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR1);
607 CSR_WRITE_4(sc, AX_FILTDATA, hashes[1]);
608 CSR_WRITE_4(sc, AX_NETCFG, rxfilt);
614 * Initiate an autonegotiation session.
616 static void ax_autoneg_xmit(sc)
621 ax_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
623 while(ax_phy_readreg(sc, PHY_BMCR)
626 phy_sts = ax_phy_readreg(sc, PHY_BMCR);
627 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
628 ax_phy_writereg(sc, PHY_BMCR, phy_sts);
634 * Invoke autonegotiation on a PHY.
/*
 * Invoke/poll autonegotiation on the MII PHY.  `flag' selects the mode:
 * FORCEDELAY (spin-wait, probe time only), SCHEDDELAY (defer via the
 * watchdog if TX is busy), or DELAYTIMEO (called back from the timer
 * to harvest results).  On completion, reads the advertised/link-partner
 * ability registers, picks the best common mode, and programs both the
 * PHY and the chip's duplex setting.
 * NOTE(review): this dump elides many interior lines of the original
 * (switch/brace structure, delays, returns) — do not edit without the
 * complete source.
 */
636 static void ax_autoneg_mii(sc, flag, verbose)
641 u_int16_t phy_sts = 0, media, advert, ability;
646 ifp = &sc->arpcom.ac_if;
648 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
651 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
652 * bit cleared in the status register, but has the 'autoneg enabled'
653 * bit set in the control register. This is a contradiction, and
654 * I'm not sure how to handle it. If you want to force an attempt
655 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
656 * and see what happens.
658 #ifndef FORCE_AUTONEG_TFOUR
660 * First, see if autoneg is supported. If not, there's
661 * no point in continuing.
663 phy_sts = ax_phy_readreg(sc, PHY_BMSR);
664 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
666 printf("ax%d: autonegotiation not supported\n",
668 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
674 case AX_FLAG_FORCEDELAY:
676 * XXX Never use this option anywhere but in the probe
677 * routine: making the kernel stop dead in its tracks
678 * for three whole seconds after we've gone multi-user
679 * is really bad manners.
684 case AX_FLAG_SCHEDDELAY:
686 * Wait for the transmitter to go idle before starting
687 * an autoneg session, otherwise ax_start() may clobber
688 * our timeout, and we don't want to allow transmission
689 * during an autoneg session since that can screw it up.
691 if (sc->ax_cdata.ax_tx_head != NULL) {
692 sc->ax_want_auto = 1;
698 sc->ax_want_auto = 0;
701 case AX_FLAG_DELAYTIMEO:
706 printf("ax%d: invalid autoneg flag: %d\n", sc->ax_unit, flag);
710 if (ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
712 printf("ax%d: autoneg complete, ", sc->ax_unit);
713 phy_sts = ax_phy_readreg(sc, PHY_BMSR);
716 printf("ax%d: autoneg not complete, ", sc->ax_unit);
719 media = ax_phy_readreg(sc, PHY_BMCR);
721 /* Link is good. Report modes and set duplex mode. */
722 if (ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
724 printf("link status good ");
725 advert = ax_phy_readreg(sc, PHY_ANAR);
726 ability = ax_phy_readreg(sc, PHY_LPAR);
/* Pick the best mode both sides advertise, best first. */
728 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
729 ifm->ifm_media = IFM_ETHER|IFM_100_T4;
730 media |= PHY_BMCR_SPEEDSEL;
731 media &= ~PHY_BMCR_DUPLEX;
732 printf("(100baseT4)\n");
733 } else if (advert & PHY_ANAR_100BTXFULL &&
734 ability & PHY_ANAR_100BTXFULL) {
735 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
736 media |= PHY_BMCR_SPEEDSEL;
737 media |= PHY_BMCR_DUPLEX;
738 printf("(full-duplex, 100Mbps)\n");
739 } else if (advert & PHY_ANAR_100BTXHALF &&
740 ability & PHY_ANAR_100BTXHALF) {
741 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
742 media |= PHY_BMCR_SPEEDSEL;
743 media &= ~PHY_BMCR_DUPLEX;
744 printf("(half-duplex, 100Mbps)\n");
745 } else if (advert & PHY_ANAR_10BTFULL &&
746 ability & PHY_ANAR_10BTFULL) {
747 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
748 media &= ~PHY_BMCR_SPEEDSEL;
749 media |= PHY_BMCR_DUPLEX;
750 printf("(full-duplex, 10Mbps)\n");
751 } else if (advert & PHY_ANAR_10BTHALF &&
752 ability & PHY_ANAR_10BTHALF) {
753 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
754 media &= ~PHY_BMCR_SPEEDSEL;
755 media &= ~PHY_BMCR_DUPLEX;
756 printf("(half-duplex, 10Mbps)\n");
/* Autoneg is done; run the link at the negotiated fixed mode. */
759 media &= ~PHY_BMCR_AUTONEGENBL;
761 /* Set ASIC's duplex mode to match the PHY. */
762 ax_setcfg(sc, media);
763 ax_phy_writereg(sc, PHY_BMCR, media);
766 printf("no carrier\n");
771 if (sc->ax_tx_pend) {
/*
 * Query the PHY's status register (BMSR) for supported media types and
 * register each one with the ifmedia subsystem, preferring the fastest
 * supported mode as the default.
 * NOTE(review): interior lines (braces, sc->ax_unit args, returns) are
 * elided in this dump.
 */
780 static void ax_getmode_mii(sc)
786 ifp = &sc->arpcom.ac_if;
788 bmsr = ax_phy_readreg(sc, PHY_BMSR);
790 printf("ax%d: PHY status word: %x\n", sc->ax_unit, bmsr);
/* Fall back to 10baseT half-duplex, which all PHYs must support. */
793 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
795 if (bmsr & PHY_BMSR_10BTHALF) {
797 printf("ax%d: 10Mbps half-duplex mode supported\n",
799 ifmedia_add(&sc->ifmedia,
800 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
801 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
804 if (bmsr & PHY_BMSR_10BTFULL) {
806 printf("ax%d: 10Mbps full-duplex mode supported\n",
808 ifmedia_add(&sc->ifmedia,
809 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
810 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
813 if (bmsr & PHY_BMSR_100BTXHALF) {
815 printf("ax%d: 100Mbps half-duplex mode supported\n",
817 ifp->if_baudrate = 100000000;
818 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
819 ifmedia_add(&sc->ifmedia,
820 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
821 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
824 if (bmsr & PHY_BMSR_100BTXFULL) {
826 printf("ax%d: 100Mbps full-duplex mode supported\n",
828 ifp->if_baudrate = 100000000;
829 ifmedia_add(&sc->ifmedia,
830 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
831 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
834 /* Some also support 100BaseT4. */
835 if (bmsr & PHY_BMSR_100BT4) {
837 printf("ax%d: 100baseT4 mode supported\n", sc->ax_unit);
838 ifp->if_baudrate = 100000000;
839 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
840 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
841 #ifdef FORCE_AUTONEG_TFOUR
843 printf("ax%d: forcing on autoneg support for BT4\n",
/* NOTE(review): the next line is a genuine bug in this #ifdef'd-out
 * path — `0 NULL):` should read `0, NULL);` (missing comma, colon
 * instead of semicolon).  It only bites with FORCE_AUTONEG_TFOUR
 * defined. */
845 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL):
846 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
850 if (bmsr & PHY_BMSR_CANAUTONEG) {
852 printf("ax%d: autoneg supported\n", sc->ax_unit);
853 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
854 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
861 * Set speed and duplex mode.
/*
 * Force a specific speed/duplex mode on the MII PHY: cancel any
 * in-progress autoneg session, then program the BMCR speed-select
 * and duplex bits from the requested ifmedia word.
 * NOTE(review): interior lines (locals, braces, returns) are elided
 * in this dump.
 */
863 static void ax_setmode_mii(sc, media)
870 ifp = &sc->arpcom.ac_if;
873 * If an autoneg session is in progress, stop it.
875 if (sc->ax_autoneg) {
876 printf("ax%d: canceling autoneg session\n", sc->ax_unit);
/* Clears the watchdog timer and both autoneg state flags at once. */
877 ifp->if_timer = sc->ax_autoneg = sc->ax_want_auto = 0;
878 bmcr = ax_phy_readreg(sc, PHY_BMCR);
879 bmcr &= ~PHY_BMCR_AUTONEGENBL;
880 ax_phy_writereg(sc, PHY_BMCR, bmcr);
883 printf("ax%d: selecting MII, ", sc->ax_unit);
885 bmcr = ax_phy_readreg(sc, PHY_BMCR);
887 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
888 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
890 if (IFM_SUBTYPE(media) == IFM_100_T4) {
891 printf("100Mbps/T4, half-duplex\n");
892 bmcr |= PHY_BMCR_SPEEDSEL;
893 bmcr &= ~PHY_BMCR_DUPLEX;
896 if (IFM_SUBTYPE(media) == IFM_100_TX) {
898 bmcr |= PHY_BMCR_SPEEDSEL;
901 if (IFM_SUBTYPE(media) == IFM_10_T) {
903 bmcr &= ~PHY_BMCR_SPEEDSEL;
906 if ((media & IFM_GMASK) == IFM_FDX) {
907 printf("full duplex\n");
908 bmcr |= PHY_BMCR_DUPLEX;
910 printf("half duplex\n");
911 bmcr &= ~PHY_BMCR_DUPLEX;
915 ax_phy_writereg(sc, PHY_BMCR, bmcr);
921 * Set speed and duplex mode on internal transceiver.
/*
 * Set speed and duplex mode on the chip's internal transceiver by
 * programming the port-select, PCS, scrambler, speed and duplex bits
 * of the network-config register directly (no MII PHY involved).
 * NOTE(review): interior lines (locals, braces, returns) are elided
 * in this dump.
 */
923 static void ax_setmode(sc, media, verbose)
931 ifp = &sc->arpcom.ac_if;
934 printf("ax%d: selecting internal xcvr, ", sc->ax_unit);
936 mode = CSR_READ_4(sc, AX_NETCFG);
938 mode &= ~(AX_NETCFG_FULLDUPLEX|AX_NETCFG_PORTSEL|
939 AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER|AX_NETCFG_SPEEDSEL);
941 if (IFM_SUBTYPE(media) == IFM_100_T4) {
943 printf("100Mbps/T4, half-duplex\n");
944 mode |= AX_NETCFG_PORTSEL|AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER;
947 if (IFM_SUBTYPE(media) == IFM_100_TX) {
950 mode |= AX_NETCFG_PORTSEL|AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER;
953 if (IFM_SUBTYPE(media) == IFM_10_T) {
/* 10Mbps: deselect the 100Mbps port and set the 10Mbps speed bit. */
956 mode &= ~AX_NETCFG_PORTSEL;
957 mode |= AX_NETCFG_SPEEDSEL;
960 if ((media & IFM_GMASK) == IFM_FDX) {
962 printf("full duplex\n");
963 mode |= AX_NETCFG_FULLDUPLEX;
966 printf("half duplex\n");
967 mode &= ~AX_NETCFG_FULLDUPLEX;
970 CSR_WRITE_4(sc, AX_NETCFG, mode);
976 * In order to fiddle with the
977 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
978 * first have to put the transmit and/or receive logic in the idle state.
980 static void ax_setcfg(sc, bmcr)
986 if (CSR_READ_4(sc, AX_NETCFG) & (AX_NETCFG_TX_ON|AX_NETCFG_RX_ON)) {
988 AX_CLRBIT(sc, AX_NETCFG, (AX_NETCFG_TX_ON|AX_NETCFG_RX_ON));
990 for (i = 0; i < AX_TIMEOUT; i++) {
992 if (CSR_READ_4(sc, AX_ISR) & AX_ISR_TX_IDLE)
997 printf("ax%d: failed to force tx and "
998 "rx to idle state\n", sc->ax_unit);
1002 if (bmcr & PHY_BMCR_SPEEDSEL)
1003 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
1005 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
1007 if (bmcr & PHY_BMCR_DUPLEX)
1008 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_FULLDUPLEX);
1010 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_FULLDUPLEX);
1013 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON|AX_NETCFG_RX_ON);
1018 static void ax_reset(sc)
1019 struct ax_softc *sc;
1023 AX_SETBIT(sc, AX_BUSCTL, AX_BUSCTL_RESET);
1025 for (i = 0; i < AX_TIMEOUT; i++) {
1027 if (!(CSR_READ_4(sc, AX_BUSCTL) & AX_BUSCTL_RESET))
1031 if (i == AX_TIMEOUT)
1032 printf("ax%d: reset never completed!\n", sc->ax_unit);
1034 CSR_WRITE_4(sc, AX_BUSCTL, AX_BUSCTL_CONFIG);
1036 /* Wait a little while for the chip to get its brains in order. */
1042 * Probe for an ASIX chip. Check the PCI vendor and device
1043 * IDs against our list and return a device name if we find a match.
1046 ax_probe(config_id, device_id)
1054 while(t->ax_name != NULL) {
1055 if ((device_id & 0xFFFF) == t->ax_vid &&
1056 ((device_id >> 16) & 0xFFFF) == t->ax_did) {
1066 * Attach the interface. Allocate softc structures, do ifmedia
1067 * setup and ethernet/BPF attach.
/*
 * Attach the interface: allocate and zero the softc, handle PCI power
 * management, map registers (I/O or memory space), hook the interrupt,
 * read the station address from the EEPROM, allocate and 16-byte-align
 * the descriptor lists, probe for an MII PHY, set up ifmedia, and call
 * the MI ether/BPF attach routines.
 * NOTE(review): this dump elides many interior lines of the original
 * (error-path gotos, closing braces, DELAY calls) — do not edit
 * without the complete source.
 */
1070 ax_attach(config_id, unit)
1075 #ifndef AX_USEIOSPACE
1076 vm_offset_t pbase, vbase;
1078 u_char eaddr[ETHER_ADDR_LEN];
1080 struct ax_softc *sc;
1082 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1086 u_int16_t phy_vid, phy_did, phy_sts;
1090 sc = malloc(sizeof(struct ax_softc), M_DEVBUF, M_NOWAIT);
1092 printf("ax%d: no memory for softc struct!\n", unit);
1095 bzero(sc, sizeof(struct ax_softc));
1098 * Handle power management nonsense.
/* If the device exposes a PM capability and is in D1-D3, wake it. */
1101 command = pci_conf_read(config_id, AX_PCI_CAPID) & 0x000000FF;
1102 if (command == 0x01) {
1104 command = pci_conf_read(config_id, AX_PCI_PWRMGMTCTRL);
1105 if (command & AX_PSTATE_MASK) {
1106 u_int32_t iobase, membase, irq;
1108 /* Save important PCI config data. */
1109 iobase = pci_conf_read(config_id, AX_PCI_LOIO);
1110 membase = pci_conf_read(config_id, AX_PCI_LOMEM);
1111 irq = pci_conf_read(config_id, AX_PCI_INTLINE);
1113 /* Reset the power state. */
1114 printf("ax%d: chip is in D%d power mode "
1115 "-- setting to D0\n", unit, command & AX_PSTATE_MASK);
1116 command &= 0xFFFFFFFC;
1117 pci_conf_write(config_id, AX_PCI_PWRMGMTCTRL, command);
1119 /* Restore PCI config data. */
/* BARs and INTLINE are lost on the D3->D0 transition. */
1120 pci_conf_write(config_id, AX_PCI_LOIO, iobase);
1121 pci_conf_write(config_id, AX_PCI_LOMEM, membase);
1122 pci_conf_write(config_id, AX_PCI_INTLINE, irq);
1127 * Map control/status registers.
1129 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1130 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1131 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
1132 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1134 #ifdef AX_USEIOSPACE
1135 if (!(command & PCIM_CMD_PORTEN)) {
1136 printf("ax%d: failed to enable I/O ports!\n", unit);
1141 if (!pci_map_port(config_id, AX_PCI_LOIO,
1142 (u_short *)&(sc->ax_bhandle))) {
1143 printf ("ax%d: couldn't map ports\n", unit);
1146 sc->ax_btag = I386_BUS_SPACE_IO;
1148 if (!(command & PCIM_CMD_MEMEN)) {
1149 printf("ax%d: failed to enable memory mapping!\n", unit);
1153 if (!pci_map_mem(config_id, AX_PCI_LOMEM, &vbase, &pbase)) {
1154 printf ("ax%d: couldn't map memory\n", unit);
1157 sc->ax_btag = I386_BUS_SPACE_MEM;
1158 sc->ax_bhandle = vbase;
1161 /* Allocate interrupt */
1162 if (!pci_map_int(config_id, ax_intr, sc, &net_imask)) {
1163 printf("ax%d: couldn't map interrupt\n", unit);
1167 /* Reset the adapter. */
1171 * Get station address from the EEPROM.
1173 ax_read_eeprom(sc, (caddr_t)&eaddr, AX_EE_NODEADDR, 3, 0);
1176 * An ASIX chip was detected. Inform the world.
1178 printf("ax%d: Ethernet address: %6D\n", unit, eaddr, ":");
1181 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
/* Over-allocate by 8 so the descriptor list can be 16-byte aligned. */
1183 sc->ax_ldata_ptr = malloc(sizeof(struct ax_list_data) + 8,
1184 M_DEVBUF, M_NOWAIT);
1185 if (sc->ax_ldata_ptr == NULL) {
1187 printf("ax%d: no memory for list buffers!\n", unit);
1191 sc->ax_ldata = (struct ax_list_data *)sc->ax_ldata_ptr;
1192 round = (unsigned int)sc->ax_ldata_ptr & 0xF;
1193 roundptr = sc->ax_ldata_ptr;
1194 for (i = 0; i < 8; i++) {
1201 sc->ax_ldata = (struct ax_list_data *)roundptr;
1202 bzero(sc->ax_ldata, sizeof(struct ax_list_data));
1204 ifp = &sc->arpcom.ac_if;
1206 ifp->if_unit = unit;
1207 ifp->if_name = "ax";
1208 ifp->if_mtu = ETHERMTU;
1209 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1210 ifp->if_ioctl = ax_ioctl;
1211 ifp->if_output = ether_output;
1212 ifp->if_start = ax_start;
1213 ifp->if_watchdog = ax_watchdog;
1214 ifp->if_init = ax_init;
1215 ifp->if_baudrate = 10000000;
/* Walk the MII address space looking for a responding PHY. */
1219 printf("ax%d: probing for a PHY\n", sc->ax_unit);
1220 for (i = AX_PHYADDR_MIN; i < AX_PHYADDR_MAX + 1; i++) {
1222 printf("ax%d: checking address: %d\n",
1224 sc->ax_phy_addr = i;
1225 ax_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1227 while(ax_phy_readreg(sc, PHY_BMCR)
1229 if ((phy_sts = ax_phy_readreg(sc, PHY_BMSR)))
1233 phy_vid = ax_phy_readreg(sc, PHY_VENID);
1234 phy_did = ax_phy_readreg(sc, PHY_DEVID);
1236 printf("ax%d: found PHY at address %d, ",
1237 sc->ax_unit, sc->ax_phy_addr);
1239 printf("vendor id: %x device id: %x\n",
/* Match against the known-PHY table; low nibble is the revision. */
1243 if (phy_vid == p->ax_vid &&
1244 (phy_did | 0x000F) == p->ax_did) {
1250 if (sc->ax_pinfo == NULL)
1251 sc->ax_pinfo = &ax_phys[PHY_UNKNOWN];
1253 printf("ax%d: PHY type: %s\n",
1254 sc->ax_unit, sc->ax_pinfo->ax_name);
1257 printf("ax%d: MII without any phy!\n", sc->ax_unit);
1264 ifmedia_init(&sc->ifmedia, 0, ax_ifmedia_upd, ax_ifmedia_sts);
1266 if (sc->ax_pinfo != NULL) {
1268 ax_autoneg_mii(sc, AX_FLAG_FORCEDELAY, 1);
/* No PHY found: register the internal transceiver's fixed modes. */
1270 ifmedia_add(&sc->ifmedia,
1271 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1272 ifmedia_add(&sc->ifmedia,
1273 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1274 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1275 ifmedia_add(&sc->ifmedia,
1276 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
1277 ifmedia_add(&sc->ifmedia,
1278 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1279 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1280 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1283 media = sc->ifmedia.ifm_media;
1286 ifmedia_set(&sc->ifmedia, media);
1289 * Call MI attach routines.
1292 ether_ifattach(ifp);
1295 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1297 at_shutdown(ax_shutdown, sc, SHUTDOWN_POST_SYNC);
1305 * Initialize the transmit descriptors.
1307 static int ax_list_tx_init(sc)
1308 struct ax_softc *sc;
1310 struct ax_chain_data *cd;
1311 struct ax_list_data *ld;
1316 for (i = 0; i < AX_TX_LIST_CNT; i++) {
1317 cd->ax_tx_chain[i].ax_ptr = &ld->ax_tx_list[i];
1318 if (i == (AX_TX_LIST_CNT - 1))
1319 cd->ax_tx_chain[i].ax_nextdesc =
1320 &cd->ax_tx_chain[0];
1322 cd->ax_tx_chain[i].ax_nextdesc =
1323 &cd->ax_tx_chain[i + 1];
1326 cd->ax_tx_free = &cd->ax_tx_chain[0];
1327 cd->ax_tx_tail = cd->ax_tx_head = NULL;
1334 * Initialize the RX descriptors and allocate mbufs for them. Note that
1335 * we arrange the descriptors in a closed ring, so that the last descriptor
1336 * points back to the first.
1338 static int ax_list_rx_init(sc)
1339 struct ax_softc *sc;
1341 struct ax_chain_data *cd;
1342 struct ax_list_data *ld;
1348 for (i = 0; i < AX_RX_LIST_CNT; i++) {
1349 cd->ax_rx_chain[i].ax_ptr =
1350 (struct ax_desc *)&ld->ax_rx_list[i];
1351 if (ax_newbuf(sc, &cd->ax_rx_chain[i]) == ENOBUFS)
1353 if (i == (AX_RX_LIST_CNT - 1)) {
1354 cd->ax_rx_chain[i].ax_nextdesc =
1355 &cd->ax_rx_chain[0];
1356 ld->ax_rx_list[i].ax_next =
1357 vtophys(&ld->ax_rx_list[0]);
1359 cd->ax_rx_chain[i].ax_nextdesc =
1360 &cd->ax_rx_chain[i + 1];
1361 ld->ax_rx_list[i].ax_next =
1362 vtophys(&ld->ax_rx_list[i + 1]);
1366 cd->ax_rx_head = &cd->ax_rx_chain[0];
1372 * Initialize an RX descriptor and attach an MBUF cluster.
1373 * Note: the length fields are only 11 bits wide, which means the
1374 * largest size we can specify is 2047. This is important because
1375 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1376 * overflow the field and make a mess.
1378 static int ax_newbuf(sc, c)
1379 struct ax_softc *sc;
1380 struct ax_chain_onefrag *c;
1382 struct mbuf *m_new = NULL;
1384 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1385 if (m_new == NULL) {
1386 printf("ax%d: no memory for rx list -- packet dropped!\n",
1391 MCLGET(m_new, M_DONTWAIT);
1392 if (!(m_new->m_flags & M_EXT)) {
1393 printf("ax%d: no memory for rx list -- packet dropped!\n",
1400 c->ax_ptr->ax_status = AX_RXSTAT;
1401 c->ax_ptr->ax_data = vtophys(mtod(m_new, caddr_t));
1402 c->ax_ptr->ax_ctl = MCLBYTES - 1;
1408 * A frame has been uploaded: pass the resulting mbuf chain up to
1409 * the higher level protocols.
/*
 * RX completion: walk the ring from ax_rx_head while descriptors are
 * no longer owned by the chip, replace each filled cluster with a
 * fresh one, and hand completed frames to BPF/ether_input().  Error
 * frames just have their descriptor recycled in place.
 * NOTE(review): interior lines (locals, braces, continue statements)
 * are elided in this dump.
 */
1411 static void ax_rxeof(sc)
1412 struct ax_softc *sc;
1414 struct ether_header *eh;
1417 struct ax_chain_onefrag *cur_rx;
1421 ifp = &sc->arpcom.ac_if;
1423 while(!((rxstat = sc->ax_cdata.ax_rx_head->ax_ptr->ax_status) &
1425 cur_rx = sc->ax_cdata.ax_rx_head;
1426 sc->ax_cdata.ax_rx_head = cur_rx->ax_nextdesc;
1429 * If an error occurs, update stats, clear the
1430 * status word and leave the mbuf cluster in place:
1431 * it should simply get re-used next time this descriptor
1432 * comes up in the ring.
1434 if (rxstat & AX_RXSTAT_RXERR) {
1436 if (rxstat & AX_RXSTAT_COLLSEEN)
1437 ifp->if_collisions++;
1438 cur_rx->ax_ptr->ax_status = AX_RXSTAT;
1439 cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
1443 /* No errors; receive the packet. */
1444 m = cur_rx->ax_mbuf;
1445 total_len = AX_RXBYTES(cur_rx->ax_ptr->ax_status);
/* Chip includes the FCS in the byte count; trim it. */
1447 total_len -= ETHER_CRC_LEN;
1450 * Try to conjure up a new mbuf cluster. If that
1451 * fails, it means we have an out of memory condition and
1452 * should leave the buffer in place and continue. This will
1453 * result in a lost packet, but there's little else we
1454 * can do in this situation.
1456 if (ax_newbuf(sc, cur_rx) == ENOBUFS) {
1458 cur_rx->ax_ptr->ax_status = AX_RXSTAT;
1459 cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
1464 eh = mtod(m, struct ether_header *);
1465 m->m_pkthdr.rcvif = ifp;
1466 m->m_pkthdr.len = m->m_len = total_len;
1469 * Handle BPF listeners. Let the BPF user see the packet, but
1470 * don't pass it up to the ether_input() layer unless it's
1471 * a broadcast packet, multicast packet, matches our ethernet
1472 * address or the interface is in promiscuous mode.
1476 if (ifp->if_flags & IFF_PROMISC &&
1477 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1479 (eh->ether_dhost[0] & 1) == 0)) {
1485 /* Remove header from mbuf and pass it on. */
1486 m_adj(m, sizeof(struct ether_header));
1487 ether_input(ifp, eh, m);
1494 struct ax_softc *sc;
1498 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_ON);
1499 CSR_WRITE_4(sc, AX_RXADDR, vtophys(sc->ax_cdata.ax_rx_head->ax_ptr));
1500 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_ON);
1501 CSR_WRITE_4(sc, AX_RXSTART, 0xFFFFFFFF);
1507 * A frame was downloaded to the chip. It's safe for us to clean up
1511 static void ax_txeof(sc)
1512 struct ax_softc *sc;
1514 struct ax_chain *cur_tx;
1517 ifp = &sc->arpcom.ac_if;
1519 /* Clear the timeout timer. */
/* Nothing queued: no completed frames to reap. */
1522 if (sc->ax_cdata.ax_tx_head == NULL)
1526 * Go through our tx list and free mbufs for those
1527 * frames that have been transmitted.
1529 while(sc->ax_cdata.ax_tx_head->ax_mbuf != NULL) {
1532 cur_tx = sc->ax_cdata.ax_tx_head;
1533 txstat = AX_TXSTATUS(cur_tx);
/* Still owned by the chip: stop reaping here. */
1535 if (txstat & AX_TXSTAT_OWN)
1538 if (txstat & AX_TXSTAT_ERRSUM) {
1540 if (txstat & AX_TXSTAT_EXCESSCOLL)
1541 ifp->if_collisions++;
1542 if (txstat & AX_TXSTAT_LATECOLL)
1543 ifp->if_collisions++;
/* Collision count field is bits 3 and up of the status word. */
1546 ifp->if_collisions += (txstat & AX_TXSTAT_COLLCNT) >> 3;
/* Frame is done; release its mbuf chain. */
1549 m_freem(cur_tx->ax_mbuf);
1550 cur_tx->ax_mbuf = NULL;
/* Reached the tail: the in-flight list is now empty. */
1552 if (sc->ax_cdata.ax_tx_head == sc->ax_cdata.ax_tx_tail) {
1553 sc->ax_cdata.ax_tx_head = NULL;
1554 sc->ax_cdata.ax_tx_tail = NULL;
1558 sc->ax_cdata.ax_tx_head = cur_tx->ax_nextdesc;
1565 * TX 'end of channel' interrupt handler.
1567 static void ax_txeoc(sc)
1568 struct ax_softc *sc;
1572 ifp = &sc->arpcom.ac_if;
/* All frames reaped: clear OACTIVE so ax_start() can queue again. */
1576 if (sc->ax_cdata.ax_tx_head == NULL) {
1577 ifp->if_flags &= ~IFF_OACTIVE;
1578 sc->ax_cdata.ax_tx_tail = NULL;
/* A deferred autoneg was requested; run it now that TX is idle. */
1579 if (sc->ax_want_auto)
1580 ax_autoneg_mii(sc, AX_FLAG_DELAYTIMEO, 1);
/*
 * Interrupt service routine.  Reads and acknowledges AX_ISR, then
 * dispatches to the RX/TX completion and error handlers.  Interrupts
 * are masked for the duration and re-enabled on the way out.
 */
1586 static void ax_intr(arg)
1589 struct ax_softc *sc;
1594 ifp = &sc->arpcom.ac_if;
1596 /* Suppress unwanted interrupts */
1597 if (!(ifp->if_flags & IFF_UP)) {
1602 /* Disable interrupts. */
1603 CSR_WRITE_4(sc, AX_IMR, 0x00000000);
1606 status = CSR_READ_4(sc, AX_ISR);
/* Writing the status bits back acknowledges (clears) them. */
1608 CSR_WRITE_4(sc, AX_ISR, status);
1610 if ((status & AX_INTRS) == 0)
1613 if ((status & AX_ISR_TX_OK) || (status & AX_ISR_TX_EARLY))
1616 if (status & AX_ISR_TX_NOBUF)
/* TX went idle with frames still queued: restart the transmitter. */
1619 if (status & AX_ISR_TX_IDLE) {
1621 if (sc->ax_cdata.ax_tx_head != NULL) {
1622 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON);
1623 CSR_WRITE_4(sc, AX_TXSTART, 0xFFFFFFFF);
/*
 * TX underrun: bump the FIFO threshold; at the highest threshold
 * (160 bytes) fall back to store-and-forward instead.
 */
1627 if (status & AX_ISR_TX_UNDERRUN) {
1629 cfg = CSR_READ_4(sc, AX_NETCFG);
1630 if ((cfg & AX_NETCFG_TX_THRESH) == AX_TXTHRESH_160BYTES)
1631 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_STORENFWD);
/* 0x4000 is one step of the TX threshold field. */
1633 CSR_WRITE_4(sc, AX_NETCFG, cfg + 0x4000);
1636 if (status & AX_ISR_RX_OK)
1639 if ((status & AX_ISR_RX_WATDOGTIMEO)
1640 || (status & AX_ISR_RX_NOBUF))
1643 if (status & AX_ISR_BUS_ERR) {
1649 /* Re-enable interrupts. */
1650 CSR_WRITE_4(sc, AX_IMR, AX_INTRS);
/* Kick the transmit routine if packets queued up while we worked. */
1652 if (ifp->if_snd.ifq_head != NULL) {
1660 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1661 * pointers to the fragment pointers.
1663 static int ax_encap(sc, c, m_head)
1664 struct ax_softc *sc;
1666 struct mbuf *m_head;
1669 volatile struct ax_desc *f = NULL;
1674 * Start packing the mbufs in this chain into
1675 * the fragment pointers. Stop when we run out
1676 * of fragments or hit the end of the mbuf chain.
1681 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
/* Skip zero-length mbufs; they contribute no fragment. */
1682 if (m->m_len != 0) {
1683 if (frag == AX_MAXFRAGS)
1685 total_len += m->m_len;
1686 f = &c->ax_ptr->ax_frag[frag];
1687 f->ax_ctl = m->m_len;
1690 f->ax_ctl |= AX_TXCTL_FIRSTFRAG;
1692 f->ax_status = AX_TXSTAT_OWN;
/* Chain fragments by physical address for the chip's DMA engine. */
1693 f->ax_next = vtophys(&c->ax_ptr->ax_frag[frag + 1]);
1694 f->ax_data = vtophys(mtod(m, vm_offset_t));
1700 * Handle special case: we ran out of fragments,
1701 * but we have more mbufs left in the chain. Copy the
1702 * data into an mbuf cluster. Note that we don't
1703 * bother clearing the values in the other fragment
1704 * pointers/counters; it wouldn't gain us anything,
1705 * and would waste cycles.
1708 struct mbuf *m_new = NULL;
1710 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1711 if (m_new == NULL) {
/* NOTE(review): message lacks a trailing '\n' — confirm and fix. */
1712 printf("ax%d: no memory for tx list", sc->ax_unit);
/* Packet too big for a plain header mbuf: attach a cluster. */
1715 if (m_head->m_pkthdr.len > MHLEN) {
1716 MCLGET(m_new, M_DONTWAIT);
1717 if (!(m_new->m_flags & M_EXT)) {
/* NOTE(review): message lacks a trailing '\n' — confirm and fix. */
1719 printf("ax%d: no memory for tx list",
1724 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1725 mtod(m_new, caddr_t));
1726 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
/* Coalesced copy occupies a single fragment. */
1729 f = &c->ax_ptr->ax_frag[0];
1731 f->ax_data = vtophys(mtod(m_new, caddr_t));
1732 f->ax_ctl = total_len = m_new->m_len;
1733 f->ax_ctl |= AX_TXCTL_FIRSTFRAG;
/*
 * Pad runt frames up to the minimum length with an extra fragment
 * pointing at the softc's pad buffer.
 */
1738 if (total_len < AX_MIN_FRAMELEN) {
1739 f = &c->ax_ptr->ax_frag[frag];
1740 f->ax_ctl = AX_MIN_FRAMELEN - total_len;
1741 f->ax_data = vtophys(&sc->ax_cdata.ax_pad);
1742 f->ax_status = AX_TXSTAT_OWN;
1746 c->ax_mbuf = m_head;
1747 c->ax_lastdesc = frag - 1;
/* Mark the last fragment and request a TX-complete interrupt. */
1748 AX_TXCTL(c) |= AX_TXCTL_LASTFRAG|AX_TXCTL_FINT;
1749 AX_TXNEXT(c) = vtophys(&c->ax_nextdesc->ax_ptr->ax_frag[0]);
1754 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1755 * to the mbuf data regions directly in the transmit lists. We also save a
1756 * copy of the pointers since the transmit list fragment pointers are
1757 * physical addresses.
1760 static void ax_start(ifp)
1763 struct ax_softc *sc;
1764 struct mbuf *m_head = NULL;
1765 struct ax_chain *cur_tx = NULL, *start_tx;
/* Don't transmit while an autonegotiation session is in progress. */
1769 if (sc->ax_autoneg) {
1775 * Check for an available queue slot. If there are none,
/* Free-list head still holds an mbuf => no descriptors available. */
1778 if (sc->ax_cdata.ax_tx_free->ax_mbuf != NULL) {
1779 ifp->if_flags |= IFF_OACTIVE;
1783 start_tx = sc->ax_cdata.ax_tx_free;
1785 while(sc->ax_cdata.ax_tx_free->ax_mbuf == NULL) {
1786 IF_DEQUEUE(&ifp->if_snd, m_head);
1790 /* Pick a descriptor off the free list. */
1791 cur_tx = sc->ax_cdata.ax_tx_free;
1792 sc->ax_cdata.ax_tx_free = cur_tx->ax_nextdesc;
1794 /* Pack the data into the descriptor. */
1795 ax_encap(sc, cur_tx, m_head);
/*
 * Hand ownership to the chip for every descriptor except the first;
 * the first is released below, after the whole batch is queued.
 */
1796 if (cur_tx != start_tx)
1797 AX_TXOWN(cur_tx) = AX_TXSTAT_OWN;
1801 * If there's a BPF listener, bounce a copy of this frame
1805 bpf_mtap(ifp, cur_tx->ax_mbuf);
/* Release the first descriptor and issue a TX poll demand. */
1807 AX_TXOWN(cur_tx) = AX_TXSTAT_OWN;
1808 CSR_WRITE_4(sc, AX_TXSTART, 0xFFFFFFFF);
1811 sc->ax_cdata.ax_tx_tail = cur_tx;
1812 if (sc->ax_cdata.ax_tx_head == NULL)
1813 sc->ax_cdata.ax_tx_head = start_tx;
1816 * Set a timeout in case the chip goes out to lunch.
/*
 * Initialize the adapter: reset, program media/threshold bits, load the
 * station address and RX/TX descriptor lists, then enable the receiver,
 * transmitter and interrupts.
 */
1823 static void ax_init(xsc)
1826 struct ax_softc *sc = xsc;
1827 struct ifnet *ifp = &sc->arpcom.ac_if;
1828 u_int16_t phy_bmcr = 0;
/* Save BMCR so the reset below doesn't lose the PHY's media setting. */
1836 if (sc->ax_pinfo != NULL)
1837 phy_bmcr = ax_phy_readreg(sc, PHY_BMCR);
1840 * Cancel pending I/O and free all RX/TX buffers.
1846 * Set cache alignment and burst length.
1848 CSR_WRITE_4(sc, AX_BUSCTL, AX_BUSCTL_CONFIG);
1850 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_HEARTBEAT);
1851 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_STORENFWD);
/* With a PHY present, select the MII port and mirror the PHY's mode. */
1853 if (sc->ax_pinfo != NULL) {
1854 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_PORTSEL);
1855 ax_setcfg(sc, ax_phy_readreg(sc, PHY_BMCR));
1857 ax_setmode(sc, sc->ifmedia.ifm_media, 0);
/* Reset threshold/speed bits, then pick a TX threshold per media. */
1859 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_TX_THRESH);
1860 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
1862 if (IFM_SUBTYPE(sc->ifmedia.ifm_media) == IFM_10_T)
1863 AX_SETBIT(sc, AX_NETCFG, AX_TXTHRESH_160BYTES);
1865 AX_SETBIT(sc, AX_NETCFG, AX_TXTHRESH_72BYTES);
1867 /* Init our MAC address */
/* Station address is written via the filter index/data register pair. */
1868 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_PAR0);
1869 CSR_WRITE_4(sc, AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
1870 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_PAR1);
1871 CSR_WRITE_4(sc, AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
1873 /* Init circular RX list. */
1874 if (ax_list_rx_init(sc) == ENOBUFS) {
1875 printf("ax%d: initialization failed: no "
1876 "memory for rx buffers\n", sc->ax_unit);
1883 * Init tx descriptors.
1885 ax_list_tx_init(sc);
1887 /* If we want promiscuous mode, set the allframes bit. */
1888 if (ifp->if_flags & IFF_PROMISC) {
1889 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_PROMISC);
1891 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_PROMISC);
1895 * Set the capture broadcast bit to capture broadcast frames.
1897 if (ifp->if_flags & IFF_BROADCAST) {
1898 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_BROAD);
1900 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_BROAD);
1904 * Load the multicast filter.
1909 * Load the address of the RX list.
1911 CSR_WRITE_4(sc, AX_RXADDR, vtophys(sc->ax_cdata.ax_rx_head->ax_ptr));
1912 CSR_WRITE_4(sc, AX_TXADDR, vtophys(&sc->ax_ldata->ax_tx_list[0]));
1915 * Enable interrupts.
1917 CSR_WRITE_4(sc, AX_IMR, AX_INTRS);
/* Ack any stale interrupt status before enabling the engines. */
1918 CSR_WRITE_4(sc, AX_ISR, 0xFFFFFFFF);
1920 /* Enable receiver and transmitter. */
1921 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON|AX_NETCFG_RX_ON);
1922 CSR_WRITE_4(sc, AX_RXSTART, 0xFFFFFFFF);
1924 /* Restore state of BMCR */
1925 if (sc->ax_pinfo != NULL)
1926 ax_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1928 ifp->if_flags |= IFF_RUNNING;
1929 ifp->if_flags &= ~IFF_OACTIVE;
1937 * Set media options.
1939 static int ax_ifmedia_upd(ifp)
1942 struct ax_softc *sc;
1943 struct ifmedia *ifm;
/* Only Ethernet media types are meaningful for this driver. */
1948 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1951 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1952 ax_autoneg_mii(sc, AX_FLAG_SCHEDDELAY, 1);
/* No PHY: program the MAC directly; otherwise go through the MII. */
1954 if (sc->ax_pinfo == NULL)
1955 ax_setmode(sc, ifm->ifm_media, 1);
1957 ax_setmode_mii(sc, ifm->ifm_media);
1964 * Report current media status.
1966 static void ax_ifmedia_sts(ifp, ifmr)
1968 struct ifmediareq *ifmr;
1970 struct ax_softc *sc;
1971 u_int16_t advert = 0, ability = 0;
1972 u_int32_t media = 0;
1976 ifmr->ifm_active = IFM_ETHER;
/* No PHY: derive speed/duplex from the MAC's NETCFG register. */
1978 if (sc->ax_pinfo == NULL) {
1979 media = CSR_READ_4(sc, AX_NETCFG);
1980 if (media & AX_NETCFG_PORTSEL)
1981 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1983 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1984 if (media & AX_NETCFG_FULLDUPLEX)
1985 ifmr->ifm_active |= IFM_FDX;
1987 ifmr->ifm_active |= IFM_HDX;
/*
 * Autoneg disabled: report the forced BMCR settings.
 * NOTE(review): BMCR is read three times here; caching one read in a
 * local would avoid repeated MII transactions — confirm and clean up.
 */
1991 if (!(ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1992 if (ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1993 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1995 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1996 if (ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1997 ifmr->ifm_active |= IFM_FDX;
1999 ifmr->ifm_active |= IFM_HDX;
/*
 * Autoneg enabled: intersect our advertisement with the link
 * partner's abilities, best media first.
 */
2003 ability = ax_phy_readreg(sc, PHY_LPAR);
2004 advert = ax_phy_readreg(sc, PHY_ANAR);
2005 if (advert & PHY_ANAR_100BT4 &&
2006 ability & PHY_ANAR_100BT4) {
2007 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
2008 } else if (advert & PHY_ANAR_100BTXFULL &&
2009 ability & PHY_ANAR_100BTXFULL) {
2010 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
2011 } else if (advert & PHY_ANAR_100BTXHALF &&
2012 ability & PHY_ANAR_100BTXHALF) {
2013 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
2014 } else if (advert & PHY_ANAR_10BTFULL &&
2015 ability & PHY_ANAR_10BTFULL) {
2016 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
2017 } else if (advert & PHY_ANAR_10BTHALF &&
2018 ability & PHY_ANAR_10BTHALF) {
2019 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
/*
 * Interface ioctl handler: address/flags changes and media selection
 * are delegated to ether_ioctl() and ifmedia_ioctl() respectively.
 */
2025 static int ax_ioctl(ifp, command, data)
2030 struct ax_softc *sc = ifp->if_softc;
2031 struct ifreq *ifr = (struct ifreq *) data;
2040 error = ether_ioctl(ifp, command, data);
/* IFF_UP toggling: (re)initialize or stop the chip as appropriate. */
2043 if (ifp->if_flags & IFF_UP) {
2046 if (ifp->if_flags & IFF_RUNNING)
2058 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
/*
 * Watchdog timer: fires when a queued transmission fails to complete
 * in time.  During autonegotiation this is expected and handled by the
 * autoneg state machine instead of being treated as an error.
 */
2070 static void ax_watchdog(ifp)
2073 struct ax_softc *sc;
2077 if (sc->ax_autoneg) {
2078 ax_autoneg_mii(sc, AX_FLAG_DELAYTIMEO, 1);
2083 printf("ax%d: watchdog timeout\n", sc->ax_unit);
/* With a PHY, check link status to give a more useful diagnosis. */
2085 if (sc->ax_pinfo != NULL) {
2086 if (!(ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
2087 printf("ax%d: no carrier - transceiver "
2088 "cable problem?\n", sc->ax_unit);
/* Retry anything still waiting on the send queue. */
2095 if (ifp->if_snd.ifq_head != NULL)
2102 * Stop the adapter and free any mbufs allocated to the
2105 static void ax_stop(sc)
2106 struct ax_softc *sc;
2111 ifp = &sc->arpcom.ac_if;
/* Halt both DMA engines, mask interrupts and clear the list bases. */
2114 AX_CLRBIT(sc, AX_NETCFG, (AX_NETCFG_RX_ON|AX_NETCFG_TX_ON));
2115 CSR_WRITE_4(sc, AX_IMR, 0x00000000);
2116 CSR_WRITE_4(sc, AX_TXADDR, 0x00000000);
2117 CSR_WRITE_4(sc, AX_RXADDR, 0x00000000);
2120 * Free data in the RX lists.
2122 for (i = 0; i < AX_RX_LIST_CNT; i++) {
2123 if (sc->ax_cdata.ax_rx_chain[i].ax_mbuf != NULL) {
2124 m_freem(sc->ax_cdata.ax_rx_chain[i].ax_mbuf);
/* Null the pointer so a later stop can't double-free. */
2125 sc->ax_cdata.ax_rx_chain[i].ax_mbuf = NULL;
2128 bzero((char *)&sc->ax_ldata->ax_rx_list,
2129 sizeof(sc->ax_ldata->ax_rx_list));
2132 * Free the TX list buffers.
2134 for (i = 0; i < AX_TX_LIST_CNT; i++) {
2135 if (sc->ax_cdata.ax_tx_chain[i].ax_mbuf != NULL) {
2136 m_freem(sc->ax_cdata.ax_tx_chain[i].ax_mbuf);
2137 sc->ax_cdata.ax_tx_chain[i].ax_mbuf = NULL;
2141 bzero((char *)&sc->ax_ldata->ax_tx_list,
2142 sizeof(sc->ax_ldata->ax_tx_list));
2144 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2150 * Stop all chip I/O so that the kernel's probe routines don't
2151 * get confused by errant DMAs when rebooting.
2153 static void ax_shutdown(howto, arg)
2157 struct ax_softc *sc = (struct ax_softc *)arg;
/*
 * PCI driver registration: ax_device describes this driver to the old
 * pcidevice probe framework (initializer fields elided from this
 * excerpt), and DATA_SET links it into the kernel's pcidevice_set.
 */
2164 static struct pci_device ax_device = {
2171 DATA_SET(pcidevice_set, ax_device);