2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $Id: if_ax.c,v 1.8 1999/01/16 20:33:34 wpaul Exp $
36 * ASIX AX88140A fast ethernet PCI NIC driver.
38 * Written by Bill Paul <wpaul@ctr.columbia.edu>
39 * Electrical Engineering Department
40 * Columbia University, New York City
44 * The ASIX Electronics AX88140A is still another DEC 21x4x clone. It's
45 * a reasonably close copy of the tulip, except for the receiver filter
46 * programming. Where the DEC chip has a special setup frame that
47 * needs to be downloaded into the transmit DMA engine, the ASIX chip
48 * has a less complicated setup frame which is written into one of
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/sockio.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
63 #include <net/if_arp.h>
64 #include <net/ethernet.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
72 #include <vm/vm.h> /* for vtophys */
73 #include <vm/pmap.h> /* for vtophys */
74 #include <machine/clock.h> /* for DELAY */
75 #include <machine/bus_pio.h>
76 #include <machine/bus_memio.h>
77 #include <machine/bus.h>
79 #include <pci/pcireg.h>
80 #include <pci/pcivar.h>
84 /* #define AX_BACKGROUND_AUTONEG */
86 #include <pci/if_axreg.h>
89 static const char rcsid[] =
90 "$Id: if_ax.c,v 1.8 1999/01/16 20:33:34 wpaul Exp $";
94 * Various supported device vendors/types and their names.
/*
 * PCI vendor/device ID table of supported ASIX NICs; scanned by ax_probe().
 * NOTE(review): the table's NULL-terminator entry and closing brace are not
 * visible in this extraction — confirm against the full source.
 */
96 static struct ax_type ax_devs[] = {
97 { AX_VENDORID, AX_DEVICEID_AX88140A,
98 "ASIX AX88140A 10/100BaseTX" },
103 * Various supported PHY vendors/types and their names. Note that
104 * this driver will work with pretty much any MII-compliant PHY,
105 * so failure to positively identify the chip is not a fatal error.
/*
 * Known MII PHY vendor/device IDs and display names. The final { 0, 0, ... }
 * entry is the catch-all used when a PHY responds but is not recognized
 * (indexed as PHY_UNKNOWN in ax_attach()).
 */
108 static struct ax_type ax_phys[] = {
109 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
110 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
111 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
112 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
113 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
114 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
115 { 0, 0, "<MII-compliant physical interface>" }
118 static unsigned long ax_count = 0;
119 static const char *ax_probe __P((pcici_t, pcidi_t));
120 static void ax_attach __P((pcici_t, int));
122 static int ax_newbuf __P((struct ax_softc *,
123 struct ax_chain_onefrag *));
124 static int ax_encap __P((struct ax_softc *, struct ax_chain *,
127 static void ax_rxeof __P((struct ax_softc *));
128 static void ax_rxeoc __P((struct ax_softc *));
129 static void ax_txeof __P((struct ax_softc *));
130 static void ax_txeoc __P((struct ax_softc *));
131 static void ax_intr __P((void *));
132 static void ax_start __P((struct ifnet *));
133 static int ax_ioctl __P((struct ifnet *, u_long, caddr_t));
134 static void ax_init __P((void *));
135 static void ax_stop __P((struct ax_softc *));
136 static void ax_watchdog __P((struct ifnet *));
137 static void ax_shutdown __P((int, void *));
138 static int ax_ifmedia_upd __P((struct ifnet *));
139 static void ax_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
141 static void ax_delay __P((struct ax_softc *));
142 static void ax_eeprom_idle __P((struct ax_softc *));
143 static void ax_eeprom_putbyte __P((struct ax_softc *, int));
144 static void ax_eeprom_getword __P((struct ax_softc *, int, u_int16_t *));
145 static void ax_read_eeprom __P((struct ax_softc *, caddr_t, int,
148 static void ax_mii_writebit __P((struct ax_softc *, int));
149 static int ax_mii_readbit __P((struct ax_softc *));
150 static void ax_mii_sync __P((struct ax_softc *));
151 static void ax_mii_send __P((struct ax_softc *, u_int32_t, int));
152 static int ax_mii_readreg __P((struct ax_softc *, struct ax_mii_frame *));
153 static int ax_mii_writereg __P((struct ax_softc *, struct ax_mii_frame *));
154 static u_int16_t ax_phy_readreg __P((struct ax_softc *, int));
155 static void ax_phy_writereg __P((struct ax_softc *, int, int));
157 static void ax_autoneg_xmit __P((struct ax_softc *));
158 static void ax_autoneg_mii __P((struct ax_softc *, int, int));
159 static void ax_setmode_mii __P((struct ax_softc *, int));
160 static void ax_setmode __P((struct ax_softc *, int, int));
161 static void ax_getmode_mii __P((struct ax_softc *));
162 static void ax_setcfg __P((struct ax_softc *, int));
163 static u_int32_t ax_calchash __P((caddr_t));
164 static void ax_setmulti __P((struct ax_softc *));
165 static void ax_reset __P((struct ax_softc *));
166 static int ax_list_rx_init __P((struct ax_softc *));
167 static int ax_list_tx_init __P((struct ax_softc *));
169 #define AX_SETBIT(sc, reg, x) \
170 CSR_WRITE_4(sc, reg, \
171 CSR_READ_4(sc, reg) | x)
173 #define AX_CLRBIT(sc, reg, x) \
174 CSR_WRITE_4(sc, reg, \
175 CSR_READ_4(sc, reg) & ~x)
178 CSR_WRITE_4(sc, AX_SIO, \
179 CSR_READ_4(sc, AX_SIO) | x)
182 CSR_WRITE_4(sc, AX_SIO, \
183 CSR_READ_4(sc, AX_SIO) & ~x)
/*
 * Busy-wait by issuing repeated reads of the bus-control CSR; each PCI read
 * takes a bounded minimum time, so (300 / 33) + 1 reads approximates the
 * delay the chip's EEPROM/MII timing requires.
 * NOTE(review): the idx declaration/opening lines are missing from this
 * extraction.
 */
185 static void ax_delay(sc)
190 for (idx = (300 / 33) + 1; idx > 0; idx--)
191 CSR_READ_4(sc, AX_BUSCTL);
/*
 * Force the serial EEPROM into its idle state: select the EEPROM on the SIO
 * register, assert chip select, then pulse the clock 25 times before
 * deasserting CS and clearing SIO entirely.
 */
194 static void ax_eeprom_idle(sc)
199 CSR_WRITE_4(sc, AX_SIO, AX_SIO_EESEL);
201 AX_SETBIT(sc, AX_SIO, AX_SIO_ROMCTL_READ);
203 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CS);
205 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
/* 25 clock pulses flush any in-progress EEPROM transaction. */
208 for (i = 0; i < 25; i++) {
209 AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CLK);
211 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
215 AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CLK);
217 AX_CLRBIT(sc, AX_SIO, AX_SIO_EE_CS);
219 CSR_WRITE_4(sc, AX_SIO, 0x00000000);
225 * Send a read command and address to the EEPROM, check for ACK.
/*
 * Send a read command and address to the EEPROM, check for ACK.
 * The command word is clocked out MSB-first starting at bit 0x400
 * (read opcode plus address bits), strobing the clock for each bit.
 */
227 static void ax_eeprom_putbyte(sc, addr)
233 d = addr | AX_EECMD_READ;
236 * Feed in each bit and stobe the clock.
238 for (i = 0x400; i; i >>= 1) {
240 SIO_SET(AX_SIO_EE_DATAIN);
242 SIO_CLR(AX_SIO_EE_DATAIN);
/* Clock strobe: raise then lower EE_CLK to latch the data bit. */
245 SIO_SET(AX_SIO_EE_CLK);
247 SIO_CLR(AX_SIO_EE_CLK);
255 * Read a word of data stored in the EEPROM at address 'addr.'
/*
 * Read one 16-bit word from the EEPROM at 'addr' into *dest.
 * Sequence: idle the EEPROM, enter access mode, clock out the read
 * command/address via ax_eeprom_putbyte(), then shift in 16 data bits
 * MSB-first (mask starts at 0x8000).
 */
257 static void ax_eeprom_getword(sc, addr, dest)
265 /* Force EEPROM to idle state. */
268 /* Enter EEPROM access mode. */
269 CSR_WRITE_4(sc, AX_SIO, AX_SIO_EESEL);
271 AX_SETBIT(sc, AX_SIO, AX_SIO_ROMCTL_READ);
273 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CS);
275 AX_SETBIT(sc, AX_SIO, AX_SIO_EE_CLK);
279 * Send address of word we want to read.
281 ax_eeprom_putbyte(sc, addr);
284 * Start reading bits from EEPROM.
286 for (i = 0x8000; i; i >>= 1) {
287 SIO_SET(AX_SIO_EE_CLK);
/* Sample DATAOUT while the clock is high; bit accumulation line not
 * visible in this extraction. */
289 if (CSR_READ_4(sc, AX_SIO) & AX_SIO_EE_DATAOUT)
292 SIO_CLR(AX_SIO_EE_CLK);
296 /* Turn off EEPROM access mode. */
305 * Read a sequence of words from the EEPROM.
/*
 * Read 'cnt' consecutive 16-bit words starting at EEPROM offset 'off' into
 * the byte buffer 'dest'. The 'swap' argument presumably controls byte
 * swapping of each word — the swap branch is not visible in this
 * extraction; confirm against the full source.
 */
307 static void ax_read_eeprom(sc, dest, off, cnt, swap)
315 u_int16_t word = 0, *ptr;
317 for (i = 0; i < cnt; i++) {
318 ax_eeprom_getword(sc, off + i, &word);
319 ptr = (u_int16_t *)(dest + (i * 2));
330 * Write a bit to the MII bus.
/*
 * Write a single bit to the MII management bus: drive DATAOUT high or low
 * depending on 'bit', then strobe the MII clock once to latch it.
 */
332 static void ax_mii_writebit(sc, bit)
337 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE|AX_SIO_MII_DATAOUT);
339 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE);
341 AX_SETBIT(sc, AX_SIO, AX_SIO_MII_CLK);
342 AX_CLRBIT(sc, AX_SIO, AX_SIO_MII_CLK);
348 * Read a bit from the MII bus.
/*
 * Read a single bit from the MII management bus. Puts the SIO register in
 * read direction, strobes the clock, then samples the DATAIN pin.
 * Returns nonzero when the sampled bit is set (return statements are not
 * visible in this extraction).
 */
350 static int ax_mii_readbit(sc)
353 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_READ|AX_SIO_MII_DIR);
/* Dummy read to flush the direction change before clocking. */
354 CSR_READ_4(sc, AX_SIO);
355 AX_SETBIT(sc, AX_SIO, AX_SIO_MII_CLK);
356 AX_CLRBIT(sc, AX_SIO, AX_SIO_MII_CLK);
357 if (CSR_READ_4(sc, AX_SIO) & AX_SIO_MII_DATAIN)
364 * Sync the PHYs by setting data bit and strobing the clock 32 times.
/*
 * Sync the PHYs by writing 32 consecutive '1' bits on the MII bus, per the
 * IEEE 802.3 management-frame preamble requirement.
 */
366 static void ax_mii_sync(sc)
371 CSR_WRITE_4(sc, AX_SIO, AX_SIO_ROMCTL_WRITE);
373 for (i = 0; i < 32; i++)
374 ax_mii_writebit(sc, 1);
380 * Clock a series of bits through the MII.
/*
 * Clock the low 'cnt' bits of 'bits' out on the MII bus, MSB-first.
 */
382 static void ax_mii_send(sc, bits, cnt)
389 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
390 ax_mii_writebit(sc, bits & i);
394 * Read an PHY register through the MII.
/*
 * Read a PHY register through the MII by bit-banging a complete IEEE 802.3
 * clause-22 read frame: start delimiter, read opcode, PHY address, register
 * address, turnaround, then 16 data bits shifted into frame->mii_data.
 * Returns 0 on success (return paths not visible in this extraction).
 */
396 static int ax_mii_readreg(sc, frame)
398 struct ax_mii_frame *frame;
406 * Set up frame for RX.
408 frame->mii_stdelim = AX_MII_STARTDELIM;
409 frame->mii_opcode = AX_MII_READOP;
410 frame->mii_turnaround = 0;
419 * Send command/address info.
421 ax_mii_send(sc, frame->mii_stdelim, 2);
422 ax_mii_send(sc, frame->mii_opcode, 2);
423 ax_mii_send(sc, frame->mii_phyaddr, 5);
424 ax_mii_send(sc, frame->mii_regaddr, 5);
/* Idle bits before sampling the PHY's ACK. */
428 ax_mii_writebit(sc, 1);
429 ax_mii_writebit(sc, 0);
433 ack = ax_mii_readbit(sc);
436 * Now try reading data bits. If the ack failed, we still
437 * need to clock through 16 cycles to keep the PHY(s) in sync.
440 for(i = 0; i < 16; i++) {
/* Good ACK: shift in the 16-bit register value, MSB-first. */
446 for (i = 0x8000; i; i >>= 1) {
448 if (ax_mii_readbit(sc))
449 frame->mii_data |= i;
/* Trailing idle bits to release the bus. */
455 ax_mii_writebit(sc, 0);
456 ax_mii_writebit(sc, 0);
466 * Write to a PHY register through the MII.
/*
 * Write to a PHY register through the MII: bit-bang a complete clause-22
 * write frame (start delimiter, write opcode, PHY address, register
 * address, turnaround, 16 data bits), then two idle bits to release the bus.
 */
468 static int ax_mii_writereg(sc, frame)
470 struct ax_mii_frame *frame;
477 * Set up frame for TX.
480 frame->mii_stdelim = AX_MII_STARTDELIM;
481 frame->mii_opcode = AX_MII_WRITEOP;
482 frame->mii_turnaround = AX_MII_TURNAROUND;
489 ax_mii_send(sc, frame->mii_stdelim, 2);
490 ax_mii_send(sc, frame->mii_opcode, 2);
491 ax_mii_send(sc, frame->mii_phyaddr, 5);
492 ax_mii_send(sc, frame->mii_regaddr, 5);
493 ax_mii_send(sc, frame->mii_turnaround, 2);
494 ax_mii_send(sc, frame->mii_data, 16);
/* Idle bits to terminate the frame. */
497 ax_mii_writebit(sc, 0);
498 ax_mii_writebit(sc, 0);
/*
 * Convenience wrapper: read PHY register 'reg' on the PHY at
 * sc->ax_phy_addr and return the 16-bit value.
 */
505 static u_int16_t ax_phy_readreg(sc, reg)
509 struct ax_mii_frame frame;
511 bzero((char *)&frame, sizeof(frame));
513 frame.mii_phyaddr = sc->ax_phy_addr;
514 frame.mii_regaddr = reg;
515 ax_mii_readreg(sc, &frame);
517 return(frame.mii_data);
/*
 * Convenience wrapper: write 16-bit 'data' to PHY register 'reg' on the
 * PHY at sc->ax_phy_addr.
 */
520 static void ax_phy_writereg(sc, reg, data)
525 struct ax_mii_frame frame;
527 bzero((char *)&frame, sizeof(frame));
529 frame.mii_phyaddr = sc->ax_phy_addr;
530 frame.mii_regaddr = reg;
531 frame.mii_data = data;
533 ax_mii_writereg(sc, &frame);
539 * Calculate CRC of a multicast group address, return the lower 6 bits.
/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 * This is the DEC 21x4x-style software CRC used to pick a bit position in
 * the chip's 64-bit multicast hash filter. Note the unusual '| carry'
 * (rather than '^') in the update step — this matches the tulip-clone
 * convention; do not "fix" it without checking real hardware behavior.
 */
541 static u_int32_t ax_calchash(addr)
544 u_int32_t crc, carry;
548 /* Compute CRC for the address value. */
549 crc = 0xFFFFFFFF; /* initial value */
551 for (i = 0; i < 6; i++) {
553 for (j = 0; j < 8; j++) {
554 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
558 crc = (crc ^ 0x04c11db6) | carry;
562 /* return the filter bit position */
563 return((crc >> 26) & 0x0000003F);
/*
 * Program the chip's multicast filter. If the interface is in ALLMULTI or
 * PROMISC mode, just enable receive-all-multicast; otherwise clear both
 * 32-bit hash filter registers (MAR0/MAR1) and set one bit per multicast
 * group address using ax_calchash() to pick the bit position.
 */
566 static void ax_setmulti(sc)
571 u_int32_t hashes[2] = { 0, 0 };
572 struct ifmultiaddr *ifma;
575 ifp = &sc->arpcom.ac_if;
577 rxfilt = CSR_READ_4(sc, AX_NETCFG);
579 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
580 rxfilt |= AX_NETCFG_RX_ALLMULTI;
581 CSR_WRITE_4(sc, AX_NETCFG, rxfilt);
584 rxfilt &= ~AX_NETCFG_RX_ALLMULTI;
586 /* first, zot all the existing hash bits */
587 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR0);
588 CSR_WRITE_4(sc, AX_FILTDATA, 0);
589 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR1);
590 CSR_WRITE_4(sc, AX_FILTDATA, 0);
592 /* now program new ones */
593 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
594 ifma = ifma->ifma_link.le_next) {
595 if (ifma->ifma_addr->sa_family != AF_LINK)
597 h = ax_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
/* Hash values 0-31 go in MAR0, 32-63 in MAR1. */
599 hashes[0] |= (1 << h);
601 hashes[1] |= (1 << (h - 32));
604 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR0);
605 CSR_WRITE_4(sc, AX_FILTDATA, hashes[0]);
606 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_MAR1);
607 CSR_WRITE_4(sc, AX_FILTDATA, hashes[1]);
608 CSR_WRITE_4(sc, AX_NETCFG, rxfilt);
614 * Initiate an autonegotiation session.
/*
 * Initiate an autonegotiation session: reset the PHY, spin until the reset
 * bit self-clears, then set the autoneg-enable and autoneg-restart bits in
 * the BMCR.
 */
616 static void ax_autoneg_xmit(sc)
621 ax_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
623 while(ax_phy_readreg(sc, PHY_BMCR)
626 phy_sts = ax_phy_readreg(sc, PHY_BMCR);
627 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
628 ax_phy_writereg(sc, PHY_BMCR, phy_sts);
634 * Invoke autonegotiation on a PHY.
/*
 * Invoke autonegotiation on a PHY and, on completion, program both the PHY
 * BMCR and the ASIC's duplex/speed config to the negotiated mode.
 *
 * 'flag' selects how the (possibly multi-second) wait is handled:
 *   AX_FLAG_FORCEDELAY - block in place (probe-time only);
 *   AX_FLAG_SCHEDDELAY - defer until the transmitter is idle, rechecking
 *                        via the watchdog timeout;
 *   AX_FLAG_DELAYTIMEO - called back from the timeout to finish the job.
 * 'verbose' gates the informational printfs.
 *
 * NOTE(review): many interior lines (DELAY calls, braces, return paths)
 * are missing from this extraction.
 */
636 static void ax_autoneg_mii(sc, flag, verbose)
641 u_int16_t phy_sts = 0, media, advert, ability;
646 ifp = &sc->arpcom.ac_if;
648 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
651 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported'
652 * bit cleared in the status register, but has the 'autoneg enabled'
653 * bit set in the control register. This is a contradiction, and
654 * I'm not sure how to handle it. If you want to force an attempt
655 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR
656 * and see what happens.
658 #ifndef FORCE_AUTONEG_TFOUR
660 * First, see if autoneg is supported. If not, there's
661 * no point in continuing.
663 phy_sts = ax_phy_readreg(sc, PHY_BMSR);
664 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
666 printf("ax%d: autonegotiation not supported\n",
668 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
674 case AX_FLAG_FORCEDELAY:
676 * XXX Never use this option anywhere but in the probe
677 * routine: making the kernel stop dead in its tracks
678 * for three whole seconds after we've gone multi-user
679 * is really bad manners.
684 case AX_FLAG_SCHEDDELAY:
686 * Wait for the transmitter to go idle before starting
687 * an autoneg session, otherwise ax_start() may clobber
688 * our timeout, and we don't want to allow transmission
689 * during an autoneg session since that can screw it up.
691 if (sc->ax_cdata.ax_tx_head != NULL) {
692 sc->ax_want_auto = 1;
698 sc->ax_want_auto = 0;
701 case AX_FLAG_DELAYTIMEO:
706 printf("ax%d: invalid autoneg flag: %d\n", sc->ax_unit, flag);
710 if (ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
712 printf("ax%d: autoneg complete, ", sc->ax_unit);
713 phy_sts = ax_phy_readreg(sc, PHY_BMSR);
716 printf("ax%d: autoneg not complete, ", sc->ax_unit);
719 media = ax_phy_readreg(sc, PHY_BMCR);
721 /* Link is good. Report modes and set duplex mode. */
722 if (ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
724 printf("link status good ");
725 advert = ax_phy_readreg(sc, PHY_ANAR);
726 ability = ax_phy_readreg(sc, PHY_LPAR);
/*
 * Select the best mode both sides advertise, in descending
 * preference order: 100bT4, 100bTX-FDX, 100bTX-HDX, 10bT-FDX,
 * 10bT-HDX. 'media' accumulates the BMCR bits to program.
 */
728 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
729 ifm->ifm_media = IFM_ETHER|IFM_100_T4;
730 media |= PHY_BMCR_SPEEDSEL;
731 media &= ~PHY_BMCR_DUPLEX;
732 printf("(100baseT4)\n");
733 } else if (advert & PHY_ANAR_100BTXFULL &&
734 ability & PHY_ANAR_100BTXFULL) {
735 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
736 media |= PHY_BMCR_SPEEDSEL;
737 media |= PHY_BMCR_DUPLEX;
738 printf("(full-duplex, 100Mbps)\n");
739 } else if (advert & PHY_ANAR_100BTXHALF &&
740 ability & PHY_ANAR_100BTXHALF) {
741 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
742 media |= PHY_BMCR_SPEEDSEL;
743 media &= ~PHY_BMCR_DUPLEX;
744 printf("(half-duplex, 100Mbps)\n");
745 } else if (advert & PHY_ANAR_10BTFULL &&
746 ability & PHY_ANAR_10BTFULL) {
747 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
748 media &= ~PHY_BMCR_SPEEDSEL;
749 media |= PHY_BMCR_DUPLEX;
750 printf("(full-duplex, 10Mbps)\n");
751 } else if (advert & PHY_ANAR_10BTHALF &&
752 ability & PHY_ANAR_10BTHALF) {
753 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
754 media &= ~PHY_BMCR_SPEEDSEL;
755 media &= ~PHY_BMCR_DUPLEX;
756 printf("(half-duplex, 10Mbps)\n");
/* Negotiation is done; turn autoneg off before forcing the mode. */
759 media &= ~PHY_BMCR_AUTONEGENBL;
761 /* Set ASIC's duplex mode to match the PHY. */
762 ax_setcfg(sc, media);
763 ax_phy_writereg(sc, PHY_BMCR, media);
766 printf("no carrier\n");
/* Restart any transmission that was deferred during autoneg. */
771 if (sc->ax_tx_pend) {
/*
 * Query the PHY's BMSR capability bits and register every supported media
 * type with the ifmedia subsystem, tracking the "best" mode in
 * sc->ifmedia.ifm_media as progressively better capabilities are found.
 */
780 static void ax_getmode_mii(sc)
786 ifp = &sc->arpcom.ac_if;
788 bmsr = ax_phy_readreg(sc, PHY_BMSR);
790 printf("ax%d: PHY status word: %x\n", sc->ax_unit, bmsr);
/* Fall back to 10bT half-duplex; always supported. */
793 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX;
795 if (bmsr & PHY_BMSR_10BTHALF) {
797 printf("ax%d: 10Mbps half-duplex mode supported\n",
799 ifmedia_add(&sc->ifmedia,
800 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
801 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
804 if (bmsr & PHY_BMSR_10BTFULL) {
806 printf("ax%d: 10Mbps full-duplex mode supported\n",
808 ifmedia_add(&sc->ifmedia,
809 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
810 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX;
813 if (bmsr & PHY_BMSR_100BTXHALF) {
815 printf("ax%d: 100Mbps half-duplex mode supported\n",
817 ifp->if_baudrate = 100000000;
818 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
819 ifmedia_add(&sc->ifmedia,
820 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
821 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX;
824 if (bmsr & PHY_BMSR_100BTXFULL) {
826 printf("ax%d: 100Mbps full-duplex mode supported\n",
828 ifp->if_baudrate = 100000000;
829 ifmedia_add(&sc->ifmedia,
830 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
831 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX;
834 /* Some also support 100BaseT4. */
835 if (bmsr & PHY_BMSR_100BT4) {
837 printf("ax%d: 100baseT4 mode supported\n", sc->ax_unit);
838 ifp->if_baudrate = 100000000;
839 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL);
840 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4;
841 #ifdef FORCE_AUTONEG_TFOUR
843 printf("ax%d: forcing on autoneg support for BT4\n",
/*
 * BUG(review): the next line has a missing comma ("0 NULL") and a ':'
 * where ';' belongs — it will not compile if FORCE_AUTONEG_TFOUR is
 * ever defined. Should read:
 *   ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
 * Left byte-identical here because the surrounding lines are
 * incomplete in this extraction; fix in the full source.
 */
845 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL):
846 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
850 if (bmsr & PHY_BMSR_CANAUTONEG) {
852 printf("ax%d: autoneg supported\n", sc->ax_unit);
853 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
854 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO;
861 * Set speed and duplex mode.
/*
 * Force the PHY to a specific speed/duplex (cancelling any in-progress
 * autoneg session first) by clearing autoneg in the BMCR and setting the
 * speed-select and duplex bits from the requested ifmedia word.
 */
863 static void ax_setmode_mii(sc, media)
870 ifp = &sc->arpcom.ac_if;
873 * If an autoneg session is in progress, stop it.
875 if (sc->ax_autoneg) {
876 printf("ax%d: canceling autoneg session\n", sc->ax_unit);
877 ifp->if_timer = sc->ax_autoneg = sc->ax_want_auto = 0;
878 bmcr = ax_phy_readreg(sc, PHY_BMCR);
879 bmcr &= ~PHY_BMCR_AUTONEGENBL;
880 ax_phy_writereg(sc, PHY_BMCR, bmcr);
883 printf("ax%d: selecting MII, ", sc->ax_unit);
885 bmcr = ax_phy_readreg(sc, PHY_BMCR);
/* Start from a clean slate: no autoneg, speed, duplex, or loopback. */
887 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL|
888 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK);
890 if (IFM_SUBTYPE(media) == IFM_100_T4) {
891 printf("100Mbps/T4, half-duplex\n");
892 bmcr |= PHY_BMCR_SPEEDSEL;
893 bmcr &= ~PHY_BMCR_DUPLEX;
896 if (IFM_SUBTYPE(media) == IFM_100_TX) {
898 bmcr |= PHY_BMCR_SPEEDSEL;
901 if (IFM_SUBTYPE(media) == IFM_10_T) {
903 bmcr &= ~PHY_BMCR_SPEEDSEL;
906 if ((media & IFM_GMASK) == IFM_FDX) {
907 printf("full duplex\n");
908 bmcr |= PHY_BMCR_DUPLEX;
910 printf("half duplex\n");
911 bmcr &= ~PHY_BMCR_DUPLEX;
915 ax_phy_writereg(sc, PHY_BMCR, bmcr);
921 * Set speed and duplex mode on internal transceiver.
/*
 * Set speed and duplex mode on the internal transceiver by programming the
 * chip's NETCFG register directly (PORTSEL/PCS/SCRAMBLER for 100Mbps modes,
 * SPEEDSEL for 10Mbps, FULLDUPLEX for duplex). 'verbose' gates printfs.
 */
923 static void ax_setmode(sc, media, verbose)
931 ifp = &sc->arpcom.ac_if;
934 printf("ax%d: selecting internal xcvr, ", sc->ax_unit);
936 mode = CSR_READ_4(sc, AX_NETCFG);
/* Clear all speed/duplex-related bits before setting the new mode. */
938 mode &= ~(AX_NETCFG_FULLDUPLEX|AX_NETCFG_PORTSEL|
939 AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER|AX_NETCFG_SPEEDSEL);
941 if (IFM_SUBTYPE(media) == IFM_100_T4) {
943 printf("100Mbps/T4, half-duplex\n");
944 mode |= AX_NETCFG_PORTSEL|AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER;
947 if (IFM_SUBTYPE(media) == IFM_100_TX) {
950 mode |= AX_NETCFG_PORTSEL|AX_NETCFG_PCS|AX_NETCFG_SCRAMBLER;
953 if (IFM_SUBTYPE(media) == IFM_10_T) {
956 mode &= ~AX_NETCFG_PORTSEL;
957 mode |= AX_NETCFG_SPEEDSEL;
960 if ((media & IFM_GMASK) == IFM_FDX) {
962 printf("full duplex\n");
963 mode |= AX_NETCFG_FULLDUPLEX;
966 printf("half duplex\n");
967 mode &= ~AX_NETCFG_FULLDUPLEX;
970 CSR_WRITE_4(sc, AX_NETCFG, mode);
976 * In order to fiddle with the
977 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
978 * first have to put the transmit and/or receive logic in the idle state.
/*
 * Mirror the PHY BMCR's speed/duplex bits into the chip's NETCFG register.
 * The TX/RX DMA engines must be idled first: stop them, then poll the ISR
 * until the TX-idle condition is seen (bounded by AX_TIMEOUT), warn if it
 * never goes idle, and restart TX/RX afterward. Note the chip's SPEEDSEL
 * sense is inverted relative to the PHY's (cleared for 100Mbps).
 */
980 static void ax_setcfg(sc, bmcr)
986 if (CSR_READ_4(sc, AX_NETCFG) & (AX_NETCFG_TX_ON|AX_NETCFG_RX_ON)) {
988 AX_CLRBIT(sc, AX_NETCFG, (AX_NETCFG_TX_ON|AX_NETCFG_RX_ON));
990 for (i = 0; i < AX_TIMEOUT; i++) {
992 if (CSR_READ_4(sc, AX_ISR) & AX_ISR_TX_IDLE)
997 printf("ax%d: failed to force tx and "
998 "rx to idle state\n", sc->ax_unit);
1002 if (bmcr & PHY_BMCR_SPEEDSEL)
1003 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
1005 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
1007 if (bmcr & PHY_BMCR_DUPLEX)
1008 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_FULLDUPLEX);
1010 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_FULLDUPLEX);
/* Restart the receiver and transmitter now that config is updated. */
1013 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON|AX_NETCFG_RX_ON);
/*
 * Soft-reset the chip: set the RESET bit in the bus-control register, poll
 * until the hardware clears it (bounded by AX_TIMEOUT, warning if it never
 * completes), then restore the bus configuration and give the chip time to
 * settle.
 */
1018 static void ax_reset(sc)
1019 struct ax_softc *sc;
1023 AX_SETBIT(sc, AX_BUSCTL, AX_BUSCTL_RESET);
1025 for (i = 0; i < AX_TIMEOUT; i++) {
1027 if (!(CSR_READ_4(sc, AX_BUSCTL) & AX_BUSCTL_RESET))
1031 if (i == AX_TIMEOUT)
1032 printf("ax%d: reset never completed!\n", sc->ax_unit);
1034 CSR_WRITE_4(sc, AX_BUSCTL, AX_BUSCTL_CONFIG);
1036 /* Wait a little while for the chip to get its brains in order. */
1042 * Probe for an ASIX chip. Check the PCI vendor and device
1043 * IDs against our list and return a device name if we find a match.
/*
 * Walks the ax_devs[] table comparing the low 16 bits of device_id against
 * ax_vid and the high 16 bits against ax_did. Return statements and the
 * table-cursor initialization are not visible in this extraction.
 */
1046 ax_probe(config_id, device_id)
1054 while(t->ax_name != NULL) {
1055 if ((device_id & 0xFFFF) == t->ax_vid &&
1056 ((device_id >> 16) & 0xFFFF) == t->ax_did) {
1066 * Attach the interface. Allocate softc structures, do ifmedia
1067 * setup and ethernet/BPF attach.
/*
 * Attach sequence (as visible in this extraction):
 *  1. Allocate and zero the softc.
 *  2. Power management: if the chip reports a non-D0 power state, save the
 *     I/O/memory/IRQ config registers, force D0, and restore them (the
 *     registers are cleared by the power-state transition).
 *  3. Enable PCI I/O/memory/bus-master access and map the CSRs, either via
 *     I/O ports (AX_USEIOSPACE) or memory mapping.
 *  4. Hook the interrupt, reset the chip, and read the station address
 *     from the EEPROM (3 words starting at AX_EE_NODEADDR).
 *  5. Allocate the descriptor-list memory with 8 bytes of slack and round
 *     the pointer up to a 16-byte boundary (the loop over 'round' bytes).
 *  6. Fill in the ifnet structure and probe PHY addresses
 *     AX_PHYADDR_MIN..AX_PHYADDR_MAX for a responding PHY; identify it
 *     against ax_phys[] (device-ID revision nibble masked with | 0x000F),
 *     falling back to the PHY_UNKNOWN entry.
 *  7. Set up ifmedia: run a forced-delay autoneg if a PHY was found,
 *     otherwise register the fixed internal-transceiver media types.
 *  8. ether_ifattach / bpfattach / at_shutdown registration.
 * NOTE(review): many interior lines (error-path returns, DELAYs, braces)
 * are missing from this extraction.
 */
1070 ax_attach(config_id, unit)
1075 #ifndef AX_USEIOSPACE
1076 vm_offset_t pbase, vbase;
1078 u_char eaddr[ETHER_ADDR_LEN];
1080 struct ax_softc *sc;
1082 int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
1086 u_int16_t phy_vid, phy_did, phy_sts;
1090 sc = malloc(sizeof(struct ax_softc), M_DEVBUF, M_NOWAIT);
1092 printf("ax%d: no memory for softc struct!\n", unit);
1095 bzero(sc, sizeof(struct ax_softc));
1098 * Handle power management nonsense.
1101 command = pci_conf_read(config_id, AX_PCI_CAPID) & 0x000000FF;
1102 if (command == 0x01) {
1104 command = pci_conf_read(config_id, AX_PCI_PWRMGMTCTRL);
1105 if (command & AX_PSTATE_MASK) {
1106 u_int32_t iobase, membase, irq;
1108 /* Save important PCI config data. */
1109 iobase = pci_conf_read(config_id, AX_PCI_LOIO);
1110 membase = pci_conf_read(config_id, AX_PCI_LOMEM);
1111 irq = pci_conf_read(config_id, AX_PCI_INTLINE);
1113 /* Reset the power state. */
1114 printf("ax%d: chip is in D%d power mode "
1115 "-- setting to D0\n", unit, command & AX_PSTATE_MASK);
1116 command &= 0xFFFFFFFC;
1117 pci_conf_write(config_id, AX_PCI_PWRMGMTCTRL, command);
1119 /* Restore PCI config data. */
1120 pci_conf_write(config_id, AX_PCI_LOIO, iobase);
1121 pci_conf_write(config_id, AX_PCI_LOMEM, membase);
1122 pci_conf_write(config_id, AX_PCI_INTLINE, irq);
1127 * Map control/status registers.
1129 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1130 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1131 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
1132 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
1134 #ifdef AX_USEIOSPACE
1135 if (!(command & PCIM_CMD_PORTEN)) {
1136 printf("ax%d: failed to enable I/O ports!\n", unit);
1141 if (!pci_map_port(config_id, AX_PCI_LOIO,
1142 (u_short *)&(sc->ax_bhandle))) {
1143 printf ("ax%d: couldn't map ports\n", unit);
1146 sc->ax_btag = I386_BUS_SPACE_IO;
1148 if (!(command & PCIM_CMD_MEMEN)) {
1149 printf("ax%d: failed to enable memory mapping!\n", unit);
1153 if (!pci_map_mem(config_id, AX_PCI_LOMEM, &vbase, &pbase)) {
1154 printf ("ax%d: couldn't map memory\n", unit);
1157 sc->ax_btag = I386_BUS_SPACE_MEM;
1158 sc->ax_bhandle = vbase;
1161 /* Allocate interrupt */
1162 if (!pci_map_int(config_id, ax_intr, sc, &net_imask)) {
1163 printf("ax%d: couldn't map interrupt\n", unit);
1167 /* Reset the adapter. */
1171 * Get station address from the EEPROM.
1173 ax_read_eeprom(sc, (caddr_t)&eaddr, AX_EE_NODEADDR, 3, 0);
1176 * An ASIX chip was detected. Inform the world.
1178 printf("ax%d: Ethernet address: %6D\n", unit, eaddr, ":");
1181 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
/* Descriptor lists: over-allocate by 8 so we can 16-byte align. */
1183 sc->ax_ldata_ptr = malloc(sizeof(struct ax_list_data) + 8,
1184 M_DEVBUF, M_NOWAIT);
1185 if (sc->ax_ldata_ptr == NULL) {
1187 printf("ax%d: no memory for list buffers!\n", unit);
1191 sc->ax_ldata = (struct ax_list_data *)sc->ax_ldata_ptr;
1192 round = (unsigned int)sc->ax_ldata_ptr & 0xF;
1193 roundptr = sc->ax_ldata_ptr;
1194 for (i = 0; i < 8; i++) {
1201 sc->ax_ldata = (struct ax_list_data *)roundptr;
1202 bzero(sc->ax_ldata, sizeof(struct ax_list_data));
1204 ifp = &sc->arpcom.ac_if;
1206 ifp->if_unit = unit;
1207 ifp->if_name = "ax";
1208 ifp->if_mtu = ETHERMTU;
1209 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1210 ifp->if_ioctl = ax_ioctl;
1211 ifp->if_output = ether_output;
1212 ifp->if_start = ax_start;
1213 ifp->if_watchdog = ax_watchdog;
1214 ifp->if_init = ax_init;
1215 ifp->if_baudrate = 10000000;
1219 printf("ax%d: probing for a PHY\n", sc->ax_unit);
1220 for (i = AX_PHYADDR_MIN; i < AX_PHYADDR_MAX + 1; i++) {
1222 printf("ax%d: checking address: %d\n",
1224 sc->ax_phy_addr = i;
1225 ax_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
1227 while(ax_phy_readreg(sc, PHY_BMCR)
/* A nonzero BMSR means a PHY answered at this address. */
1229 if ((phy_sts = ax_phy_readreg(sc, PHY_BMSR)))
1233 phy_vid = ax_phy_readreg(sc, PHY_VENID);
1234 phy_did = ax_phy_readreg(sc, PHY_DEVID);
1236 printf("ax%d: found PHY at address %d, ",
1237 sc->ax_unit, sc->ax_phy_addr);
1239 printf("vendor id: %x device id: %x\n",
/* Mask the revision nibble when matching the device ID. */
1243 if (phy_vid == p->ax_vid &&
1244 (phy_did | 0x000F) == p->ax_did) {
1250 if (sc->ax_pinfo == NULL)
1251 sc->ax_pinfo = &ax_phys[PHY_UNKNOWN];
1253 printf("ax%d: PHY type: %s\n",
1254 sc->ax_unit, sc->ax_pinfo->ax_name);
1257 printf("ax%d: MII without any phy!\n", sc->ax_unit);
1264 ifmedia_init(&sc->ifmedia, 0, ax_ifmedia_upd, ax_ifmedia_sts);
1266 if (sc->ax_pinfo != NULL) {
1268 ax_autoneg_mii(sc, AX_FLAG_FORCEDELAY, 1);
1270 ifmedia_add(&sc->ifmedia,
1271 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1272 ifmedia_add(&sc->ifmedia,
1273 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1274 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1275 ifmedia_add(&sc->ifmedia,
1276 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
1277 ifmedia_add(&sc->ifmedia,
1278 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
1279 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
1280 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1283 media = sc->ifmedia.ifm_media;
1286 ifmedia_set(&sc->ifmedia, media);
1289 * Call MI attach routines.
1292 ether_ifattach(ifp);
1295 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1297 at_shutdown(ax_shutdown, sc, SHUTDOWN_POST_SYNC);
1305 * Initialize the transmit descriptors.
/*
 * Initialize the transmit descriptor ring: point each software chain entry
 * at its hardware descriptor and link the entries into a circular list
 * (last entry wraps to the first). Head/tail start NULL (ring empty).
 */
1307 static int ax_list_tx_init(sc)
1308 struct ax_softc *sc;
1310 struct ax_chain_data *cd;
1311 struct ax_list_data *ld;
1316 for (i = 0; i < AX_TX_LIST_CNT; i++) {
1317 cd->ax_tx_chain[i].ax_ptr = &ld->ax_tx_list[i];
1318 if (i == (AX_TX_LIST_CNT - 1))
1319 cd->ax_tx_chain[i].ax_nextdesc =
1320 &cd->ax_tx_chain[0];
1322 cd->ax_tx_chain[i].ax_nextdesc =
1323 &cd->ax_tx_chain[i + 1];
1326 cd->ax_tx_free = &cd->ax_tx_chain[0];
1327 cd->ax_tx_tail = cd->ax_tx_head = NULL;
1334 * Initialize the RX descriptors and allocate mbufs for them. Note that
1335 * we arrange the descriptors in a closed ring, so that the last descriptor
1336 * points back to the first.
/*
 * Initialize the RX descriptors and allocate an mbuf cluster for each via
 * ax_newbuf(). Both the software chain and the hardware descriptors are
 * arranged as a closed ring: the last entry's next pointers (software
 * pointer and vtophys'd hardware pointer) wrap back to the first.
 * Returns ENOBUFS if any mbuf allocation fails (failure-return line not
 * visible in this extraction).
 */
1338 static int ax_list_rx_init(sc)
1339 struct ax_softc *sc;
1341 struct ax_chain_data *cd;
1342 struct ax_list_data *ld;
1348 for (i = 0; i < AX_RX_LIST_CNT; i++) {
1349 cd->ax_rx_chain[i].ax_ptr =
1350 (struct ax_desc *)&ld->ax_rx_list[i];
1351 if (ax_newbuf(sc, &cd->ax_rx_chain[i]) == ENOBUFS)
1353 if (i == (AX_RX_LIST_CNT - 1)) {
1354 cd->ax_rx_chain[i].ax_nextdesc =
1355 &cd->ax_rx_chain[0];
1356 ld->ax_rx_list[i].ax_next =
1357 vtophys(&ld->ax_rx_list[0]);
1359 cd->ax_rx_chain[i].ax_nextdesc =
1360 &cd->ax_rx_chain[i + 1];
1361 ld->ax_rx_list[i].ax_next =
1362 vtophys(&ld->ax_rx_list[i + 1]);
1366 cd->ax_rx_head = &cd->ax_rx_chain[0];
1372 * Initialize an RX descriptor and attach an MBUF cluster.
1373 * Note: the length fields are only 11 bits wide, which means the
1374 * largest size we can specify is 2047. This is important because
1375 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
1376 * overflow the field and make a mess.
/*
 * Attach a fresh mbuf cluster to RX descriptor 'c' and re-arm it.
 * Returns ENOBUFS when mbuf or cluster allocation fails (return lines not
 * visible in this extraction). The buffer length is set to MCLBYTES - 1
 * because the descriptor length field is only 11 bits wide (max 2047)
 * while MCLBYTES is 2048 — see the comment block above the function in
 * the full source.
 */
1378 static int ax_newbuf(sc, c)
1379 struct ax_softc *sc;
1380 struct ax_chain_onefrag *c;
1382 struct mbuf *m_new = NULL;
1384 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1385 if (m_new == NULL) {
1386 printf("ax%d: no memory for rx list -- packet dropped!\n",
1391 MCLGET(m_new, M_DONTWAIT);
1392 if (!(m_new->m_flags & M_EXT)) {
1393 printf("ax%d: no memory for rx list -- packet dropped!\n",
/* Re-arm the descriptor: hand ownership back with the new buffer. */
1400 c->ax_ptr->ax_status = AX_RXSTAT;
1401 c->ax_ptr->ax_data = vtophys(mtod(m_new, caddr_t));
1402 c->ax_ptr->ax_ctl = MCLBYTES - 1;
1408 * A frame has been uploaded: pass the resulting mbuf chain up to
1409 * the higher level protocols.
/*
 * RX completion handler: walk the receive ring from ax_rx_head, and for
 * each completed descriptor either recycle it in place (on RX error or
 * allocation failure), copy small packets out with m_devget(), or swap in
 * a new cluster via ax_newbuf() and pass the old mbuf up the stack through
 * ether_input() after stripping the Ethernet header.
 * NOTE(review): several lines (loop-exit condition details, BPF tap call,
 * continue statements) are missing from this extraction.
 */
1411 static void ax_rxeof(sc)
1412 struct ax_softc *sc;
1414 struct ether_header *eh;
1417 struct ax_chain_onefrag *cur_rx;
1421 ifp = &sc->arpcom.ac_if;
/* Loop while the head descriptor has been released by the chip. */
1423 while(!((rxstat = sc->ax_cdata.ax_rx_head->ax_ptr->ax_status) &
1425 cur_rx = sc->ax_cdata.ax_rx_head;
1426 sc->ax_cdata.ax_rx_head = cur_rx->ax_nextdesc;
1429 * If an error occurs, update stats, clear the
1430 * status word and leave the mbuf cluster in place:
1431 * it should simply get re-used next time this descriptor
1432 * comes up in the ring.
1434 if (rxstat & AX_RXSTAT_RXERR) {
1436 if (rxstat & AX_RXSTAT_COLLSEEN)
1437 ifp->if_collisions++;
1438 cur_rx->ax_ptr->ax_status = AX_RXSTAT;
1439 cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
1443 /* No errors; receive the packet. */
1444 m = cur_rx->ax_mbuf;
1445 total_len = AX_RXBYTES(cur_rx->ax_ptr->ax_status);
/* The chip includes the FCS in the byte count; strip it. */
1447 total_len -= ETHER_CRC_LEN;
/* Small packet: copy out and recycle the cluster immediately. */
1449 if (total_len < MINCLSIZE) {
1450 m = m_devget(mtod(cur_rx->ax_mbuf, char *),
1451 total_len, 0, ifp, NULL);
1452 cur_rx->ax_ptr->ax_status = AX_RXSTAT;
1453 cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
1459 m = cur_rx->ax_mbuf;
1461 * Try to conjure up a new mbuf cluster. If that
1462 * fails, it means we have an out of memory condition and
1463 * should leave the buffer in place and continue. This will
1464 * result in a lost packet, but there's little else we
1465 * can do in this situation.
1467 if (ax_newbuf(sc, cur_rx) == ENOBUFS) {
1469 cur_rx->ax_ptr->ax_status = AX_RXSTAT;
1470 cur_rx->ax_ptr->ax_ctl = (MCLBYTES - 1);
1473 m->m_pkthdr.rcvif = ifp;
1474 m->m_pkthdr.len = m->m_len = total_len;
1478 eh = mtod(m, struct ether_header *);
1481 * Handle BPF listeners. Let the BPF user see the packet, but
1482 * don't pass it up to the ether_input() layer unless it's
1483 * a broadcast packet, multicast packet, matches our ethernet
1484 * address or the interface is in promiscuous mode.
1488 if (ifp->if_flags & IFF_PROMISC &&
1489 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
1491 (eh->ether_dhost[0] & 1) == 0)) {
1497 /* Remove header from mbuf and pass it on. */
1498 m_adj(m, sizeof(struct ether_header));
1499 ether_input(ifp, eh, m);
/*
 * RX end-of-channel handler.
 * NOTE(review): this extract is a sparse sampling of the original file —
 * the function header and some body lines are absent, so the comments
 * describe only the visible statements.
 * Restart a stalled receiver: stop RX, reload the physical base address
 * of the RX descriptor ring, re-enable RX, then write the RX start/poll
 * register (presumably a poll demand whose written value is a don't-care
 * — TODO confirm against the AX88140A datasheet).
 */
1506 struct ax_softc *sc;
/* Receiver must be off while the descriptor base address is rewritten. */
1510 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_ON);
1511 CSR_WRITE_4(sc, AX_RXADDR, vtophys(sc->ax_cdata.ax_rx_head->ax_ptr));
1512 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_ON);
1513 CSR_WRITE_4(sc, AX_RXSTART, 0xFFFFFFFF);
/*
 * TX completion handler: reclaim descriptors for frames the chip has
 * finished transmitting, free their mbufs, and update collision stats.
 * NOTE(review): several original lines (declarations of ifp/txstat,
 * braces, splnet, returns) are missing from this extract; commentary
 * covers only the visible statements.
 */
1519 * A frame was downloaded to the chip. It's safe for us to clean up
1523 static void ax_txeof(sc)
1524 struct ax_softc *sc;
1526 struct ax_chain *cur_tx;
1529 ifp = &sc->arpcom.ac_if;
1531 /* Clear the timeout timer. */
/* Nothing queued to the chip: nothing to reclaim. */
1534 if (sc->ax_cdata.ax_tx_head == NULL)
1538 * Go through our tx list and free mbufs for those
1539 * frames that have been transmitted.
1541 while(sc->ax_cdata.ax_tx_head->ax_mbuf != NULL) {
1544 cur_tx = sc->ax_cdata.ax_tx_head;
1545 txstat = AX_TXSTATUS(cur_tx);
/* OWN still set means the chip hasn't finished this descriptor yet. */
1547 if (txstat & AX_TXSTAT_OWN)
1550 if (txstat & AX_TXSTAT_ERRSUM) {
1552 if (txstat & AX_TXSTAT_EXCESSCOLL)
1553 ifp->if_collisions++;
1554 if (txstat & AX_TXSTAT_LATECOLL)
1555 ifp->if_collisions++;
/* Collision count is a bit field within the status word, hence the shift. */
1558 ifp->if_collisions += (txstat & AX_TXSTAT_COLLCNT) >> 3;
/* Descriptor is done: release its mbuf and mark the slot free. */
1561 m_freem(cur_tx->ax_mbuf);
1562 cur_tx->ax_mbuf = NULL;
/* Reached the tail: the whole in-flight list has been reclaimed. */
1564 if (sc->ax_cdata.ax_tx_head == sc->ax_cdata.ax_tx_tail) {
1565 sc->ax_cdata.ax_tx_head = NULL;
1566 sc->ax_cdata.ax_tx_tail = NULL;
1570 sc->ax_cdata.ax_tx_head = cur_tx->ax_nextdesc;
/*
 * TX 'end of channel' handler: once the transmit list has fully drained
 * (ax_txeof cleared ax_tx_head), clear IFF_OACTIVE so ax_start may queue
 * again, and kick off any autonegotiation that was deferred while
 * transmission was in progress.
 */
1577 * TX 'end of channel' interrupt handler.
1579 static void ax_txeoc(sc)
1580 struct ax_softc *sc;
1584 ifp = &sc->arpcom.ac_if;
1588 if (sc->ax_cdata.ax_tx_head == NULL) {
1589 ifp->if_flags &= ~IFF_OACTIVE;
1590 sc->ax_cdata.ax_tx_tail = NULL;
/* Autoneg was requested while TX was busy; run it now that we're idle. */
1591 if (sc->ax_want_auto)
1592 ax_autoneg_mii(sc, AX_FLAG_DELAYTIMEO, 1);
/*
 * Main interrupt service routine: mask interrupts, read and acknowledge
 * the status register, dispatch to the RX/TX completion handlers, handle
 * TX underrun by raising the TX threshold, then re-enable interrupts and
 * restart output if packets are still queued.
 * NOTE(review): this extract omits several original lines (arg cast,
 * local declarations, returns, braces); comments cover visible code only.
 */
1598 static void ax_intr(arg)
1601 struct ax_softc *sc;
1606 ifp = &sc->arpcom.ac_if;
1608 /* Suppress unwanted interrupts */
1609 if (!(ifp->if_flags & IFF_UP)) {
1614 /* Disable interrupts. */
1615 CSR_WRITE_4(sc, AX_IMR, 0x00000000);
1618 status = CSR_READ_4(sc, AX_ISR);
/* Write the status bits back to acknowledge (clear) them. */
1620 CSR_WRITE_4(sc, AX_ISR, status);
/* Not one of the interrupts we enabled: nothing to do. */
1622 if ((status & AX_INTRS) == 0)
1625 if ((status & AX_ISR_TX_OK) || (status & AX_ISR_TX_EARLY))
1628 if (status & AX_ISR_TX_NOBUF)
1631 if (status & AX_ISR_TX_IDLE) {
/* TX engine went idle with work still pending: restart it. */
1633 if (sc->ax_cdata.ax_tx_head != NULL) {
1634 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON);
1635 CSR_WRITE_4(sc, AX_TXSTART, 0xFFFFFFFF);
1639 if (status & AX_ISR_TX_UNDERRUN) {
1641 cfg = CSR_READ_4(sc, AX_NETCFG);
/*
 * On underrun, escalate: once already at the highest threshold,
 * fall back to store-and-forward; otherwise bump the TX threshold
 * field (the +0x4000 increments that field in NETCFG).
 */
1642 if ((cfg & AX_NETCFG_TX_THRESH) == AX_TXTHRESH_160BYTES)
1643 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_STORENFWD);
1645 CSR_WRITE_4(sc, AX_NETCFG, cfg + 0x4000);
1648 if (status & AX_ISR_RX_OK)
1651 if ((status & AX_ISR_RX_WATDOGTIMEO)
1652 || (status & AX_ISR_RX_NOBUF))
1655 if (status & AX_ISR_BUS_ERR) {
1661 /* Re-enable interrupts. */
1662 CSR_WRITE_4(sc, AX_IMR, AX_INTRS);
/* More packets queued for output: restart the transmitter. */
1664 if (ifp->if_snd.ifq_head != NULL) {
/*
 * Map an mbuf chain onto a TX descriptor's fragment pointers (zero-copy).
 * If the chain has more than AX_MAXFRAGS pieces, coalesce it into a
 * single fresh mbuf (cluster if needed) instead.
 * NOTE(review): this extract omits some original lines (locals, braces,
 * returns); comments cover the visible statements only.
 */
1672 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1673 * pointers to the fragment pointers.
1675 static int ax_encap(sc, c, m_head)
1676 struct ax_softc *sc;
1678 struct mbuf *m_head;
1681 volatile struct ax_desc *f = NULL;
1686 * Start packing the mbufs in this chain into
1687 * the fragment pointers. Stop when we run out
1688 * of fragments or hit the end of the mbuf chain.
1693 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
/* Skip zero-length mbufs; they contribute no data. */
1694 if (m->m_len != 0) {
1695 if (frag == AX_MAXFRAGS)
1697 total_len += m->m_len;
1698 f = &c->ax_ptr->ax_frag[frag];
1699 f->ax_ctl = m->m_len;
1702 f->ax_ctl |= AX_TXCTL_FIRSTFRAG;
/* Hand ownership of this fragment descriptor to the chip. */
1704 f->ax_status = AX_TXSTAT_OWN;
1705 f->ax_next = vtophys(&c->ax_ptr->ax_frag[frag + 1]);
/* Fragment data pointer is a physical address for the DMA engine. */
1706 f->ax_data = vtophys(mtod(m, vm_offset_t));
1712 * Handle special case: we ran out of fragments,
1713 * but we have more mbufs left in the chain. Copy the
1714 * data into an mbuf cluster. Note that we don't
1715 * bother clearing the values in the other fragment
1716 * pointers/counters; it wouldn't gain us anything,
1717 * and would waste cycles.
1720 struct mbuf *m_new = NULL;
1722 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1723 if (m_new == NULL) {
/* NOTE(review): message lacks a trailing \n — fix when code edits are possible. */
1724 printf("ax%d: no memory for tx list", sc->ax_unit);
/* Packet won't fit in a plain header mbuf: attach a cluster. */
1727 if (m_head->m_pkthdr.len > MHLEN) {
1728 MCLGET(m_new, M_DONTWAIT);
1729 if (!(m_new->m_flags & M_EXT)) {
/* NOTE(review): message also lacks a trailing \n. */
1731 printf("ax%d: no memory for tx list",
/* Coalesce the whole chain into the single new mbuf. */
1736 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1737 mtod(m_new, caddr_t));
1738 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
/* Coalesced packet uses exactly one fragment. */
1741 f = &c->ax_ptr->ax_frag[0];
1743 f->ax_data = vtophys(mtod(m_new, caddr_t));
1744 f->ax_ctl = total_len = m_new->m_len;
1745 f->ax_ctl |= AX_TXCTL_FIRSTFRAG;
/* Mark the last fragment and request a TX-complete interrupt. */
1749 c->ax_mbuf = m_head;
1750 c->ax_lastdesc = frag - 1;
1751 AX_TXCTL(c) |= AX_TXCTL_LASTFRAG|AX_TXCTL_FINT;
1752 c->ax_ptr->ax_frag[0].ax_ctl |= AX_TXCTL_FINT;
/* Chain this descriptor to the next one in the TX ring. */
1753 AX_TXNEXT(c) = vtophys(&c->ax_nextdesc->ax_ptr->ax_frag[0]);
/*
 * if_start handler: dequeue packets from the interface send queue,
 * encapsulate each into a free TX descriptor, hand the chain to the
 * chip, and arm the watchdog.
 * NOTE(review): this extract omits some original lines (locals, braces,
 * timeout assignment); comments cover visible code only.
 */
1758 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1759 * to the mbuf data regions directly in the transmit lists. We also save a
1760 * copy of the pointers since the transmit list fragment pointers are
1761 * physical addresses.
1764 static void ax_start(ifp)
1767 struct ax_softc *sc;
1768 struct mbuf *m_head = NULL;
1769 struct ax_chain *cur_tx = NULL, *start_tx;
/* Autonegotiation in progress: don't transmit until it settles. */
1773 if (sc->ax_autoneg) {
1779 * Check for an available queue slot. If there are none,
/* Free-list head still owns an mbuf => ring is full; back off. */
1782 if (sc->ax_cdata.ax_tx_free->ax_mbuf != NULL) {
1783 ifp->if_flags |= IFF_OACTIVE;
1787 start_tx = sc->ax_cdata.ax_tx_free;
1789 while(sc->ax_cdata.ax_tx_free->ax_mbuf == NULL) {
1790 IF_DEQUEUE(&ifp->if_snd, m_head);
1794 /* Pick a descriptor off the free list. */
1795 cur_tx = sc->ax_cdata.ax_tx_free;
1796 sc->ax_cdata.ax_tx_free = cur_tx->ax_nextdesc;
1798 /* Pack the data into the descriptor. */
1799 ax_encap(sc, cur_tx, m_head);
/*
 * Hand ownership of every descriptor except the first to the chip
 * now; the first is released after the whole chain is built, so the
 * chip can't race ahead of us mid-construction.
 */
1800 if (cur_tx != start_tx)
1801 AX_TXOWN(cur_tx) = AX_TXSTAT_OWN;
1805 * If there's a BPF listener, bounce a copy of this frame
1809 bpf_mtap(ifp, cur_tx->ax_mbuf);
/* Release the first descriptor and issue a TX poll demand. */
1811 AX_TXOWN(cur_tx) = AX_TXSTAT_OWN;
1812 CSR_WRITE_4(sc, AX_TXSTART, 0xFFFFFFFF);
1815 sc->ax_cdata.ax_tx_tail = cur_tx;
1816 if (sc->ax_cdata.ax_tx_head == NULL)
1817 sc->ax_cdata.ax_tx_head = start_tx;
1820 * Set a timeout in case the chip goes out to lunch.
/*
 * (Re)initialize the adapter: reset, program bus/DMA config, media mode,
 * TX threshold, station address and RX filter, build the RX/TX descriptor
 * lists, load their addresses, enable interrupts and start the RX/TX
 * engines. Saves and restores the PHY's BMCR around the reset so the
 * negotiated media setting survives.
 * NOTE(review): this extract omits some original lines (splimp, ax_stop/
 * ax_reset calls, else keywords, braces); comments cover visible code.
 */
1827 static void ax_init(xsc)
1830 struct ax_softc *sc = xsc;
1831 struct ifnet *ifp = &sc->arpcom.ac_if;
1832 u_int16_t phy_bmcr = 0;
/* Only boards with an external PHY (ax_pinfo set) have a BMCR to save. */
1840 if (sc->ax_pinfo != NULL)
1841 phy_bmcr = ax_phy_readreg(sc, PHY_BMCR);
1844 * Cancel pending I/O and free all RX/TX buffers.
1850 * Set cache alignment and burst length.
1852 CSR_WRITE_4(sc, AX_BUSCTL, AX_BUSCTL_CONFIG);
1854 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_HEARTBEAT);
1855 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_STORENFWD);
/* External PHY: select the MII port and mirror the PHY's duplex/speed. */
1857 if (sc->ax_pinfo != NULL) {
1858 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_PORTSEL);
1859 ax_setcfg(sc, ax_phy_readreg(sc, PHY_BMCR));
1861 ax_setmode(sc, sc->ifmedia.ifm_media, 0);
1863 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_TX_THRESH);
1864 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_SPEEDSEL);
/* Pick the TX FIFO threshold appropriate for the selected speed. */
1866 if (IFM_SUBTYPE(sc->ifmedia.ifm_media) == IFM_10_T)
1867 AX_SETBIT(sc, AX_NETCFG, AX_TXTHRESH_160BYTES);
1869 AX_SETBIT(sc, AX_NETCFG, AX_TXTHRESH_72BYTES);
1871 /* Init our MAC address */
/*
 * The 6-byte station address is written as two 32-bit words through
 * the filter-index/filter-data register pair.
 * NOTE(review): the u_int32_t casts assume ac_enaddr is suitably
 * aligned — presumably guaranteed by struct arpcom's layout; confirm.
 */
1872 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_PAR0);
1873 CSR_WRITE_4(sc, AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
1874 CSR_WRITE_4(sc, AX_FILTIDX, AX_FILTIDX_PAR1);
1875 CSR_WRITE_4(sc, AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
1877 /* Init circular RX list. */
1878 if (ax_list_rx_init(sc) == ENOBUFS) {
1879 printf("ax%d: initialization failed: no "
1880 "memory for rx buffers\n", sc->ax_unit);
1887 * Init tx descriptors.
1889 ax_list_tx_init(sc);
1891 /* If we want promiscuous mode, set the allframes bit. */
1892 if (ifp->if_flags & IFF_PROMISC) {
1893 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_PROMISC);
1895 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_PROMISC);
1899 * Set the capture broadcast bit to capture broadcast frames.
1901 if (ifp->if_flags & IFF_BROADCAST) {
1902 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_RX_BROAD);
1904 AX_CLRBIT(sc, AX_NETCFG, AX_NETCFG_RX_BROAD);
1908 * Load the multicast filter.
1913 * Load the address of the RX list.
/* Descriptor list base addresses must be physical for the DMA engine. */
1915 CSR_WRITE_4(sc, AX_RXADDR, vtophys(sc->ax_cdata.ax_rx_head->ax_ptr));
1916 CSR_WRITE_4(sc, AX_TXADDR, vtophys(&sc->ax_ldata->ax_tx_list[0]));
1919 * Enable interrupts.
1921 CSR_WRITE_4(sc, AX_IMR, AX_INTRS);
/* Writing all-ones clears any stale pending interrupt status. */
1922 CSR_WRITE_4(sc, AX_ISR, 0xFFFFFFFF);
1924 /* Enable receiver and transmitter. */
1925 AX_SETBIT(sc, AX_NETCFG, AX_NETCFG_TX_ON|AX_NETCFG_RX_ON);
1926 CSR_WRITE_4(sc, AX_RXSTART, 0xFFFFFFFF);
1928 /* Restore state of BMCR */
1929 if (sc->ax_pinfo != NULL)
1930 ax_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1932 ifp->if_flags |= IFF_RUNNING;
1933 ifp->if_flags &= ~IFF_OACTIVE;
/*
 * ifmedia "set" handler: apply the user-selected media. IFM_AUTO kicks
 * off MII autonegotiation; otherwise the mode is forced, either directly
 * on the chip (no external PHY) or through the MII PHY.
 */
1941 * Set media options.
1943 static int ax_ifmedia_upd(ifp)
1946 struct ax_softc *sc;
1947 struct ifmedia *ifm;
/* Sanity check: we only handle Ethernet media words. */
1952 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1955 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1956 ax_autoneg_mii(sc, AX_FLAG_SCHEDDELAY, 1);
/* No external PHY: program the chip's own media bits. */
1958 if (sc->ax_pinfo == NULL)
1959 ax_setmode(sc, ifm->ifm_media, 1);
1961 ax_setmode_mii(sc, ifm->ifm_media);
/*
 * ifmedia "status" handler: report the active media. Three cases:
 * no external PHY (read NETCFG bits), PHY with autoneg disabled (read
 * forced BMCR bits), or PHY autoneg (intersect our advertisement with
 * the link partner's abilities, best capability first).
 */
1968 * Report current media status.
1970 static void ax_ifmedia_sts(ifp, ifmr)
1972 struct ifmediareq *ifmr;
1974 struct ax_softc *sc;
1975 u_int16_t advert = 0, ability = 0;
1976 u_int32_t media = 0;
1980 ifmr->ifm_active = IFM_ETHER;
/* Case 1: chip-internal media selection, no MII PHY present. */
1982 if (sc->ax_pinfo == NULL) {
1983 media = CSR_READ_4(sc, AX_NETCFG);
1984 if (media & AX_NETCFG_PORTSEL)
1985 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1987 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1988 if (media & AX_NETCFG_FULLDUPLEX)
1989 ifmr->ifm_active |= IFM_FDX;
1991 ifmr->ifm_active |= IFM_HDX;
/* Case 2: PHY present but autoneg off — report the forced settings. */
1995 if (!(ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1996 if (ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1997 ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
1999 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2000 if (ax_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
2001 ifmr->ifm_active |= IFM_FDX;
2003 ifmr->ifm_active |= IFM_HDX;
/*
 * Case 3: autoneg — the negotiated mode is the highest-priority
 * capability present in both our ANAR advertisement and the link
 * partner's LPAR register.
 */
2007 ability = ax_phy_readreg(sc, PHY_LPAR);
2008 advert = ax_phy_readreg(sc, PHY_ANAR);
2009 if (advert & PHY_ANAR_100BT4 &&
2010 ability & PHY_ANAR_100BT4) {
2011 ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
2012 } else if (advert & PHY_ANAR_100BTXFULL &&
2013 ability & PHY_ANAR_100BTXFULL) {
2014 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
2015 } else if (advert & PHY_ANAR_100BTXHALF &&
2016 ability & PHY_ANAR_100BTXHALF) {
2017 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
2018 } else if (advert & PHY_ANAR_10BTFULL &&
2019 ability & PHY_ANAR_10BTFULL) {
2020 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
2021 } else if (advert & PHY_ANAR_10BTHALF &&
2022 ability & PHY_ANAR_10BTHALF) {
2023 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
/*
 * Interface ioctl handler.
 * NOTE(review): this extract is missing the switch statement, case
 * labels and most of the body; only fragments of the address, flags
 * and media cases are visible, so comments are limited accordingly.
 */
2029 static int ax_ioctl(ifp, command, data)
2034 struct ax_softc *sc = ifp->if_softc;
2035 struct ifreq *ifr = (struct ifreq *) data;
/* Address-family housekeeping is delegated to the generic layer. */
2044 error = ether_ioctl(ifp, command, data);
2047 if (ifp->if_flags & IFF_UP) {
2050 if (ifp->if_flags & IFF_RUNNING)
/* Media selection is delegated to the ifmedia framework. */
2062 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
/*
 * Watchdog: fires when a transmission hasn't completed within the
 * timeout set by ax_start. If we were autonegotiating, treat it as the
 * autoneg delay timer instead. Otherwise count an output error, warn
 * about a possible cable problem, and (in lines elided from this
 * extract) reinitialize the chip and restart output.
 */
2074 static void ax_watchdog(ifp)
2077 struct ax_softc *sc;
/* Autoneg pending: this "timeout" is really the deferred-autoneg timer. */
2081 if (sc->ax_autoneg) {
2082 ax_autoneg_mii(sc, AX_FLAG_DELAYTIMEO, 1);
2087 printf("ax%d: watchdog timeout\n", sc->ax_unit);
/* With an MII PHY we can check link state to give a better hint. */
2089 if (sc->ax_pinfo != NULL) {
2090 if (!(ax_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
2091 printf("ax%d: no carrier - transceiver "
2092 "cable problem?\n", sc->ax_unit);
/* Packets still queued: kick the transmitter again. */
2099 if (ifp->if_snd.ifq_head != NULL)
/*
 * Stop the adapter: disable RX/TX and interrupts, clear the descriptor
 * base addresses, free every mbuf still attached to the RX and TX rings,
 * zero both descriptor lists, and mark the interface down.
 */
2106 * Stop the adapter and free any mbufs allocated to the
2109 static void ax_stop(sc)
2110 struct ax_softc *sc;
2115 ifp = &sc->arpcom.ac_if;
2118 AX_CLRBIT(sc, AX_NETCFG, (AX_NETCFG_RX_ON|AX_NETCFG_TX_ON));
2119 CSR_WRITE_4(sc, AX_IMR, 0x00000000);
/* Clear list base addresses so the chip can't DMA after stop. */
2120 CSR_WRITE_4(sc, AX_TXADDR, 0x00000000);
2121 CSR_WRITE_4(sc, AX_RXADDR, 0x00000000);
2124 * Free data in the RX lists.
2126 for (i = 0; i < AX_RX_LIST_CNT; i++) {
2127 if (sc->ax_cdata.ax_rx_chain[i].ax_mbuf != NULL) {
2128 m_freem(sc->ax_cdata.ax_rx_chain[i].ax_mbuf);
/* Null the pointer so a later stop can't double-free. */
2129 sc->ax_cdata.ax_rx_chain[i].ax_mbuf = NULL;
2132 bzero((char *)&sc->ax_ldata->ax_rx_list,
2133 sizeof(sc->ax_ldata->ax_rx_list));
2136 * Free the TX list buffers.
2138 for (i = 0; i < AX_TX_LIST_CNT; i++) {
2139 if (sc->ax_cdata.ax_tx_chain[i].ax_mbuf != NULL) {
2140 m_freem(sc->ax_cdata.ax_tx_chain[i].ax_mbuf);
2141 sc->ax_cdata.ax_tx_chain[i].ax_mbuf = NULL;
2145 bzero((char *)&sc->ax_ldata->ax_tx_list,
2146 sizeof(sc->ax_ldata->ax_tx_list));
2148 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
/*
 * System-shutdown hook: quiesce the chip so stray DMA can't corrupt
 * memory during reboot/probe. (The ax_stop/ax_reset calls that do the
 * actual work are in lines elided from this extract.)
 */
2154 * Stop all chip I/O so that the kernel's probe routines don't
2155 * get confused by errant DMAs when rebooting.
2157 static void ax_shutdown(howto, arg)
2161 struct ax_softc *sc = (struct ax_softc *)arg;
/*
 * PCI driver registration: the ax_device descriptor (probe/attach
 * entries elided from this extract) is linked into the kernel's
 * pcidevice_set so the old-style PCI probe code finds this driver.
 */
2168 static struct pci_device ax_device = {
2175 DATA_SET(pcidevice_set, ax_device);