2 * Copyright (c) 2008 Benno Rice. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD$");
29 * Driver for SMSC LAN91C111, may work for older variants.
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/errno.h>
39 #include <sys/kernel.h>
40 #include <sys/sockio.h>
41 #include <sys/malloc.h>
43 #include <sys/queue.h>
44 #include <sys/socket.h>
45 #include <sys/syslog.h>
46 #include <sys/taskqueue.h>
48 #include <sys/module.h>
51 #include <machine/bus.h>
52 #include <machine/resource.h>
55 #include <net/ethernet.h>
57 #include <net/if_var.h>
58 #include <net/if_arp.h>
59 #include <net/if_dl.h>
60 #include <net/if_types.h>
61 #include <net/if_mib.h>
62 #include <net/if_media.h>
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
72 #include <net/bpfdesc.h>
74 #include <dev/smc/if_smcreg.h>
75 #include <dev/smc/if_smcvar.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/mii_bitbang.h>
79 #include <dev/mii/miivar.h>
81 #define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx)
82 #define SMC_UNLOCK(sc) mtx_unlock(&(sc)->smc_mtx)
83 #define SMC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->smc_mtx, MA_OWNED)
85 #define SMC_INTR_PRIORITY 0
86 #define SMC_RX_PRIORITY 5
87 #define SMC_TX_PRIORITY 10
89 devclass_t smc_devclass;
/*
 * Chip-name strings indexed by the chip-ID field of the REV register.
 * smc_probe() rejects IDs whose entry is NULL.  (Entries for IDs 0-2 and
 * 10-15, and the table's closing brace, are not visible in this chunk.)
 */
91 static const char *smc_chip_ids[16] = {
93 /* 3 */ "SMSC LAN91C90 or LAN91C92",
94 /* 4 */ "SMSC LAN91C94",
95 /* 5 */ "SMSC LAN91C95",
96 /* 6 */ "SMSC LAN91C96",
97 /* 7 */ "SMSC LAN91C100",
98 /* 8 */ "SMSC LAN91C100FD",
99 /* 9 */ "SMSC LAN91C110FD or LAN91C111FD",
104 static void smc_init(void *);
105 static void smc_start(struct ifnet *);
106 static void smc_stop(struct smc_softc *);
107 static int smc_ioctl(struct ifnet *, u_long, caddr_t);
109 static void smc_init_locked(struct smc_softc *);
110 static void smc_start_locked(struct ifnet *);
111 static void smc_reset(struct smc_softc *);
112 static int smc_mii_ifmedia_upd(struct ifnet *);
113 static void smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *);
114 static void smc_mii_tick(void *);
115 static void smc_mii_mediachg(struct smc_softc *);
116 static int smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long);
118 static void smc_task_intr(void *, int);
119 static void smc_task_rx(void *, int);
120 static void smc_task_tx(void *, int);
122 static driver_filter_t smc_intr;
123 static timeout_t smc_watchdog;
124 #ifdef DEVICE_POLLING
125 static poll_handler_t smc_poll;
131 static uint32_t smc_mii_bitbang_read(device_t);
132 static void smc_mii_bitbang_write(device_t, uint32_t);
134 static const struct mii_bitbang_ops smc_mii_bitbang_ops = {
135 smc_mii_bitbang_read,
136 smc_mii_bitbang_write,
138 MGMT_MDO, /* MII_BIT_MDO */
139 MGMT_MDI, /* MII_BIT_MDI */
140 MGMT_MCLK, /* MII_BIT_MDC */
141 MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */
142 0, /* MII_BIT_DIR_PHY_HOST */
/*
 * Switch the chip's banked register window by writing BSR.  Barriers on
 * both sides keep the bank switch ordered against surrounding register
 * accesses.  Presumably called with the softc lock held (the register set
 * is banked, so an unlocked caller could race the bank selection) --
 * callers elsewhere use SMC_ASSERT_LOCKED; confirm.
 */
147 smc_select_bank(struct smc_softc *sc, uint16_t bank)
150 bus_barrier(sc->smc_reg, BSR, 2,
151 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
152 bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK);
153 bus_barrier(sc->smc_reg, BSR, 2,
154 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
157 /* Never call this when not in bank 2. */
159 smc_mmu_wait(struct smc_softc *sc)
162 KASSERT((bus_read_2(sc->smc_reg, BSR) &
163 BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2",
164 device_get_nameunit(sc->smc_dev)));
/* Busy-wait until the MMU has finished the previous command. */
165 while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY)
/*
 * Thin inline wrappers around the bus_space accessors for the chip's
 * register resource.  They exist so the rest of the driver reads/writes
 * registers through the softc rather than touching sc->smc_reg directly.
 */
169 static __inline uint8_t
170 smc_read_1(struct smc_softc *sc, bus_size_t offset)
173 return (bus_read_1(sc->smc_reg, offset));
177 smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val)
180 bus_write_1(sc->smc_reg, offset, val);
183 static __inline uint16_t
184 smc_read_2(struct smc_softc *sc, bus_size_t offset)
187 return (bus_read_2(sc->smc_reg, offset));
191 smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val)
194 bus_write_2(sc->smc_reg, offset, val);
/* Stream count 16-bit words to/from a single register (the data FIFO). */
198 smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap,
202 bus_read_multi_2(sc->smc_reg, offset, datap, count);
206 smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap,
210 bus_write_multi_2(sc->smc_reg, offset, datap, count);
214 smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length,
218 bus_barrier(sc->smc_reg, offset, length, flags);
/*
 * Probe for the chip: temporarily map the register resource, then verify
 * (1) the identification bits in BSR, (2) that they survive a bank switch,
 * (3) that the BAR address matches the resource address, and (4) that the
 * chip revision is one we know.  The resource is released before return
 * regardless of outcome.
 */
222 smc_probe(device_t dev)
224 int rid, type, error;
226 struct smc_softc *sc;
227 struct resource *reg;
229 sc = device_get_softc(dev);
231 type = SYS_RES_IOPORT;
235 type = SYS_RES_MEMORY;
237 reg = bus_alloc_resource(dev, type, &rid, 0, ~0, 16, RF_ACTIVE);
241 "could not allocate I/O resource for probe\n");
245 /* Check for the identification value in the BSR. */
246 val = bus_read_2(reg, BSR);
247 if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
249 device_printf(dev, "identification value not in BSR\n");
255 * Try switching banks and make sure we still get the identification
258 bus_write_2(reg, BSR, 0);
259 val = bus_read_2(reg, BSR);
260 if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
263 "identification value not in BSR after write\n");
/* Bank 1 holds the BAR register; cross-check it against the resource. */
270 bus_write_2(reg, BSR, 1);
271 val = bus_read_2(reg, BAR);
272 val = BAR_ADDRESS(val);
273 if (rman_get_start(reg) != val) {
275 device_printf(dev, "BAR address %x does not match "
276 "I/O resource address %lx\n", val,
277 rman_get_start(reg));
283 /* Compare REV against known chip revisions. */
284 bus_write_2(reg, BSR, 3);
285 val = bus_read_2(reg, REV);
286 val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
287 if (smc_chip_ids[val] == NULL) {
289 device_printf(dev, "Unknown chip revision: %d\n", val);
294 device_set_desc(dev, smc_chip_ids[val]);
297 bus_release_resource(dev, type, rid, reg);
/*
 * Attach the device: allocate the ifnet, mutex, register/IRQ resources,
 * read the chip revision, optionally attach an MII bus (91C111FD and
 * later), read the station address from the IAR registers, set up the
 * ifnet methods and the interrupt/rx/tx taskqueue, mask all chip
 * interrupts, and wire up the interrupt filter.
 */
302 smc_attach(device_t dev)
306 u_char eaddr[ETHER_ADDR_LEN];
307 struct smc_softc *sc;
310 sc = device_get_softc(dev);
315 ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
321 mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
323 /* Set up watchdog callout. */
324 callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);
326 type = SYS_RES_IOPORT;
328 type = SYS_RES_MEMORY;
331 sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
333 if (sc->smc_reg == NULL) {
338 sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
339 ~0, 1, RF_ACTIVE | RF_SHAREABLE);
340 if (sc->smc_irq == NULL) {
/* Bank 3 holds the REV register with chip ID and revision fields. */
349 smc_select_bank(sc, 3);
350 val = smc_read_2(sc, REV);
351 sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
/*
 * NOTE(review): '*' below is almost certainly a typo for '&' --
 * REV_REV_MASK is a bit mask, and multiplying by it produces a garbage
 * revision value.  Upstream FreeBSD uses '&' here; fix when this file is
 * next touched.
 */
352 sc->smc_rev = (val * REV_REV_MASK) >> REV_REV_SHIFT;
354 device_printf(dev, "revision %x\n", sc->smc_rev);
356 callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
357 CALLOUT_RETURNUNLOCKED);
/* Only the 91C110FD/91C111FD and later have an MII-attached PHY. */
358 if (sc->smc_chip >= REV_CHIP_91110FD) {
359 (void)mii_attach(dev, &sc->smc_miibus, ifp,
360 smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
361 MII_PHY_ANY, MII_OFFSET_ANY, 0);
362 if (sc->smc_miibus != NULL) {
363 sc->smc_mii_tick = smc_mii_tick;
364 sc->smc_mii_mediachg = smc_mii_mediachg;
365 sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
/* The station address lives in the bank 1 IAR0..IAR5 registers. */
369 smc_select_bank(sc, 1);
370 eaddr[0] = smc_read_1(sc, IAR0);
371 eaddr[1] = smc_read_1(sc, IAR1);
372 eaddr[2] = smc_read_1(sc, IAR2);
373 eaddr[3] = smc_read_1(sc, IAR3);
374 eaddr[4] = smc_read_1(sc, IAR4);
375 eaddr[5] = smc_read_1(sc, IAR5);
377 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
379 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
380 ifp->if_init = smc_init;
381 ifp->if_ioctl = smc_ioctl;
382 ifp->if_start = smc_start;
383 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
384 IFQ_SET_READY(&ifp->if_snd);
386 ifp->if_capabilities = ifp->if_capenable = 0;
388 #ifdef DEVICE_POLLING
389 ifp->if_capabilities |= IFCAP_POLLING;
392 ether_ifattach(ifp, eaddr);
394 /* Set up taskqueue */
395 TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
396 TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
397 TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
398 sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
399 taskqueue_thread_enqueue, &sc->smc_tq);
400 taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
401 device_get_nameunit(sc->smc_dev));
403 /* Mask all interrupts. */
405 smc_write_1(sc, MSK, 0);
407 /* Wire up interrupt */
408 error = bus_setup_intr(dev, sc->smc_irq,
409 INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih);
/*
 * Detach: tear everything attach() built back down, in reverse order --
 * ifnet detach, callouts, polling deregistration, interrupt handler,
 * taskqueue, ifnet, miibus child, register and IRQ resources, mutex.
 * Each teardown is guarded by a NULL/initialized check so detach is safe
 * on a partially attached device.
 */
420 smc_detach(device_t dev)
423 struct smc_softc *sc;
425 sc = device_get_softc(dev);
430 if (sc->smc_ifp != NULL) {
431 ether_ifdetach(sc->smc_ifp);
434 callout_drain(&sc->smc_watchdog);
435 callout_drain(&sc->smc_mii_tick_ch);
437 #ifdef DEVICE_POLLING
438 if (sc->smc_ifp->if_capenable & IFCAP_POLLING)
439 ether_poll_deregister(sc->smc_ifp);
442 if (sc->smc_ih != NULL)
443 bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih);
445 if (sc->smc_tq != NULL) {
446 taskqueue_drain(sc->smc_tq, &sc->smc_intr);
447 taskqueue_drain(sc->smc_tq, &sc->smc_rx);
448 taskqueue_drain(sc->smc_tq, &sc->smc_tx);
449 taskqueue_free(sc->smc_tq);
453 if (sc->smc_ifp != NULL) {
454 if_free(sc->smc_ifp);
457 if (sc->smc_miibus != NULL) {
458 device_delete_child(sc->smc_dev, sc->smc_miibus);
459 bus_generic_detach(sc->smc_dev);
462 if (sc->smc_reg != NULL) {
463 type = SYS_RES_IOPORT;
465 type = SYS_RES_MEMORY;
467 bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid,
471 if (sc->smc_irq != NULL)
472 bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid,
475 if (mtx_initialized(&sc->smc_mtx))
476 mtx_destroy(&sc->smc_mtx);
/*
 * ifnet if_start entry point: takes the softc lock (not visible in this
 * chunk, but implied by smc_start_locked's SMC_ASSERT_LOCKED) and defers
 * to smc_start_locked().
 */
482 smc_start(struct ifnet *ifp)
484 struct smc_softc *sc;
488 smc_start_locked(ifp);
/*
 * Locked transmit-start path: dequeue one packet, reserve TX memory on
 * the chip (in 256-byte pages), and hand off to the TX task.  If the
 * chip's allocator doesn't respond within a short spin, the ALLOC
 * interrupt is unmasked instead so completion is handled asynchronously.
 */
493 smc_start_locked(struct ifnet *ifp)
495 struct smc_softc *sc;
497 u_int len, npages, spin_count;
500 SMC_ASSERT_LOCKED(sc);
502 if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
504 if (IFQ_IS_EMPTY(&ifp->if_snd))
508 * Grab the next packet. If it's too big, drop it.
510 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
511 len = m_length(m, NULL);
513 if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
514 if_printf(ifp, "large packet discarded\n");
515 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
517 return; /* XXX readcheck? */
521 * Flag that we're busy.
523 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
527 * Work out how many 256 byte "pages" we need. We have to include the
528 * control data for the packet in this calculation.
530 npages = (len + PKT_CTRL_DATA_LEN) >> 8;
/* The MMU allocate command lives in bank 2. */
537 smc_select_bank(sc, 2);
539 smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages);
542 * Spin briefly to see if the allocation succeeds.
544 spin_count = TX_ALLOC_WAIT_TIME;
546 if (smc_read_1(sc, IST) & ALLOC_INT) {
547 smc_write_1(sc, ACK, ALLOC_INT);
550 } while (--spin_count);
553 * If the allocation is taking too long, unmask the alloc interrupt
556 if (spin_count == 0) {
557 sc->smc_mask |= ALLOC_INT;
558 if ((ifp->if_capenable & IFCAP_POLLING) == 0)
559 smc_write_1(sc, MSK, sc->smc_mask);
/* Allocation succeeded (or is pending): let the TX task do the copy. */
563 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
/*
 * TX taskqueue handler: take the pending mbuf, check the chip's packet
 * allocation result, copy the frame into the allocated packet buffer
 * word-by-word through the DATA0 FIFO, append the control word (and the
 * odd trailing byte, if any), enqueue the packet for transmission, and
 * arm the watchdog.  On allocation failure the packet is requeued and
 * the start path retried.
 */
567 smc_task_tx(void *context, int pending)
570 struct smc_softc *sc;
577 ifp = (struct ifnet *)context;
582 if (sc->smc_pending == NULL) {
587 m = m0 = sc->smc_pending;
588 sc->smc_pending = NULL;
589 smc_select_bank(sc, 2);
592 * Check the allocation result.
594 packet = smc_read_1(sc, ARR);
597 * If the allocation failed, requeue the packet and retry.
599 if (packet & ARR_FAILED) {
600 IFQ_DRV_PREPEND(&ifp->if_snd, m);
601 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
602 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
603 smc_start_locked(ifp);
609 * Tell the device to write to our packet number.
611 smc_write_1(sc, PNR, packet);
612 smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR);
615 * Tell the device how long the packet is (including control data).
617 len = m_length(m, 0);
618 len += PKT_CTRL_DATA_LEN;
619 smc_write_2(sc, DATA0, 0);
620 smc_write_2(sc, DATA0, len);
623 * Push the data out to the device.
627 for (; m != NULL; m = m->m_next) {
628 data = mtod(m, uint8_t *);
629 smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2);
634 * Push out the control byte and the odd byte if needed.
636 if ((len & 1) != 0 && data != NULL)
637 smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]);
639 smc_write_2(sc, DATA0, 0);
642 * Unmask the TX empty interrupt.
644 sc->smc_mask |= TX_EMPTY_INT;
645 if ((ifp->if_capenable & IFCAP_POLLING) == 0)
646 smc_write_1(sc, MSK, sc->smc_mask);
649 * Enqueue the packet.
652 smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE);
653 callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc);
658 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
659 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
666 * See if there's anything else to do.
/*
 * RX taskqueue handler: drain the chip's receive FIFO.  For each packet:
 * allocate an mbuf cluster, read the status/length words, drop errored
 * frames, copy the payload out of the chip through DATA0, release the
 * chip-side buffer, and chain the mbuf.  Afterwards the RCV interrupt is
 * re-unmasked and the collected chain is fed to if_input (outside the
 * lock, as is conventional -- the unlock is in lines not visible here).
 */
672 smc_task_rx(void *context, int pending)
674 u_int packet, status, len;
677 struct smc_softc *sc;
678 struct mbuf *m, *mhead, *mtail;
681 ifp = (struct ifnet *)context;
683 mhead = mtail = NULL;
687 packet = smc_read_1(sc, FIFO_RX);
688 while ((packet & FIFO_EMPTY) == 0) {
690 * Grab an mbuf and attach a cluster.
692 MGETHDR(m, M_NOWAIT, MT_DATA);
696 if (!(MCLGET(m, M_NOWAIT))) {
702 * Point to the start of the packet.
704 smc_select_bank(sc, 2);
705 smc_write_1(sc, PNR, packet);
706 smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);
709 * Grab status and packet length.
711 status = smc_read_2(sc, DATA0);
712 len = smc_read_2(sc, DATA0) & RX_LEN_MASK;
714 if (status & RX_ODDFRM)
/* Discard frames the chip flagged as bad and free the chip buffer. */
720 if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) {
722 smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
723 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
729 * Set the mbuf up the way we want it.
731 m->m_pkthdr.rcvif = ifp;
732 m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */
733 m_adj(m, ETHER_ALIGN);
736 * Pull the packet out of the device. Make sure we're in the
737 * right bank first as things may have changed while we were
738 * allocating our mbuf.
740 smc_select_bank(sc, 2);
741 smc_write_1(sc, PNR, packet);
742 smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);
743 data = mtod(m, uint8_t *);
744 smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1);
/* Odd-length frame: fetch the final byte individually. */
747 *data = smc_read_1(sc, DATA0);
751 * Tell the device we're done.
754 smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE);
/* Next FIFO entry. */
766 packet = smc_read_1(sc, FIFO_RX);
/* Re-arm the receive interrupt now that the FIFO is drained. */
769 sc->smc_mask |= RCV_INT;
770 if ((ifp->if_capenable & IFCAP_POLLING) == 0)
771 smc_write_1(sc, MSK, sc->smc_mask);
/* Hand the collected chain to the stack, one mbuf at a time. */
775 while (mhead != NULL) {
777 mhead = mhead->m_next;
779 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
780 (*ifp->if_input)(ifp, m);
784 #ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING handler: no-op when the interface isn't running; on a
 * status-check poll, schedule the interrupt task to service the chip.
 */
786 smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
788 struct smc_softc *sc;
793 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
799 if (cmd == POLL_AND_CHECK_STATUS)
800 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
/*
 * Interrupt filter (runs in primary interrupt context): mask all chip
 * interrupts so the line deasserts, restore the bank the interrupted
 * code had selected, and punt the real work to smc_task_intr on the
 * taskqueue.  Always claims the interrupt (FILTER_HANDLED).
 */
805 smc_intr(void *context)
807 struct smc_softc *sc;
810 sc = (struct smc_softc *)context;
813 * Save current bank and restore later in this function
815 curbank = (smc_read_2(sc, BSR) & BSR_BANK_MASK);
818 * Block interrupts in order to let smc_task_intr to kick in
820 smc_select_bank(sc, 2);
821 smc_write_1(sc, MSK, 0);
/* Restore bank in case we interrupted code relying on it. */
824 smc_select_bank(sc, curbank);
826 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
827 return (FILTER_HANDLED);
/*
 * Interrupt taskqueue handler: read the masked interrupt status from IST
 * and dispatch each condition -- TX error completion (report EPH status,
 * release the packet, re-enable the transmitter), RCV (defer to the RX
 * task), ALLOC (TX memory now available; defer to the TX task), RX
 * overrun (count an input error), and TX empty (stop the watchdog,
 * harvest collision counters, kick the TX task).  Finally the interrupt
 * mask is rewritten unless polling is active.
 */
831 smc_task_intr(void *context, int pending)
833 struct smc_softc *sc;
835 u_int status, packet, counter, tcr;
838 ifp = (struct ifnet *)context;
843 smc_select_bank(sc, 2);
846 * Find out what interrupts are flagged.
848 status = smc_read_1(sc, IST) & sc->smc_mask;
/* TX_INT signals a transmit error completion on this chip. */
853 if (status & TX_INT) {
855 * Kill off the packet if there is one and re-enable transmit.
857 packet = smc_read_1(sc, FIFO_TX);
858 if ((packet & FIFO_EMPTY) == 0) {
859 callout_stop(&sc->smc_watchdog);
860 smc_select_bank(sc, 2);
861 smc_write_1(sc, PNR, packet);
862 smc_write_2(sc, PTR, 0 | PTR_READ |
/* EPHSR (bank 0) reports why the transmit failed. */
864 smc_select_bank(sc, 0);
865 tcr = smc_read_2(sc, EPHSR);
867 if ((tcr & EPHSR_TX_SUC) == 0)
868 device_printf(sc->smc_dev,
871 smc_select_bank(sc, 2);
873 smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT);
/* A TX error disables the transmitter; turn it back on. */
875 smc_select_bank(sc, 0);
876 tcr = smc_read_2(sc, TCR);
877 tcr |= TCR_TXENA | TCR_PAD_EN;
878 smc_write_2(sc, TCR, tcr);
879 smc_select_bank(sc, 2);
880 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
886 smc_write_1(sc, ACK, TX_INT);
/* Receive: mask further RCV interrupts and let the RX task drain. */
892 if (status & RCV_INT) {
893 smc_write_1(sc, ACK, RCV_INT);
894 sc->smc_mask &= ~RCV_INT;
895 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx);
/* TX memory allocation completed asynchronously. */
901 if (status & ALLOC_INT) {
902 smc_write_1(sc, ACK, ALLOC_INT);
903 sc->smc_mask &= ~ALLOC_INT;
904 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
910 if (status & RX_OVRN_INT) {
911 smc_write_1(sc, ACK, RX_OVRN_INT);
912 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
/* Transmit queue drained successfully. */
918 if (status & TX_EMPTY_INT) {
919 smc_write_1(sc, ACK, TX_EMPTY_INT);
920 sc->smc_mask &= ~TX_EMPTY_INT;
921 callout_stop(&sc->smc_watchdog);
924 * Update collision stats.
926 smc_select_bank(sc, 0);
927 counter = smc_read_2(sc, ECR);
928 smc_select_bank(sc, 2);
929 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
930 ((counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) +
931 ((counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT));
934 * See if there are any packets to transmit.
936 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx);
940 * Update the interrupt mask.
942 smc_select_bank(sc, 2);
943 if ((ifp->if_capenable & IFCAP_POLLING) == 0)
944 smc_write_1(sc, MSK, sc->smc_mask);
/*
 * mii_bitbang backend: read the MGMT register (bank 3).  The asserts
 * document the contract: softc lock held and bank 3 selected.  The
 * barrier keeps successive bit-bang reads ordered on the bus.
 */
950 smc_mii_bitbang_read(device_t dev)
952 struct smc_softc *sc;
955 sc = device_get_softc(dev);
957 SMC_ASSERT_LOCKED(sc);
958 KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
959 ("%s: smc_mii_bitbang_read called with bank %d (!= 3)",
960 device_get_nameunit(sc->smc_dev),
961 smc_read_2(sc, BSR) & BSR_BANK_MASK));
963 val = smc_read_2(sc, MGMT);
964 smc_barrier(sc, MGMT, 2,
965 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
/*
 * mii_bitbang backend: write the MGMT register (bank 3).  Same contract
 * as smc_mii_bitbang_read: lock held, bank 3 selected, barrier after the
 * write to order successive bit-bang operations.
 */
971 smc_mii_bitbang_write(device_t dev, uint32_t val)
973 struct smc_softc *sc;
975 sc = device_get_softc(dev);
977 SMC_ASSERT_LOCKED(sc);
978 KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3,
979 ("%s: smc_mii_bitbang_write called with bank %d (!= 3)",
980 device_get_nameunit(sc->smc_dev),
981 smc_read_2(sc, BSR) & BSR_BANK_MASK));
983 smc_write_2(sc, MGMT, val);
984 smc_barrier(sc, MGMT, 2,
985 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
/*
 * miibus register accessors: select bank 3 (where MGMT lives) and defer
 * to the generic mii_bitbang helpers with our bit-bang ops table.
 */
989 smc_miibus_readreg(device_t dev, int phy, int reg)
991 struct smc_softc *sc;
994 sc = device_get_softc(dev);
998 smc_select_bank(sc, 3);
1000 val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg);
1007 smc_miibus_writereg(device_t dev, int phy, int reg, int data)
1009 struct smc_softc *sc;
1011 sc = device_get_softc(dev);
1015 smc_select_bank(sc, 3);
1017 mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data);
/*
 * miibus status-change callback: mirror the PHY's duplex setting into
 * the chip's TCR (bank 0).  Full duplex sets the corresponding TCR bit
 * (the set/clear lines themselves are not visible in this chunk).
 */
1024 smc_miibus_statchg(device_t dev)
1026 struct smc_softc *sc;
1027 struct mii_data *mii;
1030 sc = device_get_softc(dev);
1031 mii = device_get_softc(sc->smc_miibus);
1035 smc_select_bank(sc, 0);
1036 tcr = smc_read_2(sc, TCR);
1038 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1043 smc_write_2(sc, TCR, tcr);
/*
 * ifmedia callbacks: both simply delegate to the MII layer, and are
 * no-ops when no PHY was attached (smc_miibus == NULL).
 */
1049 smc_mii_ifmedia_upd(struct ifnet *ifp)
1051 struct smc_softc *sc;
1052 struct mii_data *mii;
1055 if (sc->smc_miibus == NULL)
1058 mii = device_get_softc(sc->smc_miibus);
1059 return (mii_mediachg(mii));
/* Report current media status from the MII layer. */
1063 smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1065 struct smc_softc *sc;
1066 struct mii_data *mii;
1069 if (sc->smc_miibus == NULL)
1072 mii = device_get_softc(sc->smc_miibus)
1074 ifmr->ifm_active = mii->mii_media_active;
1075 ifmr->ifm_status = mii->mii_media_status;
/*
 * Periodic MII tick: run mii_tick() once a second via a self-rearming
 * callout.  Like the other MII helpers below, a NULL smc_miibus makes
 * it a no-op.
 */
1079 smc_mii_tick(void *context)
1081 struct smc_softc *sc;
1083 sc = (struct smc_softc *)context;
1085 if (sc->smc_miibus == NULL)
1090 mii_tick(device_get_softc(sc->smc_miibus));
1091 callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc);
/* Trigger a media change through the MII layer. */
1095 smc_mii_mediachg(struct smc_softc *sc)
1098 if (sc->smc_miibus == NULL)
1100 mii_mediachg(device_get_softc(sc->smc_miibus));
/* Forward SIOC[GS]IFMEDIA ioctls to the MII layer's ifmedia handler. */
1104 smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command)
1106 struct mii_data *mii;
1108 if (sc->smc_miibus == NULL)
1111 mii = device_get_softc(sc->smc_miibus);
1112 return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command));
/*
 * Reset the chip to a known state: mask interrupts, issue a soft reset
 * via RCR, power up the EPH block, disable TX/RX, enable link-error
 * detection and auto-release in CTR, and finally reset the packet MMU.
 * Must be called with the softc lock held.
 */
1116 smc_reset(struct smc_softc *sc)
1120 SMC_ASSERT_LOCKED(sc);
1122 smc_select_bank(sc, 2);
1125 * Mask all interrupts.
1127 smc_write_1(sc, MSK, 0);
1130 * Tell the device to reset.
1132 smc_select_bank(sc, 0);
1133 smc_write_2(sc, RCR, RCR_SOFT_RST);
1136 * Set up the configuration register.
1138 smc_select_bank(sc, 1);
1139 smc_write_2(sc, CR, CR_EPH_POWER_EN);
1143 * Turn off transmit and receive.
1145 smc_select_bank(sc, 0);
1146 smc_write_2(sc, TCR, 0);
1147 smc_write_2(sc, RCR, 0);
1150 * Set up the control register.
1152 smc_select_bank(sc, 1);
1153 ctr = smc_read_2(sc, CTR);
1154 ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE;
1155 smc_write_2(sc, CTR, ctr);
/* Reset the MMU so all packet memory is back in the free pool. */
1160 smc_select_bank(sc, 2);
1162 smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET);
/*
 * Enable the chip for normal operation: program RPCR for autonegotiation
 * and LED behaviour, enable the transmitter (with padding) and receiver
 * (stripping CRC), and set the working interrupt mask.  Must be called
 * with the softc lock held.
 */
1166 smc_enable(struct smc_softc *sc)
1170 SMC_ASSERT_LOCKED(sc);
1174 * Set up the receive/PHY control register.
1176 smc_select_bank(sc, 0);
1177 smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT)
1178 | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT));
1181 * Set up the transmit and receive control registers.
1183 smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN);
1184 smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC);
1187 * Set up the interrupt mask.
1189 smc_select_bank(sc, 2);
1190 sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT;
/*
 * NOTE(review): this writes MSK only when polling IS enabled, while
 * every other mask update in this file writes it when polling is NOT
 * enabled -- the condition here looks inverted; confirm against the
 * committed driver.
 */
1191 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1192 smc_write_1(sc, MSK, sc->smc_mask);
/*
 * Stop the interface: cancel the watchdog and MII callouts, mask all
 * chip interrupts, deregister polling, disable TX/RX, and clear
 * IFF_DRV_RUNNING.  Must be called with the softc lock held.
 */
1196 smc_stop(struct smc_softc *sc)
1199 SMC_ASSERT_LOCKED(sc);
1202 * Turn off callouts.
1204 callout_stop(&sc->smc_watchdog);
1205 callout_stop(&sc->smc_mii_tick_ch);
1208 * Mask all interrupts.
1210 smc_select_bank(sc, 2);
1212 smc_write_1(sc, MSK, 0);
1213 #ifdef DEVICE_POLLING
1214 ether_poll_deregister(sc->smc_ifp);
1215 sc->smc_ifp->if_capenable &= ~IFCAP_POLLING;
1216 sc->smc_ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT;
1220 * Disable transmit and receive.
1222 smc_select_bank(sc, 0);
1223 smc_write_2(sc, TCR, 0);
1224 smc_write_2(sc, RCR, 0);
1226 sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
/*
 * TX watchdog callout: fires if a transmit hasn't completed within the
 * 2*hz window armed in smc_task_tx.  Logs the timeout and schedules the
 * interrupt task to sort out the chip's state.
 */
1230 smc_watchdog(void *arg)
1232 struct smc_softc *sc;
1234 sc = (struct smc_softc *)arg;
1235 device_printf(sc->smc_dev, "watchdog timeout\n");
1236 taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr);
/*
 * ifnet if_init entry point: wrapper that defers to smc_init_locked()
 * (the lock/unlock lines are not visible in this chunk).
 */
1240 smc_init(void *context)
1242 struct smc_softc *sc;
1244 sc = (struct smc_softc *)context;
1246 smc_init_locked(sc);
/*
 * Locked init: no-op if already running, otherwise mark the interface
 * running, kick the transmit path, start the MII tick if a PHY is
 * present, and register for DEVICE_POLLING when compiled in.
 */
1251 smc_init_locked(struct smc_softc *sc)
1255 SMC_ASSERT_LOCKED(sc);
1257 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1263 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1264 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1266 smc_start_locked(ifp);
1268 if (sc->smc_mii_tick != NULL)
1269 callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc);
1271 #ifdef DEVICE_POLLING
1273 ether_poll_register(smc_poll, ifp);
1275 ifp->if_capenable |= IFCAP_POLLING;
1276 ifp->if_capenable |= IFCAP_POLLING_NOCOUNT;
/*
 * ifnet ioctl handler (visible fragments): bringing the interface down
 * while running, propagating media changes via the MII hook, forwarding
 * media ioctls when a PHY is attached, and falling through to
 * ether_ioctl() for everything else.
 */
1281 smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1283 struct smc_softc *sc;
1291 if ((ifp->if_flags & IFF_UP) == 0 &&
1292 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1298 if (sc->smc_mii_mediachg != NULL)
1299 sc->smc_mii_mediachg(sc);
1315 if (sc->smc_mii_mediaioctl == NULL) {
1319 sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd);
1323 error = ether_ioctl(ifp, cmd, data);