1 /* $NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $ */
4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
7 * Redistribution and use in source and binary forms, with or
8 * without modification, are permitted provided that the following
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following
14 * disclaimer in the documentation and/or other materials provided
15 * with the distribution.
16 * 3. The names of the authors may not be used to endorse or promote
17 * products derived from this software without specific prior
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
34 * Copyright (c) 2001 Wasabi Systems, Inc.
35 * All rights reserved.
37 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed for the NetBSD Project by
50 * Wasabi Systems, Inc.
51 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
52 * or promote products derived from this software without specific prior
55 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
65 * POSSIBILITY OF SUCH DAMAGE.
69 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media
74 * Better Rx buffer management; we want to get new Rx buffers
75 * to the chip more quickly than we currently do.
78 #include <sys/cdefs.h>
79 __FBSDID("$FreeBSD$");
81 #include <sys/param.h>
82 #include <sys/systm.h>
84 #include <sys/kernel.h>
86 #include <sys/malloc.h>
87 #include <sys/module.h>
89 #include <sys/socket.h>
90 #include <sys/sockio.h>
91 #include <sys/sysctl.h>
92 #include <machine/bus.h>
94 #include <net/ethernet.h>
96 #include <net/if_arp.h>
97 #include <net/if_dl.h>
98 #include <net/if_media.h>
99 #include <net/if_mib.h>
100 #include <net/if_types.h>
103 #include <netinet/in.h>
104 #include <netinet/in_systm.h>
105 #include <netinet/in_var.h>
106 #include <netinet/ip.h>
110 #include <net/bpfdesc.h>
112 #include <mips/adm5120/adm5120reg.h>
113 #include <mips/adm5120/if_admswreg.h>
114 #include <mips/adm5120/if_admswvar.h>
/*
 * TODO: add real locking.  These are no-op placeholders.
 *
 * Fix: the original definitions ended in a semicolon
 * ("do {} while(0);"), which defeats the do-while(0) idiom —
 * "ADMSW_LOCK(sc);" then expands to two statements and breaks an
 * unbraced if/else body.  The trailing semicolon belongs at the call
 * site, not in the definition.
 */
#define ADMSW_LOCK(sc) do { } while (0)
#define ADMSW_UNLOCK(sc) do { } while (0)
/*
 * Default VLAN membership: one bitmask per software interface.
 * Bit 6 is the CPU port; bit N (0..5) is physical port N, so each
 * VLAN initially contains the CPU port plus exactly one switch port.
 * NOTE(review): excerpt — the closing "};" of this initializer is
 * elided from this listing (original line numbers jump here).
 */
120 static uint8_t vlan_matrix[SW_DEVS] = {
121 (1 << 6) | (1 << 0), /* CPU + port0 */
122 (1 << 6) | (1 << 1), /* CPU + port1 */
123 (1 << 6) | (1 << 2), /* CPU + port2 */
124 (1 << 6) | (1 << 3), /* CPU + port3 */
125 (1 << 6) | (1 << 4), /* CPU + port4 */
126 (1 << 6) | (1 << 5), /* CPU + port5 */
/* Forward declarations for this file's static functions. */
129 /* ifnet entry points */
130 static void admsw_start(struct ifnet *);
131 static void admsw_watchdog(struct ifnet *);
132 static int admsw_ioctl(struct ifnet *, u_long, caddr_t);
133 static void admsw_init(void *);
134 static void admsw_stop(struct ifnet *, int);
/* hardware setup helpers */
136 static void admsw_reset(struct admsw_softc *);
137 static void admsw_set_filter(struct admsw_softc *);
/* interrupt service helpers; the int argument selects high/low priority */
139 static void admsw_txintr(struct admsw_softc *, int);
140 static void admsw_rxintr(struct admsw_softc *, int);
141 static int admsw_add_rxbuf(struct admsw_softc *, int, int);
/* convenience wrappers: third argument selects the high (1) or low (0) ring */
142 #define admsw_add_rxhbuf(sc, idx) admsw_add_rxbuf(sc, idx, 1)
143 #define admsw_add_rxlbuf(sc, idx) admsw_add_rxbuf(sc, idx, 0)
/* ifmedia callbacks */
145 static int admsw_mediachange(struct ifnet *);
146 static void admsw_mediastatus(struct ifnet *, struct ifmediareq *);
/* interrupt filter (FILTER_* return values) */
148 static int admsw_intr(void *);
150 /* bus entry points */
151 static int admsw_probe(device_t dev);
152 static int admsw_attach(device_t dev);
153 static int admsw_detach(device_t dev);
154 static int admsw_shutdown(device_t dev);
/*
 * admsw_dma_map_addr:
 *	bus_dmamap_load() callback; records the single segment's bus
 *	address through the opaque arg (presumably a bus_addr_t *;
 *	the declaration of "addr" is elided from this listing).
 * NOTE(review): excerpt — function header, braces and the error
 * check are elided here (original line numbers jump 157->164->166).
 */
157 admsw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
164 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
166 *addr = segs->ds_addr;
/*
 * admsw_rxbuf_map_addr:
 *	bus_dmamap_load() callback for receive buffers; stashes the
 *	single segment's address/length into the descsoft (arg is
 *	presumably cast to ds — the assignment is elided here).
 * NOTE(review): excerpt — braces and some statements elided.
 */
170 admsw_rxbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
172 struct admsw_descsoft *ds;
177 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
181 ds->ds_addr[0] = segs[0].ds_addr;
182 ds->ds_len[0] = segs[0].ds_len;
/*
 * admsw_mbuf_map_addr:
 *	bus_dmamap_load_mbuf() callback for transmit mbufs; accepts
 *	one or two segments and records them in the descsoft.  The
 *	chip's TX descriptor supports at most two buffers (see the
 *	ADM5120_DMA_BUF2ENABLE use in admsw_start).
 * NOTE(review): excerpt — the "arg -> ds" cast, the nseg==2 guard
 * around the second-segment copy, and braces are elided here.
 */
187 admsw_mbuf_map_addr(void *arg, bus_dma_segment_t *segs, int nseg,
188 bus_size_t mapsize, int error)
190 struct admsw_descsoft *ds;
197 if((nseg != 1) && (nseg != 2))
198 panic("%s: nseg == %d\n", __func__, nseg);
201 ds->ds_addr[0] = segs[0].ds_addr;
202 ds->ds_len[0] = segs[0].ds_len;
205 ds->ds_addr[1] = segs[1].ds_addr;
206 ds->ds_len[1] = segs[1].ds_len;
/*
 * admsw_probe:
 *	device_probe method; just sets the description (return of a
 *	probe priority is elided from this listing).
 */
213 admsw_probe(device_t dev)
216 device_set_desc(dev, "ADM5120 Switch Engine");
/*
 * Register accessors.  NOTE: these expand an unhygienic reference to
 * a local variable named "sc" that must be in scope at every call
 * site — intentional in this driver, but easy to trip over.
 */
220 #define REG_READ(o) bus_read_4((sc)->mem_res, (o))
221 #define REG_WRITE(o,v) bus_write_4((sc)->mem_res, (o),(v))
/*
 * admsw_init_bufs:
 *	(Re)initialize all four descriptor rings: free any mbufs left
 *	on the two TX rings and reset their descriptors, replenish the
 *	two RX rings, mark the last descriptor of each ring with
 *	RINGEND, and point the chip's base-address registers at the
 *	ring heads.
 * NOTE(review): excerpt — closing braces, some descriptor-field
 * resets and the tail of the function are elided from this listing.
 */
224 admsw_init_bufs(struct admsw_softc *sc)
227 struct admsw_desc *desc;
/* Drain and reset the high-priority TX ring. */
229 for (i = 0; i < ADMSW_NTXHDESC; i++) {
230 if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
231 m_freem(sc->sc_txhsoft[i].ds_mbuf);
232 sc->sc_txhsoft[i].ds_mbuf = NULL;
234 desc = &sc->sc_txhdescs[i];
237 desc->len = MAC_BUFLEN;
239 ADMSW_CDTXHSYNC(sc, i,
240 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
/* Terminate the ring and push the change to the device. */
242 sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
243 ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
244 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
/* Fill the high-priority RX ring with fresh buffers. */
246 for (i = 0; i < ADMSW_NRXHDESC; i++) {
247 if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
248 if (admsw_add_rxhbuf(sc, i) != 0)
249 panic("admsw_init_bufs\n");
251 ADMSW_INIT_RXHDESC(sc, i);
/* Same dance for the low-priority TX ring... */
254 for (i = 0; i < ADMSW_NTXLDESC; i++) {
255 if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
256 m_freem(sc->sc_txlsoft[i].ds_mbuf);
257 sc->sc_txlsoft[i].ds_mbuf = NULL;
259 desc = &sc->sc_txldescs[i];
262 desc->len = MAC_BUFLEN;
264 ADMSW_CDTXLSYNC(sc, i,
265 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
267 sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
268 ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
269 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
/* ...and the low-priority RX ring. */
271 for (i = 0; i < ADMSW_NRXLDESC; i++) {
272 if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
273 if (admsw_add_rxlbuf(sc, i) != 0)
274 panic("admsw_init_bufs\n");
276 ADMSW_INIT_RXLDESC(sc, i);
/* Tell the chip where the rings live. */
279 REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
280 REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
281 REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
282 REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));
/* All low-priority TX slots are now free (other ring state elided). */
284 sc->sc_txfree = ADMSW_NTXLDESC;
/*
 * admsw_setvlan:
 *	Program the two VLAN group registers from a 6-entry port
 *	bitmask array: entries 0-3 pack into VLAN_G1, 4-5 into VLAN_G2.
 * NOTE(review): "matrix" is plain char — on targets where char is
 * signed, matrix[3] << 24 sign-extends before the store; harmless if
 * the register write truncates to 32 bits, but worth confirming.
 * NOTE(review): excerpt — declaration of "i" and braces elided.
 */
291 admsw_setvlan(struct admsw_softc *sc, char matrix[6])
295 i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
296 REG_WRITE(VLAN_G1_REG, i);
297 i = matrix[4] + (matrix[5] << 8);
298 REG_WRITE(VLAN_G2_REG, i);
/*
 * admsw_reset:
 *	Full hardware reset sequence: quiesce ports and the CPU port,
 *	park the PHYs, soft-reset the switch core, reprogram CPU-port
 *	and port configuration, re-enable PHY autonegotiation, mask
 *	and acknowledge all interrupts, disable flow control (see the
 *	DDB note below), reload the VLAN matrix, program one MAC
 *	address per VLAN, and disable the watchdog timer.
 * NOTE(review): excerpt — delays, some statements and braces are
 * elided from this listing (original line numbers jump).
 */
302 admsw_reset(struct admsw_softc *sc)
/* Disable all ports and the CPU port before resetting. */
307 REG_WRITE(PORT_CONF0_REG,
308 REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
309 REG_WRITE(CPUP_CONF_REG,
310 REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);
312 /* Wait for DMA to complete. Overkill. In 3ms, we can
313 * send at least two entire 1500-byte packets at 10 Mb/s.
317 /* The datasheet recommends that we move all PHYs to reset
318 * state prior to software reset.
320 REG_WRITE(PHY_CNTL2_REG,
321 REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);
323 /* Reset the switch. */
324 REG_WRITE(ADMSW_SW_RES, 0x1);
/* Pretend we completed a (non-existent) boot-loader handshake. */
328 REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);
/* Reconfigure the CPU port: CRC padding on, unknown/multicast drop. */
331 REG_WRITE(CPUP_CONF_REG,
332 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
333 CPUP_CONF_DMCP_MASK);
/* Re-enable ports with crossbar/backpressure defaults. */
335 REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);
/* Release PHY reset and enable autoneg + auto-MDIX. */
337 REG_WRITE(PHY_CNTL2_REG,
338 REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
339 PHY_CNTL2_AMDIX_MASK);
341 REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);
/* Mask, then acknowledge, every interrupt source. */
343 REG_WRITE(ADMSW_INT_MASK, INT_MASK);
344 REG_WRITE(ADMSW_INT_ST, INT_MASK);
347 * While in DDB, we stop servicing interrupts, RX ring
348 * fills up and when free block counter falls behind FC
349 * threshold, the switch starts to emit 802.3x PAUSE
350 * frames. This can upset peer switches.
352 * Stop this from happening by disabling FC and D2
356 REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));
358 admsw_setvlan(sc, vlan_matrix);
/* One unicast MAC per VLAN: base address + i in the last octet. */
360 for (i = 0; i < SW_DEVS; i++) {
361 REG_WRITE(MAC_WT1_REG,
363 (sc->sc_enaddr[3]<<8) |
364 (sc->sc_enaddr[4]<<16) |
365 ((sc->sc_enaddr[5]+i)<<24));
366 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
367 (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
368 MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
/* NOTE(review): unbounded busy-wait on hardware completion. */
370 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
/* Disable the hardware watchdog. */
373 wdog1 = REG_READ(ADM5120_WDOG1);
374 REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
/*
 * admsw_attach:
 *	device_attach method.  Obtains the MAC address, maps the
 *	register window, hooks the interrupt, creates/loads the DMA
 *	tags and maps for control data plus all four rings, then
 *	creates one ifnet + ifmedia instance per switch port (SW_DEVS)
 *	and ether_ifattach()es each.
 * NOTE(review): excerpt — error-handling paths, resource-id setup
 * and several statements are elided from this listing; cleanup on
 * failure cannot be assessed from what is shown.
 */
378 admsw_attach(device_t dev)
380 uint8_t enaddr[ETHER_ADDR_LEN];
381 struct admsw_softc *sc = (struct admsw_softc *) device_get_softc(dev);
386 device_printf(dev, "ADM5120 Switch Engine, %d ports\n", SW_DEVS);
389 /* XXXMIPS: fix it */
397 memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));
399 device_printf(sc->sc_dev, "base Ethernet address %s\n",
400 ether_sprintf(enaddr));
/* Map the switch's register window. */
403 if ((sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
404 RF_ACTIVE)) == NULL) {
405 device_printf(dev, "unable to allocate memory resource\n");
409 /* Hook up the interrupt handler. */
411 if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
412 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
413 device_printf(dev, "unable to allocate IRQ resource\n");
417 if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET,
418 admsw_intr, NULL, sc, &sc->sc_ih)) != 0) {
420 "WARNING: unable to register interrupt handler\n");
425 * Allocate the control data structures, and create and load the
/* 4-byte aligned tag holding all descriptor rings in one segment. */
428 if ((error = bus_dma_tag_create(NULL, 4, 0,
429 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
430 NULL, NULL, sizeof(struct admsw_control_data), 1,
431 sizeof(struct admsw_control_data), 0, NULL, NULL,
432 &sc->sc_control_dmat)) != 0) {
433 device_printf(sc->sc_dev,
434 "unable to create control data DMA map, error = %d\n",
439 if ((error = bus_dmamem_alloc(sc->sc_control_dmat,
440 (void **)&sc->sc_control_data, BUS_DMA_NOWAIT,
441 &sc->sc_cddmamap)) != 0) {
442 device_printf(sc->sc_dev,
443 "unable to allocate control data, error = %d\n", error);
447 if ((error = bus_dmamap_load(sc->sc_control_dmat, sc->sc_cddmamap,
448 sc->sc_control_data, sizeof(struct admsw_control_data),
449 admsw_dma_map_addr, &sc->sc_cddma, 0)) != 0) {
450 device_printf(sc->sc_dev,
451 "unable to load control data DMA map, error = %d\n", error);
456 * Create the transmit buffer DMA maps.
458 if ((error = bus_dma_tag_create(NULL, 1, 0,
459 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
460 NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL,
461 &sc->sc_bufs_dmat)) != 0) {
462 device_printf(sc->sc_dev,
463 "unable to create control data DMA map, error = %d\n",
/* Per-descriptor DMA maps for both TX rings... */
468 for (i = 0; i < ADMSW_NTXHDESC; i++) {
469 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
470 &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
471 device_printf(sc->sc_dev,
472 "unable to create txh DMA map %d, error = %d\n",
476 sc->sc_txhsoft[i].ds_mbuf = NULL;
479 for (i = 0; i < ADMSW_NTXLDESC; i++) {
480 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
481 &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
482 device_printf(sc->sc_dev,
483 "unable to create txl DMA map %d, error = %d\n",
487 sc->sc_txlsoft[i].ds_mbuf = NULL;
491 * Create the receive buffer DMA maps.
493 for (i = 0; i < ADMSW_NRXHDESC; i++) {
494 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
495 &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
496 device_printf(sc->sc_dev,
497 "unable to create rxh DMA map %d, error = %d\n",
501 sc->sc_rxhsoft[i].ds_mbuf = NULL;
504 for (i = 0; i < ADMSW_NRXLDESC; i++) {
505 if ((error = bus_dmamap_create(sc->sc_bufs_dmat, 0,
506 &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
507 device_printf(sc->sc_dev,
508 "unable to create rxl DMA map %d, error = %d\n",
512 sc->sc_rxlsoft[i].ds_mbuf = NULL;
/* One ifnet + ifmedia per switch port. */
518 for (i = 0; i < SW_DEVS; i++) {
519 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange,
521 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
522 ifmedia_add(&sc->sc_ifmedia[i],
523 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
524 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
525 ifmedia_add(&sc->sc_ifmedia[i],
526 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
527 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
528 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);
/* NOTE(review): stray double semicolon on the next line. */
530 ifp = sc->sc_ifnet[i] = if_alloc(IFT_ETHER);;
532 /* Setup interface parameters */
534 if_initname(ifp, device_get_name(dev), i);
535 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
536 ifp->if_ioctl = admsw_ioctl;
537 ifp->if_output = ether_output;
538 ifp->if_start = admsw_start;
539 ifp->if_watchdog = admsw_watchdog;
541 ifp->if_init = admsw_init;
542 ifp->if_mtu = ETHERMTU;
543 ifp->if_baudrate = IF_Mbps(100);
544 IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
545 ifp->if_snd.ifq_drv_maxlen = max(ADMSW_NTXLDESC, IFQ_MAXLEN);
546 IFQ_SET_READY(&ifp->if_snd);
547 ifp->if_capabilities |= IFCAP_VLAN_MTU;
549 /* Attach the interface. */
550 ether_ifattach(ifp, enaddr);
554 /* XXX: admwdog_attach(sc); */
556 /* leave interrupts and cpu port disabled */
/*
 * admsw_detach:
 *	device_detach method — unimplemented stub; only logs.
 * NOTE(review): resources acquired in attach are never released
 * here, so unloading this driver would leak them.
 */
561 admsw_detach(device_t dev)
564 printf("TODO: DETACH\n");
/*
 * admsw_shutdown:
 *	device_shutdown method; stops every per-port interface with
 *	the "disable" flag set so the chip is quiet across reboot.
 * NOTE(review): excerpt — return statement and braces elided.
 */
571 * Make sure the interface is stopped at reboot time.
574 admsw_shutdown(device_t dev)
576 struct admsw_softc *sc;
579 sc = device_get_softc(dev);
580 for (i = 0; i < SW_DEVS; i++)
581 admsw_stop(sc->sc_ifnet[i], 1);
/*
 * NOTE(review): excerpt — loop headers, the copy/defrag fallback's
 * control flow and several statements are elided from this listing
 * (original line numbers jump); read the full source before editing.
 */
587 * admsw_start: [ifnet interface function]
589 * Start packet transmission on the interface.
592 admsw_start(struct ifnet *ifp)
594 struct admsw_softc *sc = ifp->if_softc;
596 struct admsw_descsoft *ds;
597 struct admsw_desc *desc;
599 struct ether_header *eh;
600 int error, nexttx, len, i;
604 * Loop through the send queues, setting up transmit descriptors
605 * unitl we drain the queues, or use up all available transmit
/* Round-robin over the per-port ifnets. */
614 ifp = sc->sc_ifnet[i];
615 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE))
616 == IFF_DRV_RUNNING) {
617 /* Grab a packet off the queue. */
618 IF_DEQUEUE(&ifp->if_snd, m0);
631 /* Get a spare descriptor. */
632 if (sc->sc_txfree == 0) {
633 /* No more slots left; notify upper layer. */
634 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
637 nexttx = sc->sc_txnext;
638 desc = &sc->sc_txldescs[nexttx];
639 ds = &sc->sc_txlsoft[nexttx];
640 dmamap = ds->ds_dmamap;
643 * Load the DMA map. If this fails, the packet either
644 * didn't fit in the alloted number of segments, or we
645 * were short on resources. In this case, we'll copy
/* Short frames are also copied so they can be zero-padded to
 * the Ethernet minimum before hitting the wire. */
648 if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
649 bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
650 admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
651 MGETHDR(m, M_DONTWAIT, MT_DATA);
653 device_printf(sc->sc_dev,
654 "unable to allocate Tx mbuf\n");
657 if (m0->m_pkthdr.len > MHLEN) {
658 MCLGET(m, M_DONTWAIT);
659 if ((m->m_flags & M_EXT) == 0) {
660 device_printf(sc->sc_dev,
661 "unable to allocate Tx cluster\n");
666 m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
667 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
668 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
/* Pad runts.  NOTE(review): the memset length subtracts
 * ETHER_CRC_LEN but the trailing-space check and the final
 * length do not — looks inconsistent; confirm against the
 * committed source before changing. */
669 if (m->m_pkthdr.len < ETHER_MIN_LEN) {
670 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
671 panic("admsw_start: M_TRAILINGSPACE\n");
672 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
673 ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
674 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
676 error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat,
677 dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
679 device_printf(sc->sc_dev,
680 "unable to load Tx buffer, error = %d\n",
692 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
695 /* Sync the DMA map. */
696 bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);
698 if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
699 panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);
/* Fill the hardware descriptor: up to two buffers per packet. */
700 desc->data = ds->ds_addr[0];
701 desc->len = len = ds->ds_len[0];
702 if (ds->ds_nsegs > 1) {
703 len += ds->ds_len[1];
704 desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
/* Tag the frame with its output VLAN and total length. */
707 desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
708 eh = mtod(m0, struct ether_header *);
709 if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
710 m0->m_pkthdr.csum_flags & CSUM_IP)
711 desc->status |= ADM5120_DMA_CSUM;
712 if (nexttx == ADMSW_NTXLDESC - 1)
713 desc->data |= ADM5120_DMA_RINGEND;
/* Hand the descriptor to the chip last. */
714 desc->data |= ADM5120_DMA_OWN;
716 /* Sync the descriptor. */
717 ADMSW_CDTXLSYNC(sc, nexttx,
718 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
720 REG_WRITE(SEND_TRIG_REG, 1);
721 /* printf("send slot %d\n",nexttx); */
724 * Store a pointer to the packet so we can free it later.
728 /* Advance the Tx pointer. */
730 sc->sc_txnext = ADMSW_NEXTTXL(nexttx);
732 /* Pass the packet to any BPF listeners. */
735 /* Set a watchdog timer in case the chip flakes out. */
736 sc->sc_ifnet[0]->if_timer = 5;
/*
 * NOTE(review): excerpt — the TX-interrupt recheck between the two
 * identical-looking txfree/if_timer tests is elided here, which is
 * why each condition appears twice; see the full source.
 */
741 * admsw_watchdog: [ifnet interface function]
743 * Watchdog timer handler.
746 admsw_watchdog(struct ifnet *ifp)
748 struct admsw_softc *sc = ifp->if_softc;
751 /* Check if an interrupt was lost. */
752 if (sc->sc_txfree == ADMSW_NTXLDESC) {
753 device_printf(sc->sc_dev, "watchdog false alarm\n");
756 if (sc->sc_ifnet[0]->if_timer != 0)
757 device_printf(sc->sc_dev, "watchdog timer is %d!\n",
758 sc->sc_ifnet[0]->if_timer);
/* Re-test after servicing TX: did a lost IRQ explain it? */
760 if (sc->sc_txfree == ADMSW_NTXLDESC) {
761 device_printf(sc->sc_dev, "tx IRQ lost (queue empty)\n");
764 if (sc->sc_ifnet[0]->if_timer != 0) {
765 device_printf(sc->sc_dev, "tx IRQ lost (timer recharged)\n");
/* Genuine timeout: restart all ports. */
769 device_printf(sc->sc_dev, "device timeout, txfree = %d\n",
771 for (vlan = 0; vlan < SW_DEVS; vlan++)
772 admsw_stop(sc->sc_ifnet[vlan], 0);
775 /* Try to get more packets going. */
/*
 * NOTE(review): excerpt — the switch(cmd) skeleton, the port-search
 * loop body/increment and several case labels are elided here.
 */
780 * admsw_ioctl: [ifnet interface function]
782 * Handle control requests from the operator.
785 admsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
787 struct admsw_softc *sc = ifp->if_softc;
/* Map this ifnet back to its port index. */
797 while(port < SW_DEVS)
798 if(ifp == sc->sc_ifnet[port])
/* SIOCSIFMEDIA/SIOCGIFMEDIA: delegate to the per-port ifmedia. */
805 error = ifmedia_ioctl(ifp, (struct ifreq *)data,
806 &sc->sc_ifmedia[port], cmd);
/* SIOC[SG]DRVSPEC: raw get/set of the 6-byte VLAN matrix. */
811 ifd = (struct ifdrv *) data;
812 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
816 if (cmd == SIOCGDRVSPEC) {
817 error = copyout(vlan_matrix, ifd->ifd_data,
818 sizeof(vlan_matrix));
820 error = copyin(ifd->ifd_data, vlan_matrix,
821 sizeof(vlan_matrix));
822 admsw_setvlan(sc, vlan_matrix);
/* Everything else goes through the generic Ethernet ioctl. */
827 error = ether_ioctl(ifp, cmd, data);
828 if (error == ENETRESET) {
830 * Multicast list has changed; set the hardware filter
833 admsw_set_filter(sc);
839 /* Try to get more packets going. */
/*
 * Interrupt filter: read-and-acknowledge the pending bits, then
 * dispatch RX/TX service for high- and low-priority rings.
 * NOTE(review): excerpt — the admsw_rxintr/admsw_txintr calls under
 * each pending-bit test are elided from this listing.
 */
850 * Interrupt service routine.
853 admsw_intr(void *arg)
855 struct admsw_softc *sc = arg;
/* Ack everything we saw in one write. */
858 pending = REG_READ(ADMSW_INT_ST);
859 REG_WRITE(ADMSW_INT_ST, pending);
862 return (FILTER_STRAY);
864 if ((pending & ADMSW_INTR_RHD) != 0)
867 if ((pending & ADMSW_INTR_RLD) != 0)
870 if ((pending & ADMSW_INTR_SHD) != 0)
873 if ((pending & ADMSW_INTR_SLD) != 0)
876 return (FILTER_HANDLED);
/*
 * Walk completed (chip-released) low-priority TX descriptors:
 * unload and free each mbuf, credit sc_txfree, clear OACTIVE on all
 * ports, restart transmission, and cancel the watchdog when idle.
 * NOTE(review): excerpt — the "prio" argument is unused in the lines
 * shown; txdirty/txfree bookkeeping statements are elided.
 */
882 * Helper; handle transmit interrupts.
885 admsw_txintr(struct admsw_softc *sc, int prio)
888 struct admsw_desc *desc;
889 struct admsw_descsoft *ds;
893 /* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
894 for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
895 i = ADMSW_NEXTTXL(i)) {
897 ADMSW_CDTXLSYNC(sc, i,
898 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
900 desc = &sc->sc_txldescs[i];
901 ds = &sc->sc_txlsoft[i];
/* Chip still owns this descriptor: stop scanning. */
902 if (desc->data & ADM5120_DMA_OWN) {
903 ADMSW_CDTXLSYNC(sc, i,
904 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
908 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
909 BUS_DMASYNC_POSTWRITE);
910 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
911 m_freem(ds->ds_mbuf);
/* Recover which port this frame belonged to from the VLAN bit. */
914 vlan = ffs(desc->status & 0x3f) - 1;
915 if (vlan < 0 || vlan >= SW_DEVS)
916 panic("admsw_txintr: bad vlan\n");
917 ifp = sc->sc_ifnet[vlan];
919 /* printf("clear tx slot %d\n",i); */
/* Descriptors freed: let every port queue again. */
928 for (vlan = 0; vlan < SW_DEVS; vlan++)
929 sc->sc_ifnet[vlan]->if_drv_flags &= ~IFF_DRV_OACTIVE;
931 ifp = sc->sc_ifnet[0];
933 /* Try to queue more packets. */
937 * If there are no more pending transmissions,
938 * cancel the watchdog timer.
940 if (sc->sc_txfree == ADMSW_NTXLDESC)
945 /* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
/*
 * Service the low-priority RX ring: optionally resynchronize the
 * software ring pointer with the chip, then for each chip-released
 * descriptor map the source port to a VLAN ifnet, replace the
 * buffer, set checksum flags and pass the frame up.
 * NOTE(review): excerpt — this listing elides the conditional
 * structure around the resync scan, error counters, and the mbuf
 * extraction; the sync-call sequences shown are not independently
 * verifiable here.  High-priority receive is unsupported (panics).
 */
951 * Helper; handle receive interrupts.
954 admsw_rxintr(struct admsw_softc *sc, int high)
957 struct admsw_descsoft *ds;
960 int i, len, port, vlan;
962 /* printf("rxintr\n"); */
965 panic("admsw_rxintr: high priority packet\n");
/* Debug path: detect software/hardware ring-pointer drift. */
968 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
969 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
970 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
971 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
972 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
/* Scan forward for the first descriptor the chip still owns. */
976 ADMSW_CDRXLSYNC(sc, i,
977 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
978 i = ADMSW_NEXTRXL(i);
979 /* the ring is empty, just return. */
980 if (i == sc->sc_rxptr)
982 ADMSW_CDRXLSYNC(sc, i,
983 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
984 } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
986 ADMSW_CDRXLSYNC(sc, i,
987 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
989 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
990 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
992 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
993 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
994 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
996 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
997 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
998 /* We've fallen behind the chip: catch it. */
1000 device_printf(sc->sc_dev,
1001 "RX ring resync, base=%x, work=%x, %d -> %d\n",
1002 REG_READ(RECV_LBADDR_REG),
1003 REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
1006 /* ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync); */
/* Main receive loop. */
1010 for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
1011 ds = &sc->sc_rxlsoft[i];
1013 ADMSW_CDRXLSYNC(sc, i,
1014 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1016 if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
1017 ADMSW_CDRXLSYNC(sc, i,
1018 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1022 /* printf("process slot %d\n",i); */
1024 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
1025 BUS_DMASYNC_POSTREAD);
/* Frame length (minus CRC) and ingress port from the status word. */
1027 stat = sc->sc_rxldescs[i].status;
1028 len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
1029 len -= ETHER_CRC_LEN;
1030 port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
/* First VLAN whose matrix contains the ingress port wins. */
1032 for (vlan = 0; vlan < SW_DEVS; vlan++)
1033 if ((1 << port) & vlan_matrix[vlan])
1036 if (vlan == SW_DEVS)
1039 ifp = sc->sc_ifnet[vlan];
/* Replace the buffer; on failure the old one is recycled. */
1042 if (admsw_add_rxlbuf(sc, i) != 0) {
1044 ADMSW_INIT_RXLDESC(sc, i);
1045 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap,
1046 BUS_DMASYNC_PREREAD);
1050 m->m_pkthdr.rcvif = ifp;
1051 m->m_pkthdr.len = m->m_len = len;
/* Hardware IP checksum result -> mbuf csum flags. */
1052 if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
1053 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1054 if (!(stat & ADM5120_DMA_CSUMFAIL))
1055 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1061 (*ifp->if_input)(ifp, m);
1065 /* Update the receive pointer. */
/*
 * Bring a port up.  The first port to come up (sc->ndevs == 0) also
 * initializes the rings, enables the CPU port and unmasks the
 * interrupts the driver services; later ports only flip their own
 * RUNNING/OACTIVE flags.
 * NOTE(review): excerpt — ndevs increment and the per-ifnet loop
 * closing braces are elided here.
 */
1070 * admsw_init: [ifnet interface function]
1072 * Initialize the interface.
1075 admsw_init(void *xsc)
1077 struct admsw_softc *sc = xsc;
1081 for (i = 0; i < SW_DEVS; i++) {
1082 ifp = sc->sc_ifnet[i];
1083 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1084 if (sc->ndevs == 0) {
1085 admsw_init_bufs(sc);
/* Enable the CPU port (DCPUP cleared relative to reset state). */
1087 REG_WRITE(CPUP_CONF_REG,
1088 CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
1089 CPUP_CONF_DMCP_MASK);
1090 /* clear all pending interrupts */
1091 REG_WRITE(ADMSW_INT_ST, INT_MASK);
1093 /* enable needed interrupts */
1094 REG_WRITE(ADMSW_INT_MASK,
1095 REG_READ(ADMSW_INT_MASK) &
1096 ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD |
1097 ADMSW_INTR_RHD | ADMSW_INTR_RLD |
1098 ADMSW_INTR_HDF | ADMSW_INTR_LDF));
1104 /* mark iface as running */
1105 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1106 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1109 /* Set the receive filter. */
1110 admsw_set_filter(sc);
/*
 * Stop one port; when the last running port stops (--ndevs == 0),
 * also disable the CPU port, acknowledge and mask all interrupts.
 * NOTE(review): excerpt — the if_timer reset and return are elided.
 */
1114 * admsw_stop: [ifnet interface function]
1116 * Stop transmission on the interface.
1119 admsw_stop(struct ifnet *ifp, int disable)
1121 struct admsw_softc *sc = ifp->if_softc;
/* Already stopped: nothing to do. */
1123 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1126 if (--sc->ndevs == 0) {
1127 /* printf("debug: de-initializing hardware\n"); */
1129 /* disable cpu port */
1130 REG_WRITE(CPUP_CONF_REG,
1131 CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
1132 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);
1134 /* XXX We should disable, then clear? --dyoung */
1135 /* clear all pending interrupts */
1136 REG_WRITE(ADMSW_INT_ST, INT_MASK);
1138 /* disable interrupts */
1139 REG_WRITE(ADMSW_INT_MASK, INT_MASK);
1142 /* Mark the interface as down and cancel the watchdog timer. */
1143 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/*
 * Build the CPU-port forwarding configuration from each port's
 * IFF_PROMISC/multicast state: unknown-unicast and multicast
 * forwarding to the CPU is enabled per port by XORing the per-port
 * bitmasks into the DUNP/DMCP fields (the masks are first set to
 * "disabled", so XOR selectively re-enables).
 * NOTE(review): excerpt — the ALLMULTI accumulation into "allmc"
 * and some loop braces are elided from this listing.
 */
1152 * Set up the receive filter.
1155 admsw_set_filter(struct admsw_softc *sc)
1158 uint32_t allmc, anymc, conf, promisc;
1160 struct ifmultiaddr *ifma;
1162 /* Find which ports should be operated in promisc mode. */
1163 allmc = anymc = promisc = 0;
1164 for (i = 0; i < SW_DEVS; i++) {
1165 ifp = sc->sc_ifnet[i];
1166 if (ifp->if_flags & IFF_PROMISC)
1167 promisc |= vlan_matrix[i];
1169 ifp->if_flags &= ~IFF_ALLMULTI;
/* Any link-layer multicast membership marks this port in anymc. */
1171 if_maddr_rlock(ifp);
1172 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
1174 if (ifma->ifma_addr->sa_family != AF_LINK)
1177 anymc |= vlan_matrix[i];
1179 if_maddr_runlock(ifp);
1182 conf = REG_READ(CPUP_CONF_REG);
1183 /* 1 Disable forwarding of unknown & multicast packets to
1185 * 2 Enable forwarding of unknown & multicast packets to
1186 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
1188 conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
1189 /* Enable forwarding of unknown packets to CPU on selected ports. */
1190 conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
1191 conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
1192 conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
1193 REG_WRITE(CPUP_CONF_REG, conf);
/*
 * Allocate a fresh mbuf cluster for RX slot "idx" on the high
 * (high != 0) or low ring, DMA-load it via admsw_rxbuf_map_addr,
 * and reinitialize the hardware descriptor.  Panics if the DMA load
 * fails (marked XXX in the original).
 * NOTE(review): excerpt — ENOBUFS error returns for the mbuf
 * allocation failures and the final return are elided here.
 */
1199 * Add a receive buffer to the indicated descriptor.
1202 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
1204 struct admsw_descsoft *ds;
1209 ds = &sc->sc_rxhsoft[idx];
1211 ds = &sc->sc_rxlsoft[idx];
1213 MGETHDR(m, M_DONTWAIT, MT_DATA);
1217 MCLGET(m, M_DONTWAIT);
1218 if ((m->m_flags & M_EXT) == 0) {
/* Replacing an existing buffer: drop its old DMA mapping first. */
1223 if (ds->ds_mbuf != NULL)
1224 bus_dmamap_unload(sc->sc_bufs_dmat, ds->ds_dmamap);
1228 error = bus_dmamap_load(sc->sc_bufs_dmat, ds->ds_dmamap,
1229 m->m_ext.ext_buf, m->m_ext.ext_size, admsw_rxbuf_map_addr,
1230 ds, BUS_DMA_NOWAIT);
1232 device_printf(sc->sc_dev,
1233 "can't load rx DMA map %d, error = %d\n", idx, error);
1234 panic("admsw_add_rxbuf"); /* XXX */
1237 bus_dmamap_sync(sc->sc_bufs_dmat, ds->ds_dmamap, BUS_DMASYNC_PREREAD);
1240 ADMSW_INIT_RXHDESC(sc, idx);
1242 ADMSW_INIT_RXLDESC(sc, idx);
/*
 * ifmedia "change" callback: translate the selected media word into
 * PHY_CNTL2 autoneg/speed/duplex bits for this port and rewrite the
 * per-port field of the shared register.
 * NOTE(review): excerpt — port-search loop increment, EINVAL
 * returns and the final return are elided here.  The shift of the
 * 3-bit field group by "port" suggests per-port bit lanes in
 * PHY_CNTL2 — confirm against the register layout in if_admswreg.h.
 */
1248 admsw_mediachange(struct ifnet *ifp)
1250 struct admsw_softc *sc = ifp->if_softc;
1252 struct ifmedia *ifm;
/* Find which port this ifnet is. */
1255 while(port < SW_DEVS) {
1256 if(ifp == sc->sc_ifnet[port])
1262 ifm = &sc->sc_ifmedia[port];
1264 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* Media word -> PHY control bits. */
1267 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1268 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX;
1269 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
1270 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1271 val = PHY_CNTL2_100M|PHY_CNTL2_FDX;
1273 val = PHY_CNTL2_100M;
1274 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
1275 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1276 val = PHY_CNTL2_FDX;
/* Read-modify-write only this port's lane of PHY_CNTL2. */
1282 old = REG_READ(PHY_CNTL2_REG);
1283 new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port);
1284 new |= (val << port);
1287 REG_WRITE(PHY_CNTL2_REG, new);
/*
 * ifmedia "status" callback: report link, speed and duplex for this
 * port from the per-port bit lanes of PHY_ST_REG.
 * NOTE(review): excerpt — port-search loop increment and braces are
 * elided here.
 */
1293 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1295 struct admsw_softc *sc = ifp->if_softc;
1299 while(port < SW_DEVS) {
1300 if(ifp == sc->sc_ifnet[port])
1306 ifmr->ifm_status = IFM_AVALID;
1307 ifmr->ifm_active = IFM_ETHER;
/* Shift the status register so this port's bits land at bit 0. */
1309 status = REG_READ(PHY_ST_REG) >> port;
1311 if ((status & PHY_ST_LINKUP) == 0) {
1312 ifmr->ifm_active |= IFM_NONE;
1316 ifmr->ifm_status |= IFM_ACTIVE;
1317 ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
1318 if (status & PHY_ST_FDX)
1319 ifmr->ifm_active |= IFM_FDX;
/*
 * newbus glue: method table, driver descriptor, and registration on
 * the "obio" bus, with a dependency on the ether module.
 * NOTE(review): excerpt — the method-table terminator, the driver
 * name field and closing braces are elided from this listing.
 */
1322 static device_method_t admsw_methods[] = {
1323 /* Device interface */
1324 DEVMETHOD(device_probe, admsw_probe),
1325 DEVMETHOD(device_attach, admsw_attach),
1326 DEVMETHOD(device_detach, admsw_detach),
1327 DEVMETHOD(device_shutdown, admsw_shutdown),
1332 static devclass_t admsw_devclass;
1334 static driver_t admsw_driver = {
1337 sizeof(struct admsw_softc),
1340 DRIVER_MODULE(admsw, obio, admsw_driver, admsw_devclass, 0, 0);
1341 MODULE_DEPEND(admsw, ether, 1, 1, 1);