2 * Copyright (c) 2009 Yohanes Nugroho <yohanes@gmail.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
38 #include <sys/socket.h>
39 #include <sys/sockio.h>
40 #include <sys/sysctl.h>
41 #include <sys/taskqueue.h>
43 #include <net/ethernet.h>
45 #include <net/if_arp.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 #include <net/if_vlan_var.h>
52 #include <netinet/in.h>
53 #include <netinet/in_systm.h>
54 #include <netinet/in_var.h>
55 #include <netinet/ip.h>
59 #include <net/bpfdesc.h>
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
64 #include <arm/econa/if_ecereg.h>
65 #include <arm/econa/if_ecevar.h>
66 #include <arm/econa/econa_var.h>
68 #include <machine/bus.h>
69 #include <machine/intr.h>
71 /* "device miibus" required. See GENERIC if you get errors here. */
72 #include "miibus_if.h"
/*
 * NOTE(review): this file is a numbered listing with interior lines
 * elided (the embedded line numbers skip), so some declarations below
 * are missing their leading tokens; it will not compile as-is.
 */
/* Default MAC for the VLAN-0 group; rewritten by ece_set_mac(). */
75 vlan0_mac[ETHER_ADDR_LEN] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x19};
78 * Boot loader expects the hardware state to be the same when we
79 * restart the device (warm boot), so we need to save the initial
/* Power-on register snapshots, captured in ece_activate(). */
82 int initial_switch_config;
83 int initial_cpu_config;
84 int initial_port0_config;
85 int initial_port1_config;
/* 32-bit accessors for the memory-mapped switch register window. */
87 static inline uint32_t
88 read_4(struct ece_softc *sc, bus_size_t off)
91 return (bus_read_4(sc->mem_res, off));
95 write_4(struct ece_softc *sc, bus_size_t off, uint32_t val)
98 bus_write_4(sc->mem_res, off, val);
/* Per-softc mutexes: main (sc_mtx), TX, cleanup and RX locks. */
101 #define ECE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
102 #define ECE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
103 #define ECE_LOCK_INIT(_sc) \
104 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
105 MTX_NETWORK_LOCK, MTX_DEF)
107 #define ECE_TXLOCK(_sc) mtx_lock(&(_sc)->sc_mtx_tx)
108 #define ECE_TXUNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx_tx)
109 #define ECE_TXLOCK_INIT(_sc) \
110 mtx_init(&_sc->sc_mtx_tx, device_get_nameunit(_sc->dev), \
111 "ECE TX Lock", MTX_DEF)
113 #define ECE_CLEANUPLOCK(_sc) mtx_lock(&(_sc)->sc_mtx_cleanup)
114 #define ECE_CLEANUPUNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx_cleanup)
115 #define ECE_CLEANUPLOCK_INIT(_sc) \
116 mtx_init(&_sc->sc_mtx_cleanup, device_get_nameunit(_sc->dev), \
117 "ECE cleanup Lock", MTX_DEF)
119 #define ECE_RXLOCK(_sc) mtx_lock(&(_sc)->sc_mtx_rx)
120 #define ECE_RXUNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx_rx)
121 #define ECE_RXLOCK_INIT(_sc) \
122 mtx_init(&_sc->sc_mtx_rx, device_get_nameunit(_sc->dev), \
123 "ECE RX Lock", MTX_DEF)
/*
 * NOTE(review): the _DESTROY/_ASSERT macros below embed a trailing ';'
 * in their expansion, so invoking them with a ';' produces an empty
 * statement — hazardous after an unbraced if/else.
 */
125 #define ECE_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
126 #define ECE_TXLOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx_tx);
127 #define ECE_RXLOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx_rx);
128 #define ECE_CLEANUPLOCK_DESTROY(_sc) \
129 mtx_destroy(&_sc->sc_mtx_cleanup);
131 #define ECE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
132 #define ECE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
134 static devclass_t ece_devclass;
136 /* ifnet entry points */
138 static void eceinit_locked(void *);
139 static void ecestart_locked(struct ifnet *);
141 static void eceinit(void *);
142 static void ecestart(struct ifnet *);
143 static void ecestop(struct ece_softc *);
144 static int eceioctl(struct ifnet * ifp, u_long, caddr_t);
146 /* bus entry points */
148 static int ece_probe(device_t dev);
149 static int ece_attach(device_t dev);
150 static int ece_detach(device_t dev);
/* Interrupt handlers: RX complete, queue-full, and status/error. */
151 static void ece_intr(void *);
152 static void ece_intr_qf(void *);
153 static void ece_intr_status(void *xsc);
155 /* helper routines */
156 static int ece_activate(device_t dev);
157 static void ece_deactivate(device_t dev);
158 static int ece_ifmedia_upd(struct ifnet *ifp);
159 static void ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
160 static int ece_get_mac(struct ece_softc *sc, u_char *eaddr);
161 static void ece_set_mac(struct ece_softc *sc, u_char *eaddr);
/* Switch/VLAN configuration helpers (econa has an embedded switch). */
162 static int configure_cpu_port(struct ece_softc *sc);
163 static int configure_lan_port(struct ece_softc *sc, int phy_type);
164 static void set_pvid(struct ece_softc *sc, int port0, int port1, int cpu);
165 static void set_vlan_vid(struct ece_softc *sc, int vlan);
166 static void set_vlan_member(struct ece_softc *sc, int vlan);
167 static void set_vlan_tag(struct ece_softc *sc, int vlan);
168 static int hardware_init(struct ece_softc *sc);
169 static void ece_intr_rx_locked(struct ece_softc *sc, int count);
/* Descriptor-ring DMA teardown. */
171 static void ece_free_desc_dma_tx(struct ece_softc *sc);
172 static void ece_free_desc_dma_rx(struct ece_softc *sc);
/* Deferred work run on the driver's private taskqueue. */
174 static void ece_intr_task(void *arg, int pending __unused);
175 static void ece_tx_task(void *arg, int pending __unused);
176 static void ece_cleanup_task(void *arg, int pending __unused);
178 static int ece_allocate_dma(struct ece_softc *sc);
180 static void ece_intr_tx(void *xsc);
/* ARL (address resolution logic) table maintenance. */
182 static void clear_mac_entries(struct ece_softc *ec, int include_this_mac);
184 static uint32_t read_mac_entry(struct ece_softc *ec,
188 /*PHY related functions*/
/*
 * Read a PHY register over the MDIO interface: arm the PHY_CONTROL
 * register, then poll (bounded, 0x1000 iterations) for PHY_RW_OK.
 * NOTE(review): listing elides the declarations, braces and the
 * failure-path return — do not assume a specific error value here.
 */
190 phy_read(struct ece_softc *sc, int phy, int reg)
196 write_4(sc, PHY_CONTROL, PHY_RW_OK);
197 write_4(sc, PHY_CONTROL,
198 (PHY_ADDRESS(phy)|PHY_READ_COMMAND |
201 for (ii = 0; ii < 0x1000; ii++) {
202 status = read_4(sc, PHY_CONTROL);
203 if (status & PHY_RW_OK) {
204 /* Clear the rw_ok status, and clear other
206 write_4(sc, PHY_CONTROL, PHY_RW_OK);
207 val = PHY_GET_DATA(status);
/*
 * Write a PHY register: same arm-then-poll protocol as phy_read().
 */
215 phy_write(struct ece_softc *sc, int phy, int reg, int data)
219 write_4(sc, PHY_CONTROL, PHY_RW_OK);
220 write_4(sc, PHY_CONTROL,
221 PHY_ADDRESS(phy) | PHY_REGISTER(reg) |
222 PHY_WRITE_COMMAND | PHY_DATA(data));
223 for (ii = 0; ii < 0x1000; ii++) {
224 if (read_4(sc, PHY_CONTROL) & PHY_RW_OK) {
225 /* Clear the rw_ok status, and clear other
228 write_4(sc, PHY_CONTROL, PHY_RW_OK);
/*
 * Identify the attached PHY from the ID register (reg 0x2) of PHY
 * addresses 0 and 1.  The (0xFFFF, ...) combinations indicate only one
 * of the two addresses responded.
 */
234 static int get_phy_type(struct ece_softc *sc)
236 uint16_t phy0_id = 0, phy1_id = 0;
239 * Use SMI (MDC/MDIO) to read Link Partner's PHY Identifier
242 phy0_id = phy_read(sc, 0, 0x2);
243 phy1_id = phy_read(sc, 1, 0x2);
245 if ((phy0_id == 0xFFFF) && (phy1_id == 0x000F))
246 return (ASIX_GIGA_PHY);
247 else if ((phy0_id == 0x0243) && (phy1_id == 0x0243))
248 return (TWO_SINGLE_PHY);
249 else if ((phy0_id == 0xFFFF) && (phy1_id == 0x0007))
250 return (VSC8601_GIGA_PHY);
251 else if ((phy0_id == 0x0243) && (phy1_id == 0xFFFF))
252 return (IC_PLUS_PHY);
254 return (NOT_FOUND_PHY);
/* Newbus probe: unconditionally claims the device with a description. */
258 ece_probe(device_t dev)
261 device_set_desc(dev, "Econa Ethernet Controller");
/*
 * Newbus attach: allocate register window and the four IRQ resources
 * (status, TX complete, RX complete, queue full), set up DMA rings via
 * ece_activate(), create the ifnet, attach the MII bus, hook the four
 * interrupt handlers and start the taskqueue threads.
 * NOTE(review): many error-path lines are elided from this listing;
 * resource cleanup on failure cannot be reviewed here.
 */
267 ece_attach(device_t dev)
269 struct ece_softc *sc;
270 struct ifnet *ifp = NULL;
271 struct sysctl_ctx_list *sctx;
272 struct sysctl_oid *soid;
273 u_char eaddr[ETHER_ADDR_LEN];
280 sc = device_get_softc(dev);
285 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
287 if (sc->mem_res == NULL)
290 power_on_network_interface();
293 sc->irq_res_status = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
295 if (sc->irq_res_status == NULL)
299 /*TSTC: Fm-Switch-Tx-Complete*/
300 sc->irq_res_tx = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
302 if (sc->irq_res_tx == NULL)
306 /*FSRC: Fm-Switch-Rx-Complete*/
307 sc->irq_res_rec = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
309 if (sc->irq_res_rec == NULL)
313 /*FSQF: Fm-Switch-Queue-Full*/
314 sc->irq_res_qf = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
316 if (sc->irq_res_qf == NULL)
319 err = ece_activate(dev);
324 sctx = device_get_sysctl_ctx(dev);
325 soid = device_get_sysctl_tree(dev);
/* Periodic tick callout, synchronized with the main softc mutex. */
329 callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
331 if ((err = ece_get_mac(sc, eaddr)) != 0) {
332 /* No MAC address configured. Generate the random one. */
335 "Generating random ethernet address.\n");
338 /*from if_ae.c/if_ate.c*/
340 * Set OUI to convenient locally assigned address. 'b'
341 * is 0x62, which has the locally assigned bit set, and
342 * the broadcast/multicast bit clear.
347 eaddr[3] = (rnd >> 16) & 0xff;
348 eaddr[4] = (rnd >> 8) & 0xff;
349 eaddr[5] = rnd & 0xff;
351 for (i = 0; i < ETHER_ADDR_LEN; i++)
352 eaddr[i] = vlan0_mac[i];
354 ece_set_mac(sc, eaddr);
355 sc->ifp = ifp = if_alloc(IFT_ETHER);
356 /* Only one PHY at address 0 in this device. */
357 err = mii_attach(dev, &sc->miibus, ifp, ece_ifmedia_upd,
358 ece_ifmedia_sts, BMSR_DEFCAPMASK, 0, MII_OFFSET_ANY, 0);
360 device_printf(dev, "attaching PHYs failed\n");
364 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
365 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
/* Hardware IP/TCP/UDP checksum offload, enabled by default. */
367 ifp->if_capabilities = IFCAP_HWCSUM;
369 ifp->if_hwassist = (CSUM_IP | CSUM_TCP | CSUM_UDP);
370 ifp->if_capenable = ifp->if_capabilities;
371 ifp->if_start = ecestart;
372 ifp->if_ioctl = eceioctl;
373 ifp->if_init = eceinit;
374 ifp->if_snd.ifq_drv_maxlen = ECE_MAX_TX_BUFFERS - 1;
375 IFQ_SET_MAXLEN(&ifp->if_snd, ECE_MAX_TX_BUFFERS - 1);
376 IFQ_SET_READY(&ifp->if_snd);
378 /* Create local taskq. */
380 TASK_INIT(&sc->sc_intr_task, 0, ece_intr_task, sc);
381 TASK_INIT(&sc->sc_tx_task, 1, ece_tx_task, ifp);
382 TASK_INIT(&sc->sc_cleanup_task, 2, ece_cleanup_task, sc);
383 sc->sc_tq = taskqueue_create_fast("ece_taskq", M_WAITOK,
384 taskqueue_thread_enqueue,
386 if (sc->sc_tq == NULL) {
387 device_printf(sc->dev, "could not create taskqueue\n");
391 ether_ifattach(ifp, eaddr);
394 * Activate interrupts
396 err = bus_setup_intr(dev, sc->irq_res_rec, INTR_TYPE_NET | INTR_MPSAFE,
397 NULL, ece_intr, sc, &sc->intrhand);
400 ECE_LOCK_DESTROY(sc);
404 err = bus_setup_intr(dev, sc->irq_res_status,
405 INTR_TYPE_NET | INTR_MPSAFE,
406 NULL, ece_intr_status, sc, &sc->intrhand_status);
409 ECE_LOCK_DESTROY(sc);
413 err = bus_setup_intr(dev, sc->irq_res_qf, INTR_TYPE_NET | INTR_MPSAFE,
414 NULL,ece_intr_qf, sc, &sc->intrhand_qf);
418 ECE_LOCK_DESTROY(sc);
422 err = bus_setup_intr(dev, sc->irq_res_tx, INTR_TYPE_NET | INTR_MPSAFE,
423 NULL, ece_intr_tx, sc, &sc->intrhand_tx);
427 ECE_LOCK_DESTROY(sc);
433 ECE_CLEANUPLOCK_INIT(sc);
/* Mask value 0 unmasks every interrupt source (0 bit = enabled). */
435 /* Enable all interrupt sources. */
436 write_4(sc, INTERRUPT_MASK, 0x00000000);
439 write_4(sc, PORT_0_CONFIG, read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));
441 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
442 device_get_nameunit(sc->dev));
/* Newbus detach (body largely elided from this listing). */
453 ece_detach(device_t dev)
455 struct ece_softc *sc = device_get_softc(dev);
456 struct ifnet *ifp = sc->ifp;
/*
 * bus_dmamap_load() callback: stores the single segment's bus address
 * into the caller-supplied pointer.  The KASSERT documents the
 * single-segment contract.
 */
468 ece_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
471 KASSERT(nsegs == 1, ("wrong number of segments, should be 1"));
473 *paddr = segs->ds_addr;
/*
 * Allocate the TX descriptor ring: a 16-byte-aligned, below-4GB DMA
 * tag + memory for ECE_MAX_TX_BUFFERS descriptors, load it to get the
 * ring's bus address, then create a per-descriptor mbuf tag/maps.
 */
477 ece_alloc_desc_dma_tx(struct ece_softc *sc)
482 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
483 error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */
484 16, 0, /* alignment, boundary */
485 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
486 BUS_SPACE_MAXADDR, /* highaddr */
487 NULL, NULL, /* filtfunc, filtfuncarg */
488 sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS, /* max size */
490 sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,
492 NULL, NULL, /* lockfunc, lockfuncarg */
493 &sc->dmatag_data_tx); /* dmat */
495 /* Allocate memory for TX ring. */
496 error = bus_dmamem_alloc(sc->dmatag_data_tx,
497 (void**)&(sc->desc_tx),
498 BUS_DMA_NOWAIT | BUS_DMA_ZERO |
500 &(sc->dmamap_ring_tx));
/* On alloc failure: report, destroy the tag, and clear the pointer. */
503 if_printf(sc->ifp, "failed to allocate DMA memory\n");
504 bus_dma_tag_destroy(sc->dmatag_data_tx);
505 sc->dmatag_data_tx = 0;
/* Load the ring so ring_paddr_tx holds its bus address. */
510 error = bus_dmamap_load(sc->dmatag_data_tx, sc->dmamap_ring_tx,
512 sizeof(eth_tx_desc_t)*ECE_MAX_TX_BUFFERS,
514 &(sc->ring_paddr_tx), BUS_DMA_NOWAIT);
517 if_printf(sc->ifp, "can't load descriptor\n");
518 bus_dmamem_free(sc->dmatag_data_tx, sc->desc_tx,
521 bus_dma_tag_destroy(sc->dmatag_data_tx);
522 sc->dmatag_data_tx = 0;
526 /* Allocate a busdma tag for mbufs. Alignment is 2 bytes */
527 error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */
528 1, 0, /* alignment, boundary */
529 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
530 BUS_SPACE_MAXADDR, /* highaddr */
531 NULL, NULL, /* filtfunc, filtfuncarg */
532 MCLBYTES*MAX_FRAGMENT, /* maxsize */
533 MAX_FRAGMENT, /* nsegments */
534 MCLBYTES, 0, /* maxsegsz, flags */
535 NULL, NULL, /* lockfunc, lockfuncarg */
536 &sc->dmatag_ring_tx); /* dmat */
539 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
543 for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
544 /* Create dma map for each descriptor. */
545 error = bus_dmamap_create(sc->dmatag_ring_tx, 0,
546 &(sc->tx_desc[i].dmamap));
548 if_printf(sc->ifp, "failed to create map for mbuf\n");
/*
 * Release everything ece_alloc_desc_dma_tx() created: pending mbufs,
 * the loaded/allocated ring, then both tags and the per-buffer maps.
 * Each step is guarded so partial allocations tear down cleanly.
 */
556 ece_free_desc_dma_tx(struct ece_softc *sc)
560 for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
561 if (sc->tx_desc[i].buff) {
562 m_freem(sc->tx_desc[i].buff);
563 sc->tx_desc[i].buff= 0;
567 if (sc->dmamap_ring_tx) {
568 bus_dmamap_unload(sc->dmatag_data_tx, sc->dmamap_ring_tx);
570 bus_dmamem_free(sc->dmatag_data_tx,
571 sc->desc_tx, sc->dmamap_ring_tx);
573 sc->dmamap_ring_tx = 0;
576 if (sc->dmatag_data_tx) {
577 bus_dma_tag_destroy(sc->dmatag_data_tx);
578 sc->dmatag_data_tx = 0;
581 if (sc->dmatag_ring_tx) {
582 for (i = 0; i<ECE_MAX_TX_BUFFERS; i++) {
583 bus_dmamap_destroy(sc->dmatag_ring_tx,
584 sc->tx_desc[i].dmamap);
585 sc->tx_desc[i].dmamap = 0;
587 bus_dma_tag_destroy(sc->dmatag_ring_tx);
588 sc->dmatag_ring_tx = 0;
/*
 * Allocate the RX descriptor ring, mirror of the TX path: ring tag +
 * memory + load, then an mbuf-cluster tag, one map per RX buffer, and
 * a spare map used by ece_new_rxbuf() for map swapping.
 */
593 ece_alloc_desc_dma_rx(struct ece_softc *sc)
598 /* Allocate a busdma tag and DMA safe memory for RX descriptors. */
599 error = bus_dma_tag_create(sc->sc_parent_tag, /* parent */
600 16, 0, /* alignment, boundary */
601 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
602 BUS_SPACE_MAXADDR, /* highaddr */
603 NULL, NULL, /* filtfunc, filtfuncarg */
604 /* maxsize, nsegments */
605 sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 1,
606 /* maxsegsz, flags */
607 sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS, 0,
608 NULL, NULL, /* lockfunc, lockfuncarg */
609 &sc->dmatag_data_rx); /* dmat */
611 /* Allocate RX ring. */
612 error = bus_dmamem_alloc(sc->dmatag_data_rx,
613 (void**)&(sc->desc_rx),
614 BUS_DMA_NOWAIT | BUS_DMA_ZERO |
616 &(sc->dmamap_ring_rx));
619 if_printf(sc->ifp, "failed to allocate DMA memory\n");
/* Load the ring so ring_paddr_rx holds its bus address. */
624 error = bus_dmamap_load(sc->dmatag_data_rx, sc->dmamap_ring_rx,
626 sizeof(eth_rx_desc_t)*ECE_MAX_RX_BUFFERS,
628 &(sc->ring_paddr_rx), BUS_DMA_NOWAIT);
631 if_printf(sc->ifp, "can't load descriptor\n");
632 bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
634 bus_dma_tag_destroy(sc->dmatag_data_rx);
639 /* Allocate a busdma tag for mbufs. */
640 error = bus_dma_tag_create(sc->sc_parent_tag,/* parent */
641 16, 0, /* alignment, boundary */
642 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
643 BUS_SPACE_MAXADDR, /* highaddr */
644 NULL, NULL, /* filtfunc, filtfuncarg */
645 MCLBYTES, 1, /* maxsize, nsegments */
646 MCLBYTES, 0, /* maxsegsz, flags */
647 NULL, NULL, /* lockfunc, lockfuncarg */
648 &sc->dmatag_ring_rx); /* dmat */
651 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
655 for (i = 0; i<ECE_MAX_RX_BUFFERS; i++) {
656 error = bus_dmamap_create(sc->dmatag_ring_rx, 0,
657 &sc->rx_desc[i].dmamap);
659 if_printf(sc->ifp, "failed to create map for mbuf\n");
664 error = bus_dmamap_create(sc->dmatag_ring_rx, 0, &sc->rx_sparemap);
666 if_printf(sc->ifp, "failed to create spare map\n");
/*
 * Release all RX DMA state: queued mbufs, the ring (unload/free/
 * destroy), then the per-buffer maps, the spare map and the mbuf tag.
 */
674 ece_free_desc_dma_rx(struct ece_softc *sc)
678 for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
679 if (sc->rx_desc[i].buff) {
680 m_freem(sc->rx_desc[i].buff);
681 sc->rx_desc[i].buff= 0;
685 if (sc->dmatag_data_rx) {
686 bus_dmamap_unload(sc->dmatag_data_rx, sc->dmamap_ring_rx);
687 bus_dmamem_free(sc->dmatag_data_rx, sc->desc_rx,
689 bus_dma_tag_destroy(sc->dmatag_data_rx);
690 sc->dmatag_data_rx = 0;
691 sc->dmamap_ring_rx = 0;
695 if (sc->dmatag_ring_rx) {
696 for (i = 0; i < ECE_MAX_RX_BUFFERS; i++)
697 bus_dmamap_destroy(sc->dmatag_ring_rx,
698 sc->rx_desc[i].dmamap);
699 bus_dmamap_destroy(sc->dmatag_ring_rx, sc->rx_sparemap);
700 bus_dma_tag_destroy(sc->dmatag_ring_rx);
701 sc->dmatag_ring_rx = 0;
/*
 * Attach a fresh mbuf cluster to one RX descriptor slot.  Loads the
 * new cluster through the spare map, and only after that succeeds
 * unloads/replaces the old buffer — so the old mbuf survives any
 * failure.  The spare map and the slot's map are swapped each call.
 */
706 ece_new_rxbuf(struct ece_softc *sc, struct rx_desc_info* descinfo)
708 struct mbuf *new_mbuf;
709 bus_dma_segment_t seg[1];
715 tag = sc->dmatag_ring_rx;
717 new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
719 if (new_mbuf == NULL)
722 new_mbuf->m_len = new_mbuf->m_pkthdr.len = MCLBYTES;
724 error = bus_dmamap_load_mbuf_sg(tag, sc->rx_sparemap, new_mbuf,
725 seg, &nsegs, BUS_DMA_NOWAIT);
727 KASSERT(nsegs == 1, ("Too many segments returned!"));
729 if (nsegs != 1 || error) {
734 if (descinfo->buff != NULL) {
735 bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_POSTREAD);
736 bus_dmamap_unload(tag, descinfo->dmamap);
739 map = descinfo->dmamap;
740 descinfo->dmamap = sc->rx_sparemap;
741 sc->rx_sparemap = map;
743 bus_dmamap_sync(tag, descinfo->dmamap, BUS_DMASYNC_PREREAD);
745 descinfo->buff = new_mbuf;
746 descinfo->desc->data_ptr = seg->ds_addr;
/* Length shrunk by 2: hardware writes packets at address + 2. */
747 descinfo->desc->length = seg->ds_len - 2;
/*
 * Create the parent DMA tag, build both descriptor rings, zero every
 * TX descriptor and populate every RX descriptor with a cluster via
 * ece_new_rxbuf().  The last descriptor of each ring is presumably
 * marked end-of-ring on the elided lines (781/794) — TODO confirm.
 */
753 ece_allocate_dma(struct ece_softc *sc)
755 eth_tx_desc_t *desctx;
756 eth_rx_desc_t *descrx;
760 /* Create parent tag for tx and rx */
761 error = bus_dma_tag_create(
762 bus_get_dma_tag(sc->dev),/* parent */
763 1, 0, /* alignment, boundary */
764 BUS_SPACE_MAXADDR, /* lowaddr */
765 BUS_SPACE_MAXADDR, /* highaddr */
766 NULL, NULL, /* filter, filterarg */
767 BUS_SPACE_MAXSIZE_32BIT, 0,/* maxsize, nsegments */
768 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
770 NULL, NULL, /* lockfunc, lockarg */
773 ece_alloc_desc_dma_tx(sc);
775 for (i = 0; i < ECE_MAX_TX_BUFFERS; i++) {
776 desctx = (eth_tx_desc_t *)(&sc->desc_tx[i]);
777 memset(desctx, 0, sizeof(eth_tx_desc_t));
778 desctx->length = MAX_PACKET_LEN;
780 if (i == ECE_MAX_TX_BUFFERS - 1)
784 ece_alloc_desc_dma_rx(sc);
786 for (i = 0; i < ECE_MAX_RX_BUFFERS; i++) {
787 descrx = &(sc->desc_rx[i]);
788 memset(descrx, 0, sizeof(eth_rx_desc_t));
789 sc->rx_desc[i].desc = descrx;
790 sc->rx_desc[i].buff = 0;
791 ece_new_rxbuf(sc, &(sc->rx_desc[i]));
793 if (i == ECE_MAX_RX_BUFFERS - 1)
799 sc->desc_curr_tx = 0;
/*
 * Bring the hardware to a known state: snapshot the boot-loader's
 * switch/CPU/port registers (restored on warm boot), disable both MAC
 * ports, allocate DMA rings, program the ring base addresses into the
 * TS/FS descriptor registers, and start the RX (FS) DMA engine.
 */
805 ece_activate(device_t dev)
807 struct ece_softc *sc;
809 uint32_t mac_port_config;
812 sc = device_get_softc(dev);
815 initial_switch_config = read_4(sc, SWITCH_CONFIG);
816 initial_cpu_config = read_4(sc, CPU_PORT_CONFIG);
817 initial_port0_config = read_4(sc, MAC_PORT_0_CONFIG);
818 initial_port1_config = read_4(sc, MAC_PORT_1_CONFIG);
821 mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
822 mac_port_config |= (PORT_DISABLE);
823 write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
826 mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
827 mac_port_config |= (PORT_DISABLE);
828 write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);
830 err = ece_allocate_dma(sc);
832 if_printf(sc->ifp, "failed allocating dma\n");
836 write_4(sc, TS_DESCRIPTOR_POINTER, sc->ring_paddr_tx);
837 write_4(sc, TS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_tx);
839 write_4(sc, FS_DESCRIPTOR_POINTER, sc->ring_paddr_rx);
840 write_4(sc, FS_DESCRIPTOR_BASE_ADDR, sc->ring_paddr_rx);
/* Kick the "from switch" (RX) DMA engine. */
842 write_4(sc, FS_DMA_CONTROL, 1);
/*
 * Undo ece_activate()/attach: tear down interrupt handlers, detach
 * children (miibus), release bus resources, destroy locks, free rings.
 * NOTE(review): mem_res is released as SYS_RES_IOPORT here although it
 * was allocated as SYS_RES_MEMORY in attach — looks inconsistent;
 * verify against the full source.
 */
851 ece_deactivate(device_t dev)
853 struct ece_softc *sc;
855 sc = device_get_softc(dev);
858 bus_teardown_intr(dev, sc->irq_res_rec, sc->intrhand);
863 bus_teardown_intr(dev, sc->irq_res_qf, sc->intrhand_qf);
867 bus_generic_detach(sc->dev);
869 device_delete_child(sc->dev, sc->miibus);
871 bus_release_resource(dev, SYS_RES_IOPORT,
872 rman_get_rid(sc->mem_res), sc->mem_res);
876 bus_release_resource(dev, SYS_RES_IRQ,
877 rman_get_rid(sc->irq_res_rec), sc->irq_res_rec);
880 bus_release_resource(dev, SYS_RES_IRQ,
881 rman_get_rid(sc->irq_res_qf), sc->irq_res_qf);
884 bus_release_resource(dev, SYS_RES_IRQ,
885 rman_get_rid(sc->irq_res_status), sc->irq_res_status);
889 sc->irq_res_status = 0;
890 ECE_TXLOCK_DESTROY(sc);
891 ECE_RXLOCK_DESTROY(sc);
893 ece_free_desc_dma_tx(sc);
894 ece_free_desc_dma_rx(sc);
900 * Change media according to request.
/* ifmedia "set" hook: delegate the media change to the MII layer. */
903 ece_ifmedia_upd(struct ifnet *ifp)
905 struct ece_softc *sc = ifp->if_softc;
906 struct mii_data *mii;
909 mii = device_get_softc(sc->miibus);
911 error = mii_mediachg(mii);
917 * Notify the world which media we're using.
/* ifmedia "status" hook: report active media/status from the MII data. */
920 ece_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
922 struct ece_softc *sc = ifp->if_softc;
923 struct mii_data *mii;
925 mii = device_get_softc(sc->miibus);
928 ifmr->ifm_active = mii->mii_media_active;
929 ifmr->ifm_status = mii->mii_media_status;
/*
 * Periodic tick (callout body; the function header is elided from this
 * listing): samples MII state and re-arms itself every second.
 */
936 struct ece_softc *sc = xsc;
937 struct mii_data *mii;
940 mii = device_get_softc(sc->miibus);
941 active = mii->mii_media_active;
945 * Schedule another timeout one second from now.
947 callout_reset(&sc->tick_ch, hz, ece_tick, sc);
/*
 * Read one entry from the hardware ARL (MAC) table.  Issues a
 * "first" (0x1) or "next" (0x2) command through the three access
 * registers, polls (bounded) for completion, then decodes the raw
 * words via struct arl_table_entry_t overlay.  Returns the table_end
 * flag; copies the MAC out when mac_result is requested.
 */
951 read_mac_entry(struct ece_softc *ec,
956 struct arl_table_entry_t entry;
958 write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
959 write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
960 write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
962 write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x1);
964 write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0x2);
966 for (ii = 0; ii < 0x1000; ii++)
967 if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) & (0x1))
970 entry_val = (uint32_t*) (&entry);
971 entry_val[0] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_1);
972 entry_val[1] = read_4(ec, ARL_TABLE_ACCESS_CONTROL_2);
975 memcpy(mac_result, entry.mac_addr, ETHER_ADDR_LEN);
977 return (entry.table_end);
/*
 * Write one ARL table entry: populate the bit-field struct, push its
 * two raw words into the access registers, issue ARL_WRITE_COMMAND and
 * poll (bounded) for ARL_COMMAND_COMPLETE.  Returns 1 on success; the
 * timeout return value is on an elided line.
 */
981 write_arl_table_entry(struct ece_softc *ec,
987 const uint8_t *mac_addr)
991 struct arl_table_entry_t entry;
993 memset(&entry, 0, sizeof(entry));
995 entry.filter = filter;
996 entry.vlan_mac = vlan_mac;
997 entry.vlan_gid = vlan_gid;
998 entry.age_field = age_field;
999 entry.port_map = port_map;
1000 memcpy(entry.mac_addr, mac_addr, ETHER_ADDR_LEN);
1002 entry_val = (uint32_t*) (&entry);
1004 write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, 0);
1005 write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, 0);
1006 write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, 0);
1008 write_4(ec, ARL_TABLE_ACCESS_CONTROL_1, entry_val[0]);
1009 write_4(ec, ARL_TABLE_ACCESS_CONTROL_2, entry_val[1]);
1011 write_4(ec, ARL_TABLE_ACCESS_CONTROL_0, ARL_WRITE_COMMAND);
1013 for (ii = 0; ii < 0x1000; ii++)
1014 if (read_4(ec, ARL_TABLE_ACCESS_CONTROL_1) &
1015 ARL_COMMAND_COMPLETE)
1016 return (1); /* Write OK. */
/* Erase an ARL entry by rewriting it with INVALID_ENTRY age. */
1023 remove_mac_entry(struct ece_softc *sc,
1027 /* Invalid age_field mean erase this entry. */
1028 write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
1029 INVALID_ENTRY, VLAN0_GROUP,
/* Insert an ARL entry with NEW_ENTRY age in VLAN group 0. */
1034 add_mac_entry(struct ece_softc *sc,
1038 write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
1039 NEW_ENTRY, VLAN0_GROUP,
1044 * The behavior of ARL table reading and deletion is not well defined
1045 * in the documentation. To be safe, all mac addresses are put to a
1046 * list, then deleted.
/*
 * Walk the whole ARL table, collect each MAC into a singly-linked
 * malloc'd list (optionally skipping our own vlan0_mac), then delete
 * every collected entry and free the list nodes.
 */
1050 clear_mac_entries(struct ece_softc *ec, int include_this_mac)
1053 struct mac_list * temp;
1054 struct mac_list * mac_list_header;
1055 struct mac_list * current;
1056 char mac[ETHER_ADDR_LEN];
1059 mac_list_header = 0;
1061 table_end = read_mac_entry(ec, mac, 1);
1062 while (!table_end) {
1063 if (!include_this_mac &&
1064 memcmp(mac, vlan0_mac, ETHER_ADDR_LEN) == 0) {
1065 /* Read next entry. */
1066 table_end = read_mac_entry(ec, mac, 0);
1070 temp = (struct mac_list*)malloc(sizeof(struct mac_list),
1073 memcpy(temp->mac_addr, mac, ETHER_ADDR_LEN);
1075 if (mac_list_header) {
1076 current->next = temp;
1079 mac_list_header = temp;
1082 /* Read next Entry */
1083 table_end = read_mac_entry(ec, mac, 0);
/* Second pass: remove every collected entry, freeing as we go. */
1086 current = mac_list_header;
1089 remove_mac_entry(ec, current->mac_addr);
1091 current = current->next;
1092 free(temp, M_DEVBUF);
/*
 * Configure the switch for NIC mode on the LAN port: fast aging + IVL
 * learning on, hardware NAT off, L2 lookup skipped on both ports.
 * Warns if MAC port 0 reports link down.  phy_type is accepted but not
 * used on the lines visible here.
 */
1097 configure_lan_port(struct ece_softc *sc, int phy_type)
1100 uint32_t mac_port_config;
1105 sw_config = read_4(sc, SWITCH_CONFIG);
1106 /* Enable fast aging. */
1107 sw_config |= FAST_AGING;
1108 /* Enable IVL learning. */
1109 sw_config |= IVL_LEARNING;
1110 /* Disable hardware NAT. */
1111 sw_config &= ~(HARDWARE_NAT);
1113 sw_config |= SKIP_L2_LOOKUP_PORT_0 | SKIP_L2_LOOKUP_PORT_1| NIC_MODE;
1115 write_4(sc, SWITCH_CONFIG, sw_config);
1117 sw_config = read_4(sc, SWITCH_CONFIG);
1119 mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
1121 if (!(mac_port_config & 0x1) || (mac_port_config & 0x2))
1122 if_printf(sc->ifp, "Link Down\n");
1124 write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
/*
 * Program the port-default VLAN IDs: 3-bit fields in VLAN_PORT_PVID at
 * bit 0 (port 0), bit 4 (port 1) and bit 8 (CPU port).  Each field is
 * cleared with one read-modify-write and set with a second.
 */
1129 set_pvid(struct ece_softc *sc, int port0, int port1, int cpu)
1132 val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 0));
1133 write_4(sc, VLAN_PORT_PVID, val);
1134 val = read_4(sc, VLAN_PORT_PVID) | ((port0) & 0x07);
1135 write_4(sc, VLAN_PORT_PVID, val);
1136 val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 4));
1137 write_4(sc, VLAN_PORT_PVID, val);
1138 val = read_4(sc, VLAN_PORT_PVID) | (((port1) & 0x07) << 4);
1139 write_4(sc, VLAN_PORT_PVID, val);
1141 val = read_4(sc, VLAN_PORT_PVID) & (~(0x7 << 8));
1142 write_4(sc, VLAN_PORT_PVID, val);
1143 val = read_4(sc, VLAN_PORT_PVID) | (((cpu) & 0x07) << 8);
1144 write_4(sc, VLAN_PORT_PVID, val);
1148 /* VLAN related functions */
/*
 * Program the 12-bit VID for one of the eight VLAN groups; two groups
 * share each register (low half at bit 0, high half at bit 12).  The
 * regs[]/vids[] table contents are on elided lines.
 */
1150 set_vlan_vid(struct ece_softc *sc, int vlan)
1152 const uint32_t regs[] = {
1163 const int vids[] = {
1182 val = read_4(sc, reg);
1183 write_4(sc, reg, val & (~(0xFFF << 0)));
1184 val = read_4(sc, reg);
1185 write_4(sc, reg, val|((vid & 0xFFF) << 0));
1187 val = read_4(sc, reg);
1188 write_4(sc, reg, val & (~(0xFFF << 12)));
1189 val = read_4(sc, reg);
1190 write_4(sc, reg, val|((vid & 0xFFF) << 12));
/*
 * Program a VLAN group's 3-bit member-port mask into
 * VLAN_MEMBER_PORT_MAP at the group's shift (shift derivation is on an
 * elided line).
 */
1195 set_vlan_member(struct ece_softc *sc, int vlan)
1197 unsigned char shift;
1200 const int groups[] = {
1211 group = groups[vlan];
1214 val = read_4(sc, VLAN_MEMBER_PORT_MAP) & (~(0x7 << shift));
1215 write_4(sc, VLAN_MEMBER_PORT_MAP, val);
1216 val = read_4(sc, VLAN_MEMBER_PORT_MAP);
1217 write_4(sc, VLAN_MEMBER_PORT_MAP, val | ((group & 0x7) << shift));
/*
 * Program a VLAN group's 3-bit tagging mask into VLAN_TAG_PORT_MAP,
 * same clear-then-set pattern as set_vlan_member().
 */
1221 set_vlan_tag(struct ece_softc *sc, int vlan)
1223 unsigned char shift;
1229 val = read_4(sc, VLAN_TAG_PORT_MAP) & (~(0x7 << shift));
1230 write_4(sc, VLAN_TAG_PORT_MAP, val);
1231 val = read_4(sc, VLAN_TAG_PORT_MAP);
1232 write_4(sc, VLAN_TAG_PORT_MAP, val | ((tag & 0x7) << shift));
/*
 * Configure the CPU-facing switch port: disable SA learning, install
 * the static vlan0_mac ARL entry, set PVIDs and all eight VLAN groups,
 * mask/ack all interrupt sources and stop both DMA engines.
 */
1236 configure_cpu_port(struct ece_softc *sc)
1238 uint32_t cpu_port_config;
1241 cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
1242 /* SA learning Disable */
1243 cpu_port_config |= (SA_LEARNING_DISABLE);
1244 /* set data offset + 2 */
1245 cpu_port_config &= ~(1 << 31);
1247 write_4(sc, CPU_PORT_CONFIG, cpu_port_config);
1249 if (!write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
1250 STATIC_ENTRY, VLAN0_GROUP,
1254 set_pvid(sc, PORT0_PVID, PORT1_PVID, CPU_PORT_PVID);
1256 for (i = 0; i < 8; i++) {
1257 set_vlan_vid(sc, i);
1258 set_vlan_member(sc, i);
1259 set_vlan_tag(sc, i);
1262 /* disable all interrupt status sources */
1263 write_4(sc, INTERRUPT_MASK, 0xffff1fff);
1265 /* clear previous interrupt sources */
1266 write_4(sc, INTERRUPT_STATUS, 0x00001FFF);
1268 write_4(sc, TS_DMA_CONTROL, 0);
1269 write_4(sc, FS_DMA_CONTROL, 0);
/*
 * One-time hardware bring-up: detect the PHY and refuse anything other
 * than IC_PLUS_PHY, then configure the LAN and CPU switch ports.
 * NOTE(review): gw_phy_type is 'static' (function-local persistent) —
 * intent unclear from the visible lines; verify against full source.
 */
1274 hardware_init(struct ece_softc *sc)
1277 static int gw_phy_type;
1279 gw_phy_type = get_phy_type(sc);
1280 /* Currently only ic_plus phy is supported. */
1281 if (gw_phy_type != IC_PLUS_PHY) {
1282 device_printf(sc->dev, "PHY type is not supported (%d)\n",
1286 status = configure_lan_port(sc, gw_phy_type);
1287 configure_cpu_port(sc);
/*
 * Replace the station MAC in the ARL table: erase the old vlan0_mac
 * entry, cache the new address, then install it as a STATIC_ENTRY.
 */
1292 set_mac_address(struct ece_softc *sc, const char *mac, int mac_len)
1295 /* Invalid age_field mean erase this entry. */
1296 write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
1297 INVALID_ENTRY, VLAN0_GROUP,
1299 memcpy(vlan0_mac, mac, ETHER_ADDR_LEN);
1301 write_arl_table_entry(sc, 0, 1, VLAN0_GROUP_ID,
1302 STATIC_ENTRY, VLAN0_GROUP,
/* Public wrapper: update vlan0_mac and push it into the ARL table. */
1307 ece_set_mac(struct ece_softc *sc, u_char *eaddr)
1309 memcpy(vlan0_mac, eaddr, ETHER_ADDR_LEN);
1310 set_mac_address(sc, eaddr, ETHER_ADDR_LEN);
1314 * TODO: the device doesn't have MAC stored, we should read the
1315 * configuration stored in FLASH, but the format depends on the
/* Always fails today (no stored MAC); attach falls back to random. */
1319 ece_get_mac(struct ece_softc *sc, u_char *eaddr)
/*
 * RX processing: derive the hardware's current ring index from the FS
 * descriptor pointer, compute how many descriptors are ready, then for
 * each one validate the length, swap in a fresh cluster, fix up the
 * mbuf (hardware stores the frame at data address + 2), optionally
 * mark IP checksum as verified, and hand the packet to if_input.
 */
1325 ece_intr_rx_locked(struct ece_softc *sc, int count)
1327 struct ifnet *ifp = sc->ifp;
1329 struct rx_desc_info *rxdesc;
1330 eth_rx_desc_t *desc;
1339 fssd_curr = read_4(sc, FS_DESCRIPTOR_POINTER);
/* Descriptors are 16 bytes: (bus addr - ring base) >> 4 = index. */
1341 fssd = (fssd_curr - (uint32_t)sc->ring_paddr_rx)>>4;
1343 desc = sc->rx_desc[sc->last_rx].desc;
1345 /* Prepare to read the data in the ring. */
1346 bus_dmamap_sync(sc->dmatag_ring_rx,
1348 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
1350 if (fssd > sc->last_rx)
1351 rxcount = fssd - sc->last_rx;
1352 else if (fssd < sc->last_rx)
1353 rxcount = (ECE_MAX_RX_BUFFERS - sc->last_rx) + fssd;
/* fssd == last_rx: either nothing ready or a full ring (cown test). */
1355 if (desc->cown == 0)
1358 rxcount = ECE_MAX_RX_BUFFERS;
1361 for (i= 0; i < rxcount; i++) {
1362 status = desc->cown;
1367 rxdesc = &sc->rx_desc[idx];
1370 if (desc->length < ETHER_MIN_LEN - ETHER_CRC_LEN ||
1371 desc->length > ETHER_MAX_LEN - ETHER_CRC_LEN +
1372 ETHER_VLAN_ENCAP_LEN) {
1375 desc->length = MCLBYTES - 2;
1376 /* Invalid packet, skip and process next
/* Refill failure: recycle the descriptor, drop this packet. */
1382 if (ece_new_rxbuf(sc, rxdesc) != 0) {
1385 desc->length = MCLBYTES - 2;
1390 * The device will write to addrress + 2 So we need to adjust
1391 * the address after the packet is received.
1394 mb->m_len = mb->m_pkthdr.len = desc->length;
1396 mb->m_flags |= M_PKTHDR;
1397 mb->m_pkthdr.rcvif = ifp;
1398 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1399 /*check for valid checksum*/
1400 if ( (!desc->l4f) && (desc->prot != 3)) {
1401 mb->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1402 mb->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1403 mb->m_pkthdr.csum_data = 0xffff;
1407 (*ifp->if_input)(ifp, mb);
1411 desc->length = MCLBYTES - 2;
1413 bus_dmamap_sync(sc->dmatag_ring_rx,
1415 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1417 if (sc->last_rx == ECE_MAX_RX_BUFFERS - 1)
1422 desc = sc->rx_desc[sc->last_rx].desc;
1425 /* Sync updated flags. */
1426 bus_dmamap_sync(sc->dmatag_ring_rx,
1428 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Taskqueue body: drain RX with no descriptor limit (-1). */
1434 ece_intr_task(void *arg, int pending __unused)
1436 struct ece_softc *sc = arg;
1438 ece_intr_rx_locked(sc, -1);
/*
 * RX-complete interrupt (filterless handler): if the interface is not
 * running, stop RX DMA; otherwise defer to the intr task, and kick the
 * TX task if the send queue has work.
 */
1445 struct ece_softc *sc = xsc;
1446 struct ifnet *ifp = sc->ifp;
1448 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1449 write_4(sc, FS_DMA_CONTROL, 0);
1453 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
1455 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1456 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
/* Status/error interrupt: read+ack INTERRUPT_STATUS, act on errors. */
1460 ece_intr_status(void *xsc)
1462 struct ece_softc *sc = xsc;
1463 struct ifnet *ifp = sc->ifp;
1466 stat = read_4(sc, INTERRUPT_STATUS);
1468 write_4(sc, INTERRUPT_STATUS, stat);
1470 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1471 if ((stat & ERROR_MASK) != 0)
/*
 * Reclaim completed TX descriptors between tx_cons and tx_prod.
 * A descriptor with cown != 0 has been handed back by the hardware,
 * so its DMA map can be synced and unloaded for reuse.  Caller holds
 * the cleanup lock (see ece_cleanup_task()).
 */
1477 ece_cleanup_locked(struct ece_softc *sc)
1479 eth_tx_desc_t *desc;
/* Nothing in flight: ring is empty. */
1481 if (sc->tx_cons == sc->tx_prod) return;
1483 /* Prepare to read the ring (owner bit). */
1484 bus_dmamap_sync(sc->dmatag_ring_tx,
1486 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1488 while (sc->tx_cons != sc->tx_prod) {
1489 desc = sc->tx_desc[sc->tx_cons].desc;
/* cown set means the hardware is done with this descriptor. */
1490 if (desc->cown != 0) {
1491 struct tx_desc_info *td = &(sc->tx_desc[sc->tx_cons]);
1492 /* We are finished with this descriptor ... */
1493 bus_dmamap_sync(sc->dmatag_data_tx, td->dmamap,
1494 BUS_DMASYNC_POSTWRITE);
1495 /* ... and unload, so we can reuse. */
1496 bus_dmamap_unload(sc->dmatag_data_tx, td->dmamap);
/* Advance the consumer index, wrapping at ring size. */
1499 sc->tx_cons = (sc->tx_cons + 1) % ECE_MAX_TX_BUFFERS;
/*
 * Taskqueue wrapper: take the cleanup lock and reclaim finished TX
 * descriptors via ece_cleanup_locked().
 */
1508 ece_cleanup_task(void *arg, int pending __unused)
1510 struct ece_softc *sc = arg;
1511 ECE_CLEANUPLOCK(sc);
1512 ece_cleanup_locked(sc);
1513 ECE_CLEANUPUNLOCK(sc);
/*
 * TX-complete interrupt handler: defers descriptor reclamation to the
 * cleanup task.  If the interface is not running, stop RX DMA instead.
 */
1517 ece_intr_tx(void *xsc)
1519 struct ece_softc *sc = xsc;
1520 struct ifnet *ifp = sc->ifp;
1521 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1522 /* This should not happen, stop DMA. */
1523 write_4(sc, FS_DMA_CONTROL, 0);
1526 taskqueue_enqueue(sc->sc_tq, &sc->sc_cleanup_task);
/*
 * Queue-full interrupt handler: schedule the RX task to drain the ring,
 * then re-enable the free-slot (FS) DMA engine so reception continues.
 */
1530 ece_intr_qf(void *xsc)
1532 struct ece_softc *sc = xsc;
1533 struct ifnet *ifp = sc->ifp;
1534 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1535 /* This should not happen, stop DMA. */
1536 write_4(sc, FS_DMA_CONTROL, 0);
1539 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
/* Restart RX DMA now that the ring will be drained. */
1540 write_4(sc, FS_DMA_CONTROL, 1);
1544 * Reset and initialize the chip
/*
 * Caller holds the softc lock (locked variant).  Programs switch/port
 * registers to known defaults, enables MAC port 0 and the CPU port,
 * marks the interface running, starts RX DMA, and arms the tick callout.
 */
1547 eceinit_locked(void *xsc)
1549 struct ece_softc *sc = xsc;
1550 struct ifnet *ifp = sc->ifp;
1551 struct mii_data *mii;
1553 uint32_t cpu_port_config;
1554 uint32_t mac_port_config;
/* Bit 17 of BIST_RESULT_TEST_0 is checked first -- the consequent is
 * elided in this extract; presumably a self-test pass/fail gate. */
1557 cfg_reg = read_4(sc, BIST_RESULT_TEST_0);
1558 if ((cfg_reg & (1<<17)))
1562 /* Set to default values. */
1563 write_4(sc, SWITCH_CONFIG, 0x007AA7A1);
1564 write_4(sc, MAC_PORT_0_CONFIG, 0x00423D00);
1565 write_4(sc, MAC_PORT_1_CONFIG, 0x00423D80);
1566 write_4(sc, CPU_PORT_CONFIG, 0x004C0000);
/* Enable MAC port 0 by clearing its disable bit. */
1570 mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
1573 mac_port_config &= (~(PORT_DISABLE));
1574 write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
/* Likewise enable the CPU port. */
1576 cpu_port_config = read_4(sc, CPU_PORT_CONFIG);
1578 cpu_port_config &= ~(PORT_DISABLE);
1579 write_4(sc, CPU_PORT_CONFIG, cpu_port_config);
1582 * Set 'running' flag, and clear output active flag
1583 * and attempt to start the output
1585 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1586 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1588 mii = device_get_softc(sc->miibus);
/* Start the RX (free-slot) DMA engine. */
1591 write_4(sc, FS_DMA_CONTROL, 1);
/* Periodic housekeeping via ece_tick(), once per second. */
1593 callout_reset(&sc->tick_ch, hz, ece_tick, sc);
/*
 * Map mbuf chain m0 for DMA and fill TX descriptors for each segment.
 * Returns 0 on success; on bus_dmamap_load failure returns the error
 * (or -1 when the load "succeeded" with an unusable result).  The
 * ownership-flag handoff to hardware happens in the final loop so the
 * chip never sees a partially built chain.
 */
1597 ece_encap(struct ece_softc *sc, struct mbuf *m0)
1600 bus_dma_segment_t segs[MAX_FRAGMENT];
1602 eth_tx_desc_t *desc = 0;
1611 /* Fetch unused map */
1612 mapp = sc->tx_desc[sc->tx_prod].dmamap;
1614 error = bus_dmamap_load_mbuf_sg(sc->dmatag_ring_tx, mapp,
/* Load failed (or unusable): release the map and report. */
1619 bus_dmamap_unload(sc->dmatag_ring_tx, mapp);
1620 return ((error != 0) ? error : -1);
1623 desc = &(sc->desc_tx[sc->desc_curr_tx]);
1624 sc->tx_desc[sc->tx_prod].desc = desc;
1625 sc->tx_desc[sc->tx_prod].buff = m0;
/* Remember where this packet's descriptors start for the final pass. */
1626 desc_no = sc->desc_curr_tx;
1628 for (seg = 0; seg < nsegs; seg++) {
/* cown == 0 means the hardware still owns this slot -- ring overrun. */
1629 if (desc->cown == 0 ) {
1630 if_printf(ifp, "ERROR: descriptor is still used\n");
1634 desc->length = segs[seg].ds_len;
1635 desc->data_ptr = segs[seg].ds_addr;
/* Last segment: set per-packet flags (checksum offload, interrupt). */
1642 if (seg == nsegs - 1) {
1648 csum_flags = m0->m_pkthdr.csum_flags;
1656 desc->interrupt = 1;
1658 if (csum_flags & CSUM_IP) {
1660 if (csum_flags & CSUM_TCP)
1662 if (csum_flags & CSUM_UDP)
/* Advance descriptor index, wrapping to the ring start. */
1667 sc->desc_curr_tx = (sc->desc_curr_tx + 1) % ECE_MAX_TX_BUFFERS;
1668 if (sc->desc_curr_tx == 0) {
1669 desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
1673 desc = sc->tx_desc[sc->tx_prod].desc;
1675 sc->tx_prod = (sc->tx_prod + 1) % ECE_MAX_TX_BUFFERS;
1678 * After all descriptors are set, we set the flags to start the
/* Second pass: flip ownership to the hardware, segment by segment. */
1681 for (seg = 0; seg < nsegs; seg++) {
1684 desc_no = (desc_no + 1) % ECE_MAX_TX_BUFFERS;
1686 desc = (eth_tx_desc_t *)&(sc->desc_tx[0]);
1689 bus_dmamap_sync(sc->dmatag_data_tx, mapp, BUS_DMASYNC_PREWRITE);
1694 * Dequeue packets and transmit.
/*
 * Caller holds the TX lock.  Pulls mbufs off the interface send queue,
 * encapsulates each via ece_encap(); on encap failure the mbuf is put
 * back and OACTIVE is set so upper layers back off.  Finally syncs the
 * ring and kicks the TX DMA engine.
 */
1697 ecestart_locked(struct ifnet *ifp)
1699 struct ece_softc *sc;
1701 uint32_t queued = 0;
/* Not running, or already marked output-active: nothing to do. */
1704 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1708 bus_dmamap_sync(sc->dmatag_ring_tx,
1710 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1713 /* Get packet from the queue */
1714 IF_DEQUEUE(&ifp->if_snd, m0);
/* Encap failed (e.g. ring full): requeue and stall output. */
1717 if (ece_encap(sc, m0)) {
1718 IF_PREPEND(&ifp->if_snd, m0);
1719 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* Publish descriptor updates to the device, then start TX DMA. */
1726 bus_dmamap_sync(sc->dmatag_ring_tx, sc->dmamap_ring_tx,
1727 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1728 write_4(sc, TS_DMA_CONTROL, 1);
/*
 * NOTE(review): only this line of the function survives in the extract;
 * by position it belongs to eceinit(), the unlocked wrapper around
 * eceinit_locked() -- confirm against the full source.
 */
1735 struct ece_softc *sc = xsc;
/*
 * Taskqueue handler for deferred transmission: arg is the ifnet; the
 * elided body presumably takes the TX lock and calls ecestart_locked().
 */
1742 ece_tx_task(void *arg, int pending __unused)
1745 ifp = (struct ifnet *)arg;
/*
 * if_start entry point: locking wrapper around ecestart_locked().
 * (Lock acquire/release lines are elided in this extract.)
 */
1750 ecestart(struct ifnet *ifp)
1752 struct ece_softc *sc = ifp->if_softc;
1754 ecestart_locked(ifp);
1759 * Turn off interrupts, and stop the nic. Can be called with sc->ifp
1760 * NULL so be careful.
/*
 * Stops both DMA engines, cancels the tick callout, disables both MAC
 * ports, masks and clears all interrupt sources, and restores the
 * switch/port configuration registers captured at attach time.
 */
1763 ecestop(struct ece_softc *sc)
1765 struct ifnet *ifp = sc->ifp;
1766 uint32_t mac_port_config;
/* Halt TX and RX DMA first so no descriptors change under us. */
1768 write_4(sc, TS_DMA_CONTROL, 0);
1769 write_4(sc, FS_DMA_CONTROL, 0);
1772 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1774 callout_stop(&sc->tick_ch);
/* Disable MAC port 0. */
1777 mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
1778 mac_port_config |= (PORT_DISABLE);
1779 write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
/* Disable MAC port 1. */
1782 mac_port_config = read_4(sc, MAC_PORT_1_CONFIG);
1783 mac_port_config |= (PORT_DISABLE);
1784 write_4(sc, MAC_PORT_1_CONFIG, mac_port_config);
1786 /* Disable all interrupt status sources. */
1787 write_4(sc, INTERRUPT_MASK, 0x00001FFF);
1789 /* Clear previous interrupt sources. */
1790 write_4(sc, INTERRUPT_STATUS, 0x00001FFF);
/* Restore the register values saved before the driver took over. */
1792 write_4(sc, SWITCH_CONFIG, initial_switch_config);
1793 write_4(sc, CPU_PORT_CONFIG, initial_cpu_config);
1794 write_4(sc, MAC_PORT_0_CONFIG, initial_port0_config);
1795 write_4(sc, MAC_PORT_1_CONFIG, initial_port1_config);
1797 clear_mac_entries(sc, 1);
/*
 * Recover after an error interrupt (see ece_intr_status()): mark the
 * interface running, re-enable port 0, unmask all interrupts, restart
 * RX DMA, and re-arm the tick callout.
 */
1801 ece_restart(struct ece_softc *sc)
1803 struct ifnet *ifp = sc->ifp;
1805 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1806 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1807 /* Enable port 0. */
1808 write_4(sc, PORT_0_CONFIG,
1809 read_4(sc, PORT_0_CONFIG) & ~(PORT_DISABLE));
/* Unmask every interrupt source. */
1810 write_4(sc, INTERRUPT_MASK, 0x00000000);
1811 write_4(sc, FS_DMA_CONTROL, 1);
1812 callout_reset(&sc->tick_ch, hz, ece_tick, sc);
/*
 * Program the receive filter: clear the MAC table, then either accept
 * all broadcast/multicast (ALLMULTI/PROMISC) or walk the interface's
 * multicast list installing each link-level address.
 */
1816 set_filter(struct ece_softc *sc)
1819 struct ifmultiaddr *ifma;
1820 uint32_t mac_port_config;
1824 clear_mac_entries(sc, 0);
1825 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
/* Promiscuous-ish mode: stop dropping broadcast and multicast. */
1826 mac_port_config = read_4(sc, MAC_PORT_0_CONFIG);
1827 mac_port_config &= ~(DISABLE_BROADCAST_PACKET);
1828 mac_port_config &= ~(DISABLE_MULTICAST_PACKET);
1829 write_4(sc, MAC_PORT_0_CONFIG, mac_port_config);
/* Install each multicast group address into the MAC table. */
1832 if_maddr_rlock(ifp);
1833 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1834 if (ifma->ifma_addr->sa_family != AF_LINK)
1837 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1839 if_maddr_runlock(ifp);
/*
 * if_ioctl entry point.  The switch statement and several case labels
 * are elided in this extract; the surviving lines show the SIOCSIFFLAGS
 * up/down transitions, media ioctls forwarded to mii, capability
 * negotiation, and the ether_ioctl() fallback.
 */
1843 eceioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1845 struct ece_softc *sc = ifp->if_softc;
1846 struct mii_data *mii;
1847 struct ifreq *ifr = (struct ifreq *)data;
1848 int mask, error = 0;
/* Interface brought down while running: clear RUNNING (stop elided). */
1853 if ((ifp->if_flags & IFF_UP) == 0 &&
1854 ifp->if_drv_flags & IFF_DRV_RUNNING) {
1855 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1858 /* Reinitialize card on any parameter change. */
1859 if ((ifp->if_flags & IFF_UP) &&
1860 !(ifp->if_drv_flags & IFF_DRV_RUNNING))
/* Media ioctls (SIOCGIFMEDIA/SIOCSIFMEDIA, presumably) go to mii. */
1875 mii = device_get_softc(sc->miibus);
1876 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
/* SIOCSIFCAP: mask holds the capability bits being toggled. */
1879 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1880 if (mask & IFCAP_VLAN_MTU) {
1885 error = ether_ioctl(ifp, cmd, data);
/*
 * Bus callback: a child device went away; forget our miibus pointer if
 * that is the child that detached.
 */
1892 ece_child_detached(device_t dev, device_t child)
1894 struct ece_softc *sc;
1896 sc = device_get_softc(dev);
1897 if (child == sc->miibus)
1902 * MII bus support routines.
/* Read PHY register 'reg' on PHY 'phy' via the softc's phy_read(). */
1905 ece_miibus_readreg(device_t dev, int phy, int reg)
1907 struct ece_softc *sc;
1908 sc = device_get_softc(dev);
1909 return (phy_read(sc, phy, reg));
/* Write 'data' to PHY register 'reg' on PHY 'phy' via phy_write(). */
1913 ece_miibus_writereg(device_t dev, int phy, int reg, int data)
1915 struct ece_softc *sc;
1916 sc = device_get_softc(dev);
1917 phy_write(sc, phy, reg, data);
/* Newbus method table for the ece driver (terminator elided in this
 * extract -- a DEVMETHOD_END presumably follows). */
static device_method_t ece_methods[] = {
1922 /* Device interface */
1923 DEVMETHOD(device_probe, ece_probe),
1924 DEVMETHOD(device_attach, ece_attach),
1925 DEVMETHOD(device_detach, ece_detach),
/* Bus interface: notified when a child (the miibus) detaches. */
1928 DEVMETHOD(bus_child_detached, ece_child_detached),
/* MII interface: PHY register access for the attached miibus. */
1931 DEVMETHOD(miibus_readreg, ece_miibus_readreg),
1932 DEVMETHOD(miibus_writereg, ece_miibus_writereg),
/* Driver declaration: name/methods lines are elided in this extract. */
static driver_t ece_driver = {
1940 sizeof(struct ece_softc),
/* Register the driver on the econaarm bus, attach miibus beneath it,
 * and declare module dependencies on miibus and ether. */
1943 DRIVER_MODULE(ece, econaarm, ece_driver, ece_devclass, 0, 0);
1944 DRIVER_MODULE(miibus, ece, miibus_driver, miibus_devclass, 0, 0);
1945 MODULE_DEPEND(ece, miibus, 1, 1, 1);
1946 MODULE_DEPEND(ece, ether, 1, 1, 1);