2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * TI Common Platform Ethernet Switch (CPSW) Driver
29 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
31 * This controller is documented in the AM335x Technical Reference
32 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
33 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
35 * It is basically a single Ethernet port (port 0) wired internally to
36 * a 3-port store-and-forward switch connected to two independent
37 * "sliver" controllers (port 1 and port 2). You can operate the
38 * controller in a variety of different ways by suitably configuring
39 * the slivers and the Address Lookup Engine (ALE) that routes packets
42 * This code was developed and tested on a BeagleBone with
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/endian.h>
54 #include <sys/mutex.h>
55 #include <sys/kernel.h>
56 #include <sys/module.h>
57 #include <sys/socket.h>
58 #include <sys/sysctl.h>
60 #include <net/ethernet.h>
63 #include <net/if_arp.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_types.h>
67 #include <net/if_vlan_var.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in.h>
71 #include <netinet/ip.h>
73 #include <sys/sockio.h>
75 #include <machine/bus.h>
77 #include <machine/resource.h>
79 #include <dev/mii/mii.h>
80 #include <dev/mii/miivar.h>
82 #include <dev/fdt/fdt_common.h>
83 #include <dev/ofw/ofw_bus.h>
84 #include <dev/ofw/ofw_bus_subr.h>
86 #include "if_cpswreg.h"
87 #include "if_cpswvar.h"
89 #include <arm/ti/ti_scm.h>
91 #include "miibus_if.h"
/*
 * Forward declarations for the driver's internal functions.
 * NOTE(review): the leading numbers on each line are a stale line-number
 * gutter from the extraction, and gaps in that numbering show that some
 * original lines are missing from this view.
 */
93 /* Device probe/attach/detach. */
94 static int cpsw_probe(device_t);
95 static void cpsw_init_slots(struct cpsw_softc *);
96 static int cpsw_attach(device_t);
97 static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
98 static int cpsw_detach(device_t);
100 /* Device Init/shutdown. */
101 static void cpsw_init(void *);
102 static void cpsw_init_locked(void *);
103 static int cpsw_shutdown(device_t);
104 static void cpsw_shutdown_locked(struct cpsw_softc *);
106 /* Device Suspend/Resume. */
107 static int cpsw_suspend(device_t);
108 static int cpsw_resume(device_t);
/* ifnet ioctl entry point. */
111 static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);
/* MII bus accessors (MDIO register read/write via MDIOUSERACCESS0). */
113 static int cpsw_miibus_readreg(device_t, int phy, int reg);
114 static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);
116 /* Send/Receive packets. */
117 static void cpsw_intr_rx(void *arg);
118 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
119 static void cpsw_rx_enqueue(struct cpsw_softc *);
120 static void cpsw_start(struct ifnet *);
121 static void cpsw_tx_enqueue(struct cpsw_softc *);
122 static int cpsw_tx_dequeue(struct cpsw_softc *);
124 /* Misc interrupts and watchdog. */
125 static void cpsw_intr_rx_thresh(void *);
126 static void cpsw_intr_misc(void *);
127 static void cpsw_tick(void *);
128 static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
129 static int cpsw_ifmedia_upd(struct ifnet *);
130 static void cpsw_tx_watchdog(struct cpsw_softc *);
/* ALE (Address Lookup Engine) table maintenance. */
133 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
134 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
135 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
136 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
137 static void cpsw_ale_dump_table(struct cpsw_softc *);
139 /* Statistics and sysctls. */
140 static void cpsw_add_sysctls(struct cpsw_softc *);
141 static void cpsw_stats_collect(struct cpsw_softc *);
142 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
/*
 * Maximum number of DMA segments per transmitted mbuf chain; longer
 * chains are m_defrag()'d before being handed to the hardware.
 */
145 * Arbitrary limit on number of segments in an mbuf to be transmitted.
146 * Packets with more segments than this will be defragmented before
149 #define CPSW_TXFRAGS 8
153 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
154 * as separate Ethernet ports. To properly support this, we should
155 * break this into two separate devices: a CPSW_SS device that owns
156 * the interrupts and actually talks to the CPSW hardware, and a
157 * separate CPSW Ethernet child device for each Ethernet port. The RX
158 * interrupt, for example, would be part of CPSW_SS; it would receive
159 * a packet, note the input port, and then dispatch it to the child
160 * device's interface queue. Similarly for transmit.
162 * It's not clear to me whether the device tree should be restructured
163 * with a cpsw_ss node and two child nodes. That would allow specifying
164 * MAC addresses for each port, for example, but might be overkill.
166 * Unfortunately, I don't have hardware right now that supports two
167 * Ethernet ports via CPSW.
/*
 * Newbus glue: device and miibus method table, driver descriptor, and
 * module registration.  The driver attaches under simplebus and exposes
 * an miibus child for PHY management.
 * NOTE(review): lines are missing from this extraction (gutter skips) --
 * e.g. DEVMETHOD_END, the driver_t name/methods fields.
 */
170 static device_method_t cpsw_methods[] = {
171 /* Device interface */
172 DEVMETHOD(device_probe, cpsw_probe),
173 DEVMETHOD(device_attach, cpsw_attach),
174 DEVMETHOD(device_detach, cpsw_detach),
175 DEVMETHOD(device_shutdown, cpsw_shutdown),
176 DEVMETHOD(device_suspend, cpsw_suspend),
177 DEVMETHOD(device_resume, cpsw_resume),
/* MII interface: lets miibus drive the MDIO controller through us. */
179 DEVMETHOD(miibus_readreg, cpsw_miibus_readreg),
180 DEVMETHOD(miibus_writereg, cpsw_miibus_writereg),
184 static driver_t cpsw_driver = {
187 sizeof(struct cpsw_softc),
190 static devclass_t cpsw_devclass;
192 DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
193 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
194 MODULE_DEPEND(cpsw, ether, 1, 1, 1);
195 MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
/*
 * Bus resources: one memory window (register block) and four IRQs.
 * Per the attach code the IRQs are: [0] RX threshold, [1] RX, [2] TX
 * (unused), [3] misc.  NOTE(review): the RESOURCE_SPEC_END / { -1, 0 }
 * terminator entry is not visible in this extraction.
 */
197 static struct resource_spec res_spec[] = {
198 { SYS_RES_MEMORY, 0, RF_ACTIVE },
199 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
200 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
201 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
202 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
/*
 * Table mapping hardware statistics register offsets (relative to the
 * CPSW stats block) to human-readable sysctl names.  cpsw_stats_collect()
 * reads these registers; cpsw_stats_sysctl() exports them.
 * NOTE(review): the struct's field declarations and a few table entries
 * (e.g. offsets 0x1C, 0x28-0x30, 0x64, 0x80) are missing from this view.
 */
206 /* Number of entries here must match size of stats
207 * array in struct cpsw_softc. */
208 static struct cpsw_stat {
211 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
212 {0x00, "GoodRxFrames"},
213 {0x04, "BroadcastRxFrames"},
214 {0x08, "MulticastRxFrames"},
215 {0x0C, "PauseRxFrames"},
216 {0x10, "RxCrcErrors"},
217 {0x14, "RxAlignErrors"},
218 {0x18, "OversizeRxFrames"},
220 {0x20, "ShortRxFrames"},
221 {0x24, "RxFragments"},
223 {0x34, "GoodTxFrames"},
224 {0x38, "BroadcastTxFrames"},
225 {0x3c, "MulticastTxFrames"},
226 {0x40, "PauseTxFrames"},
227 {0x44, "DeferredTxFrames"},
228 {0x48, "CollisionsTxFrames"},
229 {0x4c, "SingleCollisionTxFrames"},
230 {0x50, "MultipleCollisionTxFrames"},
231 {0x54, "ExcessiveCollisions"},
232 {0x58, "LateCollisions"},
233 {0x5c, "TxUnderrun"},
234 {0x60, "CarrierSenseErrors"},
236 {0x68, "RxTx64OctetFrames"},
237 {0x6c, "RxTx65to127OctetFrames"},
238 {0x70, "RxTx128to255OctetFrames"},
239 {0x74, "RxTx256to511OctetFrames"},
240 {0x78, "RxTx512to1024OctetFrames"},
241 {0x7c, "RxTx1024upOctetFrames"},
243 {0x84, "RxStartOfFrameOverruns"},
244 {0x88, "RxMiddleOfFrameOverruns"},
245 {0x8c, "RxDmaOverruns"}
/*
 * Debug helpers (gated on IFF_DEBUG) and the TX/RX mutex discipline.
 * Lock ordering: TX before RX.  Each single-lock macro asserts the
 * opposite lock is NOT held, so a GLOBAL (both) acquisition can only be
 * done through CPSW_GLOBAL_LOCK, which takes tx.lock then rx.lock.
 * NOTE(review): several macro continuation lines (e.g. the trailing
 * "} while (0)" of the do/while wrappers) are missing from this view.
 */
249 * Basic debug support.
252 #define IF_DEBUG(sc) if (sc->cpsw_if_flags & IFF_DEBUG)
255 cpsw_debugf_head(const char *funcname)
/* Timestamp prefix: seconds-since-midnight formatted as HH:MM:SS. */
257 int t = (int)(time_second % (24 * 60 * 60));
259 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
262 #include <machine/stdarg.h>
264 cpsw_debugf(const char *fmt, ...)
275 #define CPSW_DEBUGF(a) do { \
277 cpsw_debugf_head(__func__); \
286 #define CPSW_TX_LOCK(sc) do { \
287 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \
288 mtx_lock(&(sc)->tx.lock); \
291 #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock)
292 #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED)
294 #define CPSW_RX_LOCK(sc) do { \
295 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \
296 mtx_lock(&(sc)->rx.lock); \
299 #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock)
300 #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED)
302 #define CPSW_GLOBAL_LOCK(sc) do { \
/* Holding exactly one of the two locks here would risk deadlock. */
303 if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) != \
304 (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) { \
305 panic("cpsw deadlock possibility detection!"); \
307 mtx_lock(&(sc)->tx.lock); \
308 mtx_lock(&(sc)->rx.lock); \
/* Unlock in reverse order of acquisition. */
311 #define CPSW_GLOBAL_UNLOCK(sc) do { \
312 CPSW_RX_UNLOCK(sc); \
313 CPSW_TX_UNLOCK(sc); \
316 #define CPSW_GLOBAL_LOCK_ASSERT(sc) do { \
317 CPSW_TX_LOCK_ASSERT(sc); \
318 CPSW_RX_LOCK_ASSERT(sc); \
/*
 * Register and CPPI buffer-descriptor accessors.  res[0] is the CPSW
 * register window; buffer descriptors live in on-chip CPPI RAM inside
 * that same window, 16 bytes each.  HDP = Head Descriptor Pointer,
 * CP = Completion Pointer; CP_OFFSET is the fixed distance between the
 * HDP and CP register banks so one queue struct serves both TX and RX.
 */
324 #define cpsw_read_4(sc, reg) bus_read_4(sc->res[0], reg)
325 #define cpsw_write_4(sc, reg, val) bus_write_4(sc->res[0], reg, val)
327 #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16))
/* Physical address of a slot's descriptor, as the DMA engine sees it. */
329 #define cpsw_cpdma_bd_paddr(sc, slot) \
330 BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
331 #define cpsw_cpdma_read_bd(sc, slot, val) \
332 bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
333 #define cpsw_cpdma_write_bd(sc, slot, val) \
334 bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
/* First word of a BD is the "next descriptor" link. */
335 #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \
336 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
/* Flags live in the upper 16 bits of the 4th word (byte offset 14). */
337 #define cpsw_cpdma_read_bd_flags(sc, slot) \
338 bus_read_2(sc->res[0], slot->bd_offset + 14)
339 #define cpsw_write_hdp_slot(sc, queue, slot) \
340 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
341 #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
342 #define cpsw_read_cp(sc, queue) \
343 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
344 #define cpsw_write_cp(sc, queue, val) \
345 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
346 #define cpsw_write_cp_slot(sc, queue, slot) \
347 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
/*
 * Verbose variants of cpsw_write_hdp_slot()/cpsw_write_cp_slot() that log
 * the value written (and the previous/expected value) before writing.
 * Debug-only; not part of the normal data path.
 * NOTE(review): the "static void" return-type lines and closing braces
 * are missing from this extraction.
 */
350 /* XXX temporary function versions for debugging. */
352 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
354 uint32_t reg = queue->hdp_offset;
355 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
356 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
357 cpsw_write_4(sc, reg, v);
361 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
363 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
364 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
365 cpsw_write_cp(sc, queue, v);
/*
 * Verbose-debug dump helpers: print one buffer descriptor (with decoded
 * flag bits and the first bytes of the attached mbuf) or a whole slot
 * queue.  Used only when IFF_DEBUG-style dumping is enabled.
 * NOTE(review): interior lines (flag-table tail, loop bodies, closing
 * braces, macro tails) are missing from this extraction.
 */
370 * Expanded dump routines for verbose debugging.
373 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
/* Names for BD flag bits, MSB (SOP) first; indexed by 15 - bit. */
375 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
376 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
377 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
379 struct cpsw_cpdma_bd bd;
383 cpsw_cpdma_read_bd(sc, slot, &bd);
384 printf("BD Addr: 0x%08x Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
385 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
386 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
387 for (i = 0; i < 16; ++i) {
390 if (bd.flags & (1 << (15 - i))) {
391 printf("%s%s", sep, flags[i]);
/* %D is the kernel printf hexdump conversion: 14-byte Ethernet header,
 * then the first 16 bytes of the payload. */
397 printf(" Ether: %14D\n",
398 (char *)(slot->mbuf->m_hdr.mh_data), " ");
399 printf(" Packet: %16D\n",
400 (char *)(slot->mbuf->m_hdr.mh_data) + 14, " ");
404 #define CPSW_DUMP_SLOT(cs, slot) do { \
406 cpsw_dump_slot(sc, slot); \
412 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
414 struct cpsw_slot *slot;
418 STAILQ_FOREACH(slot, q, next) {
422 cpsw_dump_slot(sc, slot);
/* Truncated dump: only the first few slots are printed in full. */
426 printf(" ... and %d more.\n", others);
430 #define CPSW_DUMP_QUEUE(sc, q) do { \
432 cpsw_dump_queue(sc, q); \
/*
 * Newbus probe: match the FDT "ti,cpsw" compatible string and set the
 * device description.  NOTE(review): the failure return (presumably
 * ENXIO) and surrounding braces are missing from this extraction.
 */
439 * Device Probe, Attach, Detach.
444 cpsw_probe(device_t dev)
447 if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
450 device_set_desc(dev, "3-port Switch Ethernet Subsystem");
451 return (BUS_PROBE_DEFAULT);
/*
 * One-time setup of the slot descriptors: assign each slot its fixed
 * buffer-descriptor offset in CPPI RAM and put all slots on the global
 * "avail" free list, from which cpsw_add_slots() later distributes them
 * to the TX and RX queues.
 */
456 cpsw_init_slots(struct cpsw_softc *sc)
458 struct cpsw_slot *slot;
461 STAILQ_INIT(&sc->avail);
463 /* Put the slot descriptors onto the global avail list. */
464 for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) {
465 slot = &sc->_slots[i];
466 slot->bd_offset = cpsw_cpdma_bd_offset(i);
467 STAILQ_INSERT_TAIL(&sc->avail, slot, next);
/*
 * Set up one interrupt handler and record it in sc->interrupts[] so
 * cpsw_detach_interrupts() can tear it down later.  The count is only
 * incremented on success, so a failed setup leaves no stale entry
 * counted.  NOTE(review): the error-return path after the printf is
 * missing from this extraction.
 */
475 cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description)
480 sc->interrupts[sc->interrupt_count].res = res;
481 sc->interrupts[sc->interrupt_count].description = description;
482 pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie;
484 error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE,
485 NULL, *handler, sc, pcookie);
487 device_printf(sc->dev,
488 "could not setup %s\n", description);
490 ++sc->interrupt_count;
/*
 * Tear down every interrupt handler recorded in sc->interrupts[].
 * Entries with a NULL cookie were never set up and are skipped; each
 * cookie is cleared after teardown so the routine is idempotent.
 */
498 cpsw_detach_interrupts(struct cpsw_softc *sc)
503 for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) {
504 if (!sc->interrupts[i].ih_cookie)
506 error = bus_teardown_intr(sc->dev,
507 sc->interrupts[i].res, sc->interrupts[i].ih_cookie);
509 device_printf(sc->dev, "could not release %s\n",
510 sc->interrupts[i].description);
511 sc->interrupts[i].ih_cookie = NULL;
/*
 * Move up to 'requested' slots from the global free list onto a queue's
 * avail list, creating a DMA map for each.  A negative 'requested' means
 * "take everything" (capped at max_slots).  Stops early if the free list
 * runs out or a dmamap cannot be created.
 * NOTE(review): the early-exit branches and return are missing from this
 * extraction.
 */
516 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
518 const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]);
519 struct cpsw_slot *slot;
523 requested = max_slots;
525 for (i = 0; i < requested; ++i) {
526 slot = STAILQ_FIRST(&sc->avail);
529 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
530 if_printf(sc->ifp, "failed to create dmamap\n");
533 STAILQ_REMOVE_HEAD(&sc->avail, next);
534 STAILQ_INSERT_TAIL(&queue->avail, slot, next);
535 ++queue->avail_queue_len;
536 ++queue->queue_slots;
/*
 * Newbus attach: bring the controller from cold to operational.
 * Order: read PHY address from FDT, init mutexes, allocate bus
 * resources, create the mbuf busdma tag, allocate the ifnet, prepare
 * the pre-zeroed "null mbuf" used as padding, distribute descriptor
 * slots to the TX/RX queues, read the factory MAC address from the
 * control module, attach Ethernet and MII, and hook up interrupts.
 * NOTE(review): numerous error-handling lines and the final return are
 * missing from this extraction (gutter numbering skips throughout).
 */
542 cpsw_attach(device_t dev)
544 bus_dma_segment_t segs[1];
545 struct cpsw_softc *sc = device_get_softc(dev);
546 struct mii_softc *miisc;
549 int error, phy, nsegs;
554 getbinuptime(&sc->attach_uptime);
556 sc->node = ofw_bus_get_node(dev);
558 /* Get phy address from fdt */
559 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
560 device_printf(dev, "failed to get PHY address from FDT\n");
563 /* Initialize mutexes */
564 mtx_init(&sc->tx.lock, device_get_nameunit(dev),
565 "cpsw TX lock", MTX_DEF);
566 mtx_init(&sc->rx.lock, device_get_nameunit(dev),
567 "cpsw RX lock", MTX_DEF);
569 /* Allocate IO and IRQ resources */
570 error = bus_alloc_resources(dev, res_spec, sc->res);
572 device_printf(dev, "could not allocate resources\n");
577 reg = cpsw_read_4(sc, CPSW_SS_IDVER)
578 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
579 reg & 0xFF, (reg >> 11) & 0x1F);
581 cpsw_add_sysctls(sc);
583 /* Allocate a busdma tag and DMA safe memory for mbufs. */
584 error = bus_dma_tag_create(
585 bus_get_dma_tag(sc->dev), /* parent */
586 1, 0, /* alignment, boundary */
587 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
588 BUS_SPACE_MAXADDR, /* highaddr */
589 NULL, NULL, /* filtfunc, filtfuncarg */
590 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */
591 MCLBYTES, 0, /* maxsegsz, flags */
592 NULL, NULL, /* lockfunc, lockfuncarg */
593 &sc->mbuf_dtag); /* dmatag */
595 device_printf(dev, "bus_dma_tag_create failed\n");
600 /* Allocate network interface */
601 ifp = sc->ifp = if_alloc(IFT_ETHER);
603 device_printf(dev, "if_alloc() failed\n");
608 /* Allocate the null mbuf and pre-sync it. */
/* NOTE(review): m_getcl() result is not checked for NULL before the
 * memset below -- potential NULL dereference on allocation failure;
 * fix requires the missing surrounding lines, so flagging only. */
609 sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
610 memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size);
611 bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
612 bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
613 sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
614 bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
615 BUS_DMASYNC_PREWRITE);
616 sc->null_mbuf_paddr = segs[0].ds_addr;
618 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
620 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
621 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
622 ifp->if_capenable = ifp->if_capabilities;
624 ifp->if_init = cpsw_init;
625 ifp->if_start = cpsw_start;
626 ifp->if_ioctl = cpsw_ioctl;
630 /* Allocate slots to TX and RX queues. */
631 STAILQ_INIT(&sc->rx.avail);
632 STAILQ_INIT(&sc->rx.active);
633 STAILQ_INIT(&sc->tx.avail);
634 STAILQ_INIT(&sc->tx.active);
635 // For now: 128 slots to TX, rest to RX.
636 // XXX TODO: start with 32/64 and grow dynamically based on demand.
637 if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) {
638 device_printf(dev, "failed to allocate dmamaps\n");
642 device_printf(dev, "Initial queue size TX=%d RX=%d\n",
643 sc->tx.queue_slots, sc->rx.queue_slots);
645 ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots;
646 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
647 IFQ_SET_READY(&ifp->if_snd);
649 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
650 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
652 /* Get high part of MAC address from control module (mac_id0_hi) */
653 /* TODO: Get MAC ID1 as well as MAC ID0. */
/* NOTE(review): the "®" in the two ti_scm_reg_read_4() calls below is a
 * mis-decoded HTML entity; the argument should read "&reg".  Restore it
 * before this file can compile. */
654 ti_scm_reg_read_4(0x634, ®);
655 sc->mac_addr[0] = reg & 0xFF;
656 sc->mac_addr[1] = (reg >> 8) & 0xFF;
657 sc->mac_addr[2] = (reg >> 16) & 0xFF;
658 sc->mac_addr[3] = (reg >> 24) & 0xFF;
660 /* Get low part of MAC address from control module (mac_id0_lo) */
661 ti_scm_reg_read_4(0x630, ®);
662 sc->mac_addr[4] = reg & 0xFF;
663 sc->mac_addr[5] = (reg >> 8) & 0xFF;
665 ether_ifattach(ifp, sc->mac_addr);
666 callout_init(&sc->watchdog.callout, 0);
668 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
669 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
670 cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);
673 cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30);
676 error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
677 cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
679 device_printf(dev, "attaching PHYs failed\n");
683 sc->mii = device_get_softc(sc->miibus);
685 /* Tell the MAC where to find the PHY so autoneg works */
686 miisc = LIST_FIRST(&sc->mii->mii_phys);
688 /* Select PHY and enable interrupts */
689 cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));
691 /* Note: We don't use sc->res[3] (TX interrupt) */
692 if (cpsw_attach_interrupt(sc, sc->res[1],
693 cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") ||
694 cpsw_attach_interrupt(sc, sc->res[2],
695 cpsw_intr_rx, "CPSW RX interrupt") ||
696 cpsw_attach_interrupt(sc, sc->res[4],
697 cpsw_intr_misc, "CPSW misc interrupt")) {
/*
 * Release a slot's DMA map (asserting no mapping is still loaded).
 * NOTE(review): the function's return-type line, mbuf-freeing code and
 * closing brace are missing from this extraction.
 */
706 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
711 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
712 KASSERT(error == 0, ("Mapping still active"));
/*
 * Newbus detach: mirror of cpsw_attach().  If the interface is up,
 * detach Ethernet and run the locked shutdown first; then detach child
 * devices (miibus), tear down interrupts, free per-slot dmamaps and the
 * mbuf DMA tag, release bus resources, and destroy the mutexes.
 * NOTE(review): some lines (e.g. the final return) are missing from this
 * extraction.
 */
722 cpsw_detach(device_t dev)
724 struct cpsw_softc *sc = device_get_softc(dev);
729 /* Stop controller and free TX queue */
730 if (device_is_attached(dev)) {
731 ether_ifdetach(sc->ifp);
732 CPSW_GLOBAL_LOCK(sc);
733 cpsw_shutdown_locked(sc);
734 CPSW_GLOBAL_UNLOCK(sc);
735 callout_drain(&sc->watchdog.callout);
738 bus_generic_detach(dev);
739 device_delete_child(dev, sc->miibus);
741 /* Stop and release all interrupts */
742 cpsw_detach_interrupts(sc);
744 /* Free dmamaps and mbufs */
745 for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) {
746 cpsw_free_slot(sc, &sc->_slots[i]);
750 error = bus_dma_tag_destroy(sc->mbuf_dtag);
751 KASSERT(error == 0, ("Unable to destroy DMA tag"));
753 /* Free IO memory handler */
754 bus_release_resources(dev, res_spec, sc->res);
756 /* Destroy mutexes */
757 mtx_destroy(&sc->rx.lock);
758 mtx_destroy(&sc->tx.lock);
/*
 * Hard-reset the whole CPSW complex: RMII/RGMII wrapper, the switch
 * subsystem, both sliver ports, and the CPDMA engine.  Each soft-reset
 * write is followed by a busy-wait until the hardware clears the bit.
 * Afterwards DMA is disabled, all 8 TX/RX channel HDP/CP registers are
 * zeroed, and all interrupt masks are cleared.  Leaves the controller
 * quiescent; cpsw_init_locked() re-programs it.
 */
770 cpsw_reset(struct cpsw_softc *sc)
774 /* Reset RMII/RGMII wrapper. */
775 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
776 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
779 /* Disable TX and RX interrupts for all cores. */
780 for (i = 0; i < 3; ++i) {
781 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
782 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
783 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
784 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
787 /* Reset CPSW subsystem. */
788 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
789 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
792 /* Reset Sliver port 1 and 2 */
793 for (i = 0; i < 2; i++) {
795 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
796 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
800 /* Reset DMA controller. */
801 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
802 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
805 /* Disable TX & RX DMA */
806 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
807 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);
809 /* Clear all queues. */
810 for (i = 0; i < 8; i++) {
811 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
812 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
813 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
814 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
817 /* Clear all interrupt Masks */
818 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
819 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
/*
 * ifnet if_init entry point: thin wrapper that takes both TX and RX
 * locks around cpsw_init_locked().  The void* arg is the softc (ifnet
 * init callbacks receive the softc pointer).
 */
825 struct cpsw_softc *sc = arg;
828 CPSW_GLOBAL_LOCK(sc);
829 cpsw_init_locked(arg);
830 CPSW_GLOBAL_UNLOCK(sc);
/*
 * Bring the interface up; both TX and RX locks must be held.  No-op if
 * IFF_DRV_RUNNING is already set.  Sequence: reset the controller,
 * enable the ALE, configure sliver ports 1/2 (priority maps, max frame
 * length 0x5f2, MACCONTROL with GMII_EN/IFCTL_A/full duplex), program
 * host-port maps, set all ALE ports to forwarding and load addresses,
 * enable statistics, disable flow control, set a 2-byte RX buffer offset
 * so the IP header lands 4-byte aligned, enable DMA and core-0
 * interrupts, re-init MDIO, prime the TX/RX head-descriptor pointers,
 * and finally mark the interface RUNNING and start the watchdog tick.
 * NOTE(review): scattered lines (ifp local, cpsw_reset() call, closing
 * braces) are missing from this extraction.
 */
834 cpsw_init_locked(void *arg)
837 struct cpsw_softc *sc = arg;
838 struct cpsw_slot *slot;
843 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
846 getbinuptime(&sc->init_uptime);
848 /* Reset the controller. */
/* ALE_CONTROL: bit 31 = enable ALE, bit 4 = bypass/clear-table control. */
852 cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4);
854 /* Init Sliver port 1 and 2 */
855 for (i = 0; i < 2; i++) {
856 /* Set Slave Mapping */
857 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
858 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
859 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
860 /* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15),
861 GMII_EN(5), FULLDUPLEX(1) */
862 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
863 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
864 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);
867 /* Set Host Port Mapping */
868 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
869 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
871 /* Initialize ALE: all ports set to forwarding(3), initialize addrs */
872 for (i = 0; i < 3; i++)
873 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3);
874 cpsw_ale_update_addresses(sc, 1);
876 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
878 /* Enable statistics for ports 0, 1 and 2 */
879 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
881 /* Experiment: Turn off flow control */
882 /* This seems to fix the watchdog resets that have plagued
883 earlier versions of this driver; I'm not yet sure if there
884 are negative effects yet. */
885 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
887 /* Make IP hdr aligned with 4 */
888 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);
890 /* Initialize RX Buffer Descriptors */
891 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
893 /* Enable TX & RX DMA */
894 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
895 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
897 /* Enable Interrupts for core 0 */
898 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
899 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
900 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F);
902 /* Enable host Error Interrupt */
903 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);
905 /* Enable interrupts for RX Channel 0 */
906 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
908 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
909 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
910 cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);
912 /* Select MII in GMII_SEL, Internal Delay mode */
913 //ti_scm_reg_write_4(0x650, 0);
915 /* Initialize active queues. */
916 slot = STAILQ_FIRST(&sc->tx.active);
918 cpsw_write_hdp_slot(sc, &sc->tx, slot);
919 slot = STAILQ_FIRST(&sc->rx.active);
921 cpsw_write_hdp_slot(sc, &sc->rx, slot);
924 /* Activate network interface */
927 sc->watchdog.timer = 0;
928 callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
929 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
930 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * Newbus shutdown method: stop the interface under both locks.
 * NOTE(review): the return statement is missing from this extraction.
 */
935 cpsw_shutdown(device_t dev)
937 struct cpsw_softc *sc = device_get_softc(dev);
940 CPSW_GLOBAL_LOCK(sc);
941 cpsw_shutdown_locked(sc);
942 CPSW_GLOBAL_UNLOCK(sc);
/*
 * Ask the hardware to tear down the RX channel and drain any packets
 * already completed.  The locks are dropped while delivering the drained
 * packets to if_input (upper layers may recurse into the driver), then
 * retaken to re-check sc->rx.running; gives up with a warning after the
 * retry loop (loop header missing from this view) is exhausted.
 */
947 cpsw_rx_teardown_locked(struct cpsw_softc *sc)
949 struct mbuf *received, *next;
952 CPSW_DEBUGF(("starting RX teardown"));
953 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
955 received = cpsw_rx_dequeue(sc);
956 CPSW_GLOBAL_UNLOCK(sc);
957 while (received != NULL) {
958 next = received->m_nextpkt;
959 received->m_nextpkt = NULL;
960 (*sc->ifp->if_input)(sc->ifp, received);
963 CPSW_GLOBAL_LOCK(sc);
964 if (!sc->rx.running) {
965 CPSW_DEBUGF(("finished RX teardown (%d retries)", i));
969 if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n");
/*
 * Ask the hardware to tear down the TX channel and poll (up to 10
 * iterations) for sc->tx.running to clear; warn if it never does.
 * NOTE(review): the loop body (likely a cpsw_tx_dequeue() call and a
 * delay) is missing from this extraction.
 */
977 cpsw_tx_teardown_locked(struct cpsw_softc *sc)
981 CPSW_DEBUGF(("starting TX teardown"));
982 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
984 while (sc->tx.running && ++i < 10) {
989 if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n");
990 CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)",
991 i, sc->tx.active_queue_len));
/*
 * Stop the interface; both locks must be held (asserted).  No-op when
 * already down.  Clears RUNNING/sets OACTIVE first so new work is
 * refused, stops the watchdog callout, tears down both DMA channels,
 * and snapshots the hardware statistics before the next reset wipes
 * them.  NOTE(review): the ifp local declaration and any trailing reset
 * call are missing from this extraction.
 */
995 cpsw_shutdown_locked(struct cpsw_softc *sc)
1000 CPSW_GLOBAL_LOCK_ASSERT(sc);
1003 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1006 /* Disable interface */
1007 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1008 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1011 callout_stop(&sc->watchdog.callout);
1013 /* Tear down the RX/TX queues. */
1014 cpsw_rx_teardown_locked(sc);
1015 cpsw_tx_teardown_locked(sc);
1017 /* Capture stats before we reset controller. */
1018 cpsw_stats_collect(sc);
/*
 * Suspend: shut the interface down under both locks.  Resume is a stub
 * (logs UNIMPLEMENTED); the hardware is re-initialized only via
 * cpsw_init() after resume.  NOTE(review): the return statements are
 * missing from this extraction.
 */
1028 cpsw_suspend(device_t dev)
1030 struct cpsw_softc *sc = device_get_softc(dev);
1033 CPSW_GLOBAL_LOCK(sc);
1034 cpsw_shutdown_locked(sc);
1035 CPSW_GLOBAL_UNLOCK(sc);
1040 cpsw_resume(device_t dev)
1042 struct cpsw_softc *sc = device_get_softc(dev);
1044 CPSW_DEBUGF(("UNIMPLEMENTED"));
/*
 * Promiscuous and all-multicast mode setters.  Both are currently
 * unimplemented stubs that just print a message; the long comment
 * records the design problem (ALE_BYPASS routes everything to the host
 * but then TX must pick an explicit egress port).
 */
1055 cpsw_set_promisc(struct cpsw_softc *sc, int set)
1058 * Enabling promiscuous mode requires two bits of work: First,
1059 * ALE_BYPASS needs to be enabled. That disables the ALE
1060 * forwarding logic and causes every packet to be sent to the
1061 * host port. That makes us promiscuous wrt received packets.
1063 * With ALE forwarding disabled, the transmitter needs to set
1064 * an explicit output port on every packet to route it to the
1065 * correct egress. This should be doable for systems such as
1066 * BeagleBone where only one egress port is actually wired to
1067 * a PHY. If you have both egress ports wired up, life gets a
1068 * lot more interesting.
1070 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't
1071 * seem to set explicit egress ports. Does that mean they
1072 * are always promiscuous?
1075 printf("Promiscuous mode unimplemented\n");
1080 cpsw_set_allmulti(struct cpsw_softc *sc, int set)
1083 printf("All-multicast mode unimplemented\n");
/*
 * ifnet ioctl handler.  Visible cases: SIOCSIFFLAGS (start/stop the
 * interface and apply PROMISC/ALLMULTI deltas against the cached
 * cpsw_if_flags), SIOCADDMULTI/SIOCDELMULTI (rebuild the ALE address
 * table -- DELMULTI doesn't say which address went away, so purge and
 * rebuild), media ioctls forwarded to mii, and the ether_ioctl default.
 * NOTE(review): the switch statement, case labels and return are missing
 * from this extraction (gutter numbering skips).
 */
1088 cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1090 struct cpsw_softc *sc = ifp->if_softc;
1091 struct ifreq *ifr = (struct ifreq *)data;
1099 CPSW_GLOBAL_LOCK(sc);
1100 if (ifp->if_flags & IFF_UP) {
1101 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1102 changed = ifp->if_flags ^ sc->cpsw_if_flags;
1103 CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
1104 if (changed & IFF_PROMISC)
1105 cpsw_set_promisc(sc,
1106 ifp->if_flags & IFF_PROMISC);
1107 if (changed & IFF_ALLMULTI)
1108 cpsw_set_allmulti(sc,
1109 ifp->if_flags & IFF_ALLMULTI);
1111 CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
1112 cpsw_init_locked(sc);
1114 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1115 CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
1116 cpsw_shutdown_locked(sc);
1119 sc->cpsw_if_flags = ifp->if_flags;
1120 CPSW_GLOBAL_UNLOCK(sc);
1123 cpsw_ale_update_addresses(sc, 0);
1126 /* Ugh. DELMULTI doesn't provide the specific address
1127 being removed, so the best we can do is remove
1128 everything and rebuild it all. */
1129 cpsw_ale_update_addresses(sc, 1);
1133 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1136 error = ether_ioctl(ifp, command, data);
/*
 * MDIO access helpers.  cpsw_miibus_ready() polls MDIOUSERACCESS0 until
 * the GO bit (bit 31) clears, up to CPSW_MIIBUS_RETRIES attempts with
 * CPSW_MIIBUS_DELAY between them.  The readreg/writereg methods wait for
 * the bus, issue the GO command with phy/reg (and data for writes), wait
 * again, and check the ACK bit (bit 29).  NOTE(review): loop headers,
 * error returns and closing braces are missing from this extraction.
 */
1147 cpsw_miibus_ready(struct cpsw_softc *sc)
1149 uint32_t r, retries = CPSW_MIIBUS_RETRIES;
1152 r = cpsw_read_4(sc, MDIOUSERACCESS0);
1153 if ((r & 1 << 31) == 0)
1155 DELAY(CPSW_MIIBUS_DELAY);
1161 cpsw_miibus_readreg(device_t dev, int phy, int reg)
1163 struct cpsw_softc *sc = device_get_softc(dev);
1166 if (!cpsw_miibus_ready(sc)) {
1167 device_printf(dev, "MDIO not ready to read\n");
1171 /* Set GO, reg, phy */
1172 cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
1173 cpsw_write_4(sc, MDIOUSERACCESS0, cmd);
1175 if (!cpsw_miibus_ready(sc)) {
1176 device_printf(dev, "MDIO timed out during read\n");
/* Bit 29 is the ACK bit: clear means the PHY did not respond. */
1180 r = cpsw_read_4(sc, MDIOUSERACCESS0);
1181 if((r & 1 << 29) == 0) {
1182 device_printf(dev, "Failed to read from PHY.\n");
1185 return (r & 0xFFFF);
1189 cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
1191 struct cpsw_softc *sc = device_get_softc(dev);
1194 if (!cpsw_miibus_ready(sc)) {
1195 device_printf(dev, "MDIO not ready to write\n");
1199 /* Set GO, WRITE, reg, phy, and value */
1200 cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
1202 cpsw_write_4(sc, MDIOUSERACCESS0, cmd);
1204 if (!cpsw_miibus_ready(sc)) {
1205 device_printf(dev, "MDIO timed out during write\n");
1209 if((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0)
1210 device_printf(dev, "Failed to write to PHY.\n");
1217 * Transmit/Receive Packets.
/*
 * RX data path.
 *
 * cpsw_intr_rx(): RX interrupt handler -- pull completed packets off the
 * hardware queue, refill the ring, write the EOI vector to re-arm the
 * interrupt, then hand the dequeued packet chain to if_input.
 *
 * cpsw_rx_dequeue(): walk sc->rx.active, stopping at the first
 * descriptor still owned by hardware (CPDMA_BD_OWNER).  Each completed
 * slot is moved to rx.avail, its dmamap synced/unloaded, and its mbuf
 * appended (via m_nextpkt) to the list returned to the caller.  A
 * TDOWNCMPLT descriptor means a teardown is in progress: the mbuf is
 * freed and the magic teardown-complete value 0xfffffffc is written to
 * the completion pointer.  pktlen minus 4 strips the trailing CRC; bufoff
 * is applied so the 2-byte RX buffer offset alignment set at init is
 * honored.  If RXCSUM is enabled and the BD reports no packet errors,
 * the IP checksum is marked as checked/valid.
 * NOTE(review): interior lines (locking calls, SOP/EOP handling, loop
 * tails, the return statement, queue-stat lines) are missing from this
 * extraction.
 */
1223 cpsw_intr_rx(void *arg)
1225 struct cpsw_softc *sc = arg;
1226 struct mbuf *received, *next;
1229 received = cpsw_rx_dequeue(sc);
1230 cpsw_rx_enqueue(sc);
1231 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
1234 while (received != NULL) {
1235 next = received->m_nextpkt;
1236 received->m_nextpkt = NULL;
1237 (*sc->ifp->if_input)(sc->ifp, received);
1242 static struct mbuf *
1243 cpsw_rx_dequeue(struct cpsw_softc *sc)
1245 struct cpsw_cpdma_bd bd;
1246 struct cpsw_slot *slot;
1248 struct mbuf *mb_head, *mb_tail;
1252 mb_head = mb_tail = NULL;
1254 /* Pull completed packets off hardware RX queue. */
1255 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
1256 cpsw_cpdma_read_bd(sc, slot, &bd);
1257 if (bd.flags & CPDMA_BD_OWNER)
1258 break; /* Still in use by hardware */
1260 CPSW_DEBUGF(("Removing received packet from RX queue"));
1262 STAILQ_REMOVE_HEAD(&sc->rx.active, next);
1263 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);
1265 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
1266 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1268 if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1269 CPSW_DEBUGF(("RX teardown in progress"));
1270 m_freem(slot->mbuf);
1272 cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
1277 cpsw_write_cp_slot(sc, &sc->rx, slot);
1280 /* TODO: track SOP/EOP bits to assemble a full mbuf
1281 out of received fragments. */
1282 slot->mbuf->m_hdr.mh_data += bd.bufoff;
1283 slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
1284 slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
1285 slot->mbuf->m_flags |= M_PKTHDR;
1286 slot->mbuf->m_pkthdr.rcvif = ifp;
1287 slot->mbuf->m_nextpkt = NULL;
1289 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1290 /* check for valid CRC by looking into pkt_err[5:4] */
1291 if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
1292 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1293 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1294 slot->mbuf->m_pkthdr.csum_data = 0xffff;
1298 /* Add mbuf to packet list to be returned. */
1300 mb_tail->m_nextpkt = slot->mbuf;
1302 mb_head = slot->mbuf;
1304 mb_tail = slot->mbuf;
1309 sc->rx.queue_removes += removed;
1310 sc->rx.active_queue_len -= removed;
1311 sc->rx.avail_queue_len += removed;
1312 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
1313 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
/*
 * Refill the hardware RX descriptor queue.  For each available slot:
 * allocate an mbuf cluster if needed, DMA-map it, write a fresh buffer
 * descriptor, and chain it onto a temporary list.  The whole batch is
 * then appended to the live queue in one step, restarting the channel
 * if the hardware had hit end-of-queue (EOQ).
 * NOTE(review): excerpt is missing lines (return type, braces,
 * break/continue paths, added++ increments) — incomplete as shown.
 */
1319 cpsw_rx_enqueue(struct cpsw_softc *sc)
1321 bus_dma_segment_t seg[1];
1322 struct cpsw_cpdma_bd bd;
1323 struct ifnet *ifp = sc->ifp;
1324 struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
1325 struct cpsw_slot *slot, *prev_slot = NULL;
1326 struct cpsw_slot *last_old_slot, *first_new_slot;
1327 int error, nsegs, added = 0;
1329 /* Register new mbufs with hardware. */
1330 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
1331 if (slot->mbuf == NULL) {
1332 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1333 if (slot->mbuf == NULL) {
1334 if_printf(sc->ifp, "Unable to fill RX queue\n");
1338 slot->mbuf->m_pkthdr.len =
1339 slot->mbuf->m_ext.ext_size;
/* Map the cluster for device reads; a cluster always fits one segment. */
1342 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
1343 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);
1345 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
1346 KASSERT(error == 0, ("DMA error (error=%d)", error));
1347 if (error != 0 || nsegs != 1) {
1349 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
1350 __func__, nsegs, error);
1351 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1352 m_freem(slot->mbuf);
1357 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);
1359 /* Create and submit new rx descriptor*/
1361 bd.bufptr = seg->ds_addr;
/* buflen = MCLBYTES - 1: why the -1 is not evident from this excerpt —
 * TODO confirm (possibly a hardware field-width constraint). */
1363 bd.buflen = MCLBYTES - 1;
1364 bd.pktlen = bd.buflen;
1365 bd.flags = CPDMA_BD_OWNER;
1366 cpsw_cpdma_write_bd(sc, slot, &bd);
/* Link descriptors together as we build the batch. */
1369 if (prev_slot != NULL)
1370 cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
1372 STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
1373 sc->rx.avail_queue_len--;
1374 STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1380 CPSW_DEBUGF(("Adding %d buffers to RX queue", added));
1382 /* Link new entries to hardware RX queue. */
1383 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
1384 first_new_slot = STAILQ_FIRST(&tmpqueue);
1385 STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
1386 if (first_new_slot == NULL) {
1388 } else if (last_old_slot == NULL) {
1389 /* Start a fresh queue. */
1390 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1392 /* Add buffers to end of current queue. */
1393 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
1394 /* If underrun, restart queue. */
1395 if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
1396 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
1399 sc->rx.queue_adds += added;
1400 sc->rx.active_queue_len += added;
1401 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
1402 sc->rx.max_active_queue_len = sc->rx.active_queue_len;
/*
 * if_start entry point: kick the transmitter.  Only runs when the
 * interface is up (IFF_DRV_RUNNING) and the TX side is active; enqueues
 * pending packets from the if_snd queue and reaps completed descriptors.
 * NOTE(review): excerpt is missing lines (locking and braces not visible).
 */
1407 cpsw_start(struct ifnet *ifp)
1409 struct cpsw_softc *sc = ifp->if_softc;
1412 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) {
1413 cpsw_tx_enqueue(sc);
1414 cpsw_tx_dequeue(sc);
/*
 * Drain the interface send queue into the hardware TX descriptor ring.
 * For each packet: DMA-map it (defragmenting with m_defrag() if it has
 * too many segments), build a SOP...EOP descriptor chain (appending a
 * shared null-padding buffer for runts shorter than ETHER_MIN_LEN),
 * then splice the whole batch onto the live TX queue, restarting the
 * channel on EOQ underrun.
 * NOTE(review): this excerpt is heavily gapped (missing braces, the
 * slot->mbuf = m0 assignment, padlen clamping, break paths, and the
 * prev_slot bookkeeping) — treat the visible text as a partial view.
 */
1420 cpsw_tx_enqueue(struct cpsw_softc *sc)
1422 bus_dma_segment_t segs[CPSW_TXFRAGS];
1423 struct cpsw_cpdma_bd bd;
1424 struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
1425 struct cpsw_slot *slot, *prev_slot = NULL;
1426 struct cpsw_slot *last_old_slot, *first_new_slot;
1428 int error, nsegs, seg, added = 0, padlen;
1430 /* Pull pending packets from IF queue and prep them for DMA. */
1431 while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) {
1432 IF_DEQUEUE(&sc->ifp->if_snd, m0);
/* Bytes of zero padding needed to reach the minimum Ethernet frame. */
1437 padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
1441 /* Create mapping in DMA memory */
1442 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
1443 slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1444 /* If the packet is too fragmented, try to simplify. */
1445 if (error == EFBIG ||
1447 nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) {
1448 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1449 if (padlen > 0) /* May as well add padding. */
1450 m_append(slot->mbuf, padlen,
1451 sc->null_mbuf->m_hdr.mh_data);
/* Defragment and push the packet back for a retry next pass. */
1452 m0 = m_defrag(slot->mbuf, M_NOWAIT);
1455 "Can't defragment packet; dropping\n");
1456 m_freem(slot->mbuf);
1458 CPSW_DEBUGF(("Requeueing defragmented packet"));
1459 IF_PREPEND(&sc->ifp->if_snd, m0);
1466 "%s: Can't setup DMA (error=%d), dropping packet\n",
1468 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1469 m_freem(slot->mbuf);
1474 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
1475 BUS_DMASYNC_PREWRITE);
1478 CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes",
1481 /* If there is only one segment, the for() loop
1482 * gets skipped and the single buffer gets set up
1483 * as both SOP and EOP. */
1484 /* Start by setting up the first buffer */
1486 bd.bufptr = segs[0].ds_addr;
1488 bd.buflen = segs[0].ds_len;
/* pktlen covers the whole frame including any padding to be appended. */
1489 bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
1490 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
/* Middle segments: write out the previous descriptor and start the next. */
1491 for (seg = 1; seg < nsegs; ++seg) {
1492 /* Save the previous buffer (which isn't EOP) */
1493 cpsw_cpdma_write_bd(sc, slot, &bd);
1494 if (prev_slot != NULL)
1495 cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
1497 STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
1498 sc->tx.avail_queue_len--;
1499 STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1501 slot = STAILQ_FIRST(&sc->tx.avail);
1503 /* Setup next buffer (which isn't SOP) */
1505 bd.bufptr = segs[seg].ds_addr;
1507 bd.buflen = segs[seg].ds_len;
1509 bd.flags = CPDMA_BD_OWNER;
1511 /* Save the final buffer. */
1513 bd.flags |= CPDMA_BD_EOP;
1514 cpsw_cpdma_write_bd(sc, slot, &bd);
1515 if (prev_slot != NULL)
1516 cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
1518 STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
1519 sc->tx.avail_queue_len--;
1520 STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1524 slot = STAILQ_FIRST(&sc->tx.avail);
1525 STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
1526 sc->tx.avail_queue_len--;
1527 STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
1530 /* Setup buffer of null pad bytes (definitely EOP) */
/* The pad descriptor points at the driver's shared zero buffer. */
1531 cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
1534 bd.bufptr = sc->null_mbuf_paddr;
1538 bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER;
1539 cpsw_cpdma_write_bd(sc, slot, &bd);
1543 if (nsegs > sc->tx.longest_chain)
1544 sc->tx.longest_chain = nsegs;
1546 // TODO: Should we defer the BPF tap until
1547 // after all packets are queued?
1548 BPF_MTAP(sc->ifp, m0);
1551 /* Attach the list of new buffers to the hardware TX queue. */
1552 last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next);
1553 first_new_slot = STAILQ_FIRST(&tmpqueue);
1554 STAILQ_CONCAT(&sc->tx.active, &tmpqueue);
1555 if (first_new_slot == NULL) {
1557 } else if (last_old_slot == NULL) {
1558 /* Start a fresh queue. */
1559 cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
1561 /* Add buffers to end of current queue. */
1562 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
1563 /* If underrun, restart queue. */
1564 if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
1565 cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
1568 sc->tx.queue_adds += added;
1569 sc->tx.active_queue_len += added;
1570 if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) {
1571 sc->tx.max_active_queue_len = sc->tx.active_queue_len;
/*
 * Reap completed TX descriptors.  Walks the active TX list, stopping at
 * the first descriptor still owned by hardware; frees each completed
 * packet's lead mbuf and returns its slots (lead + continuation, which
 * have mbuf == NULL) to the avail list.  Handles the teardown-complete
 * marker and acks progress via the channel CP register.
 * NOTE(review): excerpt is missing lines (return type, removed++
 * increments, break statements, final return) — partial view.
 */
1576 cpsw_tx_dequeue(struct cpsw_softc *sc)
1578 struct cpsw_slot *slot, *last_removed_slot = NULL;
1579 uint32_t flags, removed = 0;
1581 slot = STAILQ_FIRST(&sc->tx.active);
/* Empty active list + magic CP value means a teardown finished while
 * nothing was queued: acknowledge it. */
1582 if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
1583 CPSW_DEBUGF(("TX teardown of an empty queue"));
1584 cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1589 /* Pull completed buffers off the hardware TX queue. */
1590 while (slot != NULL) {
1591 flags = cpsw_cpdma_read_bd_flags(sc, slot);
1592 if (flags & CPDMA_BD_OWNER)
1593 break; /* Hardware is still using this packet. */
1595 CPSW_DEBUGF(("TX removing completed packet"));
1596 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
1597 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
1598 m_freem(slot->mbuf);
1601 /* Dequeue any additional buffers used by this packet. */
/* Continuation slots carry no mbuf; the NULL check detects them. */
1602 while (slot != NULL && slot->mbuf == NULL) {
1603 STAILQ_REMOVE_HEAD(&sc->tx.active, next);
1604 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
1606 last_removed_slot = slot;
1607 slot = STAILQ_FIRST(&sc->tx.active);
1610 /* TearDown complete is only marked on the SOP for the packet. */
1611 if (flags & CPDMA_BD_TDOWNCMPLT) {
1612 CPSW_DEBUGF(("TX teardown in progress"));
1613 cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
1614 // TODO: Increment a count of dropped TX packets
/* Report progress to the hardware and update queue statistics. */
1621 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
1622 sc->tx.queue_removes += removed;
1623 sc->tx.active_queue_len -= removed;
1624 sc->tx.avail_queue_len += removed;
1625 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
1626 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
1633 * Miscellaneous interrupts.
/*
 * RX threshold interrupt handler.  Reads the threshold status register
 * (debug-logged only) and writes EOI vector 0 to acknowledge; no other
 * handling is implemented in this driver.
 */
1638 cpsw_intr_rx_thresh(void *arg)
1640 struct cpsw_softc *sc = arg;
1641 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));
1643 CPSW_DEBUGF(("stat=%x", stat));
1644 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
/*
 * Diagnose a CPDMA "host error" (hardware-detected descriptor
 * programming error).  Decodes the DMASTATUS error/channel fields,
 * dumps the offending channel's HDP/CP registers, the descriptor queue,
 * and the ALE table, then panics — the error is unrecoverable here.
 * NOTE(review): excerpt is missing lines (declarations of intstat/
 * dmastat, the switch statements' headers, break statements, braces).
 */
1648 cpsw_intr_misc_host_error(struct cpsw_softc *sc)
1652 int txerr, rxerr, txchan, rxchan;
1655 device_printf(sc->dev,
1656 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n");
1658 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1659 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
1660 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1661 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);
/* DMASTATUS layout: [23:20] TX error code, [18:16] TX channel,
 * [15:12] RX error code, [10:8] RX channel. */
1663 txerr = (dmastat >> 20) & 15;
1664 txchan = (dmastat >> 16) & 7;
1665 rxerr = (dmastat >> 12) & 15;
1666 rxchan = (dmastat >> 8) & 7;
/* Decode the TX-side error code. */
1670 case 1: printf("SOP error on TX channel %d\n", txchan);
1672 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
1674 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
1676 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan);
1678 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan);
1680 case 6: printf("Packet length error on TX channel %d\n", txchan);
1682 default: printf("Unknown error on TX channel %d\n", txchan);
1687 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
1688 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
1689 printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
1690 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
1691 cpsw_dump_queue(sc, &sc->tx.active);
/* Decode the RX-side error code. */
1696 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan);
1698 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
1700 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan);
1702 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan);
1704 default: printf("Unknown RX error on RX channel %d\n", rxchan);
1709 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
1710 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan)));
1711 printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
1712 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
1713 cpsw_dump_queue(sc, &sc->rx.active);
1716 printf("\nALE Table\n");
1717 cpsw_ale_dump_table(sc);
1719 // XXX do something useful here??
1720 panic("CPSW HOST ERROR INTERRUPT");
/* Dead code after the panic: would mask the interrupt and limp along. */
1722 // Suppress this interrupt in the future.
1723 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
1724 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
1725 // The watchdog will probably reset the controller
1726 // in a little while. It will probably fail again.
/*
 * Miscellaneous interrupt handler: dispatches on the WR misc status
 * bits.  Statistics overflow triggers a stats harvest, host errors are
 * fatal (see cpsw_intr_misc_host_error); the time-sync and MDIO events
 * are logged but unimplemented.  Ends with EOI vector 3 (misc).
 * NOTE(review): the if/bit tests selecting each branch are missing from
 * this excerpt.
 */
1730 cpsw_intr_misc(void *arg)
1732 struct cpsw_softc *sc = arg;
1733 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1736 CPSW_DEBUGF(("Time sync event interrupt unimplemented"));
1738 cpsw_stats_collect(sc);
1740 cpsw_intr_misc_host_error(sc);
1742 CPSW_DEBUGF(("MDIO link change interrupt unimplemented"));
1744 CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented"));
1745 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
1750 * Periodic Checks and Watchdog.
/*
 * Once-per-second callout: run the TX watchdog, react to media changes
 * reported by the MII layer, and re-arm itself.
 */
1755 cpsw_tick(void *msc)
1757 struct cpsw_softc *sc = msc;
1759 /* Check for TX timeout */
1760 cpsw_tx_watchdog(sc);
1762 /* Check for media type change */
/* Compare the cached media word against the MII layer's current one. */
1764 if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
1765 printf("%s: media type changed (ifm_media=%x)\n", __func__,
1766 sc->mii->mii_media.ifm_media);
1767 cpsw_ifmedia_upd(sc->ifp);
1770 /* Schedule another timeout one second from now */
1771 callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
/*
 * ifmedia status callback: report the MII layer's current active media
 * and link status to the caller's ifmediareq.
 * NOTE(review): the mii assignment and any mii_pollstat() call are
 * missing from this excerpt.
 */
1775 cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1777 struct cpsw_softc *sc = ifp->if_softc;
1778 struct mii_data *mii;
1786 ifmr->ifm_active = mii->mii_media_active;
1787 ifmr->ifm_status = mii->mii_media_status;
/*
 * ifmedia change callback: if the interface is up, cache the requested
 * media word, push the change to the PHY via mii_mediachg(), and
 * reinitialize the controller — all under the global driver lock.
 */
1793 cpsw_ifmedia_upd(struct ifnet *ifp)
1795 struct cpsw_softc *sc = ifp->if_softc;
1798 if (ifp->if_flags & IFF_UP) {
1799 CPSW_GLOBAL_LOCK(sc);
1800 sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
1801 mii_mediachg(sc->mii);
1802 cpsw_init_locked(sc);
1803 CPSW_GLOBAL_UNLOCK(sc);
/*
 * Watchdog recovery: full controller reset by shutting down and
 * reinitializing the device.  Caller must already hold the lock
 * required by the *_locked routines.
 */
1810 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
1812 cpsw_debugf_head("CPSW watchdog");
1813 if_printf(sc->ifp, "watchdog timeout\n");
1814 cpsw_shutdown_locked(sc);
1815 cpsw_init_locked(sc);
/*
 * TX watchdog, run from cpsw_tick().  Resets the timer whenever the TX
 * queue is idle or has made progress since the last tick; otherwise
 * counts stalled ticks and performs a full controller reset after more
 * than 2 consecutive stalled ticks (~3 seconds).
 */
1819 cpsw_tx_watchdog(struct cpsw_softc *sc)
1821 struct ifnet *ifp = sc->ifp;
1823 CPSW_GLOBAL_LOCK(sc);
1824 if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 ||
1825 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) {
1826 sc->watchdog.timer = 0; /* Nothing to do. */
1827 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
1828 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
1829 } else if (cpsw_tx_dequeue(sc) > 0) {
1830 sc->watchdog.timer = 0; /* We just did something. */
1832 /* There was something to do but it didn't get done. */
1833 ++sc->watchdog.timer;
1834 if (sc->watchdog.timer > 2) {
1835 sc->watchdog.timer = 0;
1837 ++sc->watchdog.resets;
1838 cpsw_tx_watchdog_full_reset(sc);
/* Snapshot the removal counter so next tick can detect progress. */
1841 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
1842 CPSW_GLOBAL_UNLOCK(sc);
1847 * ALE support routines.
/*
 * Read one 68-bit ALE table entry (index 0..1023) into three 32-bit
 * words.  Writing the index to TBLCTL latches the entry into the
 * TBLW0..TBLW2 window registers.
 */
1852 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1854 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1855 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1856 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1857 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
/*
 * Write one ALE table entry: load the three window registers, then
 * commit by writing TBLCTL with bit 31 (the write-enable bit) set
 * alongside the table index.
 */
1861 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1863 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1864 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1865 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1866 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
/*
 * Clear every multicast address entry from the ALE table, leaving the
 * first two entries (our unicast address and broadcast) untouched.
 * Returns CPSW_MAX_ALE_ENTRIES.
 * NOTE(review): the function's return type line and the loop's closing
 * brace are missing from this excerpt.
 */
1870 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1873 uint32_t ale_entry[3];
1875 /* First two entries are link address and broadcast. */
1876 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1877 cpsw_ale_read_entry(sc, i, ale_entry);
/* Entry type field [61:60] == 1 (address) with the mcast bit set:
 * zero all three words to free the slot. */
1878 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1879 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1880 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1881 cpsw_ale_write_entry(sc, i, ale_entry);
1884 return CPSW_MAX_ALE_ENTRIES;
/*
 * Install (or update) a multicast forwarding entry in the ALE for the
 * given MAC, forwarding to the ports in 'portmap' (3-bit mask).  Scans
 * the table for an existing entry with this MAC, falling back to the
 * first free slot.
 * NOTE(review): excerpt is missing lines (free_index assignment,
 * matching_index assignment, the no-free-slot error path, and the index
 * selection before the final write) — the 'i' used in the final write
 * is set in code not visible here.
 */
1888 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
1890 int free_index = -1, matching_index = -1, i;
1891 uint32_t ale_entry[3];
1893 /* Find a matching entry or a free entry. */
1894 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1895 cpsw_ale_read_entry(sc, i, ale_entry);
1897 /* Entry Type[61:60] is 0 for free entry */
1898 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
/* MAC is packed big-endian across the two words: bytes 0-1 in
 * word1[15:0], bytes 2-5 in word0[31:0]. */
1902 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
1903 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
1904 (((ale_entry[0] >>24) & 0xFF) == mac[2]) &&
1905 (((ale_entry[0] >>16) & 0xFF) == mac[3]) &&
1906 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
1907 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
1913 if (matching_index < 0) {
1919 /* Set MAC address */
1920 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1921 ale_entry[1] = mac[0] << 8 | mac[1];
1923 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1924 ale_entry[1] |= 0xd0 << 24;
1926 /* Set portmask [68:66] */
1927 ale_entry[2] = (portmap & 7) << 2;
1929 cpsw_ale_write_entry(sc, i, ale_entry);
1935 cpsw_ale_dump_table(struct cpsw_softc *sc) {
1937 uint32_t ale_entry[3];
1938 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1939 cpsw_ale_read_entry(sc, i, ale_entry);
1940 if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
1941 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
1942 ale_entry[1], ale_entry[2]);
1943 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
1944 (ale_entry[1] >> 8) & 0xFF,
1945 (ale_entry[1] >> 0) & 0xFF,
1946 (ale_entry[0] >>24) & 0xFF,
1947 (ale_entry[0] >>16) & 0xFF,
1948 (ale_entry[0] >> 8) & 0xFF,
1949 (ale_entry[0] >> 0) & 0xFF);
1950 printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast ");
1951 printf("type: %u ", (ale_entry[1] >> 28) & 3);
1952 printf("port: %u ", (ale_entry[2] >> 2) & 7);
/*
 * (Re)program the ALE with the interface's addresses: our unicast MAC
 * at table index 0 routed to host port 0, the per-port source-address
 * registers for ports 1 and 2, broadcast at index 1 forwarded to all
 * ports, and one multicast entry per AF_LINK group address.  If 'purge'
 * is set (its test is in a line missing from this excerpt), all
 * existing multicast entries are removed first — needed because
 * SIOCDELMULTI doesn't say which address was removed.
 * NOTE(review): excerpt is missing lines (uint8_t *mac declaration,
 * if_addr_rlock(), the purge test, continue in the AF_LINK filter).
 */
1960 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1963 uint32_t ale_entry[3];
1964 struct ifnet *ifp = sc->ifp;
1965 struct ifmultiaddr *ifma;
1968 /* Route incoming packets for our MAC address to Port 0 (host). */
1969 /* For simplicity, keep this entry at table index 0 in the ALE. */
1971 mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr);
1972 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1973 ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */
1974 ale_entry[2] = 0; /* port = 0 */
1975 cpsw_ale_write_entry(sc, 0, ale_entry);
1977 /* Set outgoing MAC Address for Ports 1 and 2. */
1978 for (i = 1; i < 3; ++i) {
1979 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i),
1980 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1981 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i),
1982 mac[5] << 8 | mac[4]);
1984 if_addr_runlock(ifp);
1986 /* Keep the broadcast address at table entry 1. */
1987 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
1988 ale_entry[1] = 0xd000ffff; /* FW (3 << 30), Addr entry (1 << 24), upper 16 bits of Mac */
1989 ale_entry[2] = 0x0000001c; /* Forward to all ports */
1990 cpsw_ale_write_entry(sc, 1, ale_entry);
1992 /* SIOCDELMULTI doesn't specify the particular address
1993 being removed, so we have to remove all and rebuild. */
1995 cpsw_ale_remove_all_mc_entries(sc);
1997 /* Set other multicast addrs desired. */
1998 if_maddr_rlock(ifp);
1999 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2000 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Forward each group address to all three ports (mask 7). */
2002 cpsw_ale_mc_entry_set(sc, 7,
2003 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
2005 if_maddr_runlock(ifp);
2012 * Statistics and Sysctls.
/*
 * Debug helper: log each hardware statistics counter as
 * shadow + current = total, without modifying anything.
 * NOTE(review): declarations of i and r are missing from this excerpt.
 */
2018 cpsw_stats_dump(struct cpsw_softc *sc)
2023 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2024 r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2025 cpsw_stat_sysctls[i].reg);
2026 CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
2027 (intmax_t)sc->shadow_stats[i], r,
2028 (intmax_t)sc->shadow_stats[i] + r));
/*
 * Fold the hardware statistics counters into the driver's 64-bit shadow
 * copies.  Writing the value just read back to a stats register clears
 * those counts in hardware (write-to-decrement), so each count is
 * accumulated exactly once.
 */
2034 cpsw_stats_collect(struct cpsw_softc *sc)
2039 CPSW_DEBUGF(("Controller shadow statistics updated."));
2041 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2042 r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
2043 cpsw_stat_sysctls[i].reg);
2044 sc->shadow_stats[i] += r;
2045 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r);
/*
 * Sysctl handler for one statistics counter: report the shadow value
 * plus whatever has accumulated in hardware since the last collection.
 * oid_number indexes cpsw_stat_sysctls[] / shadow_stats[].
 */
2050 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
2052 struct cpsw_softc *sc;
2053 struct cpsw_stat *stat;
2056 sc = (struct cpsw_softc *)arg1;
2057 stat = &cpsw_stat_sysctls[oidp->oid_number];
2058 result = sc->shadow_stats[oidp->oid_number];
2059 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
2060 return (sysctl_handle_64(oidp, &result, 0, req));
/*
 * Sysctl handler: seconds since driver attach, computed from the
 * bintime recorded at attach.
 * NOTE(review): declarations of t/result, the bintime(&t) call, and
 * the result assignment are missing from this excerpt.
 */
2064 cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
2066 struct cpsw_softc *sc;
2070 sc = (struct cpsw_softc *)arg1;
2072 bintime_sub(&t, &sc->attach_uptime);
2074 return (sysctl_handle_int(oidp, &result, 0, req));
/*
 * Sysctl handler: seconds since the last driver init while running;
 * the not-running branch (presumably reporting 0) is in lines missing
 * from this excerpt.
 */
2078 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
2080 struct cpsw_softc *sc;
2084 sc = (struct cpsw_softc *)arg1;
2085 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
2087 bintime_sub(&t, &sc->init_uptime);
2091 return (sysctl_handle_int(oidp, &result, 0, req));
/*
 * Attach read-only sysctl leaves under 'node' exposing one cpsw_queue's
 * counters (buffer totals, high-water marks, enqueue/dequeue totals,
 * longest descriptor chain).  Used for both the TX and RX queues.
 */
2095 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue)
2097 struct sysctl_oid_list *parent;
2099 parent = SYSCTL_CHILDREN(node);
2100 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
2101 CTLFLAG_RD, &queue->queue_slots, 0,
2102 "Total buffers currently assigned to this queue");
2103 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
2104 CTLFLAG_RD, &queue->active_queue_len, 0,
2105 "Buffers currently registered with hardware controller");
2106 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
2107 CTLFLAG_RD, &queue->max_active_queue_len, 0,
2108 "Max value of activeBuffers since last driver reset");
2109 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
2110 CTLFLAG_RD, &queue->avail_queue_len, 0,
2111 "Buffers allocated to this queue but not currently "
2112 "registered with hardware controller");
2113 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
2114 CTLFLAG_RD, &queue->max_avail_queue_len, 0,
2115 "Max value of availBuffers since last driver reset");
2116 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
2117 CTLFLAG_RD, &queue->queue_adds, 0,
2118 "Total buffers added to queue");
2119 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
2120 CTLFLAG_RD, &queue->queue_removes, 0,
2121 "Total buffers removed from queue");
2122 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
2123 CTLFLAG_RD, &queue->longest_chain, 0,
2124 "Max buffers used for a single packet");
/*
 * Attach the watchdog's read-only sysctl leaf (total reset count)
 * under 'node'.
 */
2128 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc)
2130 struct sysctl_oid_list *parent;
2132 parent = SYSCTL_CHILDREN(node);
2133 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
2134 CTLFLAG_RD, &sc->watchdog.resets, 0,
2135 "Total number of watchdog resets");
2139 cpsw_add_sysctls(struct cpsw_softc *sc)
2141 struct sysctl_ctx_list *ctx;
2142 struct sysctl_oid *stats_node, *queue_node, *node;
2143 struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
2146 ctx = device_get_sysctl_ctx(sc->dev);
2147 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2149 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
2150 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
2151 "Time since driver attach");
2153 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime",
2154 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU",
2155 "Seconds since driver init");
2157 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
2158 CTLFLAG_RD, NULL, "CPSW Statistics");
2159 stats_parent = SYSCTL_CHILDREN(stats_node);
2160 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
2161 SYSCTL_ADD_PROC(ctx, stats_parent, i,
2162 cpsw_stat_sysctls[i].oid,
2163 CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
2164 cpsw_stats_sysctl, "IU",
2165 cpsw_stat_sysctls[i].oid);
2168 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
2169 CTLFLAG_RD, NULL, "CPSW Queue Statistics");
2170 queue_parent = SYSCTL_CHILDREN(queue_node);
2172 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
2173 CTLFLAG_RD, NULL, "TX Queue Statistics");
2174 cpsw_add_queue_sysctls(ctx, node, &sc->tx);
2176 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
2177 CTLFLAG_RD, NULL, "RX Queue Statistics");
2178 cpsw_add_queue_sysctls(ctx, node, &sc->rx);
2180 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
2181 CTLFLAG_RD, NULL, "Watchdog Statistics");
2182 cpsw_add_watchdog_sysctls(ctx, node, sc);