/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include <arm/ti/ti_scm.h>

#include "miibus_if.h"
/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static void cpsw_init_slots(struct cpsw_softc *);
static int cpsw_attach(device_t);
static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
static int cpsw_detach(device_t);

/* Device Init/shutdown. */
static void cpsw_init(void *);
static void cpsw_init_locked(void *);
static int cpsw_shutdown(device_t);
static void cpsw_shutdown_locked(struct cpsw_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpsw_miibus_readreg(device_t, int phy, int reg);
static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpsw_start(struct ifnet *);
static void cpsw_tx_enqueue(struct cpsw_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpsw_tick(void *);
static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpsw_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(struct cpsw_softc *);

static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
static void cpsw_ale_dump_table(struct cpsw_softc *);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);
/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define CPSW_TXFRAGS 8
/*
 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
 * as separate Ethernet ports.  To properly support this, we should
 * break this into two separate devices: a CPSW_SS device that owns
 * the interrupts and actually talks to the CPSW hardware, and a
 * separate CPSW Ethernet child device for each Ethernet port.  The RX
 * interrupt, for example, would be part of CPSW_SS; it would receive
 * a packet, note the input port, and then dispatch it to the child
 * device's interface queue.  Similarly for transmit.
 *
 * It's not clear to me whether the device tree should be restructured
 * with a cpsw_ss node and two child nodes.  That would allow specifying
 * MAC addresses for each port, for example, but might be overkill.
 *
 * Unfortunately, I don't have hardware right now that supports two
 * Ethernet ports via CPSW.
 */
static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),

	DEVMETHOD(miibus_readreg,	cpsw_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpsw_miibus_writereg),

	DEVMETHOD_END
};

static driver_t cpsw_driver = {
	"cpsw",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);
static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
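
/*
 * A note on the resource layout above: after bus_alloc_resources(),
 * sc->res[0] is the register window and sc->res[1]..sc->res[4] are the
 * four CPSW interrupt lines.  As wired up in cpsw_attach() below those
 * are the RX threshold, RX, TX, and misc interrupts respectively; the
 * TX line is currently unused, TX completions being reaped from
 * cpsw_start() and the watchdog tick instead.
 */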
/* Number of entries here must match size of stats
 * array in struct cpsw_softc. */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};
/*
 * Basic debug support.
 */

#define IF_DEBUG(sc)		if (sc->cpsw_if_flags & IFF_DEBUG)

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}

#include <machine/stdarg.h>
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define CPSW_DEBUGF(a) do {						\
	IF_DEBUG(sc) {							\
		cpsw_debugf_head(__func__);				\
		cpsw_debugf a;						\
	}								\
} while (0)
/*
 * Locking macros
 */
#define CPSW_TX_LOCK(sc) do {						\
		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->tx.lock);				\
} while (0)

#define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define CPSW_RX_LOCK(sc) do {						\
		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_RX_UNLOCK(sc)	mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define CPSW_GLOBAL_LOCK(sc) do {					\
		if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) !=		\
		    (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) {		\
			panic("cpsw deadlock possibility detection!");	\
		}							\
		mtx_lock(&(sc)->tx.lock);				\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define CPSW_GLOBAL_UNLOCK(sc) do {					\
		CPSW_RX_UNLOCK(sc);					\
		CPSW_TX_UNLOCK(sc);					\
} while (0)

#define CPSW_GLOBAL_LOCK_ASSERT(sc) do {				\
		CPSW_TX_LOCK_ASSERT(sc);				\
		CPSW_RX_LOCK_ASSERT(sc);				\
} while (0)
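
/*
 * A note on the lock discipline encoded above: a thread holding
 * exactly one of the two queue locks must never try to take the other
 * via CPSW_GLOBAL_LOCK(), which is why that macro panics in the
 * mixed-ownership case.  When both are taken, the order is always TX
 * before RX, and CPSW_GLOBAL_UNLOCK() releases in the reverse order.
 */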
#define	cpsw_read_4(sc, reg)		bus_read_4(sc->res[0], reg)
#define	cpsw_write_4(sc, reg, val)	bus_write_4(sc->res[0], reg, val)

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)				\
	BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)			\
	bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)			\
	bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)		\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_read_bd_flags(sc, slot)			\
	bus_read_2(sc->res[0], slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)			\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)					\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)				\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)			\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
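
/*
 * For reference, the 16-byte CPDMA buffer descriptor that the macros
 * above read and write lives in the shared CPPI RAM, not in host
 * memory.  As struct cpsw_cpdma_bd (if_cpswvar.h) lays it out:
 *
 *	word 0 (+0):  next    physical address of next BD, 0 = end
 *	word 1 (+4):  bufptr  physical address of the data buffer
 *	word 2 (+8):  bufoff (low half) / buflen (high half)
 *	word 3 (+12): pktlen (low half) / flags (high half, hence the
 *	              bus_read_2() at byte offset 14 above)
 *
 * The flags include SOP/EOP/Owner/EOQ, tested via CPDMA_BD_* below.
 */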
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data) + 14, " ");
	}
}

#define CPSW_DUMP_SLOT(cs, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)

static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > 4)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)
/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}
/*
 * bind an interrupt, add the relevant info to sc->interrupts
 */
static int
cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description)
{
	void **pcookie;
	int error;

	sc->interrupts[sc->interrupt_count].res = res;
	sc->interrupts[sc->interrupt_count].description = description;
	pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie;

	error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, *handler, sc, pcookie);
	if (error)
		device_printf(sc->dev,
		    "could not setup %s\n", description);
	else
		++sc->interrupt_count;
	return (error);
}

/*
 * teardown everything in sc->interrupts.
 */
static void
cpsw_detach_interrupts(struct cpsw_softc *sc)
{
	int error;
	int i;

	for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) {
		if (!sc->interrupts[i].ih_cookie)
			continue;
		error = bus_teardown_intr(sc->dev,
		    sc->interrupts[i].res, sc->interrupts[i].ih_cookie);
		if (error)
			device_printf(sc->dev, "could not release %s\n",
			    sc->interrupts[i].description);
		sc->interrupts[i].ih_cookie = NULL;
	}
}
static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			if_printf(sc->ifp, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}
static int
cpsw_attach(device_t dev)
{
	bus_dma_segment_t segs[1];
	struct cpsw_softc *sc = device_get_softc(dev);
	struct mii_softc *miisc;
	struct ifnet *ifp;
	void *phy_sc;
	int error, phy, nsegs;
	uint32_t reg;

	CPSW_DEBUGF((""));

	getbinuptime(&sc->attach_uptime);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Get phy address from fdt */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
		device_printf(dev, "failed to get PHY address from FDT\n");
		return (ENXIO);
	}
	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);
	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, CPSW_TXFRAGS,	/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}

	/* Allocate the null mbuf and pre-sync it. */
	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size);
	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->null_mbuf_paddr = segs[0].ds_addr;
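
	/*
	 * The null mbuf set up above is a single zero-filled cluster
	 * kept permanently DMA-mapped.  cpsw_tx_enqueue() points an
	 * extra EOP descriptor at it whenever an outgoing frame is
	 * shorter than ETHER_MIN_LEN, so short frames are padded
	 * without copying or touching the original mbuf chain.
	 */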
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpsw_init;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now: 128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
	/* Get high part of MAC address from control module (mac_id0_hi) */
	/* TODO: Get MAC ID1 as well as MAC ID0. */
	ti_scm_reg_read_4(0x634, &reg);
	sc->mac_addr[0] = reg & 0xFF;
	sc->mac_addr[1] = (reg >>  8) & 0xFF;
	sc->mac_addr[2] = (reg >> 16) & 0xFF;
	sc->mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id0_lo) */
	ti_scm_reg_read_4(0x630, &reg);
	sc->mac_addr[4] = reg & 0xFF;
	sc->mac_addr[5] = (reg >> 8) & 0xFF;

	ether_ifattach(ifp, sc->mac_addr);
	callout_init(&sc->watchdog.callout, 0);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);
	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		cpsw_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));

	/* Note: We don't use sc->res[3] (TX interrupt) */
	if (cpsw_attach_interrupt(sc, sc->res[1],
	    cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[2],
	    cpsw_intr_rx, "CPSW RX interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[4],
	    cpsw_intr_misc, "CPSW misc interrupt")) {
		cpsw_detach(dev);
		return (ENXIO);
	}

	return (0);
}
static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error;

	if (slot->dmamap) {
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}
static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	int error, i;

	CPSW_DEBUGF((""));

	/* Stop controller and free TX queue */
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_GLOBAL_LOCK(sc);
		cpsw_shutdown_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		callout_drain(&sc->watchdog.callout);
	}

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	/* Stop and release all interrupts */
	cpsw_detach_interrupts(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) {
		cpsw_free_slot(sc, &sc->_slots[i]);
	}

	/* Free DMA tag */
	error = bus_dma_tag_destroy(sc->mbuf_dtag);
	KASSERT(error == 0, ("Unable to destroy DMA tag"));

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	return (0);
}
static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}
static void
cpsw_init(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_init_locked(arg);
	CPSW_GLOBAL_UNLOCK(sc);
}
static void
cpsw_init_locked(void *arg)
{
	struct ifnet *ifp;
	struct cpsw_softc *sc = arg;
	struct cpsw_slot *slot;
	uint32_t i;

	CPSW_DEBUGF((""));
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	/* Reset the controller. */
	cpsw_reset(sc);

	/* Enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4);

	/* Init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15),
		   GMII_EN(5), FULLDUPLEX(1) */
		/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
		/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: all ports set to forwarding(3), initialize addrs */
	for (i = 0; i < 3; i++)
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3);
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Experiment:  Turn off flow control */
	/* This seems to fix the watchdog resets that have plagued
	   earlier versions of this driver; I'm not yet sure if there
	   are negative effects yet. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Offset RX buffers by 2 bytes; together with the 14-byte
	   Ethernet header this puts the IP header on a 4-byte boundary. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);

	/* Activate network interface */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}
static void
cpsw_rx_teardown_locked(struct cpsw_softc *sc)
{
	struct mbuf *received, *next;
	int i = 0;

	CPSW_DEBUGF(("starting RX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	for (;;) {
		received = cpsw_rx_dequeue(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		while (received != NULL) {
			next = received->m_nextpkt;
			received->m_nextpkt = NULL;
			(*sc->ifp->if_input)(sc->ifp, received);
			received = next;
		}
		CPSW_GLOBAL_LOCK(sc);
		if (!sc->rx.running) {
			CPSW_DEBUGF(("finished RX teardown (%d retries)", i));
			return;
		}
		if (++i > 10) {
			if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(10);
	}
}
static void
cpsw_tx_teardown_locked(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_DEBUGF(("starting TX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(10);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running)
		if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n");
	CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)",
	    i, sc->tx.active_queue_len));
}
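
/*
 * A note on the teardown handshake used above: writing the channel
 * number (0) to the RX/TX_TEARDOWN register asks the CPDMA engine to
 * shut that channel down.  The engine then flags a descriptor with
 * TDownCmplt, and the dequeue paths acknowledge it by writing the
 * magic value 0xfffffffc to the channel's completion pointer (CP)
 * register and clearing the queue's "running" flag, which is what the
 * wait loops above poll for.
 */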
static void
cpsw_shutdown_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker. */
	callout_stop(&sc->watchdog.callout);

	/* Tear down the RX/TX queues. */
	cpsw_rx_teardown_locked(sc);
	cpsw_tx_teardown_locked(sc);

	/* Capture stats before we reset controller. */
	cpsw_stats_collect(sc);

	cpsw_reset(sc);
}
static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF(("UNIMPLEMENTED"));
	return (0);
}
/*
 *
 * IOCTL
 *
 */

static void
cpsw_set_promisc(struct cpsw_softc *sc, int set)
{
	/*
	 * Enabling promiscuous mode requires two bits of work: First,
	 * ALE_BYPASS needs to be enabled.  That disables the ALE
	 * forwarding logic and causes every packet to be sent to the
	 * host port.  That makes us promiscuous wrt received packets.
	 *
	 * With ALE forwarding disabled, the transmitter needs to set
	 * an explicit output port on every packet to route it to the
	 * correct egress.  This should be doable for systems such as
	 * BeagleBone where only one egress port is actually wired to
	 * a PHY.  If you have both egress ports wired up, life gets a
	 * lot more interesting.
	 *
	 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't
	 * seem to set explicit egress ports.  Does that mean they
	 * are always promiscuous?
	 */
	if (set) {
		printf("Promiscuous mode unimplemented\n");
	}
}

static void
cpsw_set_allmulti(struct cpsw_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}
static int
cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;
	uint32_t changed;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->cpsw_if_flags;
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
				cpsw_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
			cpsw_shutdown_locked(sc);
		}
		sc->cpsw_if_flags = ifp->if_flags;
		CPSW_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpsw_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpsw_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
/*
 *
 * MIIBUS
 *
 */

static int
cpsw_miibus_ready(struct cpsw_softc *sc)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, MDIOUSERACCESS0);
		if ((r & 1 << 31) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}
	return (0);
}

static int
cpsw_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd, r;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	/* Wait for command to complete. */
	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc, MDIOUSERACCESS0);
	if ((r & 1 << 29) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
	    | (value & 0xFFFF);
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	/* Wait for command to complete. */
	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	if ((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0)
		device_printf(dev, "Failed to write to PHY.\n");

	return (0);
}
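
/*
 * For reference, the MDIOUSERACCESS0 layout implied by the two
 * functions above: bit 31 is GO (set to start a transaction, polled
 * by cpsw_miibus_ready() until the engine clears it), bit 30 selects
 * WRITE, bit 29 is the ACK bit reported by the PHY, bits 25:21 hold
 * the register address, bits 20:16 the PHY address, and bits 15:0
 * carry the data in either direction.
 */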
/*
 *
 * Transmit/Receive Packets.
 *
 */

static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct mbuf *received, *next;

	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		(*sc->ifp->if_input)(sc->ifp, received);
		received = next;
	}
}
static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *mb_head, *mb_tail;
	int removed = 0;

	mb_head = mb_tail = NULL;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_hdr.mh_data += bd.bufoff;
		slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	sc->rx.queue_removes += removed;
	sc->rx.active_queue_len -= removed;
	sc->rx.avail_queue_len += removed;
	if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
		sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;

	return (mb_head);
}
static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct ifnet *ifp = sc->ifp;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				if_printf(sc->ifp, "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			if_printf(ifp,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor*/
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		sc->rx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	if (added == 0)
		return;

	CPSW_DEBUGF(("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
		}
	}
	sc->rx.queue_adds += added;
	sc->rx.active_queue_len += added;
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
	}
}
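
/*
 * A note on the EOQ dance above (the TX path below does the same):
 * when the CPDMA engine finishes a descriptor whose "next" pointer is
 * zero, it sets the EOQ flag in that descriptor and stops fetching.
 * Simply patching the old tail's next pointer is therefore not enough
 * once the engine has already stopped; the driver detects that race
 * by re-reading the old tail's flags and, if EOQ is set, restarts the
 * channel by writing the first new descriptor's address to the head
 * descriptor pointer (HDP) register.
 */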
static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) {
		cpsw_tx_enqueue(sc);
		cpsw_tx_dequeue(sc);
	}
	CPSW_TX_UNLOCK(sc);
}
static void
cpsw_tx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 &&
		    nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			if (padlen > 0) /* May as well add padding. */
				m_append(slot->mbuf, padlen,
				    sc->null_mbuf->m_hdr.mh_data);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				if_printf(sc->ifp,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			if_printf(sc->ifp,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes",
		    nsegs, padlen));

		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		/* Start by setting up the first buffer */
		bd.next = 0;
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc, slot, &bd);
			if (prev_slot != NULL)
				cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;
			slot = STAILQ_FIRST(&sc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}

		/* Save the final buffer. */
		if (padlen <= 0)
			bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
		sc->tx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
		++added;

		if (padlen > 0) {
			slot = STAILQ_FIRST(&sc->tx.avail);
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;

			/* Setup buffer of null pad bytes (definitely EOP) */
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			bd.next = 0;
			bd.bufptr = sc->null_mbuf_paddr;
			bd.bufoff = 0;
			bd.buflen = padlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER;
			cpsw_cpdma_write_bd(sc, slot, &bd);
		}

		if (nsegs > sc->tx.longest_chain)
			sc->tx.longest_chain = nsegs;

		// TODO: Should we defer the BPF tap until
		// after all packets are queued?
		BPF_MTAP(sc->ifp, m0);
	}

	/* Attach the list of new buffers to the hardware TX queue. */
	last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->tx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
		}
	}
	sc->tx.queue_adds += added;
	sc->tx.active_queue_len += added;
	if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) {
		sc->tx.max_active_queue_len = sc->tx.active_queue_len;
	}
}
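
/*
 * To summarize the descriptor-chaining scheme implemented above: a
 * packet occupies one descriptor per DMA segment; only the first
 * carries SOP and the overall pktlen, only the last carries EOP, and
 * a frame shorter than ETHER_MIN_LEN borrows one extra EOP descriptor
 * pointing at the shared null mbuf to supply the pad bytes.  Every
 * descriptor is handed over with OWNER set, which is what
 * cpsw_tx_dequeue() polls to detect completion.
 */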
static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	uint32_t flags, removed = 0;

	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
		CPSW_DEBUGF(("TX teardown of an empty queue"));
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
		sc->tx.running = 0;
		return (0);
	}

	/* Pull completed buffers off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this packet. */

		CPSW_DEBUGF(("TX removing completed packet"));
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		/* TearDown complete is only marked on the SOP for the packet. */
		if (flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("TX teardown in progress"));
			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			// TODO: Increment a count of dropped TX packets
			sc->tx.running = 0;
			break;
		}
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
	}
	return (removed);
}
/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}
static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	// XXX do something useful here??
	panic("CPSW HOST ERROR INTERRUPT");

	// Suppress this interrupt in the future.
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	// The watchdog will probably reset the controller
	// in a little while.  It will probably fail again.
}
static void
cpsw_intr_misc(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

	if (stat & 16)
		CPSW_DEBUGF(("Time sync event interrupt unimplemented"));
	if (stat & 8)
		cpsw_stats_collect(sc);
	if (stat & 4)
		cpsw_intr_misc_host_error(sc);
	if (stat & 2)
		CPSW_DEBUGF(("MDIO link change interrupt unimplemented"));
	if (stat & 1)
		CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented"));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}
/*
 *
 * Periodic Checks and Watchdog.
 *
 */

static void
cpsw_tick(void *msc)
{
	struct cpsw_softc *sc = msc;

	/* Check for TX timeout */
	cpsw_tx_watchdog(sc);

	/* Check for media type change */
	mii_tick(sc->mii);
	if (sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
		    sc->mii->mii_media.ifm_media);
		cpsw_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
}
static void
cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	CPSW_DEBUGF((""));
	CPSW_TX_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	CPSW_TX_UNLOCK(sc);
}
static int
cpsw_ifmedia_upd(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_DEBUGF((""));
	if (ifp->if_flags & IFF_UP) {
		CPSW_GLOBAL_LOCK(sc);
		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		cpsw_init_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
	}
	return (0);
}
static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	cpsw_debugf_head("CPSW watchdog");
	if_printf(sc->ifp, "watchdog timeout\n");
	cpsw_shutdown_locked(sc);
	cpsw_init_locked(sc);
}

static void
cpsw_tx_watchdog(struct cpsw_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	CPSW_GLOBAL_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0; /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 2) {
			sc->watchdog.timer = 0;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_GLOBAL_UNLOCK(sc);
}
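
/*
 * Since cpsw_tick() runs once per second, the timer > 2 test above
 * means the full shutdown/init reset fires only after roughly three
 * consecutive seconds in which the TX queue was non-empty but made no
 * forward progress.
 */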
/*
 *
 * ALE support routines.
 *
 */

static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}

static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
}
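
/*
 * A quick sketch of the ALE table format as these routines use it:
 * each entry is three 32-bit words staged through the TBLW0..TBLW2
 * windows.  Writing TBLCTL with bit 31 set commits the staged words
 * to the entry index in bits 9:0; writing the index alone loads the
 * entry for reading.  Within an entry, word1 bits 29:28 hold the
 * entry type (0 = free, 1 = address), the MAC address is split across
 * word1[15:0] and word0[31:0], word1 bit 8 (the I/G bit of the MAC's
 * first octet) distinguishes multicast, and word2 bits 4:2 carry the
 * port mask for multicast entries.
 */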
static int
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	/* First two entries are link address and broadcast. */
	for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
		    ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
			cpsw_ale_write_entry(sc, i, ale_entry);
		}
	}
	return CPSW_MAX_ALE_ENTRIES;
}
static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3];

	/* Find a matching entry or a free entry. */
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
			free_index = i;
		}

		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (ENOMEM);
		i = free_index;
	}

	/* Set MAC address */
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = mac[0] << 8 | mac[1];

	/* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3) */
	ale_entry[1] |= 0xd0 << 24;

	/* Set portmask [68:66] */
	ale_entry[2] = (portmap & 7) << 2;

	cpsw_ale_write_entry(sc, i, ale_entry);

	return 0;
}
static void
cpsw_ale_dump_table(struct cpsw_softc *sc) {
	int i;
	uint32_t ale_entry[3];
	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ale_entry[0] || ale_entry[1] || ale_entry[2]) {
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0],
			    ale_entry[1], ale_entry[2]);
			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
			    (ale_entry[1] >> 8) & 0xFF,
			    (ale_entry[1] >> 0) & 0xFF,
			    (ale_entry[0] >> 24) & 0xFF,
			    (ale_entry[0] >> 16) & 0xFF,
			    (ale_entry[0] >> 8) & 0xFF,
			    (ale_entry[0] >> 0) & 0xFF);
			printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast ");
			printf("type: %u ", (ale_entry[1] >> 28) & 3);
			printf("port: %u ", (ale_entry[2] >> 2) & 7);
			printf("\n");
		}
	}
	printf("\n");
}
static int
cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int i;

	/* Route incoming packets for our MAC address to Port 0 (host). */
	/* For simplicity, keep this entry at table index 0 in the ALE. */
	if_addr_rlock(ifp);
	mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc, 0, ale_entry);

	/* Set outgoing MAC Address for Ports 1 and 2. */
	for (i = 1; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i),
		    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i),
		    mac[5] << 8 | mac[4]);
	}
	if_addr_runlock(ifp);

	/* Keep the broadcast address at table entry 1. */
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	ale_entry[1] = 0xd000ffff; /* FW (3 << 30), Addr entry (1 << 24), upper 16 bits of Mac */
	ale_entry[2] = 0x0000001c; /* Forward to all ports */
	cpsw_ale_write_entry(sc, 1, ale_entry);

	/* SIOCDELMULTI doesn't specify the particular address
	   being removed, so we have to remove all and rebuild. */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc);

	/* Set other multicast addrs desired. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		cpsw_ale_mc_entry_set(sc, 7,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);

	return (0);
}
/*
 *
 * Statistics and Sysctls.
 *
 */

static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (intmax_t)sc->shadow_stats[i], r,
		    (intmax_t)sc->shadow_stats[i] + r));
	}
}
static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r);
	}
}
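
/*
 * The write-back of r above is not a stray: the CPSW statistics
 * registers are write-to-decrement, so writing back the value just
 * read zeroes each hardware counter while the running total lives on
 * in shadow_stats[].  cpsw_stats_sysctl() then reports the shadow
 * value plus whatever has accumulated in hardware since the last
 * collection.
 */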
static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}
static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}
static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}
static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}
static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}
static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU",
	    "Seconds since driver init");

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
		    cpsw_stats_sysctl, "IU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}