/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/clk/clk.h>
#include <dev/hwreset/hwreset.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/dwc/if_dwcvar.h>
#include <dev/dwc/dwc1000_reg.h>
#include <dev/dwc/dwc1000_dma.h>

#define	WATCHDOG_TIMEOUT_SECS	5
#define	DMA_RESET_TIMEOUT	100
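
/*
 * The controller supports two descriptor layouts: a "normal" format and an
 * extended (enhanced) format.  The layout in use is detected at runtime from
 * HW_FEATURE and recorded in sc->dma_ext_desc; the NTDESC/ETDESC and
 * NRDESC/ERDESC definitions below cover the fields whose positions differ
 * between the two.  In both layouts the OWN bit hands a descriptor to the
 * DMA engine, which clears it once the descriptor has been processed.
 */
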
/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U << 9)	/* Late Collision */
#define	TDESC0_EC		(1U << 8)	/* Excessive Collision */
#define	TDESC0_VF		(1U << 7)	/* VLAN Frame */
#define	TDESC0_CC_MASK		0xf
#define	TDESC0_CC_SHIFT		3		/* Collision Count */
#define	TDESC0_ED		(1U << 2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U << 1)	/* Underflow Error */
#define	TDESC0_DB		(1U << 0)	/* Deferred Bit */

/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR		(1U << 22)
#define	ETDESC0_CIC_SEG		(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR		(1U << 27)
#define	NTDESC1_CIC_SEG		(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */

/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK		0x3fff
#define	RDESC0_FL_SHIFT		16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U << 9)	/* First Descriptor */
#define	RDESC0_LS		(1U << 8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U << 7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U << 6)	/* Late Collision */
#define	RDESC0_FT		(1U << 5)	/* Frame Type */
#define	RDESC0_RWT		(1U << 4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U << 3)	/* Receive Error */
#define	RDESC0_DBE		(1U << 2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U << 1)	/* CRC Error */
#define	RDESC0_PCE		(1U << 0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U << 0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
 */
#define	DWC_DESC_RING_ALIGN	2048
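
/*
 * Ring index helpers: descriptor and buffer-map indices advance around their
 * rings modulo the ring size.
 */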
static inline uint32_t
next_txidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % TX_DESC_COUNT);
}

static inline uint32_t
next_rxidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % RX_DESC_COUNT);
}

static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
txdesc_clear(struct dwc_softc *sc, int idx)
{

	sc->tx_desccount--;
	sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
	sc->txdesc_ring[idx].desc0 = 0;
	sc->txdesc_ring[idx].desc1 = 0;
}

static void
txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len, uint32_t flags, bool first, bool last)
{
	uint32_t desc0, desc1;

	if (!sc->dma_ext_desc) {
		desc0 = 0;
		desc1 = NTDESC1_TCH | len | flags;
		if (first)
			desc1 |= NTDESC1_FS;
		if (last)
			desc1 |= NTDESC1_LS | NTDESC1_IC;
	} else {
		desc0 = ETDESC0_TCH | flags;
		if (first)
			desc0 |= ETDESC0_FS;
		if (last)
			desc0 |= ETDESC0_LS | ETDESC0_IC;
		desc1 = len;
	}

	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
	++sc->tx_desccount;
	/* Hand the descriptor to the DMA engine only after it is complete. */
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
}

inline static uint32_t
rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	if (!sc->dma_ext_desc)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;

	return (nidx);
}

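/*
 * Map an outgoing mbuf chain into TX descriptors.  A chain with too many
 * segments is defragmented and loaded again, and any requested checksum
 * offload is translated into the descriptor format in use; only the first
 * descriptor of a packet carries the offload flags.
 */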
static int
dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
	struct mbuf *m;
	uint32_t flags;
	int error, nsegs, i, last;

	flags = 0;
	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    *mp, segs, &nsegs, 0);
	if (error == EFBIG) {
		/*
		 * The map may be partially mapped from the first call.
		 * Make sure to reset it.
		 */
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
			return (ENOMEM);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
		    *mp, segs, &nsegs, 0);
	}
	if (error != 0)
		return (ENOMEM);

	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		return (ENOMEM);
	}

	m = *mp;

	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_FULL;
			else
				flags = ETDESC0_CIC_FULL;
		} else {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_HDR;
			else
				flags = ETDESC0_CIC_HDR;
		}
	}

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;

	for (i = 0; i < nsegs; i++) {
		txdesc_setup(sc, sc->tx_desc_head,
		    segs[i].ds_addr, segs[i].ds_len,
		    (i == 0) ? flags : 0, /* only first desc needs flags */
		    (i == 0),
		    (i == nsegs - 1));
		last = sc->tx_desc_head;
		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	}

	sc->txbuf_map[idx].last_desc_idx = last;

	return (0);
}

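/*
 * Attach an mbuf cluster to an RX slot: align the payload, DMA-map the
 * buffer, and publish the descriptor back to the controller.
 */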
static int
dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	rxdesc_setup(sc, idx, seg.ds_addr);

	return (0);
}

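/* Allocate a single mbuf cluster sized to its full external storage. */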
static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
	if_t ifp;
	struct mbuf *m, *m0;
	int len;
	uint32_t rdesc0;

	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc->desc0;

	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
	    (RDESC0_FS | RDESC0_LS)) {
		/*
		 * Something went very wrong: the whole packet should be
		 * received in a single descriptor.  Report the problem.
		 */
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
		    __func__, rdesc0);
		return (NULL);
	}

	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * Length is invalid, recycle the old mbuf.
		 * Probably an impossible case.
		 */
		return (NULL);
	}

	/* Allocate new buffer */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* no new mbuf available, recycle old */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}

	/* Do dmasync for newly received packet */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* Received packet is valid, process it */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove trailing FCS */
	m_adj(m, -ETHER_CRC_LEN);

	DWC_UNLOCK(sc);
	if_input(ifp, m);
	DWC_LOCK(sc);

	return (m0);
}

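/*
 * Reclaim completed TX buffers.  A buffer map is finished only when the DMA
 * engine has released (cleared OWN in) every descriptor belonging to it; the
 * descriptors are then recycled, and the watchdog is quieted once the ring
 * drains completely.
 */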
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		while (sc->tx_desc_tail != last_idx) {
			txdesc_clear(sc, sc->tx_desc_tail);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}

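/*
 * Pull packets off the interface send queue and hand them to the DMA engine.
 * When descriptors or buffer maps run short, IFF_DRV_OACTIVE is set so the
 * stack stops queueing; a write to TRANSMIT_POLL_DEMAND then kicks the
 * transmitter and the watchdog is armed.
 */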
void
dma1000_txstart(struct dwc_softc *sc)
{
	struct mbuf *m;
	int enqueued;

	enqueued = 0;

	for (;;) {
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(sc->ifp);
		if (m == NULL)
			break;
		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			if_sendq_prepend(sc->ifp, m);
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(sc->ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}

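/*
 * Harvest received frames.  Descriptors are processed until one still owned
 * by the DMA engine is found; each serviced slot is refilled immediately (or
 * its old mbuf recycled) so that the RX ring never contains holes.
 */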
void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	struct dwc_hwdesc *desc;
	int error, idx;

	DWC_ASSERT_LOCKED(sc);

	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			/* Reuse the old mbuf and give the slot back. */
			desc->desc0 = RDESC0_OWN;
		} else {
			/* We cannot create hole in RX ring */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed: error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}

/*
 * Start the DMA controller
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA: set store-and-forward and threshold modes. */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start the transmit and receive DMA engines. */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Stop the DMA controller
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

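/*
 * Reset the DMA engine: set the software-reset bit in BUS_MODE and poll
 * until the hardware clears it, giving up after DMA_RESET_TIMEOUT polls.
 */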
int
dma1000_reset(struct dwc_softc *sc)
{
	uint32_t reg;
	int i;

	reg = READ4(sc, BUS_MODE);
	reg |= (BUS_MODE_SWR);
	WRITE4(sc, BUS_MODE, reg);

	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
			break;
		DELAY(10);
	}
	if (i >= DMA_RESET_TIMEOUT) {
		return (ENXIO);
	}

	return (0);
}

/*
 * Create the bus_dma resources
 */
int
dma1000_init(struct dwc_softc *sc)
{
	struct mbuf *m;
	uint32_t reg;
	int error;
	int nidx;
	int idx;

	reg = BUS_MODE_USP;
	if (!sc->nopblx8)
		reg |= BUS_MODE_EIGHTXPBL;
	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
	if (sc->fixed_burst)
		reg |= BUS_MODE_FIXEDBURST;
	if (sc->mixed_burst)
		reg |= BUS_MODE_MIXEDBURST;
	if (sc->aal)
		reg |= BUS_MODE_AAL;

	WRITE4(sc, BUS_MODE, reg);

	reg = READ4(sc, HW_FEATURE);
	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
		sc->dma_ext_desc = true;

	/*
	 * DMA must be stopped while changing descriptor list addresses.
	 */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		nidx = next_txidx(sc, idx);
		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
		    (nidx * sizeof(struct dwc_hwdesc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
	    TX_MAP_MAX_SEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++)
		txdesc_clear(sc, idx);

	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buf DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "Could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}

	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);

	return (0);

out:
	return (error);
}

/*
 * Free the bus_dma resources
 */
void
dma1000_free(struct dwc_softc *sc)
{
	bus_dmamap_t map;
	int idx;

	/* Clean up RX DMA resources and free mbufs. */
	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		if ((map = sc->rxbuf_map[idx].map) != NULL) {
			bus_dmamap_unload(sc->rxbuf_tag, map);
			bus_dmamap_destroy(sc->rxbuf_tag, map);
			m_freem(sc->rxbuf_map[idx].mbuf);
		}
	}
	if (sc->rxbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_tag);
	if (sc->rxdesc_map != NULL) {
		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
		    sc->rxdesc_map);
	}
	if (sc->rxdesc_tag != NULL)
		bus_dma_tag_destroy(sc->rxdesc_tag);

	/* Clean up TX DMA resources. */
	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		if ((map = sc->txbuf_map[idx].map) != NULL) {
			/* TX maps are already unloaded. */
			bus_dmamap_destroy(sc->txbuf_tag, map);
		}
	}
	if (sc->txbuf_tag != NULL)
		bus_dma_tag_destroy(sc->txbuf_tag);
	if (sc->txdesc_map != NULL) {
		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
		    sc->txdesc_map);
	}
	if (sc->txdesc_tag != NULL)
		bus_dma_tag_destroy(sc->txdesc_tag);
}

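/*
 * DMA interrupt handler: dispatch RX/TX completion work for the "normal"
 * interrupt summary, note fatal bus errors reported under the abnormal
 * summary, and acknowledge everything by writing the handled bits back to
 * DMA_STATUS.
 */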
int
dma1000_intr(struct dwc_softc *sc)
{
	uint32_t reg;
	int rv;

	DWC_ASSERT_LOCKED(sc);

	rv = 0;
	reg = READ4(sc, DMA_STATUS);
	if (reg & DMA_STATUS_NIS) {
		if (reg & DMA_STATUS_RI)
			dma1000_rxfinish_locked(sc);

		if (reg & DMA_STATUS_TI) {
			dma1000_txfinish_locked(sc);
			dma1000_txstart(sc);
		}
	}

	if (reg & DMA_STATUS_AIS) {
		if (reg & DMA_STATUS_FBI) {
			/* Fatal bus error */
			rv = EIO;
		}
	}

	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);

	return (rv);
}