2 * Copyright 2008 by Nathan Whitehorn. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
20 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
22 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("* $FreeBSD$");
32 * Common routines for the DMA engine on both the Apple Kauai and MacIO
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/module.h>
42 #include <sys/malloc.h>
44 #include <sys/taskqueue.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
51 #include <dev/ata/ata-all.h>
52 #include <dev/ata/ata-pci.h>
55 #include "ata_dbdma.h"
/*
 * Argument bundle handed from ata_dbdma_load() to the bus_dma callback
 * ata_dbdma_setprd() via bus_dmamap_load().
 */
struct ata_dbdma_dmaload_args {
	/* Channel whose DBDMA command list is being filled. */
	struct ata_dbdma_channel *sc;
	/* Non-zero for a host-to-device (write) transfer. */
	int write;
	/* Out-parameter: number of S/G segments the callback consumed. */
	int nsegs;
};
65 ata_dbdma_setprd(void *xarg, bus_dma_segment_t *segs, int nsegs, int error)
67 struct ata_dbdma_dmaload_args *arg = xarg;
68 struct ata_dbdma_channel *sc = arg->sc;
69 int branch_type, command;
73 mtx_lock(&sc->dbdma_mtx);
75 prev_stop = sc->next_dma_slot-1;
79 for (i = 0; i < nsegs; i++) {
80 /* Loop back to the beginning if this is our last slot */
81 if (sc->next_dma_slot == 0xff)
82 branch_type = DBDMA_ALWAYS;
84 branch_type = DBDMA_NEVER;
87 command = (i + 1 < nsegs) ? DBDMA_OUTPUT_MORE :
90 command = (i + 1 < nsegs) ? DBDMA_INPUT_MORE :
94 dbdma_insert_command(sc->dbdma, sc->next_dma_slot++,
95 command, 0, segs[i].ds_addr, segs[i].ds_len,
96 DBDMA_NEVER, branch_type, DBDMA_NEVER, 0);
98 if (branch_type == DBDMA_ALWAYS)
99 sc->next_dma_slot = 0;
102 /* We have a corner case where the STOP command is the last slot,
103 * but you can't branch in STOP commands. So add a NOP branch here
104 * and the STOP in slot 0. */
106 if (sc->next_dma_slot == 0xff) {
107 dbdma_insert_branch(sc->dbdma, sc->next_dma_slot, 0);
108 sc->next_dma_slot = 0;
112 dbdma_insert_command(sc->dbdma, sc->next_dma_slot++,
113 DBDMA_NOP, 0, 0, 0, DBDMA_ALWAYS, DBDMA_NEVER, DBDMA_NEVER, 0);
115 dbdma_insert_stop(sc->dbdma, sc->next_dma_slot++);
116 dbdma_insert_nop(sc->dbdma, prev_stop);
118 dbdma_sync_commands(sc->dbdma, BUS_DMASYNC_PREWRITE);
120 mtx_unlock(&sc->dbdma_mtx);
126 ata_dbdma_status(device_t dev)
128 struct ata_dbdma_channel *sc = device_get_softc(dev);
129 struct ata_channel *ch = device_get_softc(dev);
131 if (sc->sc_ch.dma.flags & ATA_DMA_ACTIVE) {
132 return (!(dbdma_get_chan_status(sc->dbdma) &
133 DBDMA_STATUS_ACTIVE));
136 if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
138 if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
145 ata_dbdma_start(struct ata_request *request)
147 struct ata_dbdma_channel *sc = device_get_softc(request->parent);
149 sc->sc_ch.dma.flags |= ATA_DMA_ACTIVE;
150 dbdma_wake(sc->dbdma);
155 ata_dbdma_reset(device_t dev)
157 struct ata_dbdma_channel *sc = device_get_softc(dev);
159 mtx_lock(&sc->dbdma_mtx);
161 dbdma_stop(sc->dbdma);
162 dbdma_insert_stop(sc->dbdma, 0);
164 dbdma_set_current_cmd(sc->dbdma, 0);
166 sc->sc_ch.dma.flags &= ~ATA_DMA_ACTIVE;
168 mtx_unlock(&sc->dbdma_mtx);
172 ata_dbdma_stop(struct ata_request *request)
174 struct ata_dbdma_channel *sc = device_get_softc(request->parent);
178 status = dbdma_get_chan_status(sc->dbdma);
180 dbdma_pause(sc->dbdma);
181 sc->sc_ch.dma.flags &= ~ATA_DMA_ACTIVE;
183 if (status & DBDMA_STATUS_DEAD) {
184 device_printf(request->parent,"DBDMA dead, resetting "
186 ata_dbdma_reset(request->parent);
190 if (!(status & DBDMA_STATUS_RUN)) {
191 device_printf(request->parent,"DBDMA confused, stop called "
192 "when channel is not running!\n");
196 if (status & DBDMA_STATUS_ACTIVE) {
197 device_printf(request->parent,"DBDMA channel stopped "
205 ata_dbdma_load(struct ata_request *request, void *addr, int *entries)
207 struct ata_channel *ch = device_get_softc(request->parent);
208 struct ata_dbdma_dmaload_args args;
212 args.sc = device_get_softc(request->parent);
213 args.write = !(request->flags & ATA_R_READ);
215 if (!request->bytecount) {
216 device_printf(request->dev,
217 "FAILURE - zero length DMA transfer attempted\n");
220 if (((uintptr_t)(request->data) & (ch->dma.alignment - 1)) ||
221 (request->bytecount & (ch->dma.alignment - 1))) {
222 device_printf(request->dev,
223 "FAILURE - non aligned DMA transfer attempted\n");
226 if (request->bytecount > ch->dma.max_iosize) {
227 device_printf(request->dev,
228 "FAILURE - oversized DMA transfer attempt %d > %d\n",
229 request->bytecount, ch->dma.max_iosize);
233 request->dma = &ch->dma.slot[0];
235 if ((error = bus_dmamap_load(request->dma->data_tag,
236 request->dma->data_map, request->data, request->bytecount,
237 &ata_dbdma_setprd, &args, BUS_DMA_NOWAIT))) {
238 device_printf(request->dev, "FAILURE - load data\n");
243 *entries = args.nsegs;
245 bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
246 BUS_DMASYNC_PREWRITE);
247 bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
248 (request->flags & ATA_R_READ) ?
249 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
254 ch->dma.unload(request);
259 ata_dbdma_dmainit(device_t dev)
261 struct ata_dbdma_channel *sc = device_get_softc(dev);
264 error = dbdma_allocate_channel(sc->dbdma_regs, sc->dbdma_offset,
265 bus_get_dma_tag(dev), 256, &sc->dbdma);
267 dbdma_set_wait_selector(sc->dbdma,1 << 7, 1 << 7);
269 dbdma_insert_stop(sc->dbdma,0);
272 sc->sc_ch.dma.start = ata_dbdma_start;
273 sc->sc_ch.dma.stop = ata_dbdma_stop;
274 sc->sc_ch.dma.load = ata_dbdma_load;
275 sc->sc_ch.dma.reset = ata_dbdma_reset;
278 * DBDMA's field for transfer size is 16 bits. This will overflow
279 * if we try to do a 64K transfer, so stop short of 64K.
281 sc->sc_ch.dma.segsize = 126 * DEV_BSIZE;
284 sc->sc_ch.hw.status = ata_dbdma_status;
286 mtx_init(&sc->dbdma_mtx, "ATA DBDMA", NULL, MTX_DEF);