 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/endian.h>
35 #include <sys/malloc.h>
38 #include <sys/taskqueue.h>
41 #include <machine/bus.h>
43 #include <dev/ata/ata-all.h>
46 static void ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
47 static void ata_dmaalloc(device_t dev);
48 static void ata_dmafree(device_t dev);
49 static void ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
50 static int ata_dmaload(struct ata_request *request, void *addr, int *nsegs);
51 static int ata_dmaunload(struct ata_request *request);
54 static MALLOC_DEFINE(M_ATADMA, "ata_dma", "ATA driver DMA");
57 #define MAXTABSZ PAGE_SIZE
58 #define MAXWSPCSZ PAGE_SIZE*2
60 struct ata_dc_cb_args {
66 ata_dmainit(device_t dev)
68 struct ata_channel *ch = device_get_softc(dev);
69 struct ata_dc_cb_args dcba;
71 ch->dma.alloc = ata_dmaalloc;
72 ch->dma.free = ata_dmafree;
73 ch->dma.setprd = ata_dmasetprd;
74 ch->dma.load = ata_dmaload;
75 ch->dma.unload = ata_dmaunload;
76 ch->dma.alignment = 2;
77 ch->dma.boundary = 65536;
78 ch->dma.segsize = 65536;
79 ch->dma.max_iosize = 128 * DEV_BSIZE;
80 ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT;
81 ch->dma.dma_slots = 6;
83 if (bus_dma_tag_create(bus_get_dma_tag(dev), ch->dma.alignment, 0,
84 ch->dma.max_address, BUS_SPACE_MAXADDR,
85 NULL, NULL, ch->dma.max_iosize,
86 ATA_DMA_ENTRIES, ch->dma.segsize,
87 0, NULL, NULL, &ch->dma.dmatag))
90 if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, 64 * 1024,
91 ch->dma.max_address, BUS_SPACE_MAXADDR,
92 NULL, NULL, MAXWSPCSZ, 1, MAXWSPCSZ,
93 0, NULL, NULL, &ch->dma.work_tag))
96 if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 0,
100 if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
101 MAXWSPCSZ, ata_dmasetupc_cb, &dcba, 0) ||
103 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
106 ch->dma.work_bus = dcba.maddr;
110 device_printf(dev, "WARNING - DMA initialization failed, disabling DMA\n");
115 ata_dmafini(device_t dev)
117 struct ata_channel *ch = device_get_softc(dev);
119 if (ch->dma.work_bus) {
120 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
121 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
122 ch->dma.work_bus = 0;
123 ch->dma.work_map = NULL;
126 if (ch->dma.work_tag) {
127 bus_dma_tag_destroy(ch->dma.work_tag);
128 ch->dma.work_tag = NULL;
130 if (ch->dma.dmatag) {
131 bus_dma_tag_destroy(ch->dma.dmatag);
132 ch->dma.dmatag = NULL;
137 ata_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
139 struct ata_dc_cb_args *dcba = (struct ata_dc_cb_args *)xsc;
141 if (!(dcba->error = error))
142 dcba->maddr = segs[0].ds_addr;
146 ata_dmaalloc(device_t dev)
148 struct ata_channel *ch = device_get_softc(dev);
149 struct ata_dc_cb_args dcba;
152 /* alloc and setup needed dma slots */
153 bzero(ch->dma.slot, sizeof(struct ata_dmaslot) * ATA_DMA_SLOTS);
154 for (i = 0; i < ch->dma.dma_slots; i++) {
155 struct ata_dmaslot *slot = &ch->dma.slot[i];
157 if (bus_dma_tag_create(ch->dma.dmatag, PAGE_SIZE, PAGE_SIZE,
158 ch->dma.max_address, BUS_SPACE_MAXADDR,
159 NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE,
160 0, NULL, NULL, &slot->sg_tag)) {
161 device_printf(ch->dev, "FAILURE - create sg_tag\n");
165 if (bus_dmamem_alloc(slot->sg_tag, (void **)&slot->sg,
167 device_printf(ch->dev, "FAILURE - alloc sg_map\n");
171 if (bus_dmamap_load(slot->sg_tag, slot->sg_map, slot->sg, MAXTABSZ,
172 ata_dmasetupc_cb, &dcba, 0) || dcba.error) {
173 device_printf(ch->dev, "FAILURE - load sg\n");
176 slot->sg_bus = dcba.maddr;
178 if (bus_dma_tag_create(ch->dma.dmatag,
179 ch->dma.alignment, ch->dma.boundary,
180 ch->dma.max_address, BUS_SPACE_MAXADDR,
181 NULL, NULL, ch->dma.max_iosize,
182 ATA_DMA_ENTRIES, ch->dma.segsize,
183 BUS_DMA_ALLOCNOW, NULL, NULL, &slot->data_tag)) {
184 device_printf(ch->dev, "FAILURE - create data_tag\n");
188 if (bus_dmamap_create(slot->data_tag, 0, &slot->data_map)) {
189 device_printf(ch->dev, "FAILURE - create data_map\n");
197 device_printf(dev, "WARNING - DMA allocation failed, disabling DMA\n");
202 ata_dmafree(device_t dev)
204 struct ata_channel *ch = device_get_softc(dev);
207 /* free all dma slots */
208 for (i = 0; i < ATA_DMA_SLOTS; i++) {
209 struct ata_dmaslot *slot = &ch->dma.slot[i];
212 bus_dmamap_unload(slot->sg_tag, slot->sg_map);
216 bus_dmamem_free(slot->sg_tag, slot->sg, slot->sg_map);
217 bus_dmamap_destroy(slot->sg_tag, slot->sg_map);
221 if (slot->data_map) {
222 bus_dmamap_destroy(slot->data_tag, slot->data_map);
223 slot->data_map = NULL;
226 bus_dma_tag_destroy(slot->sg_tag);
229 if (slot->data_tag) {
230 bus_dma_tag_destroy(slot->data_tag);
231 slot->data_tag = NULL;
237 ata_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
239 struct ata_dmasetprd_args *args = xsc;
240 struct ata_dma_prdentry *prd = args->dmatab;
243 if ((args->error = error))
246 for (i = 0; i < nsegs; i++) {
247 prd[i].addr = htole32(segs[i].ds_addr);
248 prd[i].count = htole32(segs[i].ds_len);
250 prd[i - 1].count |= htole32(ATA_DMA_EOT);
251 KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
256 ata_dmaload(struct ata_request *request, void *addr, int *entries)
258 struct ata_channel *ch = device_get_softc(request->parent);
259 struct ata_device *atadev = device_get_softc(request->dev);
260 struct ata_dmasetprd_args dspa;
263 ATA_DEBUG_RQ(request, "dmaload");
266 device_printf(request->dev,
267 "FAILURE - already active DMA on this device\n");
270 if (!request->bytecount) {
271 device_printf(request->dev,
272 "FAILURE - zero length DMA transfer attempted\n");
275 if (((uintptr_t)(request->data) & (ch->dma.alignment - 1)) ||
276 (request->bytecount & (ch->dma.alignment - 1))) {
277 device_printf(request->dev,
278 "FAILURE - non aligned DMA transfer attempted\n");
281 if (request->bytecount > ch->dma.max_iosize) {
282 device_printf(request->dev,
283 "FAILURE - oversized DMA transfer attempt %d > %d\n",
284 request->bytecount, ch->dma.max_iosize);
288 /* set our slot, unit for simplicity XXX SOS NCQ will change that */
289 request->dma = &ch->dma.slot[atadev->unit];
294 dspa.dmatab = request->dma->sg;
296 if ((error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map,
297 request->data, request->bytecount,
298 ch->dma.setprd, &dspa, BUS_DMA_NOWAIT)) ||
299 (error = dspa.error)) {
300 device_printf(request->dev, "FAILURE - load data\n");
305 *entries = dspa.nsegs;
307 bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
308 BUS_DMASYNC_PREWRITE);
309 bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
310 (request->flags & ATA_R_READ) ?
311 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
315 ata_dmaunload(request);
320 ata_dmaunload(struct ata_request *request)
322 ATA_DEBUG_RQ(request, "dmaunload");
325 bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
326 BUS_DMASYNC_POSTWRITE);
327 bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
328 (request->flags & ATA_R_READ) ?
329 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
331 bus_dmamap_unload(request->dma->data_tag, request->dma->data_map);