2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
37 #include <sys/endian.h>
38 #include <sys/malloc.h>
40 #include <sys/mutex.h>
42 #include <sys/taskqueue.h>
44 #include <machine/stdarg.h>
45 #include <machine/resource.h>
46 #include <machine/bus.h>
48 #include <dev/pci/pcivar.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/ata/ata-pci.h>
/*
 * Local prototypes.  The ata_marvell_* entry points service the legacy
 * (PATA-style) side of these controllers; the ata_marvell_edma_* entry
 * points drive the SATA EDMA engines on the 50XX/60XX/6042/7042 parts.
 */
static int ata_marvell_chipinit(device_t dev);
static int ata_marvell_ch_attach(device_t dev);
static int ata_marvell_setmode(device_t dev, int target, int mode);
static int ata_marvell_dummy_chipinit(device_t dev);
static int ata_marvell_edma_ch_attach(device_t dev);
static int ata_marvell_edma_ch_detach(device_t dev);
static int ata_marvell_edma_status(device_t dev);
static int ata_marvell_edma_begin_transaction(struct ata_request *request);
static int ata_marvell_edma_end_transaction(struct ata_request *request);
static void ata_marvell_edma_reset(device_t dev);
static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ata_marvell_edma_dmainit(device_t dev);
77 * Marvell chipset support functions
/*
 * Per-channel register window helpers.  Each channel owns a 0x2000-byte
 * EDMA window and a 0x100-byte host window; channels 4-7 live in the
 * second host-controller block at 0x30000, channels 0-3 at 0x20000.
 * Fully parenthesized so the macros expand safely in any expression
 * (the originals left the whole expansion unparenthesized).
 */
#define ATA_MV_HOST_BASE(ch) \
    ((((ch)->unit & 3) * 0x0100) + ((ch)->unit > 3 ? 0x30000 : 0x20000))
#define ATA_MV_EDMA_BASE(ch) \
    ((((ch)->unit & 3) * 0x2000) + ((ch)->unit > 3 ? 0x30000 : 0x20000))
84 struct ata_marvell_response {
91 struct ata_marvell_dma_prdentry {
99 ata_marvell_probe(device_t dev)
101 struct ata_pci_controller *ctlr = device_get_softc(dev);
102 static const struct ata_chip_id const ids[] =
103 {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
104 { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
105 { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
106 { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
107 { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
108 { ATA_M88SX6042, 0, 4, MV_6042, ATA_SA300, "88SX6042" },
109 { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
110 { ATA_M88SX7042, 0, 4, MV_7042, ATA_SA300, "88SX7042" },
111 { ATA_M88SE6101, 0, 0, MV_61XX, ATA_UDMA6, "88SE6101" },
112 { ATA_M88SE6102, 0, 0, MV_61XX, ATA_UDMA6, "88SE6102" },
113 { ATA_M88SE6111, 0, 1, MV_61XX, ATA_UDMA6, "88SE6111" },
114 { ATA_M88SE6121, 0, 2, MV_61XX, ATA_UDMA6, "88SE6121" },
115 { ATA_M88SE6141, 0, 4, MV_61XX, ATA_UDMA6, "88SE6141" },
116 { ATA_M88SE6145, 0, 4, MV_61XX, ATA_UDMA6, "88SE6145" },
117 { 0x91a41b4b, 0, 0, MV_91XX, ATA_UDMA6, "88SE912x" },
118 { 0, 0, 0, 0, 0, 0}};
120 if (pci_get_vendor(dev) != ATA_MARVELL_ID &&
121 pci_get_vendor(dev) != ATA_MARVELL2_ID)
124 if (!(ctlr->chip = ata_match_chip(dev, ids)))
129 switch (ctlr->chip->cfg2) {
134 ctlr->chipinit = ata_marvell_edma_chipinit;
137 ctlr->chipinit = ata_marvell_chipinit;
140 ctlr->chipinit = ata_marvell_dummy_chipinit;
143 return (BUS_PROBE_DEFAULT);
147 ata_marvell_chipinit(device_t dev)
149 struct ata_pci_controller *ctlr = device_get_softc(dev);
152 if (ata_setup_interrupt(dev, ata_generic_intr))
154 /* Create AHCI subdevice if AHCI part present. */
155 if (ctlr->chip->cfg1) {
156 child = device_add_child(dev, NULL, -1);
158 device_set_ivars(child, (void *)(intptr_t)-1);
159 bus_generic_attach(dev);
162 ctlr->ch_attach = ata_marvell_ch_attach;
163 ctlr->ch_detach = ata_pci_ch_detach;
164 ctlr->reset = ata_generic_reset;
165 ctlr->setmode = ata_marvell_setmode;
171 ata_marvell_ch_attach(device_t dev)
173 struct ata_channel *ch = device_get_softc(dev);
176 error = ata_pci_ch_attach(dev);
177 /* dont use 32 bit PIO transfers */
178 ch->flags |= ATA_USE_16BIT;
179 ch->flags |= ATA_CHECKS_CABLE;
184 ata_marvell_setmode(device_t dev, int target, int mode)
186 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
187 struct ata_channel *ch = device_get_softc(dev);
189 mode = min(mode, ctlr->chip->max_dma);
190 /* Check for 80pin cable present. */
191 if (ata_dma_check_80pin && mode > ATA_UDMA2 &&
192 ATA_IDX_INB(ch, ATA_BMDEVSPEC_0) & 0x01) {
193 ata_print_cable(dev, "controller");
196 /* Nothing to do to setup mode, the controller snoop SET_FEATURE cmd. */
201 ata_marvell_dummy_chipinit(device_t dev)
203 struct ata_pci_controller *ctlr = device_get_softc(dev);
210 ata_marvell_edma_chipinit(device_t dev)
212 struct ata_pci_controller *ctlr = device_get_softc(dev);
214 if (ata_setup_interrupt(dev, ata_generic_intr))
217 ctlr->r_type1 = SYS_RES_MEMORY;
218 ctlr->r_rid1 = PCIR_BAR(0);
219 if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
220 &ctlr->r_rid1, RF_ACTIVE)))
223 /* mask all host controller interrupts */
224 ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);
226 /* mask all PCI interrupts */
227 ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);
229 ctlr->ch_attach = ata_marvell_edma_ch_attach;
230 ctlr->ch_detach = ata_marvell_edma_ch_detach;
231 ctlr->reset = ata_marvell_edma_reset;
232 ctlr->setmode = ata_sata_setmode;
233 ctlr->getrev = ata_sata_getrev;
234 ctlr->channels = ctlr->chip->cfg1;
236 /* clear host controller interrupts */
237 ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
238 if (ctlr->chip->cfg1 > 4)
239 ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);
241 /* clear PCI interrupts */
242 ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);
244 /* unmask PCI interrupts we want */
245 ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);
247 /* unmask host controller interrupts we want */
248 ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
249 /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));
255 ata_marvell_edma_ch_attach(device_t dev)
257 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
258 struct ata_channel *ch = device_get_softc(dev);
262 ata_marvell_edma_dmainit(dev);
263 work = ch->dma.work_bus;
264 /* clear work area */
265 bzero(ch->dma.work, 1024+256);
266 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
267 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
269 /* set legacy ATA resources */
270 for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
271 ch->r_io[i].res = ctlr->r_res1;
272 ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
274 ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
275 ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
276 ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
277 ata_default_registers(dev);
279 /* set SATA resources */
280 switch (ctlr->chip->cfg2) {
282 ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
283 ch->r_io[ATA_SSTATUS].offset = 0x00100 + ATA_MV_HOST_BASE(ch);
284 ch->r_io[ATA_SERROR].res = ctlr->r_res1;
285 ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
286 ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
287 ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
292 ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
293 ch->r_io[ATA_SSTATUS].offset = 0x02300 + ATA_MV_EDMA_BASE(ch);
294 ch->r_io[ATA_SERROR].res = ctlr->r_res1;
295 ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
296 ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
297 ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
298 ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
299 ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
303 ch->flags |= ATA_NO_SLAVE;
304 ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? */
305 ch->flags |= ATA_SATA;
307 ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
308 ch->hw.end_transaction = ata_marvell_edma_end_transaction;
309 ch->hw.status = ata_marvell_edma_status;
311 /* disable the EDMA machinery */
312 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
313 DELAY(100000); /* SOS should poll for disabled */
315 /* set configuration to non-queued 128b read transfers stop on error */
316 ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));
318 /* request queue base high */
319 ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);
321 /* request queue in ptr */
322 ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);
324 /* request queue out ptr */
325 ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);
327 /* response queue base high */
329 ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);
331 /* response queue in ptr */
332 ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);
334 /* response queue out ptr */
335 ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);
337 /* clear SATA error register */
338 ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
340 /* clear any outstanding error interrupts */
341 ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
343 /* unmask all error interrupts */
344 ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);
346 /* enable EDMA machinery */
347 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
352 ata_marvell_edma_ch_detach(device_t dev)
354 struct ata_channel *ch = device_get_softc(dev);
356 if (ch->dma.work_tag && ch->dma.work_map)
357 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
358 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
364 ata_marvell_edma_status(device_t dev)
366 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
367 struct ata_channel *ch = device_get_softc(dev);
368 u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
369 int shift = (ch->unit << 1) + (ch->unit > 3);
371 if (cause & (1 << shift)) {
373 /* clear interrupt(s) */
374 ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
376 /* do we have any PHY events ? */
377 ata_sata_phy_check_events(dev, -1);
380 /* do we have any device action ? */
381 return (cause & (2 << shift));
384 /* must be called with ATA channel locked and state_mtx held */
386 ata_marvell_edma_begin_transaction(struct ata_request *request)
388 struct ata_pci_controller *ctlr=device_get_softc(device_get_parent(request->parent));
389 struct ata_channel *ch = device_get_softc(request->parent);
395 /* only DMA R/W goes through the EMDA machine */
396 if (request->u.ata.command != ATA_READ_DMA &&
397 request->u.ata.command != ATA_WRITE_DMA &&
398 request->u.ata.command != ATA_READ_DMA48 &&
399 request->u.ata.command != ATA_WRITE_DMA48) {
401 /* disable the EDMA machinery */
402 if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
403 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
404 return ata_begin_transaction(request);
407 /* check sanity, setup SG list and DMA engine */
408 if ((error = ch->dma.load(request, NULL, NULL))) {
409 device_printf(request->parent, "setting up DMA failed\n");
410 request->result = error;
411 return ATA_OP_FINISHED;
414 /* get next free request queue slot */
415 req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
416 slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
417 bytep = (u_int8_t *)(ch->dma.work);
418 bytep += (slot << 5);
420 /* fill in this request */
421 le32enc(bytep + 0 * sizeof(u_int32_t),
422 request->dma->sg_bus & 0xffffffff);
423 le32enc(bytep + 1 * sizeof(u_int32_t),
424 (u_int64_t)request->dma->sg_bus >> 32);
425 if (ctlr->chip->cfg2 != MV_6042 && ctlr->chip->cfg2 != MV_7042) {
426 le16enc(bytep + 4 * sizeof(u_int16_t),
427 (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag << 1));
430 bytep[i++] = (request->u.ata.count >> 8) & 0xff;
431 bytep[i++] = 0x10 | ATA_COUNT;
432 bytep[i++] = request->u.ata.count & 0xff;
433 bytep[i++] = 0x10 | ATA_COUNT;
435 bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
436 bytep[i++] = 0x10 | ATA_SECTOR;
437 bytep[i++] = request->u.ata.lba & 0xff;
438 bytep[i++] = 0x10 | ATA_SECTOR;
440 bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
441 bytep[i++] = 0x10 | ATA_CYL_LSB;
442 bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
443 bytep[i++] = 0x10 | ATA_CYL_LSB;
445 bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
446 bytep[i++] = 0x10 | ATA_CYL_MSB;
447 bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
448 bytep[i++] = 0x10 | ATA_CYL_MSB;
450 bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
451 bytep[i++] = 0x10 | ATA_DRIVE;
453 bytep[i++] = request->u.ata.command;
454 bytep[i++] = 0x90 | ATA_COMMAND;
456 le32enc(bytep + 2 * sizeof(u_int32_t),
457 (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag << 1));
462 bytep[i++] = request->u.ata.command;
463 bytep[i++] = request->u.ata.feature & 0xff;
465 bytep[i++] = request->u.ata.lba & 0xff;
466 bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
467 bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
468 bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0x0f);
470 bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
471 bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
472 bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
473 bytep[i++] = (request->u.ata.feature >> 8) & 0xff;
475 bytep[i++] = request->u.ata.count & 0xff;
476 bytep[i++] = (request->u.ata.count >> 8) & 0xff;
481 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
482 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
484 /* enable EDMA machinery if needed */
485 if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
486 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
487 while (!(ATA_INL(ctlr->r_res1,
488 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
492 /* tell EDMA it has a new request */
493 slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
494 req_in &= 0xfffffc00;
495 req_in += (slot << 5);
496 ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);
498 return ATA_OP_CONTINUES;
501 /* must be called with ATA channel locked and state_mtx held */
503 ata_marvell_edma_end_transaction(struct ata_request *request)
505 struct ata_pci_controller *ctlr=device_get_softc(device_get_parent(request->parent));
506 struct ata_channel *ch = device_get_softc(request->parent);
507 int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
508 u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
512 if ((icr & (0x0001 << (ch->unit & 3)))) {
513 struct ata_marvell_response *response;
514 u_int32_t rsp_in, rsp_out;
518 callout_stop(&request->callout);
520 /* get response ptr's */
521 rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
522 rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
523 slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
524 rsp_out &= 0xffffff00;
525 rsp_out += (slot << 3);
526 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
527 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
528 response = (struct ata_marvell_response *)
529 (ch->dma.work + 1024 + (slot << 3));
531 /* record status for this request */
532 request->status = response->dev_status;
536 ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);
538 /* update progress */
539 if (!(request->status & ATA_S_ERROR) &&
540 !(request->flags & ATA_R_TIMEOUT))
541 request->donecount = request->bytecount;
544 ch->dma.unload(request);
546 res = ATA_OP_FINISHED;
549 /* legacy ATA interrupt */
551 res = ata_end_transaction(request);
555 ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
560 ata_marvell_edma_reset(device_t dev)
562 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
563 struct ata_channel *ch = device_get_softc(dev);
565 /* disable the EDMA machinery */
566 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
567 while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
570 /* clear SATA error register */
571 ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
573 /* clear any outstanding error interrupts */
574 ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
576 /* unmask all error interrupts */
577 ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);
579 /* enable channel and test for devices */
580 if (ata_sata_phy_reset(dev, -1, 1))
581 ata_generic_reset(dev);
585 /* enable EDMA machinery */
586 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
590 ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
593 struct ata_dmasetprd_args *args = xsc;
594 struct ata_marvell_dma_prdentry *prd = args->dmatab;
597 if ((args->error = error))
600 for (i = 0; i < nsegs; i++) {
601 prd[i].addrlo = htole32(segs[i].ds_addr);
602 prd[i].count = htole32(segs[i].ds_len);
603 prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
606 prd[i - 1].count |= htole32(ATA_DMA_EOT);
607 KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
612 ata_marvell_edma_dmainit(device_t dev)
614 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
615 struct ata_channel *ch = device_get_softc(dev);
617 /* note start and stop are not used here */
618 ch->dma.setprd = ata_marvell_edma_dmasetprd;
620 /* if 64bit support present adjust max address used */
621 if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
622 ch->dma.max_address = BUS_SPACE_MAXADDR;
624 /* chip does not reliably do 64K DMA transfers */
625 if (ctlr->chip->cfg2 == MV_50XX || ctlr->chip->cfg2 == MV_60XX)
626 ch->dma.max_iosize = 64 * DEV_BSIZE;
630 ATA_DECLARE_DRIVER(ata_marvell);