2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
37 #include <sys/endian.h>
38 #include <sys/malloc.h>
40 #include <sys/mutex.h>
42 #include <sys/taskqueue.h>
44 #include <machine/stdarg.h>
45 #include <machine/resource.h>
46 #include <machine/bus.h>
48 #include <dev/pci/pcivar.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/ata/ata-pci.h>
54 /* local prototypes */
/* Legacy PATA-mode channel support (used for the 88SX61xx family). */
55 static int ata_marvell_pata_chipinit(device_t dev);
56 static int ata_marvell_pata_allocate(device_t dev);
57 static void ata_marvell_pata_setmode(device_t dev, int mode);
/* EDMA (SATA) engine support: channel setup, interrupt status,
 * request/response queue handling, channel reset and DMA setup. */
58 static int ata_marvell_edma_allocate(device_t dev);
59 static int ata_marvell_edma_status(device_t dev);
60 static int ata_marvell_edma_begin_transaction(struct ata_request *request);
61 static int ata_marvell_edma_end_transaction(struct ata_request *request);
62 static void ata_marvell_edma_reset(device_t dev);
63 static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
64 static void ata_marvell_edma_dmainit(device_t dev);
73 * Marvell chipset support functions
/*
 * Per-channel register window bases inside the BAR(0) memory window:
 * channels 0-3 live under host controller 0 at 0x20000, channels 4-7
 * under host controller 1 at 0x30000.  Each channel has a 0x100 byte
 * SATA-interface (host) window and a 0x2000 byte EDMA window.
 *
 * Both the macro argument and the full expansion are parenthesized so
 * the macros compose safely inside larger expressions (the original
 * unparenthesized form silently mis-evaluated under '*', '&', etc.).
 * All existing uses are of the form "constant + MACRO", whose value is
 * unchanged by the added parentheses.
 */
#define ATA_MV_HOST_BASE(ch) \
	((((ch)->unit & 3) * 0x0100) + ((ch)->unit > 3 ? 0x30000 : 0x20000))
#define ATA_MV_EDMA_BASE(ch) \
	((((ch)->unit & 3) * 0x2000) + ((ch)->unit > 3 ? 0x30000 : 0x20000))
/* EDMA response queue entry as written back by the chip into the work
 * area; at least a dev_status field is read from it at completion time
 * (full field list not visible in this excerpt). */
80 struct ata_marvell_response {
/* Marvell physical region descriptor (scatter/gather entry); filled in
 * by ata_marvell_edma_dmasetprd() with addrlo/count/addrhi fields. */
87 struct ata_marvell_dma_prdentry {
/*
 * Probe for a supported Marvell controller by PCI id and select the
 * matching chipinit routine: EDMA-based SATA for the 50XX/60XX parts,
 * legacy PATA handling for the 61XX parts.
 */
95 ata_marvell_probe(device_t dev)
97 struct ata_pci_controller *ctlr = device_get_softc(dev);
/* supported chip table: id, rev, cfg1 = channel count, cfg2 = chip
 * family, max transfer mode, name (field meaning per struct ata_chip_id
 * -- confirm against ata-pci.h) */
98 static struct ata_chip_id ids[] =
99 {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
100 { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
101 { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
102 { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
103 { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
104 { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
105 { ATA_M88SX6101, 0, 1, MV_61XX, ATA_UDMA6, "88SX6101" },
106 { ATA_M88SX6145, 0, 2, MV_61XX, ATA_UDMA6, "88SX6145" },
107 { 0, 0, 0, 0, 0, 0}};
/* bail out unless the vendor is Marvell and the device is in the table */
109 if (pci_get_vendor(dev) != ATA_MARVELL_ID)
112 if (!(ctlr->chip = ata_match_chip(dev, ids)))
/* dispatch on the chip family recorded in cfg2 */
117 switch (ctlr->chip->cfg2) {
/* SATA families drive the EDMA engine ... */
120 ctlr->chipinit = ata_marvell_edma_chipinit;
/* ... the PATA family uses plain PCI ATA handling */
123 ctlr->chipinit = ata_marvell_pata_chipinit;
/*
 * Chip init for the PATA parts: hook up the generic interrupt handler
 * and install the PATA-specific allocate/setmode callbacks.
 */
130 ata_marvell_pata_chipinit(device_t dev)
132 struct ata_pci_controller *ctlr = device_get_softc(dev);
134 if (ata_setup_interrupt(dev, ata_generic_intr))
137 ctlr->allocate = ata_marvell_pata_allocate;
138 ctlr->setmode = ata_marvell_pata_setmode;
/* cfg1 holds the number of channels for this chip */
139 ctlr->channels = ctlr->chip->cfg1;
/*
 * Per-channel resource setup for PATA mode: standard PCI ATA register
 * layout, restricted to 16 bit PIO transfers.
 */
144 ata_marvell_pata_allocate(device_t dev)
146 struct ata_channel *ch = device_get_softc(dev);
148 /* setup the usual register normal pci style */
149 if (ata_pci_allocate(dev))
152 /* dont use 32 bit PIO transfers */
153 ch->flags |= ATA_USE_16BIT;
/*
 * Set the transfer mode on a PATA device: clamp to the chip maximum,
 * apply the 80 pin cable check, then program the device itself with
 * SETFEATURES/SETXFER.  No controller timing registers are touched in
 * this path.
 */
159 ata_marvell_pata_setmode(device_t dev, int mode)
161 device_t gparent = GRANDPARENT(dev);
162 struct ata_pci_controller *ctlr = device_get_softc(gparent);
163 struct ata_device *atadev = device_get_softc(dev);
165 mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);
166 mode = ata_check_80pin(dev, mode);
/* on success the negotiated mode is presumably recorded in atadev on a
 * line not visible in this excerpt -- confirm */
167 if (!ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
/*
 * Chip init for the EDMA capable (SATA) parts: map the BAR(0) register
 * window, quiesce then selectively re-enable interrupts at both the
 * PCI and host-controller level, and install the EDMA channel
 * callbacks.
 */
172 ata_marvell_edma_chipinit(device_t dev)
174 struct ata_pci_controller *ctlr = device_get_softc(dev);
176 if (ata_setup_interrupt(dev, ata_generic_intr))
/* all chip registers live in the memory window behind BAR(0) */
179 ctlr->r_type1 = SYS_RES_MEMORY;
180 ctlr->r_rid1 = PCIR_BAR(0);
181 if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
182 &ctlr->r_rid1, RF_ACTIVE)))
185 /* mask all host controller interrupts */
186 ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);
188 /* mask all PCI interrupts */
189 ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);
191 ctlr->allocate = ata_marvell_edma_allocate;
192 ctlr->reset = ata_marvell_edma_reset;
193 ctlr->dmainit = ata_marvell_edma_dmainit;
/* SATA speed negotiation is handled by the generic SATA code */
194 ctlr->setmode = ata_sata_setmode;
195 ctlr->channels = ctlr->chip->cfg1;
197 /* clear host controller interrupts */
198 ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
/* chips with more than 4 channels have a second host controller */
199 if (ctlr->chip->cfg1 > 4)
200 ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);
202 /* clear PCI interrupts */
203 ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);
205 /* unmask PCI interrupts we want */
206 ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);
208 /* unmask host controller interrupts we want */
209 ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
210 /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));
212 /* enable PCI interrupt */
/* clearing bit 10 (INTx disable) of the PCI command register turns
 * legacy interrupt delivery back on */
213 pci_write_config(dev, PCIR_COMMAND,
214 pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
/*
 * Per-channel setup for EDMA mode: point the shadow ATA and SATA
 * interface registers at this channel's window, program the EDMA
 * request/response queue registers from the channel's DMA work area,
 * install the EDMA transaction hooks and start the EDMA engine.
 */
219 ata_marvell_edma_allocate(device_t dev)
221 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
222 struct ata_channel *ch = device_get_softc(dev);
/* bus address of the per-channel work area holding the request queue
 * (first 1024 bytes) and response queue (next 256 bytes) */
223 u_int64_t work = ch->dma.work_bus;
226 /* clear work area */
227 bzero(ch->dma.work, 1024+256);
229 /* set legacy ATA resources */
230 for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
231 ch->r_io[i].res = ctlr->r_res1;
/* shadow taskfile registers start at EDMA base + 0x2100, 4 bytes apart */
232 ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
234 ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
235 ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
236 ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
237 ata_default_registers(dev);
239 /* set SATA resources */
/* 50XX keeps the SATA interface registers in the host window; newer
 * parts keep them (plus SActive) in the per-channel EDMA window */
240 switch (ctlr->chip->cfg2) {
242 ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
243 ch->r_io[ATA_SSTATUS].offset = 0x00100 + ATA_MV_HOST_BASE(ch);
244 ch->r_io[ATA_SERROR].res = ctlr->r_res1;
245 ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
246 ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
247 ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
250 ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
251 ch->r_io[ATA_SSTATUS].offset = 0x02300 + ATA_MV_EDMA_BASE(ch);
252 ch->r_io[ATA_SERROR].res = ctlr->r_res1;
253 ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
254 ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
255 ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
256 ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
257 ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
261 ch->flags |= ATA_NO_SLAVE;
262 ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? */
264 ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
265 ch->hw.end_transaction = ata_marvell_edma_end_transaction;
266 ch->hw.status = ata_marvell_edma_status;
268 /* disable the EDMA machinery */
269 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
270 DELAY(100000); /* SOS should poll for disabled */
272 /* set configuration to non-queued 128b read transfers stop on error */
273 ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));
275 /* request queue base high */
276 ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);
278 /* request queue in ptr */
279 ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);
281 /* request queue out ptr */
282 ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);
/* NOTE(review): 'work' is presumably advanced by 1024 (to the response
 * queue) on a line not visible in this excerpt -- confirm; the
 * completion path reads responses at work area offset 1024 */
284 /* response queue base high */
286 ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);
288 /* response queue in ptr */
289 ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);
291 /* response queue out ptr */
292 ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);
294 /* clear SATA error register */
295 ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
297 /* clear any outstanding error interrupts */
298 ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
300 /* unmask all error interrupts */
301 ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);
303 /* enable EDMA machinery */
304 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
/*
 * Interrupt status callback: check this channel's bits in the main
 * interrupt cause register, ack the channel's EDMA interrupts, poll
 * for PHY events and report whether device activity needs service.
 */
309 ata_marvell_edma_status(device_t dev)
311 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
312 struct ata_channel *ch = device_get_softc(dev);
/* main interrupt cause register at 0x01d60 */
313 u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
/* two cause bits per channel; channels on the second host controller
 * (unit > 3) are shifted up by one extra bit */
314 int shift = (ch->unit << 1) + (ch->unit > 3);
316 if (cause & (1 << shift)) {
318 /* clear interrupt(s) */
319 ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
321 /* do we have any PHY events ? */
322 ata_sata_phy_check_events(dev);
325 /* do we have any device action ? */
326 return (cause & (2 << shift));
329 /* must be called with ATA channel locked and state_mtx held */
/*
 * Start a transaction.  ATA DMA read/write requests are queued to the
 * EDMA engine as a request (CRQB) built in the channel work area;
 * every other command bypasses EDMA (which must be disabled first)
 * and goes through the generic ata_begin_transaction().
 */
331 ata_marvell_edma_begin_transaction(struct ata_request *request)
333 struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
334 struct ata_channel *ch = device_get_softc(request->parent);
342 /* only DMA R/W goes through the EMDA machine */
343 if (request->u.ata.command != ATA_READ_DMA &&
344 request->u.ata.command != ATA_WRITE_DMA) {
346 /* disable the EDMA machinery */
/* EDMA and legacy taskfile access are mutually exclusive: if the
 * engine is running (bit 0 of reg 0x2028), request a disable first */
347 if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
348 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
349 return ata_begin_transaction(request);
352 /* check for 48 bit access and convert if needed */
353 ata_modify_if_48bit(request);
355 /* check sanity, setup SG list and DMA engine */
356 if ((error = ch->dma.load(request, NULL, NULL))) {
357 device_printf(request->dev, "setting up DMA failed\n");
358 request->result = error;
359 return ATA_OP_FINISHED;
362 /* get next free request queue slot */
363 req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
/* 32 slots of 32 bytes each; the slot index sits above bit 5 of the
 * in-pointer's low 10 bits */
364 slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
365 bytep = (u_int8_t *)(ch->dma.work);
366 bytep += (slot << 5);
367 wordp = (u_int16_t *)bytep;
368 quadp = (u_int32_t *)bytep;
370 /* fill in this request */
/* CRQB starts with the 64 bit PRD table bus address, split lo/hi */
371 quadp[0] = (long)request->dma->sg_bus & 0xffffffff;
372 quadp[1] = (u_int64_t)request->dma->sg_bus >> 32;
/* then a flags word: bit 0 = device-to-host, tag in the next bits */
373 wordp[4] = (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag<<1);
/* the rest is (value, register-id) byte pairs; for 48 bit commands the
 * high-order ("previous") byte of each taskfile register goes first */
376 bytep[i++] = (request->u.ata.count >> 8) & 0xff;
377 bytep[i++] = 0x10 | ATA_COUNT;
378 bytep[i++] = request->u.ata.count & 0xff;
379 bytep[i++] = 0x10 | ATA_COUNT;
381 bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
382 bytep[i++] = 0x10 | ATA_SECTOR;
383 bytep[i++] = request->u.ata.lba & 0xff;
384 bytep[i++] = 0x10 | ATA_SECTOR;
386 bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
387 bytep[i++] = 0x10 | ATA_CYL_LSB;
388 bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
389 bytep[i++] = 0x10 | ATA_CYL_LSB;
391 bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
392 bytep[i++] = 0x10 | ATA_CYL_MSB;
393 bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
394 bytep[i++] = 0x10 | ATA_CYL_MSB;
396 bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
397 bytep[i++] = 0x10 | ATA_DRIVE;
/* the command entry carries an extra flag (0x90 vs 0x10) marking the
 * last register pair in the CRQB */
399 bytep[i++] = request->u.ata.command;
400 bytep[i++] = 0x90 | ATA_COMMAND;
402 /* enable EDMA machinery if needed */
403 if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
404 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
/* busy-wait until the engine reports enabled */
405 while (!(ATA_INL(ctlr->r_res1,
406 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
410 /* tell EDMA it has a new request */
/* advance the in-pointer to the next slot (modulo 32) and write it
 * back; this hands the CRQB to the engine */
411 slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
412 req_in &= 0xfffffc00;
413 req_in += (slot << 5);
414 ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);
416 return ATA_OP_CONTINUES;
419 /* must be called with ATA channel locked and state_mtx held */
/*
 * Finish a transaction.  For EDMA completions pull the device status
 * out of the response queue and advance the out-pointer; legacy
 * (non-EDMA) interrupts are passed on to ata_end_transaction().
 * Finally ack this channel's bits in the host controller interrupt
 * cause register.
 */
421 ata_marvell_edma_end_transaction(struct ata_request *request)
423 struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
424 struct ata_channel *ch = device_get_softc(request->parent);
/* interrupt cause register of the host controller this channel is on */
425 int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
426 u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
/* EDMA done bit for this channel set ? */
430 if ((icr & (0x0001 << (ch->unit & 3)))) {
431 struct ata_marvell_response *response;
432 u_int32_t rsp_in, rsp_out;
/* the request completed; stop its timeout callout */
436 callout_stop(&request->callout);
438 /* get response ptr's */
439 rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
440 rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
/* 32 response slots of 8 bytes each, index above bit 3 */
441 slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
442 rsp_out &= 0xffffff00;
443 rsp_out += (slot << 3);
/* response queue starts 1024 bytes into the channel work area */
444 response = (struct ata_marvell_response *)
445 (ch->dma.work + 1024 + (slot << 3));
447 /* record status for this request */
448 request->status = response->dev_status;
/* acknowledge the response by writing the new out-pointer back */
452 ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);
454 /* update progress */
455 if (!(request->status & ATA_S_ERROR) &&
456 !(request->flags & ATA_R_TIMEOUT))
457 request->donecount = request->bytecount;
/* tear down the scatter/gather mapping */
460 ch->dma.unload(request);
462 res = ATA_OP_FINISHED;
465 /* legacy ATA interrupt */
467 res = ata_end_transaction(request);
/* ack the error and done bits for this channel only (0x0101 covers
 * both bit groups), leaving other channels' bits untouched */
471 ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
/*
 * Reset callback: stop the EDMA engine, clear pending SATA/EDMA error
 * state, reset the PHY (running a generic ATA reset when a device is
 * present) and restart the EDMA engine.
 */
476 ata_marvell_edma_reset(device_t dev)
478 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
479 struct ata_channel *ch = device_get_softc(dev);
481 /* disable the EDMA machinery */
482 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
/* poll until the engine reports disabled (enable bit clears) */
483 while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
486 /* clear SATA error register */
487 ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
489 /* clear any outstanding error interrupts */
490 ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
492 /* unmask all error interrupts */
493 ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);
495 /* enable channel and test for devices */
496 if (ata_sata_phy_reset(dev))
497 ata_generic_reset(dev);
499 /* enable EDMA machinery */
500 ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
/*
 * busdma callback: translate the bus_dma segment list into the Marvell
 * PRD (scatter/gather) table format, little-endian as the chip expects,
 * and flag the final entry as end-of-table.
 */
504 ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
507 struct ata_dmasetprd_args *args = xsc;
508 struct ata_marvell_dma_prdentry *prd = args->dmatab;
/* record and bail out on a failed mapping */
511 if ((args->error = error))
/* each PRD entry: 64 bit address split lo/hi plus a byte count */
514 for (i = 0; i < nsegs; i++) {
515 prd[i].addrlo = htole32(segs[i].ds_addr);
516 prd[i].count = htole32(segs[i].ds_len);
517 prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
/* mark the last entry written as end-of-table */
519 prd[i - 1].count |= htole32(ATA_DMA_EOT);
520 KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
/*
 * DMA init callback: install our PRD fill routine, allow full 64 bit
 * DMA addressing when the chip advertises support, and cap the per
 * transfer size below 64K.
 */
525 ata_marvell_edma_dmainit(device_t dev)
527 struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
528 struct ata_channel *ch = device_get_softc(dev);
531 /* note start and stop are not used here */
532 ch->dma.setprd = ata_marvell_edma_dmasetprd;
534 /* if 64bit support present adjust max address used */
535 if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
536 ch->dma.max_address = BUS_SPACE_MAXADDR;
538 /* chip does not reliably do 64K DMA transfers */
539 ch->dma.max_iosize = 64 * DEV_BSIZE;
/* register this chipset driver with the ata(4) framework */
542 ATA_DECLARE_DRIVER(ata_marvell);