2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2006 Bernd Walter. All rights reserved.
5 * Copyright (c) 2006 M. Warner Losh.
6 * Copyright (c) 2010 Greg Ansley. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include "opt_platform.h"
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
44 #include <sys/resource.h>
46 #include <sys/sysctl.h>
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <machine/intr.h>
52 #include <arm/at91/at91var.h>
53 #include <arm/at91/at91_mcireg.h>
54 #include <arm/at91/at91_pdcreg.h>
56 #include <dev/mmc/bridge.h>
57 #include <dev/mmc/mmcbrvar.h>
60 #include <dev/ofw/ofw_bus.h>
61 #include <dev/ofw/ofw_bus_subr.h>
69 * About running the MCI bus above 25MHz
71 * Historically, the MCI bus has been run at 30MHz on systems with a 60MHz
72 * master clock, in part due to a bug in dev/mmc.c making always request
73 * 30MHz, and in part over clocking the bus because 15MHz was too slow.
74 * Fixing that bug causes the mmc driver to request a 25MHz clock (as it
75 * should) and the logic in at91_mci_update_ios() picks the highest speed that
76 * doesn't exceed that limit. With a 60MHz MCK that would be 15MHz, and
77 * that's a real performance buzzkill when you've been getting away with 30MHz
80 * By defining AT91_MCI_ALLOW_OVERCLOCK (or setting the allow_overclock=1
81 * device hint or sysctl) you can enable logic in at91_mci_update_ios() to
 * overclock the SD bus a little by running it at MCK / 2 when the requested
83 * speed is 25MHz and the next highest speed is 15MHz or less. This appears
84 * to work on virtually all SD cards, since it is what this driver has been
85 * doing prior to the introduction of this option, where the overclocking vs
86 * underclocking decision was automatically "overclock". Modern SD cards can
87 * run at 45mhz/1-bit in standard mode (high speed mode enable commands not
88 * sent) without problems.
90 * Speaking of high-speed mode, the rm9200 manual says the MCI device supports
91 * the SD v1.0 specification and can run up to 50MHz. This is interesting in
92 * that the SD v1.0 spec caps the speed at 25MHz; high speed mode was added in
93 * the v1.10 spec. Furthermore, high speed mode doesn't just crank up the
94 * clock, it alters the signal timing. The rm9200 MCI device doesn't support
95 * these altered timings. So while speeds over 25MHz may work, they only work
96 * in what the SD spec calls "default" speed mode, and it amounts to violating
97 * the spec by overclocking the bus.
99 * If you also enable 4-wire mode it's possible transfers faster than 25MHz
 * will fail.  On the AT91RM9200, due to bugs in the bus contention logic,
 * transfers will fail if you have the USB host device and OHCI driver
 * enabled.  Even underclocking to 15MHz, intermittent overrun and underrun
 * errors occur.
103 * Note that you don't even need to have usb devices attached to the system,
104 * the errors begin to occur as soon as the OHCI driver sets the register bit
105 * to enable periodic transfers. It appears (based on brief investigation)
106 * that the usb host controller uses so much ASB bandwidth that sometimes the
107 * DMA for MCI transfers doesn't get a bus grant in time and data gets
108 * dropped. Adding even a modicum of network activity changes the symptom
 * from intermittent to very frequent.  Members of the AT91SAM9 family have
110 * corrected this problem, or are at least better about their use of the bus.
112 #ifndef AT91_MCI_ALLOW_OVERCLOCK
113 #define AT91_MCI_ALLOW_OVERCLOCK 1
117 * Allocate 2 bounce buffers we'll use to endian-swap the data due to the rm9200
118 * erratum. We use a pair of buffers because when reading that lets us begin
119 * endian-swapping the data in the first buffer while the DMA is reading into
120 * the second buffer. (We can't use the same trick for writing because we might
121 * not get all the data in the 2nd buffer swapped before the hardware needs it;
122 * dealing with that would add complexity to the driver.)
124 * The buffers are sized at 16K each due to the way the busdma cache sync
125 * operations work on arm. A dcache_inv_range() operation on a range larger
126 * than 16K gets turned into a dcache_wbinv_all(). That needlessly flushes the
127 * entire data cache, impacting overall system performance.
130 #define BBSIZE (16*1024)
131 #define MAX_BLOCKS ((BBSIZE*BBCOUNT)/512)
133 static int mci_debug;
135 struct at91_mci_softc {
136 void *intrhand; /* Interrupt handle */
139 #define CAP_HAS_4WIRE 1 /* Has 4 wire bus */
140 #define CAP_NEEDS_BYTESWAP 2 /* broken hardware needing bounce */
141 #define CAP_MCI1_REV2XX 4 /* MCI 1 rev 2.x */
143 #define PENDING_CMD 0x01
144 #define PENDING_STOP 0x02
145 #define CMD_MULTIREAD 0x10
146 #define CMD_MULTIWRITE 0x20
149 struct resource *irq_res; /* IRQ resource */
150 struct resource *mem_res; /* Memory resource */
152 bus_dma_tag_t dmatag;
153 struct mmc_host host;
155 struct mmc_request *req;
156 struct mmc_command *curcmd;
157 bus_dmamap_t bbuf_map[BBCOUNT];
158 char * bbuf_vaddr[BBCOUNT]; /* bounce bufs in KVA space */
159 uint32_t bbuf_len[BBCOUNT]; /* len currently queued for bounce buf */
160 uint32_t bbuf_curidx; /* which bbuf is the active DMA buffer */
161 uint32_t xfer_offset; /* offset so far into caller's buf */
164 /* bus entry points */
165 static int at91_mci_probe(device_t dev);
166 static int at91_mci_attach(device_t dev);
167 static int at91_mci_detach(device_t dev);
168 static void at91_mci_intr(void *);
170 /* helper routines */
171 static int at91_mci_activate(device_t dev);
172 static void at91_mci_deactivate(device_t dev);
173 static int at91_mci_is_mci1rev2xx(void);
175 #define AT91_MCI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
176 #define AT91_MCI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
177 #define AT91_MCI_LOCK_INIT(_sc) \
178 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
180 #define AT91_MCI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
181 #define AT91_MCI_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
182 #define AT91_MCI_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
184 static inline uint32_t
185 RD4(struct at91_mci_softc *sc, bus_size_t off)
187 return (bus_read_4(sc->mem_res, off));
191 WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val)
193 bus_write_4(sc->mem_res, off, val);
197 at91_bswap_buf(struct at91_mci_softc *sc, void * dptr, void * sptr, uint32_t memsize)
199 uint32_t * dst = (uint32_t *)dptr;
200 uint32_t * src = (uint32_t *)sptr;
204 * If the hardware doesn't need byte-swapping, let bcopy() do the
205 * work. Use bounce buffer even if we don't need byteswap, since
206 * buffer may straddle a page boundary, and we don't handle
207 * multi-segment transfers in hardware. Seen from 'bsdlabel -w' which
208 * uses raw geom access to the volume. Greg Ansley (gja (at)
211 if (!(sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
212 memcpy(dptr, sptr, memsize);
217 * Nice performance boost for slightly unrolling this loop.
218 * (But very little extra boost for further unrolling it.)
220 for (i = 0; i < memsize; i += 16) {
221 *dst++ = bswap32(*src++);
222 *dst++ = bswap32(*src++);
223 *dst++ = bswap32(*src++);
224 *dst++ = bswap32(*src++);
227 /* Mop up the last 1-3 words, if any. */
228 for (i = 0; i < (memsize & 0x0F); i += 4) {
229 *dst++ = bswap32(*src++);
234 at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
238 *(bus_addr_t *)arg = segs[0].ds_addr;
242 at91_mci_pdc_disable(struct at91_mci_softc *sc)
244 WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
247 WR4(sc, PDC_RNPR, 0);
248 WR4(sc, PDC_RNCR, 0);
251 WR4(sc, PDC_TNPR, 0);
252 WR4(sc, PDC_TNCR, 0);
256 * Reset the controller, then restore most of the current state.
258 * This is called after detecting an error. It's also called after stopping a
259 * multi-block write, to un-wedge the device so that it will handle the NOTBUSY
260 * signal correctly. See comments in at91_mci_stop_done() for more details.
262 static void at91_mci_reset(struct at91_mci_softc *sc)
269 at91_mci_pdc_disable(sc);
271 /* save current state */
273 imr = RD4(sc, MCI_IMR);
274 mr = RD4(sc, MCI_MR) & 0x7fff;
275 sdcr = RD4(sc, MCI_SDCR);
276 dtor = RD4(sc, MCI_DTOR);
278 /* reset the controller */
280 WR4(sc, MCI_IDR, 0xffffffff);
281 WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST);
285 WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
287 WR4(sc, MCI_SDCR, sdcr);
288 WR4(sc, MCI_DTOR, dtor);
289 WR4(sc, MCI_IER, imr);
292 * Make sure sdio interrupts will fire. Not sure why reading
293 * SR ensures that, but this is in the linux driver.
300 at91_mci_init(device_t dev)
302 struct at91_mci_softc *sc = device_get_softc(dev);
305 WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
306 WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */
307 WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1);
308 val = MCI_MR_PDCMODE;
309 val |= 0x34a; /* PWSDIV = 3; CLKDIV = 74 */
310 // if (sc->sc_cap & CAP_MCI1_REV2XX)
311 // val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF;
312 WR4(sc, MCI_MR, val);
313 #ifndef AT91_MCI_SLOT_B
314 WR4(sc, MCI_SDCR, 0); /* SLOT A, 1 bit bus */
317 * XXX Really should add second "unit" but nobody using using
318 * a two slot card that we know of. XXX
320 WR4(sc, MCI_SDCR, 1); /* SLOT B, 1 bit bus */
323 * Enable controller, including power-save. The slower clock
324 * of the power-save mode is only in effect when there is no
325 * transfer in progress, so it can be left in this mode all
328 WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
332 at91_mci_fini(device_t dev)
334 struct at91_mci_softc *sc = device_get_softc(dev);
336 WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */
337 at91_mci_pdc_disable(sc);
338 WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
342 at91_mci_probe(device_t dev)
345 if (!ofw_bus_is_compatible(dev, "atmel,hsmci"))
348 device_set_desc(dev, "MCI mmc/sd host bridge");
353 at91_mci_attach(device_t dev)
355 struct at91_mci_softc *sc = device_get_softc(dev);
356 struct sysctl_ctx_list *sctx;
357 struct sysctl_oid *soid;
361 sctx = device_get_sysctl_ctx(dev);
362 soid = device_get_sysctl_tree(dev);
367 sc->sc_cap |= CAP_NEEDS_BYTESWAP;
369 * MCI1 Rev 2 controllers need some workarounds, flag if so.
371 if (at91_mci_is_mci1rev2xx())
372 sc->sc_cap |= CAP_MCI1_REV2XX;
374 err = at91_mci_activate(dev);
378 AT91_MCI_LOCK_INIT(sc);
384 * Allocate DMA tags and maps and bounce buffers.
386 * The parms in the tag_create call cause the dmamem_alloc call to
387 * create each bounce buffer as a single contiguous buffer of BBSIZE
388 * bytes aligned to a 4096 byte boundary.
390 * Do not use DMA_COHERENT for these buffers because that maps the
391 * memory as non-cachable, which prevents cache line burst fills/writes,
392 * which is something we need since we're trying to overlap the
393 * byte-swapping with the DMA operations.
395 err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0,
396 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
397 BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag);
401 for (i = 0; i < BBCOUNT; ++i) {
402 err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i],
403 BUS_DMA_NOWAIT, &sc->bbuf_map[i]);
409 * Activate the interrupt
411 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
412 NULL, at91_mci_intr, sc, &sc->intrhand);
414 AT91_MCI_LOCK_DESTROY(sc);
419 * Allow 4-wire to be initially set via #define.
420 * Allow a device hint to override that.
421 * Allow a sysctl to override that.
423 #if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0
426 resource_int_value(device_get_name(dev), device_get_unit(dev),
427 "4wire", &sc->has_4wire);
428 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire",
429 CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus");
431 sc->sc_cap |= CAP_HAS_4WIRE;
433 sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK;
434 resource_int_value(device_get_name(dev), device_get_unit(dev),
435 "allow_overclock", &sc->allow_overclock);
436 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock",
437 CTLFLAG_RW, &sc->allow_overclock, 0,
438 "Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less.");
440 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug",
441 CTLFLAG_RWTUN, &mci_debug, 0, "enable debug output");
444 * Our real min freq is master_clock/512, but upper driver layers are
445 * going to set the min speed during card discovery, and the right speed
446 * for that is 400kHz, so advertise a safe value just under that.
448 * For max speed, while the rm9200 manual says the max is 50mhz, it also
449 * says it supports only the SD v1.0 spec, which means the real limit is
450 * 25mhz. On the other hand, historical use has been to slightly violate
451 * the standard by running the bus at 30MHz. For more information on
452 * that, see the comments at the top of this file.
454 sc->host.f_min = 375000;
455 sc->host.f_max = at91_master_clock / 2;
456 if (sc->host.f_max > 25000000)
457 sc->host.f_max = 25000000;
458 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
460 if (sc->sc_cap & CAP_HAS_4WIRE)
461 sc->host.caps |= MMC_CAP_4_BIT_DATA;
463 child = device_add_child(dev, "mmc", 0);
464 device_set_ivars(dev, &sc->host);
465 err = bus_generic_attach(dev);
468 at91_mci_deactivate(dev);
473 at91_mci_detach(device_t dev)
475 struct at91_mci_softc *sc = device_get_softc(dev);
478 at91_mci_deactivate(dev);
480 bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[0], sc->bbuf_map[0]);
481 bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[1], sc->bbuf_map[1]);
482 bus_dma_tag_destroy(sc->dmatag);
484 return (EBUSY); /* XXX */
488 at91_mci_activate(device_t dev)
490 struct at91_mci_softc *sc;
493 sc = device_get_softc(dev);
495 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
497 if (sc->mem_res == NULL)
501 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
503 if (sc->irq_res == NULL)
508 at91_mci_deactivate(dev);
513 at91_mci_deactivate(device_t dev)
515 struct at91_mci_softc *sc;
517 sc = device_get_softc(dev);
519 bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
521 bus_generic_detach(sc->dev);
523 bus_release_resource(dev, SYS_RES_MEMORY,
524 rman_get_rid(sc->mem_res), sc->mem_res);
527 bus_release_resource(dev, SYS_RES_IRQ,
528 rman_get_rid(sc->irq_res), sc->irq_res);
534 at91_mci_is_mci1rev2xx(void)
537 switch (soc_info.type) {
551 at91_mci_update_ios(device_t brdev, device_t reqdev)
553 struct at91_mci_softc *sc;
558 sc = device_get_softc(brdev);
562 * Calculate our closest available clock speed that doesn't exceed the
565 * When overclocking is allowed, the requested clock is 25MHz, the
566 * computed frequency is 15MHz or smaller and clockdiv is 1, use
567 * clockdiv of 0 to double that. If less than 12.5MHz, double
568 * regardless of the overclocking setting.
570 * Whatever we come up with, store it back into ios->clock so that the
571 * upper layer drivers can report the actual speed of the bus.
573 if (ios->clock == 0) {
574 WR4(sc, MCI_CR, MCI_CR_MCIDIS);
577 WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
578 if ((at91_master_clock % (ios->clock * 2)) == 0)
579 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
581 clkdiv = (at91_master_clock / ios->clock) / 2;
582 freq = at91_master_clock / ((clkdiv+1) * 2);
583 if (clkdiv == 1 && ios->clock == 25000000 && freq <= 15000000) {
584 if (sc->allow_overclock || freq <= 12500000) {
586 freq = at91_master_clock / ((clkdiv+1) * 2);
591 if (ios->bus_width == bus_width_4)
592 WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS);
594 WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS);
595 WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv);
596 /* Do we need a settle time here? */
597 /* XXX We need to turn the device on/off here with a GPIO pin */
602 at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd)
605 struct mmc_data *data;
610 /* XXX Upper layers don't always set this */
613 /* Begin setting up command register. */
617 if (sc->host.ios.bus_mode == opendrain)
618 cmdr |= MCI_CMDR_OPDCMD;
620 /* Set up response handling. Allow max timeout for responses. */
622 if (MMC_RSP(cmd->flags) == MMC_RSP_NONE)
623 cmdr |= MCI_CMDR_RSPTYP_NO;
625 cmdr |= MCI_CMDR_MAXLAT;
626 if (cmd->flags & MMC_RSP_136)
627 cmdr |= MCI_CMDR_RSPTYP_136;
629 cmdr |= MCI_CMDR_RSPTYP_48;
633 * If there is no data transfer, just set up the right interrupt mask
634 * and start the command.
636 * The interrupt mask needs to be CMDRDY plus all non-data-transfer
637 * errors. It's important to leave the transfer-related errors out, to
638 * avoid spurious timeout or crc errors on a STOP command following a
639 * multiblock read. When a multiblock read is in progress, sending a
640 * STOP in the middle of a block occasionally triggers such errors, but
641 * we're totally disinterested in them because we've already gotten all
642 * the data we wanted without error before sending the STOP command.
646 uint32_t ier = MCI_SR_CMDRDY |
647 MCI_SR_RTOE | MCI_SR_RENDE |
648 MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE;
650 at91_mci_pdc_disable(sc);
652 if (cmd->opcode == MMC_STOP_TRANSMISSION)
653 cmdr |= MCI_CMDR_TRCMD_STOP;
655 /* Ignore response CRC on CMD2 and ACMD41, per standard. */
657 if (cmd->opcode == MMC_SEND_OP_COND ||
658 cmd->opcode == ACMD_SD_SEND_OP_COND)
659 ier &= ~MCI_SR_RCRCE;
662 printf("CMDR %x (opcode %d) ARGR %x no data\n",
663 cmdr, cmd->opcode, cmd->arg);
665 WR4(sc, MCI_ARGR, cmd->arg);
666 WR4(sc, MCI_CMDR, cmdr);
667 WR4(sc, MCI_IDR, 0xffffffff);
668 WR4(sc, MCI_IER, ier);
672 /* There is data, set up the transfer-related parts of the command. */
674 if (data->flags & MMC_DATA_READ)
675 cmdr |= MCI_CMDR_TRDIR;
677 if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE))
678 cmdr |= MCI_CMDR_TRCMD_START;
680 if (data->flags & MMC_DATA_STREAM)
681 cmdr |= MCI_CMDR_TRTYP_STREAM;
682 else if (data->flags & MMC_DATA_MULTI) {
683 cmdr |= MCI_CMDR_TRTYP_MULTIPLE;
684 sc->flags |= (data->flags & MMC_DATA_READ) ?
685 CMD_MULTIREAD : CMD_MULTIWRITE;
689 * Disable PDC until we're ready.
691 * Set block size and turn on PDC mode for dma xfer.
692 * Note that the block size is the smaller of the amount of data to be
693 * transferred, or 512 bytes. The 512 size is fixed by the standard;
694 * smaller blocks are possible, but never larger.
697 WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
699 mr = RD4(sc,MCI_MR) & ~MCI_MR_BLKLEN;
700 mr |= min(data->len, 512) << 16;
701 WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE|MCI_MR_PDCPADV);
706 * Use bounce buffers even if we don't need to byteswap, because doing
707 * multi-block IO with large DMA buffers is way fast (compared to
708 * single-block IO), even after incurring the overhead of also copying
709 * from/to the caller's buffers (which may be in non-contiguous physical
712 * In an ideal non-byteswap world we could create a dma tag that allows
713 * for discontiguous segments and do the IO directly from/to the
714 * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the
715 * discontiguous buffers through the PDC. Someday.
717 * If a read is bigger than 2k, split it in half so that we can start
718 * byte-swapping the first half while the second half is on the wire.
719 * It would be best if we could split it into 8k chunks, but we can't
720 * always keep up with the byte-swapping due to other system activity,
721 * and if an RXBUFF interrupt happens while we're still handling the
722 * byte-swap from the prior buffer (IE, we haven't returned from
723 * handling the prior interrupt yet), then data will get dropped on the
724 * floor and we can't easily recover from that. The right fix for that
725 * would be to have the interrupt handling only keep the DMA flowing and
726 * enqueue filled buffers to be byte-swapped in a non-interrupt context.
727 * Even that won't work on the write side of things though; in that
728 * context we have to have all the data ready to go before starting the
731 * XXX what about stream transfers?
736 if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) {
738 uint32_t remaining = data->len;
742 if (remaining > (BBCOUNT*BBSIZE))
743 panic("IO read size exceeds MAXDATA\n");
745 if (data->flags & MMC_DATA_READ) {
746 if (remaining > 2048) // XXX
750 err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
751 sc->bbuf_vaddr[0], len, at91_mci_getaddr,
752 &paddr, BUS_DMA_NOWAIT);
754 panic("IO read dmamap_load failed\n");
755 bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
756 BUS_DMASYNC_PREREAD);
757 WR4(sc, PDC_RPR, paddr);
758 WR4(sc, PDC_RCR, len / 4);
759 sc->bbuf_len[0] = len;
761 if (remaining == 0) {
765 err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
766 sc->bbuf_vaddr[1], len, at91_mci_getaddr,
767 &paddr, BUS_DMA_NOWAIT);
769 panic("IO read dmamap_load failed\n");
770 bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
771 BUS_DMASYNC_PREREAD);
772 WR4(sc, PDC_RNPR, paddr);
773 WR4(sc, PDC_RNCR, len / 4);
774 sc->bbuf_len[1] = len;
777 WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN);
779 len = min(BBSIZE, remaining);
780 at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len);
781 err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
782 sc->bbuf_vaddr[0], len, at91_mci_getaddr,
783 &paddr, BUS_DMA_NOWAIT);
785 panic("IO write dmamap_load failed\n");
786 bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
787 BUS_DMASYNC_PREWRITE);
789 * Erratum workaround: PDC transfer length on a write
790 * must not be smaller than 12 bytes (3 words); only
791 * blklen bytes (set above) are actually transferred.
793 WR4(sc, PDC_TPR,paddr);
794 WR4(sc, PDC_TCR, (len < 12) ? 3 : len / 4);
795 sc->bbuf_len[0] = len;
797 if (remaining == 0) {
801 at91_bswap_buf(sc, sc->bbuf_vaddr[1],
802 ((char *)data->data)+BBSIZE, len);
803 err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
804 sc->bbuf_vaddr[1], len, at91_mci_getaddr,
805 &paddr, BUS_DMA_NOWAIT);
807 panic("IO write dmamap_load failed\n");
808 bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
809 BUS_DMASYNC_PREWRITE);
810 WR4(sc, PDC_TNPR, paddr);
811 WR4(sc, PDC_TNCR, (len < 12) ? 3 : len / 4);
812 sc->bbuf_len[1] = len;
815 /* do not enable PDC xfer until CMDRDY asserted */
817 data->xfer_len = 0; /* XXX what's this? appears to be unused. */
821 printf("CMDR %x (opcode %d) ARGR %x with data len %d\n",
822 cmdr, cmd->opcode, cmd->arg, cmd->data->len);
824 WR4(sc, MCI_ARGR, cmd->arg);
825 WR4(sc, MCI_CMDR, cmdr);
826 WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
830 at91_mci_next_operation(struct at91_mci_softc *sc)
832 struct mmc_request *req;
838 if (sc->flags & PENDING_CMD) {
839 sc->flags &= ~PENDING_CMD;
840 at91_mci_start_cmd(sc, req->cmd);
842 } else if (sc->flags & PENDING_STOP) {
843 sc->flags &= ~PENDING_STOP;
844 at91_mci_start_cmd(sc, req->stop);
848 WR4(sc, MCI_IDR, 0xffffffff);
851 //printf("req done\n");
856 at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req)
858 struct at91_mci_softc *sc = device_get_softc(brdev);
861 if (sc->req != NULL) {
865 //printf("new req\n");
867 sc->flags = PENDING_CMD;
869 sc->flags |= PENDING_STOP;
870 at91_mci_next_operation(sc);
876 at91_mci_get_ro(device_t brdev, device_t reqdev)
882 at91_mci_acquire_host(device_t brdev, device_t reqdev)
884 struct at91_mci_softc *sc = device_get_softc(brdev);
889 msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5);
896 at91_mci_release_host(device_t brdev, device_t reqdev)
898 struct at91_mci_softc *sc = device_get_softc(brdev);
908 at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr)
910 struct mmc_command *cmd = sc->curcmd;
911 char * dataptr = (char *)cmd->data->data;
912 uint32_t curidx = sc->bbuf_curidx;
913 uint32_t len = sc->bbuf_len[curidx];
916 * We arrive here when a DMA transfer for a read is done, whether it's
917 * a single or multi-block read.
919 * We byte-swap the buffer that just completed, and if that is the
920 * last buffer that's part of this read then we move on to the next
921 * operation, otherwise we wait for another ENDRX for the next bufer.
924 bus_dmamap_sync(sc->dmatag, sc->bbuf_map[curidx], BUS_DMASYNC_POSTREAD);
925 bus_dmamap_unload(sc->dmatag, sc->bbuf_map[curidx]);
927 at91_bswap_buf(sc, dataptr + sc->xfer_offset, sc->bbuf_vaddr[curidx], len);
930 printf("read done sr %x curidx %d len %d xfer_offset %d\n",
931 sr, curidx, len, sc->xfer_offset);
934 sc->xfer_offset += len;
935 sc->bbuf_curidx = !curidx; /* swap buffers */
938 * If we've transferred all the data, move on to the next operation.
940 * If we're still transferring the last buffer, RNCR is already zero but
941 * we have to write a zero anyway to clear the ENDRX status so we don't
942 * re-interrupt until the last buffer is done.
944 if (sc->xfer_offset == cmd->data->len) {
945 WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
946 cmd->error = MMC_ERR_NONE;
947 at91_mci_next_operation(sc);
949 WR4(sc, PDC_RNCR, 0);
950 WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_ENDRX);
955 at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr)
957 struct mmc_command *cmd = sc->curcmd;
960 * We arrive here when the entire DMA transfer for a write is done,
961 * whether it's a single or multi-block write. If it's multi-block we
962 * have to immediately move on to the next operation which is to send
963 * the stop command. If it's a single-block transfer we need to wait
964 * for NOTBUSY, but if that's already asserted we can avoid another
965 * interrupt and just move on to completing the request right away.
968 WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
970 bus_dmamap_sync(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx],
971 BUS_DMASYNC_POSTWRITE);
972 bus_dmamap_unload(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx]);
974 if ((cmd->data->flags & MMC_DATA_MULTI) || (sr & MCI_SR_NOTBUSY)) {
975 cmd->error = MMC_ERR_NONE;
976 at91_mci_next_operation(sc);
978 WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
983 at91_mci_notbusy(struct at91_mci_softc *sc)
985 struct mmc_command *cmd = sc->curcmd;
988 * We arrive here by either completion of a single-block write, or
989 * completion of the stop command that ended a multi-block write (and,
990 * I suppose, after a card-select or erase, but I haven't tested
991 * those). Anyway, we're done and it's time to move on to the next
995 cmd->error = MMC_ERR_NONE;
996 at91_mci_next_operation(sc);
1000 at91_mci_stop_done(struct at91_mci_softc *sc, uint32_t sr)
1002 struct mmc_command *cmd = sc->curcmd;
1005 * We arrive here after receiving CMDRDY for a MMC_STOP_TRANSMISSION
1006 * command. Depending on the operation being stopped, we may have to
1007 * do some unusual things to work around hardware bugs.
1011 * This is known to be true of at91rm9200 hardware; it may or may not
1012 * apply to more recent chips:
1014 * After stopping a multi-block write, the NOTBUSY bit in MCI_SR does
1015 * not properly reflect the actual busy state of the card as signaled
1016 * on the DAT0 line; it always claims the card is not-busy. If we
1017 * believe that and let operations continue, following commands will
1018 * fail with response timeouts (except of course MMC_SEND_STATUS -- it
1019 * indicates the card is busy in the PRG state, which was the smoking
1020 * gun that showed MCI_SR NOTBUSY was not tracking DAT0 correctly).
1022 * The atmel docs are emphatic: "This flag [NOTBUSY] must be used only
1023 * for Write Operations." I guess technically since we sent a stop
1024 * it's not a write operation anymore. But then just what did they
1025 * think it meant for the stop command to have "...an optional busy
1026 * signal transmitted on the data line" according to the SD spec?
1028 * I tried a variety of things to un-wedge the MCI and get the status
1029 * register to reflect NOTBUSY correctly again, but the only thing
1030 * that worked was a full device reset. It feels like an awfully big
1031 * hammer, but doing a full reset after every multiblock write is
1032 * still faster than doing single-block IO (by almost two orders of
1033 * magnitude: 20KB/sec improves to about 1.8MB/sec best case).
1035 * After doing the reset, wait for a NOTBUSY interrupt before
1036 * continuing with the next operation.
1038 * This workaround breaks multiwrite on the rev2xx parts, but some other
1039 * workaround is needed.
1041 if ((sc->flags & CMD_MULTIWRITE) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
1043 WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
1048 * This is known to be true of at91rm9200 hardware; it may or may not
1049 * apply to more recent chips:
1051 * After stopping a multi-block read, loop to read and discard any
1052 * data that coasts in after we sent the stop command. The docs don't
1053 * say anything about it, but empirical testing shows that 1-3
1054 * additional words of data get buffered up in some unmentioned
1055 * internal fifo and if we don't read and discard them here they end
1056 * up on the front of the next read DMA transfer we do.
1058 * This appears to be unnecessary for rev2xx parts.
1060 if ((sc->flags & CMD_MULTIREAD) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
1065 sr = RD4(sc, MCI_SR);
1066 if (sr & MCI_SR_RXRDY) {
1070 } while (sr & MCI_SR_RXRDY);
1074 cmd->error = MMC_ERR_NONE;
1075 at91_mci_next_operation(sc);
/*
 * at91_mci_cmdrdy -- runs when the controller raises CMDRDY at the end of
 * every command.  Latches the response registers into cmd->resp[], then
 * decides what must still be waited for (NOTBUSY for R1B commands, ENDRX
 * for reads, TXBUFE-then-BLKE for writes) before the command can complete;
 * if nothing remains, marks the command successful and advances to the
 * next operation.
 *
 * NOTE(review): this listing is elided -- the embedded original line
 * numbers jump (e.g. 1082 -> 1089, 1145 -> 1148), so braces, `return`
 * statements and the write-path `else` arm are not visible here.  Comments
 * below describe only what the visible lines establish.
 */
1080 at91_mci_cmdrdy(struct at91_mci_softc *sc, uint32_t sr)
1082 struct mmc_command *cmd = sc->curcmd;
1089 * We get here at the end of EVERY command. We retrieve the command
1090 * response (if any) then decide what to do next based on the command.
/*
 * Read back either 1 or 4 response words depending on whether this was
 * a short (48-bit) or long (136-bit) response command.
 */
1093 if (cmd->flags & MMC_RSP_PRESENT) {
1094 for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); i++) {
1095 cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4);
1097 printf("RSPR[%d] = %x sr=%x\n", i, cmd->resp[i], sr);
1102 * If this was a stop command, go handle the various special
1103 * conditions (read: bugs) that have to be dealt with following a stop.
1105 if (cmd->opcode == MMC_STOP_TRANSMISSION) {
1106 at91_mci_stop_done(sc, sr);
1111 * If this command can continue to assert BUSY beyond the response then
1112 * we need to wait for NOTBUSY before the command is really done.
1114 * Note that this may not work properly on the at91rm9200. It certainly
1115 * doesn't work for the STOP command that follows a multi-block write,
1116 * so post-stop CMDRDY is handled separately; see the special handling
1117 * in at91_mci_stop_done().
1119 * Beside STOP, there are other R1B-type commands that use the busy
1120 * signal after CMDRDY: CMD7 (card select), CMD28-29 (write protect),
1121 * CMD38 (erase). I haven't tested any of them, but I rather expect
1122 * them all to have the same sort of problem with MCI_SR not actually
1123 * reflecting the state of the DAT0-line busy indicator. So this code
1124 * may need to grow some sort of special handling for them too. (This
1125 * just in: CMD7 isn't a problem right now because dev/mmc.c incorrectly
1126 * sets the response flags to R1 rather than R1B.) XXX
/* R1B command: arm NOTBUSY (plus error bits) and wait for that interrupt. */
1128 if ((cmd->flags & MMC_RSP_BUSY)) {
1129 WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
1134 * If there is a data transfer with this command, then...
1135 * - If it's a read, we need to wait for ENDRX.
1136 * - If it's a write, now is the time to enable the PDC, and we need
1137 * to wait for a BLKE that follows a TXBUFE, because if we're doing
1138 * a split transfer we get a BLKE after the first half (when TPR/TCR
1139 * get loaded from TNPR/TNCR). So first we wait for the TXBUFE, and
1140 * the handling for that interrupt will then invoke the wait for the
1141 * subsequent BLKE which indicates actual completion.
1145 if (cmd->data->flags & MMC_DATA_READ) {
/*
 * Write path (the `else` arm is elided in this listing): arm TXBUFE and
 * kick the PDC transmitter.  NOTE(review): the read-path `ier` assignment
 * (presumably MCI_SR_ENDRX) is among the missing lines -- confirm against
 * the full source.
 */
1148 ier = MCI_SR_TXBUFE;
1149 WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN);
1151 WR4(sc, MCI_IER, MCI_SR_ERROR | ier);
1156 * If we made it to here, we don't need to wait for anything more for
1157 * the current command, move on to the next command (will complete the
1158 * request if there is no next command).
1160 cmd->error = MMC_ERR_NONE;
1161 at91_mci_next_operation(sc);
/*
 * at91_mci_intr -- the driver's interrupt handler.  Reads the masked
 * status (SR & IMR), disables everything that just fired (interrupts are
 * used one-shot; each next step re-enables what it needs), reports errors
 * on the current command, and dispatches each remaining status bit to the
 * matching completion helper (read/write/notbusy/cmdrdy).
 *
 * NOTE(review): this listing is elided -- embedded original line numbers
 * jump (e.g. 1192 -> 1194, 1206 -> 1210), so some braces, the error-path
 * reset call and the matching AT91_MCI_LOCK() are not visible here.
 */
1165 at91_mci_intr(void *arg)
1167 struct at91_mci_softc *sc = (struct at91_mci_softc*)arg;
1168 struct mmc_command *cmd = sc->curcmd;
/* isr = status bits that are both set and currently enabled in IMR. */
1173 sr = RD4(sc, MCI_SR);
1174 isr = sr & RD4(sc, MCI_IMR);
1177 printf("i 0x%x sr 0x%x\n", isr, sr);
1180 * All interrupts are one-shot; disable it now.
1181 * The next operation will re-enable whatever interrupts it wants.
1183 WR4(sc, MCI_IDR, isr);
/* Map hardware error bits onto the generic MMC error codes. */
1184 if (isr & MCI_SR_ERROR) {
1185 if (isr & (MCI_SR_RTOE | MCI_SR_DTOE))
1186 cmd->error = MMC_ERR_TIMEOUT;
1187 else if (isr & (MCI_SR_RCRCE | MCI_SR_DCRCE))
1188 cmd->error = MMC_ERR_BADCRC;
1189 else if (isr & (MCI_SR_OVRE | MCI_SR_UNRE))
1190 cmd->error = MMC_ERR_FIFO;
1192 cmd->error = MMC_ERR_FAILED;
1194 * CMD8 is used to probe for SDHC cards, a standard SD card
1195 * will get a response timeout; don't report it because it's a
1196 * normal and expected condition. One might argue that all
1197 * error reporting should be left to higher levels, but when
1198 * they report at all it's always EIO, which isn't very
1199 * helpful. XXX bootverbose?
1201 if (cmd->opcode != 8) {
1202 device_printf(sc->dev,
1203 "IO error; status MCI_SR = 0x%b cmd opcode = %d%s\n",
1204 sr, MCI_SR_BITSTRING, cmd->opcode,
1205 (cmd->opcode != 12) ? "" :
1206 (sc->flags & CMD_MULTIREAD) ? " after read" : " after write");
1207 /* XXX not sure RTOE needs a full reset, just a retry */
1210 at91_mci_next_operation(sc);
/* No error: dispatch each enabled status bit to its handler. */
1212 if (isr & MCI_SR_TXBUFE) {
1213 // printf("TXBUFE\n");
1215 * We need to wait for a BLKE that follows TXBUFE
1216 * (intermediate BLKEs might happen after ENDTXes if
1217 * we're chaining multiple buffers). If BLKE is also
1218 * asserted at the time we get TXBUFE, we can avoid
1219 * another interrupt and process it right away, below.
1221 if (sr & MCI_SR_BLKE)
1224 WR4(sc, MCI_IER, MCI_SR_BLKE);
1226 if (isr & MCI_SR_RXBUFF) {
1227 // printf("RXBUFF\n");
1229 if (isr & MCI_SR_ENDTX) {
1230 // printf("ENDTX\n");
1232 if (isr & MCI_SR_ENDRX) {
1233 // printf("ENDRX\n");
1234 at91_mci_read_done(sc, sr);
1236 if (isr & MCI_SR_NOTBUSY) {
1237 // printf("NOTBUSY\n");
1238 at91_mci_notbusy(sc);
1240 if (isr & MCI_SR_DTIP) {
1241 // printf("Data transfer in progress\n");
1243 if (isr & MCI_SR_BLKE) {
1244 // printf("Block transfer end\n");
1245 at91_mci_write_done(sc, sr);
1247 if (isr & MCI_SR_TXRDY) {
1248 // printf("Ready to transmit\n");
1250 if (isr & MCI_SR_RXRDY) {
1251 // printf("Ready to receive\n");
1253 if (isr & MCI_SR_CMDRDY) {
1254 // printf("Command ready\n");
1255 at91_mci_cmdrdy(sc, sr);
1258 AT91_MCI_UNLOCK(sc);
/*
 * at91_mci_read_ivar -- newbus bus_read_ivar method.  Lets the child mmc
 * bus driver read bridge instance variables (clock, bus width, OCR, caps,
 * etc.) out of the softc's mmc_host state.
 *
 * NOTE(review): this listing is elided -- the `switch (which)` line, the
 * per-case `break;`s, the default case and the final `return` are among
 * the missing lines (embedded original line numbers jump, e.g.
 * 1264 -> 1269, 1321 -> 1328).
 */
1262 at91_mci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1264 struct at91_mci_softc *sc = device_get_softc(bus);
1269 case MMCBR_IVAR_BUS_MODE:
1270 *(int *)result = sc->host.ios.bus_mode;
1272 case MMCBR_IVAR_BUS_WIDTH:
1273 *(int *)result = sc->host.ios.bus_width;
1275 case MMCBR_IVAR_CHIP_SELECT:
1276 *(int *)result = sc->host.ios.chip_select;
1278 case MMCBR_IVAR_CLOCK:
1279 *(int *)result = sc->host.ios.clock;
1281 case MMCBR_IVAR_F_MIN:
1282 *(int *)result = sc->host.f_min;
1284 case MMCBR_IVAR_F_MAX:
1285 *(int *)result = sc->host.f_max;
1287 case MMCBR_IVAR_HOST_OCR:
1288 *(int *)result = sc->host.host_ocr;
1290 case MMCBR_IVAR_MODE:
1291 *(int *)result = sc->host.mode;
1293 case MMCBR_IVAR_OCR:
1294 *(int *)result = sc->host.ocr;
1296 case MMCBR_IVAR_POWER_MODE:
1297 *(int *)result = sc->host.ios.power_mode;
1299 case MMCBR_IVAR_VDD:
1300 *(int *)result = sc->host.ios.vdd;
/*
 * CAPS is computed on read: mirror the has_4wire hint into both the
 * driver's capability flags and the mmc host caps before reporting.
 */
1302 case MMCBR_IVAR_CAPS:
1303 if (sc->has_4wire) {
1304 sc->sc_cap |= CAP_HAS_4WIRE;
1305 sc->host.caps |= MMC_CAP_4_BIT_DATA;
1307 sc->sc_cap &= ~CAP_HAS_4WIRE;
1308 sc->host.caps &= ~MMC_CAP_4_BIT_DATA;
1310 *(int *)result = sc->host.caps;
1312 case MMCBR_IVAR_MAX_DATA:
1314 * Something is wrong with the 2x parts and multiblock, so
1315 * just do 1 block at a time for now, which really kills
/* rev2xx parts are limited to single-block transfers (see comment). */
1318 if (sc->sc_cap & CAP_MCI1_REV2XX)
1321 *(int *)result = MAX_BLOCKS;
/*
 * at91_mci_write_ivar -- newbus bus_write_ivar method.  Lets the child mmc
 * bus driver set bridge instance variables; writable ones are stored into
 * the softc's mmc_host/ios state, read-only ones fall through to the
 * trailing case group (which presumably returns EINVAL -- the return
 * statements are among the lines elided from this listing).
 *
 * NOTE(review): the `switch (which)` line, per-case `break;`s and final
 * `return` are not visible here (embedded original line numbers jump,
 * e.g. 1330 -> 1335, 1364 -> 1370).
 */
1328 at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1330 struct at91_mci_softc *sc = device_get_softc(bus);
1335 case MMCBR_IVAR_BUS_MODE:
1336 sc->host.ios.bus_mode = value;
1338 case MMCBR_IVAR_BUS_WIDTH:
1339 sc->host.ios.bus_width = value;
1341 case MMCBR_IVAR_CHIP_SELECT:
1342 sc->host.ios.chip_select = value;
1344 case MMCBR_IVAR_CLOCK:
1345 sc->host.ios.clock = value;
1347 case MMCBR_IVAR_MODE:
1348 sc->host.mode = value;
1350 case MMCBR_IVAR_OCR:
1351 sc->host.ocr = value;
1353 case MMCBR_IVAR_POWER_MODE:
1354 sc->host.ios.power_mode = value;
1356 case MMCBR_IVAR_VDD:
1357 sc->host.ios.vdd = value;
1359 /* These are read-only */
1360 case MMCBR_IVAR_CAPS:
1361 case MMCBR_IVAR_HOST_OCR:
1362 case MMCBR_IVAR_F_MIN:
1363 case MMCBR_IVAR_F_MAX:
1364 case MMCBR_IVAR_MAX_DATA:
/*
 * Newbus method table: wires the generic device, bus-ivar and mmc bridge
 * (mmcbr) interfaces to this driver's implementations.
 *
 * NOTE(review): the interface-group comment lines and the DEVMETHOD_END
 * terminator are elided from this listing (embedded original line numbers
 * jump, e.g. 1374 -> 1377, 1385 -> 1390).
 */
1370 static device_method_t at91_mci_methods[] = {
1372 DEVMETHOD(device_probe, at91_mci_probe),
1373 DEVMETHOD(device_attach, at91_mci_attach),
1374 DEVMETHOD(device_detach, at91_mci_detach),
1377 DEVMETHOD(bus_read_ivar, at91_mci_read_ivar),
1378 DEVMETHOD(bus_write_ivar, at91_mci_write_ivar),
1381 DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios),
1382 DEVMETHOD(mmcbr_request, at91_mci_request),
1383 DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro),
1384 DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host),
1385 DEVMETHOD(mmcbr_release_host, at91_mci_release_host),
/*
 * Driver registration: the driver_t/devclass pair plus DRIVER_MODULE
 * attachments for both the FDT (simplebus) and legacy (atmelarm) parent
 * buses, and the mmc bridge declaration that lets the mmc stack attach.
 *
 * NOTE(review): the driver name string and closing brace of the driver_t
 * initializer, and the DRIVER_MODULE trailing arguments, are elided from
 * this listing (embedded original line numbers jump, e.g. 1390 -> 1393,
 * 1399 -> 1402).
 */
1390 static driver_t at91_mci_driver = {
1393 sizeof(struct at91_mci_softc),
1396 static devclass_t at91_mci_devclass;
1399 DRIVER_MODULE(at91_mci, simplebus, at91_mci_driver, at91_mci_devclass, NULL,
1402 DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, NULL,
1406 MMC_DECLARE_BRIDGE(at91_mci);