2 * Copyright (c) 2013 Alexander Fedorov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/resource.h>
40 #include <sys/sysctl.h>
42 #include <machine/bus.h>
44 #include <dev/ofw/ofw_bus.h>
45 #include <dev/ofw/ofw_bus_subr.h>
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcreg.h>
49 #include <dev/mmc/mmcbrvar.h>
51 #include <arm/allwinner/a10_clk.h>
52 #include <arm/allwinner/a10_mmc.h>
/*
 * NOTE(review): this view of the file is missing lines (error returns,
 * closing braces, some declarations); comments below describe only the
 * code that is visible.
 */
/* Indices into the sc->a10_res[] resource array, and its size. */
54 #define A10_MMC_MEMRES 0
55 #define A10_MMC_IRQRES 1
56 #define A10_MMC_RESSZ 2
/* IDMA descriptor chain: up to 16 segments of at most 8 KiB each. */
57 #define A10_MMC_DMA_SEGS 16
58 #define A10_MMC_DMA_MAX_SIZE 0x2000
/* FIFO watermark/burst value written to the FTRGL register for DMA. */
59 #define A10_MMC_DMA_FTRGLEVEL 0x20070008
/* Loader tunable: non-zero forces PIO transfers instead of DMA. */
61 static int a10_mmc_pio_mode = 0;
63 TUNABLE_INT("hw.a10.mmc.pio_mode", &a10_mmc_pio_mode);
/*
 * Per-instance software state.  Protected by a10_mtx (see the
 * A10_MMC_LOCK/UNLOCK macros); register access goes through the
 * bus-space tag/handle pair below.
 */
65 struct a10_mmc_softc {
66 bus_space_handle_t a10_bsh;	/* register window handle */
67 bus_space_tag_t a10_bst;	/* register window tag */
73 struct callout a10_timeoutc;	/* per-request watchdog timeout */
74 struct mmc_host a10_host;	/* host capabilities/ios for mmc(4) */
75 struct mmc_request * a10_req;	/* request currently in flight, or NULL */
77 struct resource * a10_res[A10_MMC_RESSZ]; /* memory + IRQ resources */
79 uint32_t a10_intr_wait;		/* interrupt bits a request waits for */
82 /* Fields required for DMA access. */
83 bus_addr_t a10_dma_desc_phys;	/* bus address of descriptor ring */
84 bus_dmamap_t a10_dma_map;	/* map for the descriptor memory */
85 bus_dma_tag_t a10_dma_tag;	/* tag for the descriptor memory */
87 bus_dmamap_t a10_dma_buf_map;	/* map for data buffers */
88 bus_dma_tag_t a10_dma_buf_tag;	/* tag for data buffers */
/* One memory window and one (shareable) IRQ per controller instance. */
93 static struct resource_spec a10_mmc_res_spec[] = {
94 { SYS_RES_MEMORY, 0, RF_ACTIVE },
95 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/* Forward declarations: device lifecycle, DMA/interrupt internals, and
 * the mmcbr(4) bridge interface entry points. */
99 static int a10_mmc_probe(device_t);
100 static int a10_mmc_attach(device_t);
101 static int a10_mmc_detach(device_t);
102 static int a10_mmc_setup_dma(struct a10_mmc_softc *);
103 static int a10_mmc_reset(struct a10_mmc_softc *);
104 static void a10_mmc_intr(void *);
105 static int a10_mmc_update_clock(struct a10_mmc_softc *);
107 static int a10_mmc_update_ios(device_t, device_t);
108 static int a10_mmc_request(device_t, device_t, struct mmc_request *);
109 static int a10_mmc_get_ro(device_t, device_t);
110 static int a10_mmc_acquire_host(device_t, device_t);
111 static int a10_mmc_release_host(device_t, device_t);
/* Softc lock helpers and 32-bit register accessors. */
113 #define A10_MMC_LOCK(_sc) mtx_lock(&(_sc)->a10_mtx)
114 #define A10_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->a10_mtx)
115 #define A10_MMC_READ_4(_sc, _reg) \
116 bus_space_read_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg)
117 #define A10_MMC_WRITE_4(_sc, _reg, _value) \
118 bus_space_write_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg, _value)
/*
 * Probe: match the FDT compatible string "allwinner,sun4i-a10-mmc".
 * NOTE(review): the early-return bodies for the two checks are not
 * visible in this view (presumably return (ENXIO)).
 */
121 a10_mmc_probe(device_t dev)
124 if (!ofw_bus_status_okay(dev))
126 if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-mmc"))
128 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
130 return (BUS_PROBE_DEFAULT);
/*
 * Attach: allocate bus resources, hook the interrupt, turn on the MMC
 * module clock, reset the controller, set up DMA (unless PIO mode was
 * forced or DMA setup fails), publish host capabilities, and attach the
 * mmc(4) child bus.  The tail (from callout_drain down) is the error
 * unwind path.  NOTE(review): several lines (returns, closing braces,
 * the `child' declaration) are missing from this view.
 */
134 a10_mmc_attach(device_t dev)
137 struct a10_mmc_softc *sc;
138 struct sysctl_ctx_list *ctx;
139 struct sysctl_oid_list *tree;
141 sc = device_get_softc(dev);
/* Unit number doubles as the clock-gate index; only hosts 0-3 exist. */
144 sc->a10_id = device_get_unit(dev);
145 if (sc->a10_id > 3) {
146 device_printf(dev, "only 4 hosts are supported (0-3)\n");
149 if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
150 device_printf(dev, "cannot allocate device resources\n");
153 sc->a10_bst = rman_get_bustag(sc->a10_res[A10_MMC_MEMRES]);
154 sc->a10_bsh = rman_get_bushandle(sc->a10_res[A10_MMC_MEMRES]);
155 if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
156 INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
157 &sc->a10_intrhand)) {
158 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
159 device_printf(dev, "cannot setup interrupt handler\n");
163 /* Activate the module clock. */
164 if (a10_clk_mmc_activate(sc->a10_id) != 0) {
165 bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES],
167 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
168 device_printf(dev, "cannot activate mmc clock\n");
/* Default per-request watchdog of 10 s, tunable via sysctl. */
172 sc->a10_timeout = 10;
173 ctx = device_get_sysctl_ctx(dev);
174 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
175 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
176 &sc->a10_timeout, 0, "Request timeout in seconds");
177 mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
179 callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);
181 /* Reset controller. */
182 if (a10_mmc_reset(sc) != 0) {
183 device_printf(dev, "cannot reset the controller\n");
/* DMA setup failure is non-fatal: fall back to PIO mode. */
187 if (a10_mmc_pio_mode == 0 && a10_mmc_setup_dma(sc) != 0) {
188 device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
189 a10_mmc_pio_mode = 1;
192 device_printf(sc->a10_dev, "DMA status: %s\n",
193 a10_mmc_pio_mode ? "disabled" : "enabled");
/* Advertise host capabilities to the mmc(4) stack. */
195 sc->a10_host.f_min = 400000;
196 sc->a10_host.f_max = 52000000;
197 sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
198 sc->a10_host.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_HSPEED;
199 sc->a10_host.mode = mode_sd;
201 child = device_add_child(dev, "mmc", -1);
203 device_printf(dev, "attaching MMC bus failed!\n");
206 if (device_probe_and_attach(child) != 0) {
207 device_printf(dev, "attaching MMC child failed!\n");
208 device_delete_child(dev, child);
/* Error unwind: undo callout, mutex, interrupt and resources. */
215 callout_drain(&sc->a10_timeoutc);
216 mtx_destroy(&sc->a10_mtx);
217 bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
218 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
224 a10_mmc_detach(device_t dev)
/*
 * bus_dmamap_load() callback for the descriptor-ring memory: record the
 * load error (if any) and the single segment's bus address in the softc.
 */
231 a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
233 struct a10_mmc_softc *sc;
235 sc = (struct a10_mmc_softc *)arg;
237 sc->a10_dma_map_err = err;
240 sc->a10_dma_desc_phys = segs[0].ds_addr;
/*
 * Create the DMA machinery: a tag/allocation/map for the contiguous
 * IDMA descriptor ring (A10_MMC_DMA_SEGS descriptors, 32-bit
 * addressable), and a second tag/map for scatter-gather data buffers
 * of up to A10_MMC_DMA_SEGS segments of A10_MMC_DMA_MAX_SIZE each.
 * NOTE(review): the error-return lines after each call are missing
 * from this view.
 */
244 a10_mmc_setup_dma(struct a10_mmc_softc *sc)
246 int dma_desc_size, error;
248 /* Allocate the DMA descriptor memory. */
249 dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
250 error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0,
251 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
252 dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
255 error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
256 BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
260 error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
261 sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
/* The load callback reports errors out-of-band via the softc. */
264 if (sc->a10_dma_map_err)
265 return (sc->a10_dma_map_err);
267 /* Create the DMA map for data transfers. */
268 error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0,
269 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
270 A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
271 A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
272 &sc->a10_dma_buf_tag);
275 error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
276 &sc->a10_dma_buf_map);
/*
 * bus_dmamap_load() callback for a data transfer: fill in the IDMA
 * descriptor chain, one descriptor per DMA segment.  The first
 * descriptor is flagged FD (first), intermediate ones DIC (disable
 * interrupt on completion) and chained via their physical `next'
 * pointers; the last is flagged LD/ER (last / end of ring) with a
 * NULL next pointer.  NOTE(review): the `int i' declaration and some
 * braces are missing from this view.
 */
284 a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
287 struct a10_mmc_dma_desc *dma_desc;
288 struct a10_mmc_softc *sc;
290 sc = (struct a10_mmc_softc *)arg;
291 sc->a10_dma_map_err = err;
292 dma_desc = sc->a10_dma_desc;
293 /* Note nsegs is guaranteed to be zero if err is non-zero. */
294 for (i = 0; i < nsegs; i++) {
295 dma_desc[i].buf_size = segs[i].ds_len;
296 dma_desc[i].buf_addr = segs[i].ds_addr;
297 dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
298 A10_MMC_DMA_CONFIG_OWN;
300 dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
301 if (i < (nsegs - 1)) {
302 dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
303 dma_desc[i].next = sc->a10_dma_desc_phys +
304 ((i + 1) * sizeof(struct a10_mmc_dma_desc));
306 dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
307 A10_MMC_DMA_CONFIG_ER;
308 dma_desc[i].next = 0;
/*
 * Prepare an IDMA transfer for the current request: load the data
 * buffer into the DMA map (a10_dma_cb builds the descriptor chain),
 * sync maps, then program the controller: mask the PIO FIFO
 * interrupts, switch GCTRL from AHB to DMA access, reset and enable
 * the internal DMA controller, select the transmit or receive IDMA
 * completion interrupt, and point the hardware at the descriptor
 * ring.  NOTE(review): declarations of `error'/`val' and the early
 * error returns are missing from this view.
 */
314 a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
316 bus_dmasync_op_t sync_op;
318 struct mmc_command *cmd;
321 cmd = sc->a10_req->cmd;
/* Reject transfers larger than the descriptor chain can cover. */
322 if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
324 error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
325 cmd->data->data, cmd->data->len, a10_dma_cb, sc, BUS_DMA_NOWAIT);
328 if (sc->a10_dma_map_err)
329 return (sc->a10_dma_map_err);
331 sc->a10_dma_inuse = 1;
332 if (cmd->data->flags & MMC_DATA_WRITE)
333 sync_op = BUS_DMASYNC_PREWRITE;
335 sync_op = BUS_DMASYNC_PREREAD;
336 bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
/* Descriptor ring is CPU-written, device-read: PREWRITE. */
337 bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);
/* Mask the FIFO request interrupts used only by PIO transfers. */
339 val = A10_MMC_READ_4(sc, A10_MMC_IMASK);
340 val &= ~(A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ);
341 A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val);
342 val = A10_MMC_READ_4(sc, A10_MMC_GCTRL);
343 val &= ~A10_MMC_ACCESS_BY_AHB;
344 val |= A10_MMC_DMA_ENABLE;
345 A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
346 val |= A10_MMC_DMA_RESET;
347 A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
348 A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_IDMAC_SOFT_RST);
349 A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
350 A10_MMC_IDMAC_IDMA_ON | A10_MMC_IDMAC_FIX_BURST);
/* Enable only the IDMA completion interrupt for this direction. */
351 val = A10_MMC_READ_4(sc, A10_MMC_IDIE);
352 val &= ~(A10_MMC_IDMAC_RECEIVE_INT | A10_MMC_IDMAC_TRANSMIT_INT);
353 if (cmd->data->flags & MMC_DATA_WRITE)
354 val |= A10_MMC_IDMAC_TRANSMIT_INT;
356 val |= A10_MMC_IDMAC_RECEIVE_INT;
357 A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
358 A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
359 A10_MMC_WRITE_4(sc, A10_MMC_FTRGL, A10_MMC_DMA_FTRGLEVEL);
/*
 * Soft-reset the controller and restore baseline state: wait for the
 * RESET bit to self-clear, program a maximal hardware timeout, clear
 * and unmask the interrupts the driver handles, and re-enable global
 * interrupt delivery.  NOTE(review): the `timeout' declaration/init
 * and the timeout-expired error return are missing from this view.
 */
365 a10_mmc_reset(struct a10_mmc_softc *sc)
369 A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
370 A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_RESET);
/* Poll until the hardware clears the RESET bit. */
372 while (--timeout > 0) {
373 if ((A10_MMC_READ_4(sc, A10_MMC_GCTRL) & A10_MMC_RESET) == 0)
380 /* Set the timeout. */
381 A10_MMC_WRITE_4(sc, A10_MMC_TIMEOUT, 0xffffffff);
383 /* Clear pending interrupts. */
384 A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
385 A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
386 /* Unmask interrupts. */
387 A10_MMC_WRITE_4(sc, A10_MMC_IMASK,
388 A10_MMC_CMD_DONE | A10_MMC_INT_ERR_BIT |
389 A10_MMC_DATA_OVER | A10_MMC_AUTOCMD_DONE);
390 /* Enable interrupts and AHB access. */
391 A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
392 A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_INT_ENABLE);
/*
 * Complete the in-flight request: on command error, reset the
 * controller and re-sync the clock; for PIO requests also reset the
 * FIFO.  Stop the watchdog callout and clear per-request state
 * (DMA-in-use, map error, interrupt-wait mask).  NOTE(review): the
 * lines that clear sc->a10_req and call req->done() are missing from
 * this view.
 */
398 a10_mmc_req_done(struct a10_mmc_softc *sc)
400 struct mmc_command *cmd;
401 struct mmc_request *req;
403 cmd = sc->a10_req->cmd;
404 if (cmd->error != MMC_ERR_NONE) {
405 /* Reset the controller. */
407 a10_mmc_update_clock(sc);
409 if (sc->a10_dma_inuse == 0) {
410 /* Reset the FIFO. */
411 A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
412 A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_FIFO_RESET);
416 callout_stop(&sc->a10_timeoutc);
/* Clear per-request bookkeeping for the next command. */
420 sc->a10_dma_inuse = 0;
421 sc->a10_dma_map_err = 0;
422 sc->a10_intr_wait = 0;
/*
 * Successful-completion path: wait for the card to leave the
 * data-busy state, fail the request if it never does, then copy the
 * response registers into cmd->resp[] (four words, reversed, for a
 * 136-bit response; one word otherwise) and verify via a10_resid
 * (counted in 32-bit words) that all data moved before finishing the
 * request.  NOTE(review): declarations of `timeout'/`status' and some
 * braces/returns are missing from this view.
 */
427 a10_mmc_req_ok(struct a10_mmc_softc *sc)
430 struct mmc_command *cmd;
434 while (--timeout > 0) {
435 status = A10_MMC_READ_4(sc, A10_MMC_STAS);
436 if ((status & A10_MMC_CARD_DATA_BUSY) == 0)
440 cmd = sc->a10_req->cmd;
442 cmd->error = MMC_ERR_FAILED;
443 a10_mmc_req_done(sc);
446 if (cmd->flags & MMC_RSP_PRESENT) {
447 if (cmd->flags & MMC_RSP_136) {
/* 136-bit responses come back with RESP3 holding the MSBs. */
448 cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
449 cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
450 cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
451 cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
453 cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
455 /* All data has been transferred ? */
456 if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
457 cmd->error = MMC_ERR_FAILED;
458 a10_mmc_req_done(sc);
/*
 * Watchdog callout: runs with the softc mutex held (callout_init_mtx).
 * If a request is still outstanding, fail it with MMC_ERR_TIMEOUT;
 * otherwise log a spurious firing.
 */
462 a10_mmc_timeout(void *arg)
464 struct a10_mmc_softc *sc;
466 sc = (struct a10_mmc_softc *)arg;
467 if (sc->a10_req != NULL) {
468 device_printf(sc->a10_dev, "controller timeout\n");
469 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
470 a10_mmc_req_done(sc);
472 device_printf(sc->a10_dev,
473 "Spurious timeout - no active request\n");
/*
 * Move data through the FIFO one 32-bit word at a time, resuming at
 * sc->a10_resid (a word index) and stopping when the FIFO is full
 * (writes) or empty (reads).  Progress is recorded in a10_resid so the
 * interrupt handler can continue the transfer later.  NOTE(review):
 * local declarations (buf, write, bit, i) and the loop's break on the
 * FIFO-status test are missing from this view.
 */
477 a10_mmc_pio_transfer(struct a10_mmc_softc *sc, struct mmc_data *data)
482 buf = (uint32_t *)data->data;
483 write = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
484 bit = write ? A10_MMC_FIFO_FULL : A10_MMC_FIFO_EMPTY;
485 for (i = sc->a10_resid; i < (data->len >> 2); i++) {
486 if ((A10_MMC_READ_4(sc, A10_MMC_STAS) & bit))
489 A10_MMC_WRITE_4(sc, A10_MMC_FIFO, buf[i]);
491 buf[i] = A10_MMC_READ_4(sc, A10_MMC_FIFO);
492 sc->a10_resid = i + 1;
/*
 * Interrupt handler.  Reads the raw (RINTR), IDMA (IDST) and mask
 * (IMASK) status; bails on a fully-spurious interrupt or one with no
 * active request.  Command/response errors and IDMA errors fail the
 * request.  On IDMA completion it post-syncs and unloads the data
 * map and marks all data transferred; in PIO mode a FIFO/data-over
 * interrupt continues the word-at-a-time transfer.  Once every bit in
 * a10_intr_wait has accumulated in a10_intr the request completes
 * (via a10_mmc_req_ok, not visible here).  Status registers are
 * acked by writing the bits back.  NOTE(review): locking calls,
 * early returns and some braces are missing from this view.
 */
499 a10_mmc_intr(void *arg)
501 bus_dmasync_op_t sync_op;
502 struct a10_mmc_softc *sc;
503 struct mmc_data *data;
504 uint32_t idst, imask, rint;
506 sc = (struct a10_mmc_softc *)arg;
508 rint = A10_MMC_READ_4(sc, A10_MMC_RINTR);
509 idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
510 imask = A10_MMC_READ_4(sc, A10_MMC_IMASK);
/* Nothing pending at all: shared IRQ fired for someone else. */
511 if (idst == 0 && imask == 0 && rint == 0) {
516 device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
519 if (sc->a10_req == NULL) {
520 device_printf(sc->a10_dev,
521 "Spurious interrupt - no active request, rint: 0x%08X\n",
525 if (rint & A10_MMC_INT_ERR_BIT) {
526 device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
527 if (rint & A10_MMC_RESP_TIMEOUT)
528 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
530 sc->a10_req->cmd->error = MMC_ERR_FAILED;
531 a10_mmc_req_done(sc);
534 if (idst & A10_MMC_IDMAC_ERROR) {
535 device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
536 sc->a10_req->cmd->error = MMC_ERR_FAILED;
537 a10_mmc_req_done(sc);
/* Accumulate completion bits toward a10_intr_wait. */
541 sc->a10_intr |= rint;
542 data = sc->a10_req->cmd->data;
543 if (data != NULL && sc->a10_dma_inuse == 1 &&
544 (idst & A10_MMC_IDMAC_COMPLETE)) {
545 if (data->flags & MMC_DATA_WRITE)
546 sync_op = BUS_DMASYNC_POSTWRITE;
548 sync_op = BUS_DMASYNC_POSTREAD;
549 bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
551 bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
552 BUS_DMASYNC_POSTWRITE);
553 bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
/* DMA moved everything: resid is the full length in words. */
554 sc->a10_resid = data->len >> 2;
555 } else if (data != NULL && sc->a10_dma_inuse == 0 &&
556 (rint & (A10_MMC_DATA_OVER | A10_MMC_RX_DATA_REQ |
557 A10_MMC_TX_DATA_REQ)) != 0)
558 a10_mmc_pio_transfer(sc, data);
559 if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
/* Ack the handled status bits. */
563 A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
564 A10_MMC_WRITE_4(sc, A10_MMC_RINTR, rint);
/*
 * mmcbr request entry point: build the CMDR command word from the
 * command's flags (init sequence for GO_IDLE, response
 * presence/length/CRC), set the interrupt bits to wait for, program
 * block size and byte count for data commands, set up DMA (or fall
 * back to PIO with the FIFO interrupts unmasked), write the argument
 * and fire the command, then arm the watchdog callout.
 * NOTE(review): locking, the busy check on a10_req, the `blksz'
 * declaration and the return are missing from this view.
 */
569 a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
572 struct a10_mmc_softc *sc;
573 struct mmc_command *cmd;
574 uint32_t cmdreg, val;
576 sc = device_get_softc(bus);
584 cmdreg = A10_MMC_START;
585 if (cmd->opcode == MMC_GO_IDLE_STATE)
586 cmdreg |= A10_MMC_SEND_INIT_SEQ;
587 if (cmd->flags & MMC_RSP_PRESENT)
588 cmdreg |= A10_MMC_RESP_EXP;
589 if (cmd->flags & MMC_RSP_136)
590 cmdreg |= A10_MMC_LONG_RESP;
591 if (cmd->flags & MMC_RSP_CRC)
592 cmdreg |= A10_MMC_CHECK_RESP_CRC;
/* Every command at least waits for CMD_DONE. */
596 sc->a10_intr_wait = A10_MMC_CMD_DONE;
597 cmd->error = MMC_ERR_NONE;
598 if (cmd->data != NULL) {
599 sc->a10_intr_wait |= A10_MMC_DATA_OVER;
600 cmdreg |= A10_MMC_DATA_EXP | A10_MMC_WAIT_PREOVER;
601 if (cmd->data->flags & MMC_DATA_MULTI) {
/* Multi-block: hardware sends the stop command itself. */
602 cmdreg |= A10_MMC_SEND_AUTOSTOP;
603 sc->a10_intr_wait |= A10_MMC_AUTOCMD_DONE;
605 if (cmd->data->flags & MMC_DATA_WRITE)
606 cmdreg |= A10_MMC_WRITE;
607 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
608 A10_MMC_WRITE_4(sc, A10_MMC_BLKSZ, blksz);
609 A10_MMC_WRITE_4(sc, A10_MMC_BCNTR, cmd->data->len);
611 if (a10_mmc_pio_mode == 0)
612 a10_mmc_prepare_dma(sc);
613 /* Enable PIO access if sc->a10_dma_inuse is not set. */
614 if (sc->a10_dma_inuse == 0) {
615 val = A10_MMC_READ_4(sc, A10_MMC_GCTRL);
616 val &= ~A10_MMC_DMA_ENABLE;
617 val |= A10_MMC_ACCESS_BY_AHB;
618 A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
619 val = A10_MMC_READ_4(sc, A10_MMC_IMASK);
620 val |= A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ;
621 A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val);
/* Writing CMDR with A10_MMC_START kicks off the command. */
625 A10_MMC_WRITE_4(sc, A10_MMC_CARG, cmd->arg);
626 A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
627 callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
628 a10_mmc_timeout, sc);
/*
 * bus_read_ivar: expose host/ios state to the mmc(4) child.  Each
 * MMCBR_IVAR_* maps to one field of sc->a10_host; MAX_DATA is fixed at
 * 65535 blocks.  NOTE(review): the `break' statements, default case
 * and return are missing from this view.
 */
635 a10_mmc_read_ivar(device_t bus, device_t child, int which,
638 struct a10_mmc_softc *sc;
640 sc = device_get_softc(bus);
644 case MMCBR_IVAR_BUS_MODE:
645 *(int *)result = sc->a10_host.ios.bus_mode;
647 case MMCBR_IVAR_BUS_WIDTH:
648 *(int *)result = sc->a10_host.ios.bus_width;
650 case MMCBR_IVAR_CHIP_SELECT:
651 *(int *)result = sc->a10_host.ios.chip_select;
653 case MMCBR_IVAR_CLOCK:
654 *(int *)result = sc->a10_host.ios.clock;
656 case MMCBR_IVAR_F_MIN:
657 *(int *)result = sc->a10_host.f_min;
659 case MMCBR_IVAR_F_MAX:
660 *(int *)result = sc->a10_host.f_max;
662 case MMCBR_IVAR_HOST_OCR:
663 *(int *)result = sc->a10_host.host_ocr;
665 case MMCBR_IVAR_MODE:
666 *(int *)result = sc->a10_host.mode;
669 *(int *)result = sc->a10_host.ocr;
671 case MMCBR_IVAR_POWER_MODE:
672 *(int *)result = sc->a10_host.ios.power_mode;
675 *(int *)result = sc->a10_host.ios.vdd;
677 case MMCBR_IVAR_CAPS:
678 *(int *)result = sc->a10_host.caps;
680 case MMCBR_IVAR_MAX_DATA:
681 *(int *)result = 65535;
/*
 * bus_write_ivar: let the mmc(4) child update host/ios state.  The
 * capability and frequency-range ivars are rejected as read-only.
 * NOTE(review): the `break' statements, default case and return are
 * missing from this view.
 */
689 a10_mmc_write_ivar(device_t bus, device_t child, int which,
692 struct a10_mmc_softc *sc;
694 sc = device_get_softc(bus);
698 case MMCBR_IVAR_BUS_MODE:
699 sc->a10_host.ios.bus_mode = value;
701 case MMCBR_IVAR_BUS_WIDTH:
702 sc->a10_host.ios.bus_width = value;
704 case MMCBR_IVAR_CHIP_SELECT:
705 sc->a10_host.ios.chip_select = value;
707 case MMCBR_IVAR_CLOCK:
708 sc->a10_host.ios.clock = value;
710 case MMCBR_IVAR_MODE:
711 sc->a10_host.mode = value;
714 sc->a10_host.ocr = value;
716 case MMCBR_IVAR_POWER_MODE:
717 sc->a10_host.ios.power_mode = value;
720 sc->a10_host.ios.vdd = value;
722 /* These are read-only */
723 case MMCBR_IVAR_CAPS:
724 case MMCBR_IVAR_HOST_OCR:
725 case MMCBR_IVAR_F_MIN:
726 case MMCBR_IVAR_F_MAX:
727 case MMCBR_IVAR_MAX_DATA:
/*
 * Issue the special "update clock" command (UPCLK_ONLY) and poll for
 * the START bit to clear, indicating the controller latched the new
 * clock settings; clear raw interrupt status afterwards in both the
 * success and timeout paths.  NOTE(review): the `cmdreg'/`retry'
 * declarations and the return statements are missing from this view.
 */
735 a10_mmc_update_clock(struct a10_mmc_softc *sc)
740 cmdreg = A10_MMC_START | A10_MMC_UPCLK_ONLY |
741 A10_MMC_WAIT_PREOVER;
742 A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
744 while (--retry > 0) {
745 if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_START) == 0) {
746 A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
751 A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
752 device_printf(sc->a10_dev, "timeout updating clock\n");
/*
 * mmcbr update_ios: apply clock and bus-width changes.  The card clock
 * must be gated off before the divider is touched, and every CLKCR
 * change has to be latched with a10_mmc_update_clock().  The platform
 * clock source is set via a10_clk_mmc_cfg().  NOTE(review): local
 * declarations (clkcr, error, ios), the error returns and the switch
 * case labels for bus widths 1/4/8 are missing from this view.
 */
758 a10_mmc_update_ios(device_t bus, device_t child)
761 struct a10_mmc_softc *sc;
765 sc = device_get_softc(bus);
766 clkcr = A10_MMC_READ_4(sc, A10_MMC_CLKCR);
767 if (clkcr & A10_MMC_CARD_CLK_ON) {
/* Gate the card clock before reprogramming. */
769 clkcr &= ~A10_MMC_CARD_CLK_ON;
770 A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
771 error = a10_mmc_update_clock(sc);
776 ios = &sc->a10_host.ios;
778 /* Reset the divider. */
779 clkcr &= ~A10_MMC_CLKCR_DIV;
780 A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
781 error = a10_mmc_update_clock(sc);
785 /* Set the MMC clock. */
786 error = a10_clk_mmc_cfg(sc->a10_id, ios->clock);
/* Re-enable the card clock with the new settings. */
791 clkcr |= A10_MMC_CARD_CLK_ON;
792 A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
793 error = a10_mmc_update_clock(sc);
798 /* Set the bus width. */
799 switch (ios->bus_width) {
801 A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH1);
804 A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH4);
807 A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH8);
815 a10_mmc_get_ro(device_t bus, device_t child)
/*
 * mmcbr acquire_host: sleep (interruptibly, PCATCH) on the softc while
 * another caller holds the bus.  NOTE(review): the locking calls, the
 * busy-flag set and the return are missing from this view.
 */
822 a10_mmc_acquire_host(device_t bus, device_t child)
824 struct a10_mmc_softc *sc;
827 sc = device_get_softc(bus);
829 while (sc->a10_bus_busy) {
830 error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
/*
 * mmcbr release_host counterpart to a10_mmc_acquire_host.
 * NOTE(review): the busy-flag clear and wakeup are missing from this
 * view.
 */
843 a10_mmc_release_host(device_t bus, device_t child)
845 struct a10_mmc_softc *sc;
847 sc = device_get_softc(bus);
/* newbus method table: device lifecycle, bus ivar accessors, and the
 * mmcbr(4) bridge interface. */
856 static device_method_t a10_mmc_methods[] = {
857 /* Device interface */
858 DEVMETHOD(device_probe, a10_mmc_probe),
859 DEVMETHOD(device_attach, a10_mmc_attach),
860 DEVMETHOD(device_detach, a10_mmc_detach),
863 DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar),
864 DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar),
865 DEVMETHOD(bus_print_child, bus_generic_print_child),
867 /* MMC bridge interface */
868 DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios),
869 DEVMETHOD(mmcbr_request, a10_mmc_request),
870 DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro),
871 DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host),
872 DEVMETHOD(mmcbr_release_host, a10_mmc_release_host),
/* Driver glue: register under simplebus and attach mmc(4) below us. */
877 static devclass_t a10_mmc_devclass;
879 static driver_t a10_mmc_driver = {
882 sizeof(struct a10_mmc_softc),
885 DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);
886 DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL);