2 * Copyright (c) 2013 Alexander Fedorov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/resource.h>
40 #include <sys/sysctl.h>
42 #include <machine/bus.h>
44 #include <dev/ofw/ofw_bus.h>
45 #include <dev/ofw/ofw_bus_subr.h>
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
50 #include <arm/allwinner/a10_mmc.h>
51 #include <dev/extres/clk/clk.h>
52 #include <dev/extres/hwreset/hwreset.h>
/* Indices into the a10_res[] resource array and its size. */
54 #define A10_MMC_MEMRES 0
55 #define A10_MMC_IRQRES 1
56 #define A10_MMC_RESSZ 2
/* Max scatter/gather segments: one per page of MAXPHYS, plus one. */
57 #define A10_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
/* Upper bound on a single DMA segment, in bytes. */
58 #define A10_MMC_DMA_MAX_SIZE 0x2000
/* Raw value written to the FIFO water level register (A10_MMC_FWLR). */
59 #define A10_MMC_DMA_FTRGLEVEL 0x20070008
/* Poll-loop bound used while waiting for controller/FIFO/DMA resets. */
60 #define A10_MMC_RESET_RETRY 1000
/* Bus clock used during the card identification phase (400 kHz). */
62 #define CARD_ID_FREQUENCY 400000
/* FDT "compatible" strings this driver binds to (nonzero ocd_data = match). */
64 static struct ofw_compat_data compat_data[] = {
65 {"allwinner,sun4i-a10-mmc", 1},
66 {"allwinner,sun5i-a13-mmc", 1},
67 {"allwinner,sun7i-a20-mmc", 1},
/* Per-instance driver state. */
71 struct a10_mmc_softc {
75 hwreset_t a10_rst_ahb; /* AHB reset line (optional in the FDT) */
79 struct callout a10_timeoutc; /* request-timeout callout */
80 struct mmc_host a10_host; /* host description for the mmc layer */
81 struct mmc_request * a10_req; /* in-flight request, NULL when idle */
83 struct resource * a10_res[A10_MMC_RESSZ]; /* memory + IRQ resources */
85 uint32_t a10_intr_wait; /* interrupt bits the request waits on */
88 /* Fields required for DMA access. */
89 bus_addr_t a10_dma_desc_phys; /* bus address of the descriptor ring */
90 bus_dmamap_t a10_dma_map; /* map for the descriptor memory */
91 bus_dma_tag_t a10_dma_tag; /* tag for the descriptor memory */
93 bus_dmamap_t a10_dma_buf_map; /* map for data transfers */
94 bus_dma_tag_t a10_dma_buf_tag; /* tag for data transfers */
/* One register window and one (shareable) interrupt line. */
98 static struct resource_spec a10_mmc_res_spec[] = {
99 { SYS_RES_MEMORY, 0, RF_ACTIVE },
100 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/* Forward declarations for the newbus and mmcbr entry points. */
104 static int a10_mmc_probe(device_t);
105 static int a10_mmc_attach(device_t);
106 static int a10_mmc_detach(device_t);
107 static int a10_mmc_setup_dma(struct a10_mmc_softc *);
108 static int a10_mmc_reset(struct a10_mmc_softc *);
109 static void a10_mmc_intr(void *);
110 static int a10_mmc_update_clock(struct a10_mmc_softc *, uint32_t);
112 static int a10_mmc_update_ios(device_t, device_t);
113 static int a10_mmc_request(device_t, device_t, struct mmc_request *);
114 static int a10_mmc_get_ro(device_t, device_t);
115 static int a10_mmc_acquire_host(device_t, device_t);
116 static int a10_mmc_release_host(device_t, device_t);
/* Softc mutex helpers and 32-bit register accessors for the MMIO window. */
118 #define A10_MMC_LOCK(_sc) mtx_lock(&(_sc)->a10_mtx)
119 #define A10_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->a10_mtx)
120 #define A10_MMC_READ_4(_sc, _reg) \
121 bus_read_4((_sc)->a10_res[A10_MMC_MEMRES], _reg)
122 #define A10_MMC_WRITE_4(_sc, _reg, _value) \
123 bus_write_4((_sc)->a10_res[A10_MMC_MEMRES], _reg, _value)
/*
 * Probe: match the FDT node against compat_data and set the device
 * description; returns BUS_PROBE_DEFAULT on a match.
 */
126 a10_mmc_probe(device_t dev)
129 if (!ofw_bus_status_okay(dev))
131 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
134 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
136 return (BUS_PROBE_DEFAULT);
/*
 * Attach: allocate bus resources, install the interrupt handler,
 * de-assert the AHB reset, enable the AHB and MMC module clocks,
 * reset the controller, set up DMA, fill in the mmc_host parameters
 * and attach the mmc(4) child bus.
 */
140 a10_mmc_attach(device_t dev)
143 struct a10_mmc_softc *sc;
144 struct sysctl_ctx_list *ctx;
145 struct sysctl_oid_list *tree;
150 node = ofw_bus_get_node(dev);
151 sc = device_get_softc(dev);
154 if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
155 device_printf(dev, "cannot allocate device resources\n");
158 if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
159 INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
160 &sc->a10_intrhand)) {
161 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
162 device_printf(dev, "cannot setup interrupt handler\n");
165 mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
167 callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);
169 /* De-assert reset */
170 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->a10_rst_ahb) == 0) {
171 error = hwreset_deassert(sc->a10_rst_ahb);
173 device_printf(dev, "cannot de-assert reset\n");
178 /* Activate the module clock. */
179 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->a10_clk_ahb);
181 device_printf(dev, "cannot get ahb clock\n");
184 error = clk_enable(sc->a10_clk_ahb);
186 device_printf(dev, "cannot enable ahb clock\n");
189 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->a10_clk_mmc);
191 device_printf(dev, "cannot get mmc clock\n");
/* Start the mmc clock at the 400 kHz card-identification frequency. */
194 error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
197 device_printf(dev, "cannot init mmc clock\n");
200 error = clk_enable(sc->a10_clk_mmc);
202 device_printf(dev, "cannot enable mmc clock\n");
/* Request timeout is tunable at runtime via the req_timeout sysctl. */
206 sc->a10_timeout = 10;
207 ctx = device_get_sysctl_ctx(dev);
208 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
209 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
210 &sc->a10_timeout, 0, "Request timeout in seconds");
/* Pulse the hardware reset line of the controller. */
213 A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 1);
215 A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 0);
218 /* Soft Reset controller. */
219 if (a10_mmc_reset(sc) != 0) {
220 device_printf(dev, "cannot reset the controller\n");
224 if (a10_mmc_setup_dma(sc) != 0) {
225 device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
/* Bus width comes from the FDT; default applies when the property is absent. */
229 if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
232 sc->a10_host.f_min = 400000;
233 sc->a10_host.f_max = 52000000;
234 sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
235 sc->a10_host.mode = mode_sd;
236 sc->a10_host.caps = MMC_CAP_HSPEED;
238 sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
240 sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;
242 child = device_add_child(dev, "mmc", -1);
244 device_printf(dev, "attaching MMC bus failed!\n");
247 if (device_probe_and_attach(child) != 0) {
248 device_printf(dev, "attaching MMC child failed!\n");
249 device_delete_child(dev, child);
/* Common error path: tear down everything set up above. */
256 callout_drain(&sc->a10_timeoutc);
257 mtx_destroy(&sc->a10_mtx);
258 bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
259 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
/* Detach entry point. */
265 a10_mmc_detach(device_t dev)
/*
 * busdma load callback for the descriptor memory: record the load error
 * and the single segment's bus address in the softc.
 */
272 a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
274 struct a10_mmc_softc *sc;
276 sc = (struct a10_mmc_softc *)arg;
278 sc->a10_dma_map_err = err;
281 sc->a10_dma_desc_phys = segs[0].ds_addr;
/*
 * Create the DMA tags and maps: one tag/map pair for the in-memory
 * descriptor ring (allocated, zeroed and loaded here) and one for the
 * data buffers used by individual transfers.  Returns 0 on success or
 * a busdma error.
 */
285 a10_mmc_setup_dma(struct a10_mmc_softc *sc)
287 int dma_desc_size, error;
289 /* Allocate the DMA descriptor memory. */
290 dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
291 error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
292 A10_MMC_DMA_ALIGN, 0,
293 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
294 dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
297 error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
298 BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
/* The load callback stores the ring's bus address in a10_dma_desc_phys. */
302 error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
303 sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
306 if (sc->a10_dma_map_err)
307 return (sc->a10_dma_map_err);
309 /* Create the DMA map for data transfers. */
310 error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
311 A10_MMC_DMA_ALIGN, 0,
312 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
313 A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
314 A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
315 &sc->a10_dma_buf_tag);
318 error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
319 &sc->a10_dma_buf_map);
/*
 * busdma load callback for a data transfer: translate the segment list
 * into the controller's in-memory descriptor chain.  The first entry is
 * flagged as first-descriptor, the last as last/end-of-ring, and the
 * rest chain to the next descriptor with its interrupt suppressed.
 */
327 a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
330 struct a10_mmc_dma_desc *dma_desc;
331 struct a10_mmc_softc *sc;
333 sc = (struct a10_mmc_softc *)arg;
334 sc->a10_dma_map_err = err;
339 dma_desc = sc->a10_dma_desc;
340 for (i = 0; i < nsegs; i++) {
341 dma_desc[i].buf_size = segs[i].ds_len;
342 dma_desc[i].buf_addr = segs[i].ds_addr;
343 dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
344 A10_MMC_DMA_CONFIG_OWN;
346 dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
347 if (i < (nsegs - 1)) {
/* Not the last segment: disable its completion interrupt and link on. */
348 dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
349 dma_desc[i].next = sc->a10_dma_desc_phys +
350 ((i + 1) * sizeof(struct a10_mmc_dma_desc));
/* Last segment: mark last descriptor / end of ring and terminate chain. */
352 dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
353 A10_MMC_DMA_CONFIG_ER;
354 dma_desc[i].next = 0;
/*
 * Map the current request's data buffer for DMA, sync the maps, and
 * program the controller's internal DMA engine (IDMAC): enable DMA
 * mode, soft-reset and start the IDMAC, enable the RX or TX DMA
 * interrupt, and load the descriptor list address and FIFO threshold.
 */
360 a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
362 bus_dmasync_op_t sync_op;
364 struct mmc_command *cmd;
367 cmd = sc->a10_req->cmd;
/* Reject transfers larger than the descriptor ring can describe. */
368 if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
370 error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
371 cmd->data->data, cmd->data->len, a10_dma_cb, sc, 0);
374 if (sc->a10_dma_map_err)
375 return (sc->a10_dma_map_err);
377 if (cmd->data->flags & MMC_DATA_WRITE)
378 sync_op = BUS_DMASYNC_PREWRITE;
380 sync_op = BUS_DMASYNC_PREREAD;
381 bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
382 bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);
/* Switch the controller from FIFO access mode to DMA mode. */
385 val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
386 val &= ~A10_MMC_CTRL_FIFO_AC_MOD;
387 val |= A10_MMC_CTRL_DMA_ENB;
388 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
391 val |= A10_MMC_CTRL_DMA_RST;
392 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
/* Soft-reset the IDMAC, then start it in fixed-burst mode. */
394 A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_DMAC_IDMAC_SOFT_RST);
395 A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
396 A10_MMC_DMAC_IDMAC_IDMA_ON | A10_MMC_DMAC_IDMAC_FIX_BURST);
398 /* Enable RX or TX DMA interrupt */
399 if (cmd->data->flags & MMC_DATA_WRITE)
400 val |= A10_MMC_IDST_TX_INT;
402 val |= A10_MMC_IDST_RX_INT;
403 A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
405 /* Set DMA descritptor list address */
406 A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
408 /* FIFO trigger level */
409 A10_MMC_WRITE_4(sc, A10_MMC_FWLR, A10_MMC_DMA_FTRGLEVEL);
/*
 * Soft-reset the controller and bring it to a known state: poll for the
 * reset bit to clear, program the response/data timeouts, clear and
 * unmask the interrupts this driver handles, and enable interrupts.
 */
415 a10_mmc_reset(struct a10_mmc_softc *sc)
419 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, A10_MMC_RESET)
421 while (--timeout > 0) {
422 if ((A10_MMC_READ_4(sc, A10_MMC_GCTL) & A10_MMC_RESET) == 0)
429 /* Set the timeout. */
430 A10_MMC_WRITE_4(sc, A10_MMC_TMOR,
431 A10_MMC_TMOR_DTO_LMT_SHIFT(A10_MMC_TMOR_DTO_LMT_MASK) |
432 A10_MMC_TMOR_RTO_LMT_SHIFT(A10_MMC_TMOR_RTO_LMT_MASK));
434 /* Clear pending interrupts. */
435 A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
436 A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
437 /* Unmask interrupts. */
438 A10_MMC_WRITE_4(sc, A10_MMC_IMKR,
439 A10_MMC_INT_CMD_DONE | A10_MMC_INT_ERR_BIT |
440 A10_MMC_INT_DATA_OVER | A10_MMC_INT_AUTO_STOP_DONE);
441 /* Enable interrupts and AHB access. */
442 A10_MMC_WRITE_4(sc, A10_MMC_GCTL,
443 A10_MMC_READ_4(sc, A10_MMC_GCTL) | A10_MMC_CTRL_INT_ENB);
/*
 * Finish the current request.  On error, reset the FIFO and DMA
 * engines (polling for completion) and re-program the clock; in all
 * cases stop the timeout callout and clear the per-request state.
 */
449 a10_mmc_req_done(struct a10_mmc_softc *sc)
451 struct mmc_command *cmd;
452 struct mmc_request *req;
456 cmd = sc->a10_req->cmd;
457 if (cmd->error != MMC_ERR_NONE) {
458 /* Reset the FIFO and DMA engines. */
459 mask = A10_MMC_CTRL_FIFO_RST | A10_MMC_CTRL_DMA_RST;
460 val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
461 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val | mask);
463 retry = A10_MMC_RESET_RETRY;
464 while (--retry > 0) {
465 val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
466 if ((val & mask) == 0)
471 device_printf(sc->a10_dev,
472 "timeout resetting DMA/FIFO\n");
473 a10_mmc_update_clock(sc, 1);
477 callout_stop(&sc->a10_timeoutc);
/* Clear per-request bookkeeping for the next command. */
481 sc->a10_dma_map_err = 0;
482 sc->a10_intr_wait = 0;
/*
 * All awaited interrupts have fired: wait for the card to leave the
 * busy state, copy the command response out of the response registers
 * (long responses are stored most-significant word first), verify that
 * all data was transferred, and complete the request.
 */
487 a10_mmc_req_ok(struct a10_mmc_softc *sc)
490 struct mmc_command *cmd;
494 while (--timeout > 0) {
495 status = A10_MMC_READ_4(sc, A10_MMC_STAR);
496 if ((status & A10_MMC_STAR_CARD_BUSY) == 0)
500 cmd = sc->a10_req->cmd;
/* Card stayed busy past the poll limit: fail the request. */
502 cmd->error = MMC_ERR_FAILED;
503 a10_mmc_req_done(sc);
506 if (cmd->flags & MMC_RSP_PRESENT) {
507 if (cmd->flags & MMC_RSP_136) {
508 cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
509 cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
510 cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
511 cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
513 cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
515 /* All data has been transferred ? */
516 if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
517 cmd->error = MMC_ERR_FAILED;
518 a10_mmc_req_done(sc);
/*
 * Callout handler: the controller did not complete the request within
 * a10_timeout seconds, so fail it with MMC_ERR_TIMEOUT.
 */
522 a10_mmc_timeout(void *arg)
524 struct a10_mmc_softc *sc;
526 sc = (struct a10_mmc_softc *)arg;
527 if (sc->a10_req != NULL) {
528 device_printf(sc->a10_dev, "controller timeout\n");
529 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
530 a10_mmc_req_done(sc);
532 device_printf(sc->a10_dev,
533 "Spurious timeout - no active request\n");
/*
 * Interrupt handler.  Read the raw (RISR), DMA (IDST) and masked
 * (IMKR) interrupt status; bail out on spurious interrupts, fail the
 * request on error bits, post-sync and unload the data DMA map on DMA
 * completion, accumulate awaited bits and complete the request once
 * they have all fired.  Status registers are acknowledged on exit.
 */
537 a10_mmc_intr(void *arg)
539 bus_dmasync_op_t sync_op;
540 struct a10_mmc_softc *sc;
541 struct mmc_data *data;
542 uint32_t idst, imask, rint;
544 sc = (struct a10_mmc_softc *)arg;
546 rint = A10_MMC_READ_4(sc, A10_MMC_RISR);
547 idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
548 imask = A10_MMC_READ_4(sc, A10_MMC_IMKR);
/* Nothing pending at all: not our interrupt. */
549 if (idst == 0 && imask == 0 && rint == 0) {
554 device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
557 if (sc->a10_req == NULL) {
558 device_printf(sc->a10_dev,
559 "Spurious interrupt - no active request, rint: 0x%08X\n",
563 if (rint & A10_MMC_INT_ERR_BIT) {
564 device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
565 if (rint & A10_MMC_INT_RESP_TIMEOUT)
566 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
568 sc->a10_req->cmd->error = MMC_ERR_FAILED;
569 a10_mmc_req_done(sc);
572 if (idst & A10_MMC_IDST_ERROR) {
573 device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
574 sc->a10_req->cmd->error = MMC_ERR_FAILED;
575 a10_mmc_req_done(sc);
/* Record which awaited interrupt bits have fired so far. */
579 sc->a10_intr |= rint;
580 data = sc->a10_req->cmd->data;
581 if (data != NULL && (idst & A10_MMC_IDST_COMPLETE) != 0) {
582 if (data->flags & MMC_DATA_WRITE)
583 sync_op = BUS_DMASYNC_POSTWRITE;
585 sync_op = BUS_DMASYNC_POSTREAD;
586 bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
588 bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
589 BUS_DMASYNC_POSTWRITE);
590 bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
/* a10_resid counts 32-bit words transferred; checked in req_ok. */
591 sc->a10_resid = data->len >> 2;
593 if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
/* Acknowledge the interrupt status bits we observed. */
597 A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
598 A10_MMC_WRITE_4(sc, A10_MMC_RISR, rint);
/*
 * mmcbr request entry point: translate the mmc_command into the
 * controller's command register layout, set up block size/byte count
 * and DMA for data transfers, write the argument and command registers
 * to start execution, and arm the request timeout callout.
 */
603 a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
606 struct a10_mmc_softc *sc;
607 struct mmc_command *cmd;
611 sc = device_get_softc(bus);
/* Build the command register flags from the mmc command flags. */
619 cmdreg = A10_MMC_CMDR_LOAD;
620 if (cmd->opcode == MMC_GO_IDLE_STATE)
621 cmdreg |= A10_MMC_CMDR_SEND_INIT_SEQ;
622 if (cmd->flags & MMC_RSP_PRESENT)
623 cmdreg |= A10_MMC_CMDR_RESP_RCV;
624 if (cmd->flags & MMC_RSP_136)
625 cmdreg |= A10_MMC_CMDR_LONG_RESP;
626 if (cmd->flags & MMC_RSP_CRC)
627 cmdreg |= A10_MMC_CMDR_CHK_RESP_CRC;
/* Always wait for command-done; data transfers add more bits below. */
631 sc->a10_intr_wait = A10_MMC_INT_CMD_DONE;
632 cmd->error = MMC_ERR_NONE;
633 if (cmd->data != NULL) {
634 sc->a10_intr_wait |= A10_MMC_INT_DATA_OVER;
635 cmdreg |= A10_MMC_CMDR_DATA_TRANS | A10_MMC_CMDR_WAIT_PRE_OVER;
636 if (cmd->data->flags & MMC_DATA_MULTI) {
/* Multi-block: have the controller send the stop command itself. */
637 cmdreg |= A10_MMC_CMDR_STOP_CMD_FLAG;
638 sc->a10_intr_wait |= A10_MMC_INT_AUTO_STOP_DONE;
640 if (cmd->data->flags & MMC_DATA_WRITE)
641 cmdreg |= A10_MMC_CMDR_DIR_WRITE;
642 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
643 A10_MMC_WRITE_4(sc, A10_MMC_BKSR, blksz);
644 A10_MMC_WRITE_4(sc, A10_MMC_BYCR, cmd->data->len);
646 err = a10_mmc_prepare_dma(sc);
648 device_printf(sc->a10_dev, "prepare_dma failed: %d\n", err);
/* Kick off the command and start the timeout clock. */
651 A10_MMC_WRITE_4(sc, A10_MMC_CAGR, cmd->arg);
652 A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
653 callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
654 a10_mmc_timeout, sc);
/*
 * mmcbr instance-variable read accessor: expose the mmc_host fields
 * (ios, f_min/f_max, ocr, caps, ...) to the mmc layer.
 */
661 a10_mmc_read_ivar(device_t bus, device_t child, int which,
664 struct a10_mmc_softc *sc;
666 sc = device_get_softc(bus);
670 case MMCBR_IVAR_BUS_MODE:
671 *(int *)result = sc->a10_host.ios.bus_mode;
673 case MMCBR_IVAR_BUS_WIDTH:
674 *(int *)result = sc->a10_host.ios.bus_width;
676 case MMCBR_IVAR_CHIP_SELECT:
677 *(int *)result = sc->a10_host.ios.chip_select;
679 case MMCBR_IVAR_CLOCK:
680 *(int *)result = sc->a10_host.ios.clock;
682 case MMCBR_IVAR_F_MIN:
683 *(int *)result = sc->a10_host.f_min;
685 case MMCBR_IVAR_F_MAX:
686 *(int *)result = sc->a10_host.f_max;
688 case MMCBR_IVAR_HOST_OCR:
689 *(int *)result = sc->a10_host.host_ocr;
691 case MMCBR_IVAR_MODE:
692 *(int *)result = sc->a10_host.mode;
695 *(int *)result = sc->a10_host.ocr;
697 case MMCBR_IVAR_POWER_MODE:
698 *(int *)result = sc->a10_host.ios.power_mode;
701 *(int *)result = sc->a10_host.ios.vdd;
703 case MMCBR_IVAR_CAPS:
704 *(int *)result = sc->a10_host.caps;
706 case MMCBR_IVAR_MAX_DATA:
707 *(int *)result = 65535;
/*
 * mmcbr instance-variable write accessor: let the mmc layer update the
 * ios settings; the hardware-fixed fields are rejected as read-only.
 */
715 a10_mmc_write_ivar(device_t bus, device_t child, int which,
718 struct a10_mmc_softc *sc;
720 sc = device_get_softc(bus);
724 case MMCBR_IVAR_BUS_MODE:
725 sc->a10_host.ios.bus_mode = value;
727 case MMCBR_IVAR_BUS_WIDTH:
728 sc->a10_host.ios.bus_width = value;
730 case MMCBR_IVAR_CHIP_SELECT:
731 sc->a10_host.ios.chip_select = value;
733 case MMCBR_IVAR_CLOCK:
734 sc->a10_host.ios.clock = value;
736 case MMCBR_IVAR_MODE:
737 sc->a10_host.mode = value;
740 sc->a10_host.ocr = value;
742 case MMCBR_IVAR_POWER_MODE:
743 sc->a10_host.ios.power_mode = value;
746 sc->a10_host.ios.vdd = value;
748 /* These are read-only */
749 case MMCBR_IVAR_CAPS:
750 case MMCBR_IVAR_HOST_OCR:
751 case MMCBR_IVAR_F_MIN:
752 case MMCBR_IVAR_F_MAX:
753 case MMCBR_IVAR_MAX_DATA:
/*
 * Gate the card clock on (clkon != 0) or off, then issue a "program
 * clock" command and poll for its LOAD bit to clear so the new clock
 * setting takes effect.  Interrupt status raised by the programming
 * command is cleared afterwards.
 */
761 a10_mmc_update_clock(struct a10_mmc_softc *sc, uint32_t clkon)
767 ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
768 ckcr &= ~(A10_MMC_CKCR_CCLK_ENB | A10_MMC_CKCR_CCLK_CTRL);
771 ckcr |= A10_MMC_CKCR_CCLK_ENB;
773 A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);
/* Tell the controller to latch the new clock configuration. */
775 cmdreg = A10_MMC_CMDR_LOAD | A10_MMC_CMDR_PRG_CLK |
776 A10_MMC_CMDR_WAIT_PRE_OVER;
777 A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
779 while (--retry > 0) {
780 if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_CMDR_LOAD) == 0) {
781 A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
/* Poll limit exhausted: clear status and report the failure. */
786 A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
787 device_printf(sc->a10_dev, "timeout updating clock\n");
/*
 * mmcbr update_ios entry point: apply the mmc layer's requested bus
 * width and clock frequency.  The card clock is gated off, the divider
 * cleared, the module clock reprogrammed via the clk framework, and
 * the card clock gated back on.
 */
793 a10_mmc_update_ios(device_t bus, device_t child)
796 struct a10_mmc_softc *sc;
800 sc = device_get_softc(bus);
802 ios = &sc->a10_host.ios;
804 /* Set the bus width. */
805 switch (ios->bus_width) {
807 A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR1);
810 A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR4);
813 A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR8);
/* Disable the card clock before touching the frequency. */
820 error = a10_mmc_update_clock(sc, 0);
824 /* Reset the divider. */
825 ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
826 ckcr &= ~A10_MMC_CKCR_CCLK_DIV;
827 A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);
829 /* Set the MMC clock. */
830 error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
833 device_printf(sc->a10_dev,
834 "failed to set frequency to %u Hz: %d\n",
/* Re-enable the card clock with the new setting. */
840 error = a10_mmc_update_clock(sc, 1);
/* mmcbr get_ro entry point: report the card's write-protect status. */
850 a10_mmc_get_ro(device_t bus, device_t child)
/*
 * mmcbr acquire_host: sleep (interruptibly) until the host is free,
 * then mark it busy for the caller.
 */
857 a10_mmc_acquire_host(device_t bus, device_t child)
859 struct a10_mmc_softc *sc;
862 sc = device_get_softc(bus);
864 while (sc->a10_bus_busy) {
865 error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
/* mmcbr release_host: clear the busy flag and wake any waiters. */
878 a10_mmc_release_host(device_t bus, device_t child)
880 struct a10_mmc_softc *sc;
882 sc = device_get_softc(bus);
/* Newbus/mmcbr glue: method table, driver declaration and registration. */
891 static device_method_t a10_mmc_methods[] = {
892 /* Device interface */
893 DEVMETHOD(device_probe, a10_mmc_probe),
894 DEVMETHOD(device_attach, a10_mmc_attach),
895 DEVMETHOD(device_detach, a10_mmc_detach),
/* Bus interface: ivar accessors used by the mmc child. */
898 DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar),
899 DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar),
901 /* MMC bridge interface */
902 DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios),
903 DEVMETHOD(mmcbr_request, a10_mmc_request),
904 DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro),
905 DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host),
906 DEVMETHOD(mmcbr_release_host, a10_mmc_release_host),
911 static devclass_t a10_mmc_devclass;
913 static driver_t a10_mmc_driver = {
916 sizeof(struct a10_mmc_softc),
/* Attach under simplebus and advertise the mmc bridge to the mmc stack. */
919 DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, NULL,
921 MMC_DECLARE_BRIDGE(a10_mmc);