2 * Copyright (c) 2013 Alexander Fedorov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/resource.h>
40 #include <sys/sysctl.h>
42 #include <machine/bus.h>
44 #include <dev/ofw/ofw_bus.h>
45 #include <dev/ofw/ofw_bus_subr.h>
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
50 #include <arm/allwinner/aw_mmc.h>
51 #include <dev/extres/clk/clk.h>
52 #include <dev/extres/hwreset/hwreset.h>
54 #define AW_MMC_MEMRES 0
55 #define AW_MMC_IRQRES 1
56 #define AW_MMC_RESSZ 2
57 #define AW_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
58 #define AW_MMC_DMA_MAX_SIZE 0x2000
59 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
60 #define AW_MMC_RESET_RETRY 1000
62 #define CARD_ID_FREQUENCY 400000
/*
 * FDT "compatible" strings this driver attaches to.  A non-zero ocd_data
 * marks a supported controller generation (see probe below).
 * NOTE(review): this view is truncated — the NULL table terminator and
 * closing brace are not visible here.
 */
64 static struct ofw_compat_data compat_data[] = {
65 {"allwinner,sun4i-a10-mmc", 1},
66 {"allwinner,sun5i-a13-mmc", 1},
67 {"allwinner,sun7i-a20-mmc", 1},
68 {"allwinner,sun50i-a64-mmc", 1},
/*
 * Per-device softc fields (the enclosing "struct aw_mmc_softc {" line is
 * outside this truncated view).
 */
80 struct callout aw_timeoutc;	/* request-timeout callout, runs under aw_mtx */
81 struct mmc_host aw_host;	/* mmc(4) bridge host capabilities/state */
82 struct mmc_request * aw_req;	/* request in flight; NULL when idle */
84 struct resource * aw_res[AW_MMC_RESSZ];	/* [0] memory BAR, [1] IRQ */
86 uint32_t aw_intr_wait;	/* RISR bits the current request waits for */
89 /* Fields required for DMA access. */
90 bus_addr_t aw_dma_desc_phys;	/* bus address of the IDMA descriptor ring */
91 bus_dmamap_t aw_dma_map;	/* map for the descriptor ring itself */
92 bus_dma_tag_t aw_dma_tag;	/* tag for the descriptor ring */
94 bus_dmamap_t aw_dma_buf_map;	/* map for per-request data buffers */
95 bus_dma_tag_t aw_dma_buf_tag;	/* tag for per-request data buffers */
/*
 * Resources claimed at attach: one memory window for the register block
 * and one (shareable) interrupt.  Indexed by AW_MMC_MEMRES/AW_MMC_IRQRES.
 * NOTE(review): the { -1, 0 } terminator row is not visible in this view.
 */
99 static struct resource_spec aw_mmc_res_spec[] = {
100 { SYS_RES_MEMORY, 0, RF_ACTIVE },
101 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
105 static int aw_mmc_probe(device_t);
106 static int aw_mmc_attach(device_t);
107 static int aw_mmc_detach(device_t);
108 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
109 static int aw_mmc_reset(struct aw_mmc_softc *);
110 static void aw_mmc_intr(void *);
111 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
113 static int aw_mmc_update_ios(device_t, device_t);
114 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
115 static int aw_mmc_get_ro(device_t, device_t);
116 static int aw_mmc_acquire_host(device_t, device_t);
117 static int aw_mmc_release_host(device_t, device_t);
119 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
120 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
121 #define AW_MMC_READ_4(_sc, _reg) \
122 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
123 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
124 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
/*
 * Newbus probe: accept the device when its FDT node is enabled and its
 * "compatible" string matches compat_data.
 * NOTE(review): truncated view — the early-return statements following the
 * two guard conditions are not visible here.
 */
127 aw_mmc_probe(device_t dev)
130 if (!ofw_bus_status_okay(dev))
132 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
135 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
137 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach: allocate bus resources and the interrupt handler, bring
 * the controller out of reset, enable its AHB and MMC clocks, set up DMA,
 * fill in mmc_host capabilities from the FDT, and attach the mmc(4) child.
 * NOTE(review): truncated view — error-branch bodies, gotos and the final
 * return statements are not visible; the trailing block appears to be the
 * shared "fail" teardown path.
 */
141 aw_mmc_attach(device_t dev)
144 struct aw_mmc_softc *sc;
145 struct sysctl_ctx_list *ctx;
146 struct sysctl_oid_list *tree;
151 node = ofw_bus_get_node(dev);
152 sc = device_get_softc(dev);
155 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
156 device_printf(dev, "cannot allocate device resources\n");
159 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
160 INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
162 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
163 device_printf(dev, "cannot setup interrupt handler\n");
166 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
168 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
170 /* De-assert reset */
171 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
172 error = hwreset_deassert(sc->aw_rst_ahb);
174 device_printf(dev, "cannot de-assert reset\n");
179 /* Activate the module clock. */
180 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
182 device_printf(dev, "cannot get ahb clock\n");
185 error = clk_enable(sc->aw_clk_ahb);
187 device_printf(dev, "cannot enable ahb clock\n");
190 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
192 device_printf(dev, "cannot get mmc clock\n");
/* Start the mmc clock at the 400 kHz card-identification frequency. */
195 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
198 device_printf(dev, "cannot init mmc clock\n");
201 error = clk_enable(sc->aw_clk_mmc);
203 device_printf(dev, "cannot enable mmc clock\n");
/* Export the request timeout as a read-write sysctl. */
208 ctx = device_get_sysctl_ctx(dev);
209 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
210 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
211 &sc->aw_timeout, 0, "Request timeout in seconds");
/* Pulse the hardware reset line of the controller. */
214 AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 1);
216 AW_MMC_WRITE_4(sc, AW_MMC_HWRST, 0);
219 /* Soft Reset controller. */
220 if (aw_mmc_reset(sc) != 0) {
221 device_printf(dev, "cannot reset the controller\n");
225 if (aw_mmc_setup_dma(sc) != 0) {
226 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
/* Bus width defaults when the FDT omits the "bus-width" property. */
230 if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
233 sc->aw_host.f_min = 400000;
234 sc->aw_host.f_max = 52000000;
235 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
236 sc->aw_host.mode = mode_sd;
237 sc->aw_host.caps = MMC_CAP_HSPEED;
239 sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
241 sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;
243 child = device_add_child(dev, "mmc", -1);
245 device_printf(dev, "attaching MMC bus failed!\n");
248 if (device_probe_and_attach(child) != 0) {
249 device_printf(dev, "attaching MMC child failed!\n");
250 device_delete_child(dev, child);
/* Failure path: undo everything acquired above, in reverse order. */
257 callout_drain(&sc->aw_timeoutc);
258 mtx_destroy(&sc->aw_mtx);
259 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
260 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
266 aw_mmc_detach(device_t dev)
/*
 * bus_dmamap_load(9) callback for the IDMA descriptor ring: record the
 * load error and stash the ring's bus address in the softc so it can be
 * programmed into AW_MMC_DLBA later.
 */
273 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
275 struct aw_mmc_softc *sc;
277 sc = (struct aw_mmc_softc *)arg;
279 sc->aw_dma_map_err = err;
282 sc->aw_dma_desc_phys = segs[0].ds_addr;
/*
 * Create the two DMA tags used by the driver: one single-segment tag
 * holding the contiguous IDMA descriptor ring (allocated, zeroed and
 * loaded here so aw_dma_desc_phys is valid), and one multi-segment tag
 * (up to AW_MMC_DMA_SEGS segments of AW_MMC_DMA_MAX_SIZE bytes, 32-bit
 * addressable) for request data buffers.
 * NOTE(review): truncated view — the error-return checks after each call
 * and the final return are not visible.
 */
286 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
288 int dma_desc_size, error;
290 /* Allocate the DMA descriptor memory. */
291 dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
292 error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
294 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
295 dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
298 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
299 BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
303 error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
304 sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
307 if (sc->aw_dma_map_err)
308 return (sc->aw_dma_map_err);
310 /* Create the DMA map for data transfers. */
311 error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
313 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
314 AW_MMC_DMA_MAX_SIZE * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
315 AW_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
316 &sc->aw_dma_buf_tag);
319 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
320 &sc->aw_dma_buf_map);
/*
 * bus_dmamap_load(9) callback for a data buffer: translate the loaded
 * segments into the controller's IDMA descriptor chain.  Every descriptor
 * is owned by the IDMAC (OWN); the first is marked FD, intermediate ones
 * chain to the next descriptor with interrupt-on-completion suppressed
 * (DIC), and the last is marked LD|ER with a NULL next pointer.
 */
328 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
331 struct aw_mmc_dma_desc *dma_desc;
332 struct aw_mmc_softc *sc;
334 sc = (struct aw_mmc_softc *)arg;
335 sc->aw_dma_map_err = err;
340 dma_desc = sc->aw_dma_desc;
341 for (i = 0; i < nsegs; i++) {
342 dma_desc[i].buf_size = segs[i].ds_len;
343 dma_desc[i].buf_addr = segs[i].ds_addr;
344 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
345 AW_MMC_DMA_CONFIG_OWN;
/* NOTE(review): the "if (i == 0)" guard for FD is not visible here. */
347 dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
348 if (i < (nsegs - 1)) {
349 dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
/* next = physical address of descriptor i+1 within the ring. */
350 dma_desc[i].next = sc->aw_dma_desc_phys +
351 ((i + 1) * sizeof(struct aw_mmc_dma_desc));
353 dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
354 AW_MMC_DMA_CONFIG_ER;
355 dma_desc[i].next = 0;
/*
 * Arm the IDMA engine for the current request: load the data buffer into
 * the buffer map (building the descriptor chain via aw_dma_cb), sync both
 * maps, enable/reset DMA mode in GCTL, soft-reset and start the IDMAC,
 * enable the matching RX/TX DMA interrupt, and program the descriptor
 * list base address and FIFO watermark.
 * NOTE(review): truncated view — error returns after the size check and
 * map load are not visible.
 */
361 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
363 bus_dmasync_op_t sync_op;
365 struct mmc_command *cmd;
368 cmd = sc->aw_req->cmd;
/* Reject transfers larger than the descriptor ring can describe. */
369 if (cmd->data->len > AW_MMC_DMA_MAX_SIZE * AW_MMC_DMA_SEGS)
371 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
372 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
375 if (sc->aw_dma_map_err)
376 return (sc->aw_dma_map_err);
378 if (cmd->data->flags & MMC_DATA_WRITE)
379 sync_op = BUS_DMASYNC_PREWRITE;
381 sync_op = BUS_DMASYNC_PREREAD;
382 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
383 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
/* Select DMA (not FIFO) access mode and enable the DMA engine. */
386 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
387 val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
388 val |= AW_MMC_CTRL_DMA_ENB;
389 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
392 val |= AW_MMC_CTRL_DMA_RST;
393 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
/* Soft-reset the IDMAC, then start it in fixed-burst mode. */
395 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
396 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
397 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
399 /* Enable RX or TX DMA interrupt */
400 if (cmd->data->flags & MMC_DATA_WRITE)
401 val |= AW_MMC_IDST_TX_INT;
403 val |= AW_MMC_IDST_RX_INT;
404 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
406 /* Set DMA descriptor list address */
407 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
409 /* FIFO trigger level */
410 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
/*
 * Soft-reset the controller: set the RESET bit in GCTL and poll until the
 * hardware clears it (bounded by a retry counter), then reprogram the
 * data/response timeout limits, clear all pending interrupt status, unmask
 * the interrupts the driver handles, and re-enable global interrupts.
 * NOTE(review): truncated view — the timeout-expired error return between
 * the poll loop and the reprogramming is not visible.
 */
416 aw_mmc_reset(struct aw_mmc_softc *sc)
420 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
422 while (--timeout > 0) {
423 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
430 /* Set the timeout. */
431 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
432 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
433 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
435 /* Clear pending interrupts. */
436 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
437 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
438 /* Unmask interrupts. */
439 AW_MMC_WRITE_4(sc, AW_MMC_IMKR,
440 AW_MMC_INT_CMD_DONE | AW_MMC_INT_ERR_BIT |
441 AW_MMC_INT_DATA_OVER | AW_MMC_INT_AUTO_STOP_DONE);
442 /* Enable interrupts and AHB access. */
443 AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
444 AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);
/*
 * Complete the in-flight request.  On command error, reset the FIFO and
 * DMA engines (polling until the reset bits self-clear) and re-start the
 * card clock.  Then cancel the timeout callout and clear per-request DMA
 * and interrupt-wait state.
 * NOTE(review): truncated view — clearing sc->aw_req and the call that
 * hands the finished request back to the mmc layer (req->done) are not
 * visible here.
 */
450 aw_mmc_req_done(struct aw_mmc_softc *sc)
452 struct mmc_command *cmd;
453 struct mmc_request *req;
457 cmd = sc->aw_req->cmd;
458 if (cmd->error != MMC_ERR_NONE) {
459 /* Reset the FIFO and DMA engines. */
460 mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
461 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
462 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
464 retry = AW_MMC_RESET_RETRY;
465 while (--retry > 0) {
466 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
467 if ((val & mask) == 0)
472 device_printf(sc->aw_dev,
473 "timeout resetting DMA/FIFO\n");
474 aw_mmc_update_clock(sc, 1);
478 callout_stop(&sc->aw_timeoutc);
482 sc->aw_dma_map_err = 0;
483 sc->aw_intr_wait = 0;
/*
 * Successful-completion path: wait (bounded poll) for the card-busy bit
 * to clear, copy the command response out of the RESP registers (reversed
 * register order for 136-bit responses), and flag MMC_ERR_FAILED if fewer
 * words than cmd->data->len were transferred (aw_resid counts 32-bit
 * words, hence the << 2).
 * NOTE(review): truncated view — the busy-timeout error branch between
 * the poll loop and the response copy is not fully visible.
 */
488 aw_mmc_req_ok(struct aw_mmc_softc *sc)
491 struct mmc_command *cmd;
495 while (--timeout > 0) {
496 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
497 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
501 cmd = sc->aw_req->cmd;
503 cmd->error = MMC_ERR_FAILED;
507 if (cmd->flags & MMC_RSP_PRESENT) {
508 if (cmd->flags & MMC_RSP_136) {
/* 136-bit response: RESP3..RESP0 map to resp[0]..resp[3]. */
509 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
510 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
511 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
512 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
514 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
516 /* All data has been transferred ? */
517 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
518 cmd->error = MMC_ERR_FAILED;
/*
 * Callout handler fired when a request exceeds sc->aw_timeout seconds:
 * mark the in-flight command as timed out (completion is presumably
 * finished elsewhere — the line is not visible in this truncated view);
 * log a warning if no request is active.
 */
523 aw_mmc_timeout(void *arg)
525 struct aw_mmc_softc *sc;
527 sc = (struct aw_mmc_softc *)arg;
528 if (sc->aw_req != NULL) {
529 device_printf(sc->aw_dev, "controller timeout\n");
530 sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
533 device_printf(sc->aw_dev,
534 "Spurious timeout - no active request\n");
/*
 * Interrupt handler.  Reads raw interrupt status (RISR), IDMA status
 * (IDST) and the interrupt mask (IMKR); ignores fully-spurious entries,
 * translates error bits into cmd->error, post-syncs and unloads the data
 * DMA maps once the IDMA transfer completes (recording the word count in
 * aw_resid), and acknowledges the handled status bits by writing them
 * back.  Request completion fires when all bits in aw_intr_wait have
 * accumulated in aw_intr.
 * NOTE(review): truncated view — locking, the accumulation of rint into
 * sc->aw_intr, and the call into the completion path are not visible.
 */
538 aw_mmc_intr(void *arg)
540 bus_dmasync_op_t sync_op;
541 struct aw_mmc_softc *sc;
542 struct mmc_data *data;
543 uint32_t idst, imask, rint;
545 sc = (struct aw_mmc_softc *)arg;
547 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
548 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
549 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
/* Nothing pending at all: shared-IRQ spurious entry. */
550 if (idst == 0 && imask == 0 && rint == 0) {
555 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
558 if (sc->aw_req == NULL) {
559 device_printf(sc->aw_dev,
560 "Spurious interrupt - no active request, rint: 0x%08X\n",
564 if (rint & AW_MMC_INT_ERR_BIT) {
565 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
566 if (rint & AW_MMC_INT_RESP_TIMEOUT)
567 sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
569 sc->aw_req->cmd->error = MMC_ERR_FAILED;
573 if (idst & AW_MMC_IDST_ERROR) {
574 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
575 sc->aw_req->cmd->error = MMC_ERR_FAILED;
581 data = sc->aw_req->cmd->data;
582 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
583 if (data->flags & MMC_DATA_WRITE)
584 sync_op = BUS_DMASYNC_POSTWRITE;
586 sync_op = BUS_DMASYNC_POSTREAD;
587 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
589 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
590 BUS_DMASYNC_POSTWRITE);
591 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
/* aw_resid counts completed 32-bit words. */
592 sc->aw_resid = data->len >> 2;
594 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
/* Acknowledge everything we observed this pass. */
598 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
599 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
/*
 * mmcbr request entry point: build the CMDR command word from the mmc
 * command flags (init sequence for GO_IDLE, response presence/length/CRC
 * check), record which interrupt bits complete the request, program the
 * block size/byte count and arm DMA for data transfers, then write the
 * argument and command registers and start the timeout callout.
 * NOTE(review): truncated view — locking, the busy check on sc->aw_req,
 * and the return statements are not visible.
 */
604 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
607 struct aw_mmc_softc *sc;
608 struct mmc_command *cmd;
612 sc = device_get_softc(bus);
620 cmdreg = AW_MMC_CMDR_LOAD;
621 if (cmd->opcode == MMC_GO_IDLE_STATE)
622 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
623 if (cmd->flags & MMC_RSP_PRESENT)
624 cmdreg |= AW_MMC_CMDR_RESP_RCV;
625 if (cmd->flags & MMC_RSP_136)
626 cmdreg |= AW_MMC_CMDR_LONG_RESP;
627 if (cmd->flags & MMC_RSP_CRC)
628 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
/* Minimum completion condition: command-done interrupt. */
632 sc->aw_intr_wait = AW_MMC_INT_CMD_DONE;
633 cmd->error = MMC_ERR_NONE;
634 if (cmd->data != NULL) {
635 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
636 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
/* Multi-block: let the controller send the stop command itself. */
637 if (cmd->data->flags & MMC_DATA_MULTI) {
638 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
639 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
641 if (cmd->data->flags & MMC_DATA_WRITE)
642 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
643 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
644 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
645 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
647 err = aw_mmc_prepare_dma(sc);
649 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
/* Writing CMDR with the opcode kicks off the transaction. */
652 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
653 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
654 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
/*
 * bus_read_ivar: expose mmc bridge instance variables (host capabilities
 * and current ios settings) to the mmc(4) child.  MAX_DATA is reported as
 * 65535 blocks.
 * NOTE(review): truncated view — break statements and the default/return
 * paths are not visible.
 */
662 aw_mmc_read_ivar(device_t bus, device_t child, int which,
665 struct aw_mmc_softc *sc;
667 sc = device_get_softc(bus);
671 case MMCBR_IVAR_BUS_MODE:
672 *(int *)result = sc->aw_host.ios.bus_mode;
674 case MMCBR_IVAR_BUS_WIDTH:
675 *(int *)result = sc->aw_host.ios.bus_width;
677 case MMCBR_IVAR_CHIP_SELECT:
678 *(int *)result = sc->aw_host.ios.chip_select;
680 case MMCBR_IVAR_CLOCK:
681 *(int *)result = sc->aw_host.ios.clock;
683 case MMCBR_IVAR_F_MIN:
684 *(int *)result = sc->aw_host.f_min;
686 case MMCBR_IVAR_F_MAX:
687 *(int *)result = sc->aw_host.f_max;
689 case MMCBR_IVAR_HOST_OCR:
690 *(int *)result = sc->aw_host.host_ocr;
692 case MMCBR_IVAR_MODE:
693 *(int *)result = sc->aw_host.mode;
696 *(int *)result = sc->aw_host.ocr;
698 case MMCBR_IVAR_POWER_MODE:
699 *(int *)result = sc->aw_host.ios.power_mode;
702 *(int *)result = sc->aw_host.ios.vdd;
704 case MMCBR_IVAR_CAPS:
705 *(int *)result = sc->aw_host.caps;
707 case MMCBR_IVAR_MAX_DATA:
708 *(int *)result = 65535;
/*
 * bus_write_ivar: let the mmc(4) child update the mutable bridge instance
 * variables (ios settings, mode, ocr).  CAPS/HOST_OCR/F_MIN/F_MAX/MAX_DATA
 * are read-only and rejected.
 * NOTE(review): truncated view — break statements and the error returns
 * for the read-only cases are not visible.
 */
716 aw_mmc_write_ivar(device_t bus, device_t child, int which,
719 struct aw_mmc_softc *sc;
721 sc = device_get_softc(bus);
725 case MMCBR_IVAR_BUS_MODE:
726 sc->aw_host.ios.bus_mode = value;
728 case MMCBR_IVAR_BUS_WIDTH:
729 sc->aw_host.ios.bus_width = value;
731 case MMCBR_IVAR_CHIP_SELECT:
732 sc->aw_host.ios.chip_select = value;
734 case MMCBR_IVAR_CLOCK:
735 sc->aw_host.ios.clock = value;
737 case MMCBR_IVAR_MODE:
738 sc->aw_host.mode = value;
741 sc->aw_host.ocr = value;
743 case MMCBR_IVAR_POWER_MODE:
744 sc->aw_host.ios.power_mode = value;
747 sc->aw_host.ios.vdd = value;
749 /* These are read-only */
750 case MMCBR_IVAR_CAPS:
751 case MMCBR_IVAR_HOST_OCR:
752 case MMCBR_IVAR_F_MIN:
753 case MMCBR_IVAR_F_MAX:
754 case MMCBR_IVAR_MAX_DATA:
/*
 * Gate or ungate the card clock (clkon != 0 enables it) and issue a
 * clock-programming command (PRG_CLK) so the controller latches the new
 * CKCR setting; poll until the LOAD bit self-clears, acknowledging RISR
 * either way.  Returns an error on timeout (return lines not visible in
 * this truncated view).
 */
762 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
768 ckcr = AW_MMC_READ_4(sc, AW_MMC_CKCR);
769 ckcr &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL);
772 ckcr |= AW_MMC_CKCR_CCLK_ENB;
774 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, ckcr);
/* PRG_CLK commands update the clock only; no command goes to the card. */
776 cmdreg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
777 AW_MMC_CMDR_WAIT_PRE_OVER;
778 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg);
780 while (--retry > 0) {
781 if ((AW_MMC_READ_4(sc, AW_MMC_CMDR) & AW_MMC_CMDR_LOAD) == 0) {
782 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
787 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
788 device_printf(sc->aw_dev, "timeout updating clock\n");
/*
 * mmcbr update_ios: apply the child's requested bus settings.  Programs
 * the bus-width register (1/4/8-bit), then — with the card clock gated —
 * clears the internal clock divider, retunes the mmc module clock to
 * ios->clock via clk_set_freq, and re-enables the card clock.
 * NOTE(review): truncated view — the switch case labels, the clock!=0
 * guard, and the return statements are not visible.
 */
794 aw_mmc_update_ios(device_t bus, device_t child)
797 struct aw_mmc_softc *sc;
801 sc = device_get_softc(bus);
803 ios = &sc->aw_host.ios;
805 /* Set the bus width. */
806 switch (ios->bus_width) {
808 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
811 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
814 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
/* Gate the card clock before changing its frequency. */
821 error = aw_mmc_update_clock(sc, 0);
825 /* Reset the divider. */
826 ckcr = AW_MMC_READ_4(sc, AW_MMC_CKCR);
827 ckcr &= ~AW_MMC_CKCR_CCLK_DIV;
828 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, ckcr);
830 /* Set the MMC clock. */
831 error = clk_set_freq(sc->aw_clk_mmc, ios->clock,
834 device_printf(sc->aw_dev,
835 "failed to set frequency to %u Hz: %d\n",
/* Ungate the card clock again with the new frequency in effect. */
841 error = aw_mmc_update_clock(sc, 1);
851 aw_mmc_get_ro(device_t bus, device_t child)
/*
 * mmcbr acquire_host: sleep (interruptibly, under aw_mtx) until the bus
 * is free, then claim it.  The line setting aw_bus_busy and the returns
 * are not visible in this truncated view.
 */
858 aw_mmc_acquire_host(device_t bus, device_t child)
860 struct aw_mmc_softc *sc;
863 sc = device_get_softc(bus);
865 while (sc->aw_bus_busy) {
866 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
/*
 * mmcbr release_host: release the bus claimed by aw_mmc_acquire_host.
 * The lines clearing aw_bus_busy and waking sleepers are not visible in
 * this truncated view.
 */
879 aw_mmc_release_host(device_t bus, device_t child)
881 struct aw_mmc_softc *sc;
883 sc = device_get_softc(bus);
/*
 * Newbus method table: device lifecycle, ivar access for the mmc child,
 * and the mmcbr bridge interface.  NOTE(review): the DEVMETHOD_END
 * terminator is not visible in this truncated view.
 */
892 static device_method_t aw_mmc_methods[] = {
893 /* Device interface */
894 DEVMETHOD(device_probe, aw_mmc_probe),
895 DEVMETHOD(device_attach, aw_mmc_attach),
896 DEVMETHOD(device_detach, aw_mmc_detach),
/* Bus interface */
899 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
900 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
902 /* MMC bridge interface */
903 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
904 DEVMETHOD(mmcbr_request, aw_mmc_request),
905 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
906 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
907 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
/*
 * Driver registration: attach under simplebus (FDT) and declare this
 * device as an mmc bridge so mmc(4) can enumerate cards behind it.
 */
912 static devclass_t aw_mmc_devclass;
914 static driver_t aw_mmc_driver = {
917 sizeof(struct aw_mmc_softc),
920 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
922 MMC_DECLARE_BRIDGE(aw_mmc);