2 * Copyright (c) 2013 Alexander Fedorov
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/resource.h>
40 #include <sys/sysctl.h>
42 #include <machine/bus.h>
44 #include <dev/ofw/ofw_bus.h>
45 #include <dev/ofw/ofw_bus_subr.h>
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
50 #include <arm/allwinner/aw_mmc.h>
51 #include <dev/extres/clk/clk.h>
52 #include <dev/extres/hwreset/hwreset.h>
53 #include <dev/extres/regulator/regulator.h>
/* Indices into sc->aw_res[] and the size of that resource array. */
55 #define AW_MMC_MEMRES 0
56 #define AW_MMC_IRQRES 1
57 #define AW_MMC_RESSZ 2
/* Number of DMA segments: one descriptor per page of MAXPHYS, plus one. */
58 #define AW_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
/* FIFO water-level value written to AW_MMC_FWLR (magic; no datasheet ref). */
59 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
/* Loop bound used when polling controller reset/FIFO status bits. */
60 #define AW_MMC_RESET_RETRY 1000
/* Bus clock used during card identification (400 kHz). */
62 #define CARD_ID_FREQUENCY 400000
/*
 * Per-SoC configuration: A10 limits a single DMA transfer to 8 KiB.
 * NOTE(review): initializer is truncated in this excerpt — closing brace
 * and any further fields are not visible.
 */
71 static const struct aw_mmc_conf a10_mmc_conf = {
72 .dma_xferlen = 0x2000,
/*
 * A13 (and A20, see compat_data) allows 64 KiB per DMA transfer.
 * NOTE(review): initializer truncated in this excerpt.
 */
75 static const struct aw_mmc_conf a13_mmc_conf = {
76 .dma_xferlen = 0x10000,
/*
 * A64 SD/MMC: 64 KiB DMA transfers and supports delay calibration
 * (see can_calibrate use in aw_mmc_update_ios).
 * NOTE(review): initializer truncated in this excerpt.
 */
79 static const struct aw_mmc_conf a64_mmc_conf = {
80 .dma_xferlen = 0x10000,
82 .can_calibrate = true,
/*
 * A64 eMMC controller: smaller 8 KiB DMA transfer limit, calibration capable.
 * NOTE(review): initializer truncated in this excerpt.
 */
86 static const struct aw_mmc_conf a64_emmc_conf = {
87 .dma_xferlen = 0x2000,
88 .can_calibrate = true,
/*
 * FDT "compatible" strings mapped to per-SoC configuration.  ocd_data
 * carries a pointer to the matching aw_mmc_conf (cast back in attach).
 * NOTE(review): the terminating {NULL, 0} sentinel is not visible in
 * this excerpt — confirm it exists before the closing brace.
 */
91 static struct ofw_compat_data compat_data[] = {
92 {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
93 {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
94 {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
95 {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
96 {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
/*
 * Per-device software state.  NOTE(review): several members referenced
 * elsewhere in the file (aw_dev, aw_mtx, aw_intrhand, aw_timeout,
 * aw_clk_ahb, aw_clk_mmc, aw_dma_desc, aw_dma_map_err, aw_resid,
 * aw_intr, aw_vccq, aw_bus_busy) are declared on lines elided from
 * this excerpt.
 */
100 struct aw_mmc_softc {
104 hwreset_t aw_rst_ahb;
/* Request-timeout callout, armed in aw_mmc_request. */
108 struct callout aw_timeoutc;
109 struct mmc_host aw_host;
/* Currently executing request; NULL when idle. */
110 struct mmc_request * aw_req;
112 struct resource * aw_res[AW_MMC_RESSZ];
/* Per-SoC configuration selected from compat_data at attach. */
113 struct aw_mmc_conf * aw_mmc_conf;
/* Interrupt bits that must all fire before the request completes. */
115 uint32_t aw_intr_wait;
119 regulator_t aw_reg_vmmc;
120 regulator_t aw_reg_vqmmc;
/* Last clock frequency programmed, to skip redundant updates. */
121 unsigned int aw_clock;
123 /* Fields required for DMA access. */
124 bus_addr_t aw_dma_desc_phys;
125 bus_dmamap_t aw_dma_map;
126 bus_dma_tag_t aw_dma_tag;
128 bus_dmamap_t aw_dma_buf_map;
129 bus_dma_tag_t aw_dma_buf_tag;
/*
 * Resources allocated at attach: one memory window, one shareable IRQ.
 * Order matches AW_MMC_MEMRES / AW_MMC_IRQRES.
 * NOTE(review): terminating { -1, 0 } sentinel not visible in this excerpt.
 */
133 static struct resource_spec aw_mmc_res_spec[] = {
134 { SYS_RES_MEMORY, 0, RF_ACTIVE },
135 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/* Forward declarations for newbus and mmcbr entry points. */
139 static int aw_mmc_probe(device_t);
140 static int aw_mmc_attach(device_t);
141 static int aw_mmc_detach(device_t);
142 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
143 static int aw_mmc_reset(struct aw_mmc_softc *);
144 static int aw_mmc_init(struct aw_mmc_softc *);
145 static void aw_mmc_intr(void *);
146 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
148 static int aw_mmc_update_ios(device_t, device_t);
149 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
150 static int aw_mmc_get_ro(device_t, device_t);
151 static int aw_mmc_acquire_host(device_t, device_t);
152 static int aw_mmc_release_host(device_t, device_t);
/* Softc mutex helpers. */
154 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
155 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
/* 32-bit register accessors over the memory resource. */
156 #define AW_MMC_READ_4(_sc, _reg) \
157 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
158 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
159 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
/*
 * Newbus probe: accept the device when its status is "okay" and its
 * compatible string matches compat_data.
 * NOTE(review): the ENXIO return statements for the two failure checks
 * are on lines elided from this excerpt.
 */
162 aw_mmc_probe(device_t dev)
165 if (!ofw_bus_status_okay(dev))
167 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
170 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
172 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach: allocate resources, wire the interrupt, bring the
 * controller out of reset, enable clocks, set up DMA, read FDT
 * properties (bus-width, regulators), publish host capabilities, and
 * attach the mmc child bus.
 * NOTE(review): many error-path lines (returns, gotos, closing braces)
 * are elided from this excerpt; the trailing lines L165-L168 appear to
 * be the shared "fail" cleanup path.
 */
176 aw_mmc_attach(device_t dev)
179 struct aw_mmc_softc *sc;
180 struct sysctl_ctx_list *ctx;
181 struct sysctl_oid_list *tree;
186 node = ofw_bus_get_node(dev);
187 sc = device_get_softc(dev);
/* Per-SoC config stashed by probe-time compatible match. */
190 sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
193 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
194 device_printf(dev, "cannot allocate device resources\n");
197 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
198 INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
200 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
201 device_printf(dev, "cannot setup interrupt handler\n");
/* Mutex also protects the request-timeout callout below. */
204 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
206 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
208 /* De-assert reset */
209 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
210 error = hwreset_deassert(sc->aw_rst_ahb);
212 device_printf(dev, "cannot de-assert reset\n");
217 /* Activate the module clock. */
218 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
220 device_printf(dev, "cannot get ahb clock\n");
223 error = clk_enable(sc->aw_clk_ahb);
225 device_printf(dev, "cannot enable ahb clock\n");
228 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
230 device_printf(dev, "cannot get mmc clock\n");
/* Start the mmc clock at the 400 kHz identification frequency. */
233 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
236 device_printf(dev, "cannot init mmc clock\n");
239 error = clk_enable(sc->aw_clk_mmc);
241 device_printf(dev, "cannot enable mmc clock\n");
/* Expose the request timeout (seconds) as a writable sysctl. */
246 ctx = device_get_sysctl_ctx(dev);
247 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
248 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
249 &sc->aw_timeout, 0, "Request timeout in seconds");
251 /* Soft Reset controller. */
252 if (aw_mmc_reset(sc) != 0) {
253 device_printf(dev, "cannot reset the controller\n");
257 if (aw_mmc_setup_dma(sc) != 0) {
258 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
/* FDT bus-width property; default handling is on an elided line. */
262 if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
265 if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
266 &sc->aw_reg_vmmc) == 0) {
268 device_printf(dev, "vmmc-supply regulator found\n");
270 if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
271 &sc->aw_reg_vqmmc) == 0 && bootverbose) {
273 device_printf(dev, "vqmmc-supply regulator found\n");
/* Advertise host capabilities to the mmc stack. */
276 sc->aw_host.f_min = 400000;
277 sc->aw_host.f_max = 52000000;
278 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
279 sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
280 MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
281 MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;
283 sc->aw_host.caps |= MMC_CAP_SIGNALING_330 | MMC_CAP_SIGNALING_180;
/* Width caps gated on the bus-width property (conditions elided). */
286 sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
288 sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;
290 child = device_add_child(dev, "mmc", -1);
292 device_printf(dev, "attaching MMC bus failed!\n");
295 if (device_probe_and_attach(child) != 0) {
296 device_printf(dev, "attaching MMC child failed!\n");
297 device_delete_child(dev, child);
/* Common failure cleanup: undo callout/mutex/interrupt/resources. */
304 callout_drain(&sc->aw_timeoutc);
305 mtx_destroy(&sc->aw_mtx);
306 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
307 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
313 aw_mmc_detach(device_t dev)
/*
 * bus_dmamap_load() callback for the descriptor ring: record the load
 * error and the bus address of the (single) descriptor segment.
 */
320 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
322 struct aw_mmc_softc *sc;
324 sc = (struct aw_mmc_softc *)arg;
326 sc->aw_dma_map_err = err;
/* Ring base address later programmed into AW_MMC_DLBA. */
329 sc->aw_dma_desc_phys = segs[0].ds_addr;
/*
 * Create the two DMA tags/maps used by the driver: one for the in-memory
 * IDMA descriptor ring (single segment, below 4 GiB), one for data
 * buffers (up to AW_MMC_DMA_SEGS segments of dma_xferlen bytes each).
 * NOTE(review): error checks after several calls are on elided lines.
 */
333 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
335 int dma_desc_size, error;
337 /* Allocate the DMA descriptor memory. */
338 dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
/* Descriptors must be addressable by the controller: 32-bit limit. */
339 error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
341 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
342 dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
345 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
346 BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
/* Loading records aw_dma_desc_phys via aw_dma_desc_cb. */
350 error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
351 sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
354 if (sc->aw_dma_map_err)
355 return (sc->aw_dma_map_err);
357 /* Create the DMA map for data transfers. */
358 error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
360 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
361 sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
362 sc->aw_mmc_conf->dma_xferlen, BUS_DMA_ALLOCNOW, NULL, NULL,
363 &sc->aw_dma_buf_tag);
366 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
367 &sc->aw_dma_buf_map);
/*
 * bus_dmamap_load() callback for a data buffer: build the linked IDMA
 * descriptor chain, one descriptor per segment.  First descriptor gets
 * FD (first), intermediate ones DIC (disable interrupt on completion)
 * and a link to the next descriptor, the last one LD|ER (last, end of
 * ring) and a NULL link.
 * NOTE(review): early-return on err, the loop index declaration, and
 * closing braces are on elided lines; the FD flag at line 394 is
 * presumably guarded by an elided "if (i == 0)" — confirm upstream.
 */
375 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
378 struct aw_mmc_dma_desc *dma_desc;
379 struct aw_mmc_softc *sc;
381 sc = (struct aw_mmc_softc *)arg;
382 sc->aw_dma_map_err = err;
387 dma_desc = sc->aw_dma_desc;
388 for (i = 0; i < nsegs; i++) {
389 dma_desc[i].buf_size = segs[i].ds_len;
390 dma_desc[i].buf_addr = segs[i].ds_addr;
/* CH: chained descriptor; OWN: owned by the IDMA engine. */
391 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
392 AW_MMC_DMA_CONFIG_OWN;
394 dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
395 if (i < (nsegs - 1)) {
396 dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
/* Physical link to the next descriptor in the ring. */
397 dma_desc[i].next = sc->aw_dma_desc_phys +
398 ((i + 1) * sizeof(struct aw_mmc_dma_desc));
400 dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
401 AW_MMC_DMA_CONFIG_ER;
402 dma_desc[i].next = 0;
/*
 * Prepare the controller and DMA engine for the data phase of the
 * current request (sc->aw_req): load and sync the buffer map, reset and
 * enable the internal IDMA engine, select RX/TX completion interrupts,
 * and program the descriptor list base address and FIFO threshold.
 * NOTE(review): error returns after the length check and map load are
 * on elided lines.
 */
408 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
410 bus_dmasync_op_t sync_op;
412 struct mmc_command *cmd;
415 cmd = sc->aw_req->cmd;
/* Reject transfers larger than the descriptor ring can describe. */
416 if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
418 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
419 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
422 if (sc->aw_dma_map_err)
423 return (sc->aw_dma_map_err);
425 if (cmd->data->flags & MMC_DATA_WRITE)
426 sync_op = BUS_DMASYNC_PREWRITE;
428 sync_op = BUS_DMASYNC_PREREAD;
429 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
/* Descriptor ring is CPU-written, device-read: PREWRITE. */
430 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
/* Enable global DMA mode (clear FIFO access mode). */
433 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
434 val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
435 val |= AW_MMC_CTRL_DMA_ENB;
436 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
439 val |= AW_MMC_CTRL_DMA_RST;
440 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
/* Soft-reset then start the IDMA engine in fixed-burst mode. */
442 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
443 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
444 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
446 /* Enable RX or TX DMA interrupt */
447 val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
448 if (cmd->data->flags & MMC_DATA_WRITE)
449 val |= AW_MMC_IDST_TX_INT;
451 val |= AW_MMC_IDST_RX_INT;
452 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
454 /* Set DMA descritptor list address */
455 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
457 /* FIFO trigger level */
458 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL)
/*
 * Soft-reset the controller: set AW_MMC_RESET in GCTL and poll until
 * the hardware clears it.  NOTE(review): the timeout initialization
 * and the failure/success returns are on elided lines.
 */
464 aw_mmc_reset(struct aw_mmc_softc *sc)
468 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
470 while (--timeout > 0) {
471 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
/*
 * Initialize the controller to a known state: soft reset, maximum
 * data/response timeouts, all interrupts masked and pending status
 * cleared, then enable global interrupt generation.
 */
482 aw_mmc_init(struct aw_mmc_softc *sc)
486 ret = aw_mmc_reset(sc);
490 /* Set the timeout. */
491 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
492 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
493 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
495 /* Unmask interrupts. */
496 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
498 /* Clear pending interrupts. */
499 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
501 /* Debug register, undocumented */
502 AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);
504 /* Function select register */
505 AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
/* Clear any stale IDMA interrupt status. */
507 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
509 /* Enable interrupts and AHB access. */
510 AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
511 AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);
/*
 * Complete the current request.  On command error, reset the FIFO and
 * DMA engines (polling up to AW_MMC_RESET_RETRY) and re-program the
 * clock.  Then stop the watchdog callout, clear per-request state, and
 * (on elided lines) hand the request back via req->done().
 */
517 aw_mmc_req_done(struct aw_mmc_softc *sc)
519 struct mmc_command *cmd;
520 struct mmc_request *req;
524 cmd = sc->aw_req->cmd;
525 if (cmd->error != MMC_ERR_NONE) {
526 /* Reset the FIFO and DMA engines. */
527 mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
528 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
529 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
/* Hardware clears the reset bits when done; poll for that. */
531 retry = AW_MMC_RESET_RETRY;
532 while (--retry > 0) {
533 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
534 if ((val & mask) == 0)
539 device_printf(sc->aw_dev,
540 "timeout resetting DMA/FIFO\n");
541 aw_mmc_update_clock(sc, 1);
/* Stop the request watchdog armed in aw_mmc_request(). */
545 callout_stop(&sc->aw_timeoutc);
549 sc->aw_dma_map_err = 0;
550 sc->aw_intr_wait = 0;
/*
 * Successful-completion path: wait for the card to leave the busy
 * state, copy the command response from the RESP registers (reversed
 * order for 136-bit responses), and verify the data phase transferred
 * the full length.  NOTE(review): timeout init, the busy-timeout error
 * branch context, and the final call into aw_mmc_req_done are elided.
 */
555 aw_mmc_req_ok(struct aw_mmc_softc *sc)
558 struct mmc_command *cmd;
562 while (--timeout > 0) {
563 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
564 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
568 cmd = sc->aw_req->cmd;
/* Busy-wait exhausted: fail the command. */
570 cmd->error = MMC_ERR_FAILED;
574 if (cmd->flags & MMC_RSP_PRESENT) {
575 if (cmd->flags & MMC_RSP_136) {
/* Hardware returns the long response in reverse word order. */
576 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
577 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
578 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
579 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
581 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
583 /* All data has been transferred ? */
584 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
585 cmd->error = MMC_ERR_FAILED;
/*
 * Callout handler: fires when a request exceeds sc->aw_timeout seconds.
 * Marks the active command as timed out (completion happens on an
 * elided line, presumably via aw_mmc_req_done — confirm upstream).
 */
590 aw_mmc_timeout(void *arg)
592 struct aw_mmc_softc *sc;
594 sc = (struct aw_mmc_softc *)arg;
595 if (sc->aw_req != NULL) {
596 device_printf(sc->aw_dev, "controller timeout\n");
597 sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
600 device_printf(sc->aw_dev,
601 "Spurious timeout - no active request\n");
/*
 * Interrupt handler.  Reads raw (RISR), IDMA (IDST) and masked (IMKR)
 * status; bails on spurious interrupts; records errors; on DMA
 * completion syncs/unloads the buffer map and accounts transferred
 * words; acknowledges status by writing it back.  Completion of the
 * request (when all bits in aw_intr_wait have arrived) happens on
 * elided lines around L346.
 */
605 aw_mmc_intr(void *arg)
607 bus_dmasync_op_t sync_op;
608 struct aw_mmc_softc *sc;
609 struct mmc_data *data;
610 uint32_t idst, imask, rint;
612 sc = (struct aw_mmc_softc *)arg;
614 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
615 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
616 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
/* Nothing pending at all: not ours (shared IRQ). */
617 if (idst == 0 && imask == 0 && rint == 0) {
622 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
625 if (sc->aw_req == NULL) {
626 device_printf(sc->aw_dev,
627 "Spurious interrupt - no active request, rint: 0x%08X\n",
631 if (rint & AW_MMC_INT_ERR_BIT) {
633 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
634 if (rint & AW_MMC_INT_RESP_TIMEOUT)
635 sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
637 sc->aw_req->cmd->error = MMC_ERR_FAILED;
641 if (idst & AW_MMC_IDST_ERROR) {
642 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
643 sc->aw_req->cmd->error = MMC_ERR_FAILED;
649 data = sc->aw_req->cmd->data;
650 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
651 if (data->flags & MMC_DATA_WRITE)
652 sync_op = BUS_DMASYNC_POSTWRITE;
654 sync_op = BUS_DMASYNC_POSTREAD;
655 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
657 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
658 BUS_DMASYNC_POSTWRITE);
659 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
/* Residual bookkeeping in 32-bit words (len >> 2). */
660 sc->aw_resid = data->len >> 2;
662 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
/* Acknowledge everything we observed. */
666 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
667 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
/*
 * mmcbr request entry point: translate the mmc_command into CMDR bits,
 * program block size/byte count for data transfers, enable the
 * interrupts the completion path waits for, optionally prepare DMA,
 * write the command register, and arm the timeout callout.
 * NOTE(review): locking, the busy check, and some returns are elided.
 */
672 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
675 struct aw_mmc_softc *sc;
676 struct mmc_command *cmd;
677 uint32_t cmdreg, imask;
680 sc = device_get_softc(bus);
689 cmdreg = AW_MMC_CMDR_LOAD;
/* Always interrupt on error conditions. */
690 imask = AW_MMC_INT_ERR_BIT;
691 sc->aw_intr_wait = 0;
694 cmd->error = MMC_ERR_NONE;
/* CMD0 requires the 80-clock init sequence. */
696 if (cmd->opcode == MMC_GO_IDLE_STATE)
697 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
699 if (cmd->flags & MMC_RSP_PRESENT)
700 cmdreg |= AW_MMC_CMDR_RESP_RCV;
701 if (cmd->flags & MMC_RSP_136)
702 cmdreg |= AW_MMC_CMDR_LONG_RESP;
703 if (cmd->flags & MMC_RSP_CRC)
704 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
/* Data phase setup (enclosing "if (cmd->data)" is elided). */
707 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
709 if (cmd->data->flags & MMC_DATA_MULTI) {
/* Multi-block: auto-stop, wait for the auto-stop-done interrupt. */
710 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
711 imask |= AW_MMC_INT_AUTO_STOP_DONE;
712 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
714 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
715 imask |= AW_MMC_INT_DATA_OVER;
717 if (cmd->data->flags & MMC_DATA_WRITE)
718 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
720 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
721 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
722 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
724 imask |= AW_MMC_INT_CMD_DONE;
727 /* Enable the interrupts we are interested in */
728 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
729 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
731 /* Enable auto stop if needed */
732 AW_MMC_WRITE_4(sc, AW_MMC_A12A,
733 cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
735 /* Write the command argument */
736 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
739 * If we don't have data start the request
740 * if we do prepare the dma request and start the request
742 if (cmd->data == NULL) {
743 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
745 err = aw_mmc_prepare_dma(sc);
747 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
749 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
/* Watchdog: aw_mmc_timeout fires after aw_timeout seconds. */
752 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
/*
 * Bus read_ivar: expose mmc_host / ios fields to the mmc child through
 * the standard MMCBR instance variables.  MAX_DATA is reported as
 * 65535 (blocks).  NOTE(review): break statements and the default case
 * are on elided lines.
 */
760 aw_mmc_read_ivar(device_t bus, device_t child, int which,
763 struct aw_mmc_softc *sc;
765 sc = device_get_softc(bus);
769 case MMCBR_IVAR_BUS_MODE:
770 *(int *)result = sc->aw_host.ios.bus_mode;
772 case MMCBR_IVAR_BUS_WIDTH:
773 *(int *)result = sc->aw_host.ios.bus_width;
775 case MMCBR_IVAR_CHIP_SELECT:
776 *(int *)result = sc->aw_host.ios.chip_select;
778 case MMCBR_IVAR_CLOCK:
779 *(int *)result = sc->aw_host.ios.clock;
781 case MMCBR_IVAR_F_MIN:
782 *(int *)result = sc->aw_host.f_min;
784 case MMCBR_IVAR_F_MAX:
785 *(int *)result = sc->aw_host.f_max;
787 case MMCBR_IVAR_HOST_OCR:
788 *(int *)result = sc->aw_host.host_ocr;
790 case MMCBR_IVAR_MODE:
791 *(int *)result = sc->aw_host.mode;
794 *(int *)result = sc->aw_host.ocr;
796 case MMCBR_IVAR_POWER_MODE:
797 *(int *)result = sc->aw_host.ios.power_mode;
800 *(int *)result = sc->aw_host.ios.vdd;
802 case MMCBR_IVAR_VCCQ:
803 *(int *)result = sc->aw_host.ios.vccq;
805 case MMCBR_IVAR_CAPS:
806 *(int *)result = sc->aw_host.caps;
808 case MMCBR_IVAR_TIMING:
809 *(int *)result = sc->aw_host.ios.timing;
811 case MMCBR_IVAR_MAX_DATA:
812 *(int *)result = 65535;
/*
 * Bus write_ivar: let the mmc child update the ios / host state.
 * Capability and frequency variables are read-only (the EINVAL return
 * for them is on an elided line).  NOTE(review): break statements and
 * the trailing return are elided.
 */
820 aw_mmc_write_ivar(device_t bus, device_t child, int which,
823 struct aw_mmc_softc *sc;
825 sc = device_get_softc(bus);
829 case MMCBR_IVAR_BUS_MODE:
830 sc->aw_host.ios.bus_mode = value;
832 case MMCBR_IVAR_BUS_WIDTH:
833 sc->aw_host.ios.bus_width = value;
835 case MMCBR_IVAR_CHIP_SELECT:
836 sc->aw_host.ios.chip_select = value;
838 case MMCBR_IVAR_CLOCK:
839 sc->aw_host.ios.clock = value;
841 case MMCBR_IVAR_MODE:
842 sc->aw_host.mode = value;
845 sc->aw_host.ocr = value;
847 case MMCBR_IVAR_POWER_MODE:
848 sc->aw_host.ios.power_mode = value;
851 sc->aw_host.ios.vdd = value;
853 case MMCBR_IVAR_VCCQ:
854 sc->aw_host.ios.vccq = value;
856 case MMCBR_IVAR_TIMING:
857 sc->aw_host.ios.timing = value;
859 /* These are read-only */
860 case MMCBR_IVAR_CAPS:
861 case MMCBR_IVAR_HOST_OCR:
862 case MMCBR_IVAR_F_MIN:
863 case MMCBR_IVAR_F_MAX:
864 case MMCBR_IVAR_MAX_DATA:
/*
 * Gate or ungate the card clock (clkon != 0 enables it), then issue the
 * special "program clock" command (CMDR_PRG_CLK) and poll until the
 * controller clears CMDR_LOAD.  When mask_data0 is set for this SoC,
 * DAT0 is masked around the update and unmasked afterwards.
 */
872 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
877 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
878 reg &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL |
879 AW_MMC_CKCR_CCLK_MASK_DATA0);
882 reg |= AW_MMC_CKCR_CCLK_ENB;
883 if (sc->aw_mmc_conf->mask_data0)
884 reg |= AW_MMC_CKCR_CCLK_MASK_DATA0;
886 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
/* Dummy command that makes the controller latch the new clock. */
888 reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
889 AW_MMC_CMDR_WAIT_PRE_OVER;
890 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
/* Hardware clears CMDR_LOAD once the clock change is applied. */
893 while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
894 reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
897 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
899 if (reg & AW_MMC_CMDR_LOAD) {
900 device_printf(sc->aw_dev, "timeout updating clock\n");
904 if (sc->aw_mmc_conf->mask_data0) {
905 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
906 reg &= ~AW_MMC_CKCR_CCLK_MASK_DATA0;
907 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
/*
 * Program the vqmmc (I/O line) regulator for the requested signaling
 * voltage; no-op when the board provides no vqmmc regulator.
 * NOTE(review): the vccq -> microvolt mapping (presumably a switch on
 * vccq_120/180/330) and the return statements are on elided lines.
 */
914 aw_mmc_set_vccq(struct aw_mmc_softc *sc, int32_t vccq)
918 if (sc->aw_reg_vqmmc == NULL)
932 if (regulator_set_voltage(sc->aw_reg_vqmmc,
934 device_printf(sc->aw_dev,
935 "Cannot set vqmmc to %d<->%d\n",
/*
 * mmcbr update_ios: apply the host's ios settings to the hardware —
 * bus width (BWDR), power state via the vmmc/vqmmc regulators, vccq
 * signaling voltage, DDR mode (GCTL), and the card clock (gate, set
 * module clock frequency, optionally enable new-timing mode and
 * sample-delay calibration, then re-enable).
 * NOTE(review): case labels, DDR divider handling around L510-L513,
 * and several returns are on elided lines.
 */
941 aw_mmc_update_ios(device_t bus, device_t child)
944 struct aw_mmc_softc *sc;
947 uint32_t reg, div = 1;
949 sc = device_get_softc(bus);
951 ios = &sc->aw_host.ios;
953 /* Set the bus width. */
954 switch (ios->bus_width) {
956 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
959 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
962 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
966 switch (ios->power_mode) {
971 device_printf(sc->aw_dev, "Powering down sd/mmc\n");
/* power_off: drop the card and I/O regulators if present. */
974 regulator_disable(sc->aw_reg_vmmc);
975 if (sc->aw_reg_vqmmc)
976 regulator_disable(sc->aw_reg_vqmmc);
982 device_printf(sc->aw_dev, "Powering up sd/mmc\n");
985 regulator_enable(sc->aw_reg_vmmc);
986 if (sc->aw_reg_vqmmc)
987 regulator_enable(sc->aw_reg_vqmmc);
/* Only touch the regulator when the requested vccq changed. */
992 if (ios->vccq != sc->aw_vccq) {
993 aw_mmc_set_vccq(sc, ios->vccq);
994 sc->aw_vccq = ios->vccq;
997 /* Enable ddr mode if needed */
998 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
999 if (ios->timing == bus_timing_uhs_ddr50 ||
1000 ios->timing == bus_timing_mmc_ddr52)
1001 reg |= AW_MMC_CTRL_DDR_MOD_SEL;
1003 reg &= ~AW_MMC_CTRL_DDR_MOD_SEL;
1004 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
/* Clock changes: gate, reprogram, ungate. */
1006 if (ios->clock && ios->clock != sc->aw_clock) {
1007 sc->aw_clock = clock = ios->clock;
1010 error = aw_mmc_update_clock(sc, 0);
1014 if (ios->timing == bus_timing_mmc_ddr52 &&
1015 (sc->aw_mmc_conf->new_timing ||
1016 ios->bus_width == bus_width_8)) {
1021 /* Reset the divider. */
1022 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1023 reg &= ~AW_MMC_CKCR_CCLK_DIV;
1025 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1027 /* New timing mode if needed */
1028 if (sc->aw_mmc_conf->new_timing) {
1029 reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1030 reg |= AW_MMC_NTSR_MODE_SELECT;
1031 AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1034 /* Set the MMC clock. */
1035 error = clk_set_freq(sc->aw_clk_mmc, clock,
1036 CLK_SET_ROUND_DOWN);
1038 device_printf(sc->aw_dev,
1039 "failed to set frequency to %u Hz: %d\n",
/* Enable software sample-delay calibration where supported. */
1044 if (sc->aw_mmc_conf->can_calibrate)
1045 AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
1048 error = aw_mmc_update_clock(sc, 1);
1058 aw_mmc_get_ro(device_t bus, device_t child)
/*
 * mmcbr acquire_host: sleep (interruptibly, PCATCH) on the softc until
 * the bus-busy flag clears.  NOTE(review): the lock acquisition, the
 * flag set, unlock and return are on elided lines.
 */
1065 aw_mmc_acquire_host(device_t bus, device_t child)
1067 struct aw_mmc_softc *sc;
1070 sc = device_get_softc(bus);
1072 while (sc->aw_bus_busy) {
1073 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
/*
 * mmcbr release_host: clear the bus-busy flag and wake waiters.
 * NOTE(review): the flag clear and wakeup() are on elided lines.
 */
1086 aw_mmc_release_host(device_t bus, device_t child)
1088 struct aw_mmc_softc *sc;
1090 sc = device_get_softc(bus);
/*
 * Newbus method table: device lifecycle, ivar accessors for the mmc
 * child, and the mmcbr bridge interface.
 * NOTE(review): DEVMETHOD_END terminator not visible in this excerpt.
 */
1099 static device_method_t aw_mmc_methods[] = {
1100 /* Device interface */
1101 DEVMETHOD(device_probe, aw_mmc_probe),
1102 DEVMETHOD(device_attach, aw_mmc_attach),
1103 DEVMETHOD(device_detach, aw_mmc_detach),
1106 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
1107 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
1109 /* MMC bridge interface */
1110 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
1111 DEVMETHOD(mmcbr_request, aw_mmc_request),
1112 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
1113 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
1114 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
/*
 * Driver registration: attach under simplebus and declare this driver
 * as an MMC bridge so the mmc stack can bind to it.
 */
1119 static devclass_t aw_mmc_devclass;
1121 static driver_t aw_mmc_driver = {
1124 sizeof(struct aw_mmc_softc),
1127 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
1129 MMC_DECLARE_BRIDGE(aw_mmc);