2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
5 * Copyright (c) 2013 Alexander Fedorov
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/resource.h>
42 #include <sys/sysctl.h>
43 #include <sys/queue.h>
44 #include <sys/taskqueue.h>
46 #include <machine/bus.h>
48 #include <dev/ofw/ofw_bus.h>
49 #include <dev/ofw/ofw_bus_subr.h>
51 #include <dev/mmc/bridge.h>
52 #include <dev/mmc/mmcbrvar.h>
53 #include <dev/mmc/mmc_fdt_helpers.h>
55 #include <arm/allwinner/aw_mmc.h>
56 #include <dev/extres/clk/clk.h>
57 #include <dev/extres/hwreset/hwreset.h>
58 #include <dev/extres/regulator/regulator.h>
60 #include "opt_mmccam.h"
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/mmc/mmc_sim.h>
70 #include "mmc_sim_if.h"
73 #include "mmc_pwrseq_if.h"
/* Indices into sc->aw_res[] and its size. */
75 #define AW_MMC_MEMRES 0
76 #define AW_MMC_IRQRES 1
77 #define AW_MMC_RESSZ 2
/* One page worth of DMA descriptors per request; total descriptor area size. */
78 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
79 #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
/* FIFO water-level value written to AW_MMC_FWLR (magic, hardware-specific). */
80 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
/* Poll iterations allowed while waiting for a controller/FIFO/DMA reset. */
82 #define AW_MMC_RESET_RETRY 1000
/* Card-identification clock rate; matches sc->aw_host.f_min set in attach. */
84 #define CARD_ID_FREQUENCY 400000
/*
 * Per-SoC controller configuration: dma_xferlen is the maximum byte count a
 * single DMA descriptor may carry; can_calibrate marks controllers with the
 * sample-delay calibration register (written in aw_mmc_update_ios).
 * NOTE(review): the closing braces of these initializers are elided in this
 * extraction, so additional fields may exist that are not visible here.
 */
93 static const struct aw_mmc_conf a10_mmc_conf = {
94 .dma_xferlen = 0x2000,
97 static const struct aw_mmc_conf a13_mmc_conf = {
98 .dma_xferlen = 0x10000,
101 static const struct aw_mmc_conf a64_mmc_conf = {
102 .dma_xferlen = 0x10000,
104 .can_calibrate = true,
108 static const struct aw_mmc_conf a64_emmc_conf = {
109 .dma_xferlen = 0x2000,
110 .can_calibrate = true,
/* FDT "compatible" strings mapped to the per-SoC config consumed by probe/attach. */
113 static struct ofw_compat_data compat_data[] = {
114 {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
115 {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
116 {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
117 {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
118 {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
/*
 * Per-instance driver state.  NOTE(review): several members referenced later
 * in the file (aw_dev, aw_mtx, aw_intrhand, aw_clk_ahb, aw_clk_mmc, aw_req /
 * ccb, aw_resid, aw_intr, aw_timeout, aw_bus_busy, child, aw_dma_desc,
 * aw_dma_map_err) are elided from this extraction of the declaration.
 */
122 struct aw_mmc_softc {
126 hwreset_t aw_rst_ahb;
130 struct callout aw_timeoutc;
131 struct mmc_host aw_host;
132 struct mmc_helper mmc_helper;
135 struct mmc_sim mmc_sim;
/* Request in flight (non-MMCCAM path); NULL when the controller is idle. */
137 struct mmc_request * aw_req;
140 struct resource * aw_res[AW_MMC_RESSZ];
/* Per-SoC limits looked up from compat_data at attach time. */
141 struct aw_mmc_conf * aw_mmc_conf;
/* Interrupt bits the current request must observe before completion. */
143 uint32_t aw_intr_wait;
/* Last bus clock programmed, to skip redundant reprogramming in update_ios. */
145 unsigned int aw_clock;
148 /* Fields required for DMA access. */
149 bus_addr_t aw_dma_desc_phys;
150 bus_dmamap_t aw_dma_map;
151 bus_dma_tag_t aw_dma_tag;
/* Separate tag/map pair for the data buffers themselves. */
153 bus_dmamap_t aw_dma_buf_map;
154 bus_dma_tag_t aw_dma_buf_tag;
/* Memory + shareable IRQ resources; order matches AW_MMC_MEMRES/AW_MMC_IRQRES. */
158 static struct resource_spec aw_mmc_res_spec[] = {
159 { SYS_RES_MEMORY, 0, RF_ACTIVE },
160 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/* Forward declarations. */
164 static int aw_mmc_probe(device_t);
165 static int aw_mmc_attach(device_t);
166 static int aw_mmc_detach(device_t);
167 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
168 static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc);
169 static int aw_mmc_reset(struct aw_mmc_softc *);
170 static int aw_mmc_init(struct aw_mmc_softc *);
171 static void aw_mmc_intr(void *);
172 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
173 static void aw_mmc_helper_cd_handler(device_t, bool);
175 static void aw_mmc_print_error(uint32_t);
176 static int aw_mmc_update_ios(device_t, device_t);
177 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
180 static int aw_mmc_get_ro(device_t, device_t);
181 static int aw_mmc_acquire_host(device_t, device_t);
182 static int aw_mmc_release_host(device_t, device_t);
/* Softc lock and register accessors (registers live in the MEMRES window). */
185 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
186 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
187 #define AW_MMC_READ_4(_sc, _reg) \
188 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
189 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
190 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
/* hw.aw_mmc sysctl tree with a tunable debug bitmask (bits defined below). */
192 SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
195 static int aw_mmc_debug = 0;
196 SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0,
197 "Debug level bit0=card changes bit1=ios changes, bit2=interrupts, bit3=commands");
198 #define AW_MMC_DEBUG_CARD 0x1
199 #define AW_MMC_DEBUG_IOS 0x2
200 #define AW_MMC_DEBUG_INT 0x4
201 #define AW_MMC_DEBUG_CMD 0x8
/*
 * MMCCAM: report host capabilities (OCR, frequency range, caps, max data in
 * sectors) and a copy of the current ios to the CAM transport layer.
 */
205 aw_mmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
207 struct aw_mmc_softc *sc;
209 sc = device_get_softc(dev);
211 cts->host_ocr = sc->aw_host.host_ocr;
212 cts->host_f_min = sc->aw_host.f_min;
213 cts->host_f_max = sc->aw_host.f_max;
214 cts->host_caps = sc->aw_host.caps;
/* Largest transfer: all DMA segments at max length, expressed in sectors. */
215 cts->host_max_data = (sc->aw_mmc_conf->dma_xferlen *
216 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
217 memcpy(&cts->ios, &sc->aw_host.ios, sizeof(struct mmc_ios));
/*
 * MMCCAM: copy the ios fields flagged in cts->ios_valid into the host ios,
 * then push the result to the hardware via aw_mmc_update_ios().
 * NOTE(review): the assignment of new_ios (presumably &cts->ios) is elided
 * from this extraction — confirm against the full source.
 */
223 aw_mmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
225 struct aw_mmc_softc *sc;
227 struct mmc_ios *new_ios;
229 sc = device_get_softc(dev);
230 ios = &sc->aw_host.ios;
233 /* Update only requested fields */
234 if (cts->ios_valid & MMC_CLK) {
235 ios->clock = new_ios->clock;
236 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
237 device_printf(sc->aw_dev, "Clock => %d\n", ios->clock);
239 if (cts->ios_valid & MMC_VDD) {
240 ios->vdd = new_ios->vdd;
241 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
242 device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd);
244 if (cts->ios_valid & MMC_CS) {
245 ios->chip_select = new_ios->chip_select;
246 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
247 device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select);
249 if (cts->ios_valid & MMC_BW) {
250 ios->bus_width = new_ios->bus_width;
251 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
252 device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width);
254 if (cts->ios_valid & MMC_PM) {
255 ios->power_mode = new_ios->power_mode;
256 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
257 device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode);
259 if (cts->ios_valid & MMC_BT) {
260 ios->timing = new_ios->timing;
261 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
262 device_printf(sc->aw_dev, "Timing => %d\n", ios->timing);
264 if (cts->ios_valid & MMC_BM) {
265 ios->bus_mode = new_ios->bus_mode;
266 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
267 device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode);
/* Apply the merged settings to the controller. */
270 return (aw_mmc_update_ios(sc->aw_dev, NULL));
/*
 * MMCCAM entry point for submitting a command CCB.  Validates the data phase,
 * refuses overlapping requests, then hands off to aw_mmc_request() (which
 * takes the softc lock itself).  NOTE(review): the lines storing the ccb into
 * sc->ccb and the mmcio assignment are elided from this extraction.
 */
274 aw_mmc_cam_request(device_t dev, union ccb *ccb)
276 struct aw_mmc_softc *sc;
277 struct ccb_mmcio *mmcio;
279 sc = device_get_softc(dev);
284 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
285 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
286 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
287 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
288 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
290 if (mmcio->cmd.data != NULL) {
/* A data phase with no length or no direction flags is a driver bug. */
291 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
292 panic("data->len = %d, data->flags = %d -- something is b0rked",
293 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
/* Only one command may be outstanding at a time. */
295 if (sc->ccb != NULL) {
296 device_printf(sc->aw_dev, "Controller still has an active command\n");
300 /* aw_mmc_request locks again */
302 aw_mmc_request(sc->aw_dev, NULL, NULL);
/*
 * MMCCAM poll hook: service pending interrupts synchronously.
 * NOTE(review): the call into aw_mmc_intr(sc) is elided in this extraction.
 */
308 aw_mmc_cam_poll(device_t dev)
310 struct aw_mmc_softc *sc;
312 sc = device_get_softc(dev);
/*
 * Card-detect callback from mmc_fdt_gpio_setup().  In the MMCCAM build it
 * triggers a SIM rescan; otherwise it attaches an "mmc" child when a card is
 * inserted and detaches it on removal.
 */
318 aw_mmc_helper_cd_handler(device_t dev, bool present)
320 struct aw_mmc_softc *sc;
322 sc = device_get_softc(dev);
/* MMCCAM path: let CAM rediscover the bus. */
324 mmc_cam_sim_discover(&sc->mmc_sim);
328 if (sc->child == NULL) {
329 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
330 device_printf(sc->aw_dev, "Card inserted\n");
332 sc->child = device_add_child(sc->aw_dev, "mmc", -1);
335 device_set_ivars(sc->child, sc);
336 (void)device_probe_and_attach(sc->child);
341 /* Card isn't present, detach if necessary */
342 if (sc->child != NULL) {
343 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
344 device_printf(sc->aw_dev, "Card removed\n");
347 device_delete_child(sc->aw_dev, sc->child);
/*
 * Newbus probe: match an enabled FDT node against compat_data.
 */
356 aw_mmc_probe(device_t dev)
359 if (!ofw_bus_status_okay(dev))
361 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
364 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
366 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach: allocate memory/IRQ resources, wire the interrupt handler,
 * de-assert the AHB reset, enable the ahb and mmc clocks (mmc clock started
 * at the 400 kHz identification rate), soft-reset the controller, set up DMA,
 * seed default host parameters, parse FDT overrides, and register with either
 * MMCCAM or the mmc bus.  The tail (after the sysctl and labels) is the
 * shared error-unwind path.  NOTE(review): several intermediate statements
 * (return codes, goto labels, #ifdef MMCCAM guards) are elided here.
 */
370 aw_mmc_attach(device_t dev)
372 struct aw_mmc_softc *sc;
373 struct sysctl_ctx_list *ctx;
374 struct sysctl_oid_list *tree;
377 sc = device_get_softc(dev);
/* Pick the per-SoC config matched during probe. */
380 sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
385 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
386 device_printf(dev, "cannot allocate device resources\n");
389 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
390 INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
392 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
393 device_printf(dev, "cannot setup interrupt handler\n");
396 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
398 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
400 /* De-assert reset */
401 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
402 error = hwreset_deassert(sc->aw_rst_ahb);
404 device_printf(dev, "cannot de-assert reset\n");
409 /* Activate the module clock. */
410 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
412 device_printf(dev, "cannot get ahb clock\n");
415 error = clk_enable(sc->aw_clk_ahb);
417 device_printf(dev, "cannot enable ahb clock\n");
420 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
422 device_printf(dev, "cannot get mmc clock\n");
/* Start the module clock at identification speed (CARD_ID_FREQUENCY). */
425 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
428 device_printf(dev, "cannot init mmc clock\n");
431 error = clk_enable(sc->aw_clk_mmc);
433 device_printf(dev, "cannot enable mmc clock\n");
/* Expose a per-device request timeout knob. */
438 ctx = device_get_sysctl_ctx(dev);
439 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
440 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
441 &sc->aw_timeout, 0, "Request timeout in seconds");
443 /* Soft Reset controller. */
444 if (aw_mmc_reset(sc) != 0) {
445 device_printf(dev, "cannot reset the controller\n");
449 if (aw_mmc_setup_dma(sc) != 0) {
450 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
454 /* Set some defaults for freq and supported mode */
455 sc->aw_host.f_min = 400000;
456 sc->aw_host.f_max = 52000000;
457 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
458 sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
/* Let the FDT (bus-width, regulators, cd-gpios, ...) refine the defaults. */
459 mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host);
460 mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler);
465 if (mmc_cam_sim_alloc(dev, "aw_mmc", &sc->mmc_sim) != 0) {
466 device_printf(dev, "cannot alloc cam sim\n");
/* Error unwind: undo callout, mutex, interrupt and resources. */
474 callout_drain(&sc->aw_timeoutc);
475 mtx_destroy(&sc->aw_mtx);
476 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
477 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
/*
 * Newbus detach: reverse of attach — stop clocks, assert reset, tear down the
 * card-detect GPIO, drain the timeout callout, delete children, free DMA
 * state, destroy the lock, and release interrupt/bus resources (plus the CAM
 * sim in the MMCCAM build).  NOTE(review): the child-enumeration loop that
 * produces 'd' is elided in this extraction.
 */
483 aw_mmc_detach(device_t dev)
485 struct aw_mmc_softc *sc;
488 sc = device_get_softc(dev);
490 clk_disable(sc->aw_clk_mmc);
491 clk_disable(sc->aw_clk_ahb);
492 hwreset_assert(sc->aw_rst_ahb);
494 mmc_fdt_gpio_teardown(&sc->mmc_helper);
496 callout_drain(&sc->aw_timeoutc);
503 device_delete_child(sc->aw_dev, d);
505 aw_mmc_teardown_dma(sc);
507 mtx_destroy(&sc->aw_mtx);
509 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
510 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
513 mmc_cam_sim_free(&sc->mmc_sim);
/*
 * bus_dmamap_load callback for the descriptor area: record the error and
 * remember the single segment's bus address for programming AW_MMC_DLBA.
 */
520 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
522 struct aw_mmc_softc *sc;
524 sc = (struct aw_mmc_softc *)arg;
526 sc->aw_dma_map_err = err;
529 sc->aw_dma_desc_phys = segs[0].ds_addr;
/*
 * Create the two DMA tag/map pairs: one single-segment, 32-bit-addressable
 * area holding the descriptor ring (allocated coherent and zeroed, then
 * loaded to learn its bus address), and one multi-segment tag for the data
 * buffers, bounded by the per-SoC dma_xferlen and AW_MMC_DMA_SEGS.
 * NOTE(review): the error-return checks between calls are elided here.
 */
533 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
537 /* Allocate the DMA descriptor memory. */
538 error = bus_dma_tag_create(
539 bus_get_dma_tag(sc->aw_dev), /* parent */
540 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
541 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
542 BUS_SPACE_MAXADDR, /* highaddr */
543 NULL, NULL, /* filter, filterarg*/
544 AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */
545 AW_MMC_DMA_DESC_SIZE, /* maxsegsize */
547 NULL, NULL, /* lock, lockarg*/
552 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
553 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
558 error = bus_dmamap_load(sc->aw_dma_tag,
560 sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
561 aw_dma_desc_cb, sc, 0);
/* The load callback reports asynchronously via aw_dma_map_err. */
564 if (sc->aw_dma_map_err)
565 return (sc->aw_dma_map_err);
567 /* Create the DMA map for data transfers. */
568 error = bus_dma_tag_create(
569 bus_get_dma_tag(sc->aw_dev), /* parent */
570 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
571 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
572 BUS_SPACE_MAXADDR, /* highaddr */
573 NULL, NULL, /* filter, filterarg*/
574 sc->aw_mmc_conf->dma_xferlen *
575 AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */
576 sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */
577 BUS_DMA_ALLOCNOW, /* flags */
578 NULL, NULL, /* lock, lockarg*/
579 &sc->aw_dma_buf_tag);
582 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
583 &sc->aw_dma_buf_map);
/*
 * Release everything aw_mmc_setup_dma() created, descriptor area first, then
 * the data-buffer tag/map.
 */
591 aw_mmc_teardown_dma(struct aw_mmc_softc *sc)
594 bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map);
595 bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map);
596 if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0)
597 device_printf(sc->aw_dev, "Cannot destroy the dma tag\n");
599 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
600 bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
601 if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0)
602 device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n");
/*
 * bus_dmamap_load callback for a data transfer: build the IDMA descriptor
 * chain.  Each descriptor gets buffer address/size and CH|OWN|DIC config;
 * descriptors are linked by bus address.  The first is marked First-Desc,
 * the last gets Last-Desc + End-of-Ring, loses DIC (so only the final
 * descriptor interrupts), and terminates the chain with next = 0.
 */
606 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
609 struct aw_mmc_dma_desc *dma_desc;
610 struct aw_mmc_softc *sc;
612 sc = (struct aw_mmc_softc *)arg;
613 sc->aw_dma_map_err = err;
618 dma_desc = sc->aw_dma_desc;
619 for (i = 0; i < nsegs; i++) {
620 if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen)
621 dma_desc[i].buf_size = 0; /* Size of 0 indicate max len */
623 dma_desc[i].buf_size = segs[i].ds_len;
624 dma_desc[i].buf_addr = segs[i].ds_addr;
625 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
626 AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;
/* Link to the next descriptor's bus address within the ring. */
628 dma_desc[i].next = sc->aw_dma_desc_phys +
629 ((i + 1) * sizeof(struct aw_mmc_dma_desc));
632 dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
633 dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
634 AW_MMC_DMA_CONFIG_ER;
635 dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
636 dma_desc[nsegs - 1].next = 0;
/*
 * Map the current command's data buffer, sync both maps for the upcoming
 * transfer, then program the controller: enable internal DMA, soft-reset the
 * IDMA engine, select the RX or TX DMA interrupt, and load the descriptor
 * list base address and FIFO trigger level.  Rejects transfers larger than
 * the descriptor ring can describe.
 */
640 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
642 bus_dmasync_op_t sync_op;
644 struct mmc_command *cmd;
/* MMCCAM vs. mmc-stack source for the active command. */
648 cmd = &sc->ccb->mmcio.cmd;
650 cmd = sc->aw_req->cmd;
652 if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
654 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
655 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
658 if (sc->aw_dma_map_err)
659 return (sc->aw_dma_map_err);
661 if (cmd->data->flags & MMC_DATA_WRITE)
662 sync_op = BUS_DMASYNC_PREWRITE;
664 sync_op = BUS_DMASYNC_PREREAD;
665 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
/* Descriptors were written by the CPU; flush before the device reads them. */
666 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
669 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
670 val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
671 val |= AW_MMC_GCTL_DMA_ENB;
672 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
675 val |= AW_MMC_GCTL_DMA_RST;
676 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
678 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
679 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
680 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
682 /* Enable RX or TX DMA interrupt */
683 val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
684 if (cmd->data->flags & MMC_DATA_WRITE)
685 val |= AW_MMC_IDST_TX_INT;
687 val |= AW_MMC_IDST_RX_INT;
688 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
690 /* Set DMA descritptor list address */
691 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
693 /* FIFO trigger level */
694 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
/*
 * Soft-reset the controller: set GCTL_RESET and poll up to
 * AW_MMC_RESET_RETRY times for the hardware to clear it.
 * NOTE(review): the return statements are elided from this extraction;
 * presumably 0 on success, non-zero on timeout.
 */
700 aw_mmc_reset(struct aw_mmc_softc *sc)
705 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
706 reg |= AW_MMC_GCTL_RESET;
707 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
708 timeout = AW_MMC_RESET_RETRY;
709 while (--timeout > 0) {
710 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
/*
 * Bring the controller to a known state after reset: program generous
 * data/response timeouts, mask and clear all interrupts, poke the
 * undocumented debug and function-select registers, clear IDMA status,
 * and enable global interrupts with AHB/FIFO access mode disabled.
 */
721 aw_mmc_init(struct aw_mmc_softc *sc)
726 ret = aw_mmc_reset(sc);
730 /* Set the timeout. */
731 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
732 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
733 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
735 /* Unmask interrupts. */
736 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
738 /* Clear pending interrupts. */
739 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
741 /* Debug register, undocumented */
742 AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);
744 /* Function select register */
745 AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
/* Clear any stale IDMA status bits. */
747 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
749 /* Enable interrupts and disable AHB access. */
750 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
751 reg |= AW_MMC_GCTL_INT_ENB;
752 reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
753 reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
754 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
/*
 * Complete the active request.  On command error, reset the FIFO and DMA
 * engines (polling for the reset to self-clear) and re-clock the bus.
 * Then stop the watchdog callout, clear per-request state, and hand the
 * result back (CCB status for MMCCAM, req->done() otherwise).
 * NOTE(review): the ccb/req handoff and several surrounding statements are
 * elided from this extraction.
 */
760 aw_mmc_req_done(struct aw_mmc_softc *sc)
762 struct mmc_command *cmd;
766 struct mmc_request *req;
773 cmd = &ccb->mmcio.cmd;
775 cmd = sc->aw_req->cmd;
777 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
778 device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error);
780 if (cmd->error != MMC_ERR_NONE) {
781 /* Reset the FIFO and DMA engines. */
782 mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
783 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
784 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
786 retry = AW_MMC_RESET_RETRY;
787 while (--retry > 0) {
788 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
789 AW_MMC_GCTL_RESET) == 0)
794 device_printf(sc->aw_dev,
795 "timeout resetting DMA/FIFO\n");
/* Re-enable the card clock after the engine reset. */
796 aw_mmc_update_clock(sc, 1);
/* Cancel the request watchdog and clear per-request bookkeeping. */
800 callout_stop(&sc->aw_timeoutc);
803 sc->aw_dma_map_err = 0;
804 sc->aw_intr_wait = 0;
808 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
/*
 * Successful-completion path: wait for the card to leave the busy state,
 * latch the (short or long) response registers into cmd->resp, and flag an
 * error if less data arrived than the command requested.  aw_resid is a
 * count of 32-bit words, hence the << 2 when comparing against data->len.
 * NOTE(review): the busy-timeout handling around line 837 is partially
 * elided in this extraction.
 */
818 aw_mmc_req_ok(struct aw_mmc_softc *sc)
821 struct mmc_command *cmd;
825 while (--timeout > 0) {
826 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
827 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
832 cmd = &sc->ccb->mmcio.cmd;
834 cmd = sc->aw_req->cmd;
/* Card stayed busy past the poll limit. */
837 cmd->error = MMC_ERR_FAILED;
841 if (cmd->flags & MMC_RSP_PRESENT) {
842 if (cmd->flags & MMC_RSP_136) {
/* Long (R2) responses are returned most-significant word first. */
843 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
844 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
845 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
846 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
848 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
850 /* All data has been transferred ? */
851 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
852 cmd->error = MMC_ERR_FAILED;
/*
 * Record an error code on the active command, whichever stack (MMCCAM ccb
 * or mmc request) owns it.
 */
857 set_mmc_error(struct aw_mmc_softc *sc, int error_code)
860 sc->ccb->mmcio.cmd.error = error_code;
862 sc->aw_req->cmd->error = error_code;
/*
 * Request watchdog (runs from the callout armed in aw_mmc_request): if a
 * request is still outstanding, mark it MMC_ERR_TIMEOUT and complete it;
 * otherwise just log the spurious firing.
 */
867 aw_mmc_timeout(void *arg)
869 struct aw_mmc_softc *sc;
871 sc = (struct aw_mmc_softc *)arg;
873 if (sc->ccb != NULL) {
875 if (sc->aw_req != NULL) {
877 device_printf(sc->aw_dev, "controller timeout\n");
878 set_mmc_error(sc, MMC_ERR_TIMEOUT);
881 device_printf(sc->aw_dev,
882 "Spurious timeout - no active request\n");
/*
 * Decode the error bits of the raw interrupt status (RISR) into their
 * symbolic names on the console; used by the interrupt handler when
 * debugging is enabled or a spurious interrupt occurs.
 */
886 aw_mmc_print_error(uint32_t err)
888 if(err & AW_MMC_INT_RESP_ERR)
889 printf("AW_MMC_INT_RESP_ERR ");
890 if (err & AW_MMC_INT_RESP_CRC_ERR)
891 printf("AW_MMC_INT_RESP_CRC_ERR ");
892 if (err & AW_MMC_INT_DATA_CRC_ERR)
893 printf("AW_MMC_INT_DATA_CRC_ERR ");
894 if (err & AW_MMC_INT_RESP_TIMEOUT)
895 printf("AW_MMC_INT_RESP_TIMEOUT ");
896 if (err & AW_MMC_INT_FIFO_RUN_ERR)
897 printf("AW_MMC_INT_FIFO_RUN_ERR ");
898 if (err & AW_MMC_INT_CMD_BUSY)
899 printf("AW_MMC_INT_CMD_BUSY ");
900 if (err & AW_MMC_INT_DATA_START_ERR)
901 printf("AW_MMC_INT_DATA_START_ERR ");
902 if (err & AW_MMC_INT_DATA_END_BIT_ERR)
903 printf("AW_MMC_INT_DATA_END_BIT_ERR");
/*
 * Interrupt handler.  Reads RISR (raw status), IDST (internal-DMA status)
 * and IMKR (mask); bails out if all are zero (shared IRQ), logs spurious
 * interrupts when no request is active, converts error bits into a command
 * error, post-syncs and unloads the data DMA map once the IDMA transfer
 * completes, and finishes the request when all bits in aw_intr_wait have
 * been accumulated.  Status registers are acknowledged by writing the read
 * values back at the end.  NOTE(review): the accumulation of 'rint' into
 * sc->aw_intr and several control-flow lines are elided in this extraction.
 */
908 aw_mmc_intr(void *arg)
910 bus_dmasync_op_t sync_op;
911 struct aw_mmc_softc *sc;
912 struct mmc_data *data;
913 uint32_t idst, imask, rint;
915 sc = (struct aw_mmc_softc *)arg;
917 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
918 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
919 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
/* Nothing pending: interrupt belongs to another device on the shared line. */
920 if (idst == 0 && imask == 0 && rint == 0) {
924 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
925 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
929 if (sc->ccb == NULL) {
931 if (sc->aw_req == NULL) {
933 device_printf(sc->aw_dev,
934 "Spurious interrupt - no active request, rint: 0x%08X\n",
936 aw_mmc_print_error(rint);
939 if (rint & AW_MMC_INT_ERR_BIT) {
940 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
941 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
942 aw_mmc_print_error(rint);
944 if (rint & AW_MMC_INT_RESP_TIMEOUT)
945 set_mmc_error(sc, MMC_ERR_TIMEOUT);
947 set_mmc_error(sc, MMC_ERR_FAILED);
951 if (idst & AW_MMC_IDST_ERROR) {
952 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT))
953 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
954 set_mmc_error(sc, MMC_ERR_FAILED);
961 data = sc->ccb->mmcio.cmd.data;
963 data = sc->aw_req->cmd->data;
965 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
966 if (data->flags & MMC_DATA_WRITE)
967 sync_op = BUS_DMASYNC_POSTWRITE;
969 sync_op = BUS_DMASYNC_POSTREAD;
970 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
972 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
973 BUS_DMASYNC_POSTWRITE);
974 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
/* Residual bookkeeping in 32-bit words (see aw_mmc_req_ok). */
975 sc->aw_resid = data->len >> 2;
977 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
/* Acknowledge everything we observed. */
981 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
982 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
/*
 * mmcbr_request implementation (also reached from aw_mmc_cam_request with
 * req == NULL in the MMCCAM build).  Builds the CMDR command word from the
 * command flags, selects the interrupt bits to wait for (auto-stop for
 * multi-block, data-over for single-block, command-done otherwise),
 * programs block size/byte count, the interrupt mask, auto-stop argument
 * and command argument, then fires the command — after preparing DMA if a
 * data phase is present — and arms the request-timeout callout.
 * NOTE(review): locking, sc->aw_req assignment and error paths are elided
 * from this extraction.
 */
987 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
990 struct aw_mmc_softc *sc;
991 struct mmc_command *cmd;
992 uint32_t cmdreg, imask;
995 sc = device_get_softc(bus);
999 KASSERT(req == NULL, ("req should be NULL in MMCCAM case!"));
1001 * For MMCCAM, sc->ccb has been NULL-checked and populated
1002 * by aw_mmc_cam_request() already.
1004 cmd = &sc->ccb->mmcio.cmd;
1013 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
1014 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1015 cmd->opcode, cmd->arg, cmd->flags,
1016 cmd->data != NULL ? (unsigned int)cmd->data->len : 0,
1017 cmd->data != NULL ? cmd->data->flags: 0);
1020 cmdreg = AW_MMC_CMDR_LOAD;
1021 imask = AW_MMC_INT_ERR_BIT;
1022 sc->aw_intr_wait = 0;
1025 cmd->error = MMC_ERR_NONE;
/* CMD0 needs the 80-clock initialization sequence. */
1027 if (cmd->opcode == MMC_GO_IDLE_STATE)
1028 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
1030 if (cmd->flags & MMC_RSP_PRESENT)
1031 cmdreg |= AW_MMC_CMDR_RESP_RCV;
1032 if (cmd->flags & MMC_RSP_136)
1033 cmdreg |= AW_MMC_CMDR_LONG_RESP;
1034 if (cmd->flags & MMC_RSP_CRC)
1035 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
/* Data phase present. */
1038 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
1040 if (cmd->data->flags & MMC_DATA_MULTI) {
/* Multi-block: have the controller send the stop command itself. */
1041 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
1042 imask |= AW_MMC_INT_AUTO_STOP_DONE;
1043 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
1045 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
1046 imask |= AW_MMC_INT_DATA_OVER;
1048 if (cmd->data->flags & MMC_DATA_WRITE)
1049 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
1051 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
/* Caller supplied an explicit block size (e.g. SDIO-style transfers). */
1052 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size);
1053 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1057 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
1058 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
1059 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
/* No data phase: only wait for command completion. */
1062 imask |= AW_MMC_INT_CMD_DONE;
1065 /* Enable the interrupts we are interested in */
1066 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
1067 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1069 /* Enable auto stop if needed */
1070 AW_MMC_WRITE_4(sc, AW_MMC_A12A,
1071 cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
1073 /* Write the command argument */
1074 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
1077 * If we don't have data start the request
1078 * if we do prepare the dma request and start the request
1080 if (cmd->data == NULL) {
1081 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1083 err = aw_mmc_prepare_dma(sc);
1085 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
1087 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
/* Arm the per-request watchdog (seconds, tunable via sysctl). */
1091 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
1092 aw_mmc_timeout, sc);
/*
 * bus_read_ivar: expose host/ios state to the mmc child.  MAX_DATA reports
 * the DMA capacity in sectors; RETUNE_REQ is always "none" (no retuning
 * support).  NOTE(review): the break/return statements between cases are
 * elided in this extraction.
 */
1100 aw_mmc_read_ivar(device_t bus, device_t child, int which,
1103 struct aw_mmc_softc *sc;
1105 sc = device_get_softc(bus);
1109 case MMCBR_IVAR_BUS_MODE:
1110 *(int *)result = sc->aw_host.ios.bus_mode;
1112 case MMCBR_IVAR_BUS_WIDTH:
1113 *(int *)result = sc->aw_host.ios.bus_width;
1115 case MMCBR_IVAR_CHIP_SELECT:
1116 *(int *)result = sc->aw_host.ios.chip_select;
1118 case MMCBR_IVAR_CLOCK:
1119 *(int *)result = sc->aw_host.ios.clock;
1121 case MMCBR_IVAR_F_MIN:
1122 *(int *)result = sc->aw_host.f_min;
1124 case MMCBR_IVAR_F_MAX:
1125 *(int *)result = sc->aw_host.f_max;
1127 case MMCBR_IVAR_HOST_OCR:
1128 *(int *)result = sc->aw_host.host_ocr;
1130 case MMCBR_IVAR_MODE:
1131 *(int *)result = sc->aw_host.mode;
1133 case MMCBR_IVAR_OCR:
1134 *(int *)result = sc->aw_host.ocr;
1136 case MMCBR_IVAR_POWER_MODE:
1137 *(int *)result = sc->aw_host.ios.power_mode;
1139 case MMCBR_IVAR_VDD:
1140 *(int *)result = sc->aw_host.ios.vdd;
1142 case MMCBR_IVAR_VCCQ:
1143 *(int *)result = sc->aw_host.ios.vccq;
1145 case MMCBR_IVAR_CAPS:
1146 *(int *)result = sc->aw_host.caps;
1148 case MMCBR_IVAR_TIMING:
1149 *(int *)result = sc->aw_host.ios.timing;
1151 case MMCBR_IVAR_MAX_DATA:
1152 *(int *)result = (sc->aw_mmc_conf->dma_xferlen *
1153 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
1155 case MMCBR_IVAR_RETUNE_REQ:
1156 *(int *)result = retune_req_none;
/*
 * bus_write_ivar: accept ios/host updates from the mmc child; capability
 * and frequency-limit ivars are read-only and rejected.
 * NOTE(review): the break/return statements between cases are elided in
 * this extraction.
 */
1164 aw_mmc_write_ivar(device_t bus, device_t child, int which,
1167 struct aw_mmc_softc *sc;
1169 sc = device_get_softc(bus);
1173 case MMCBR_IVAR_BUS_MODE:
1174 sc->aw_host.ios.bus_mode = value;
1176 case MMCBR_IVAR_BUS_WIDTH:
1177 sc->aw_host.ios.bus_width = value;
1179 case MMCBR_IVAR_CHIP_SELECT:
1180 sc->aw_host.ios.chip_select = value;
1182 case MMCBR_IVAR_CLOCK:
1183 sc->aw_host.ios.clock = value;
1185 case MMCBR_IVAR_MODE:
1186 sc->aw_host.mode = value;
1188 case MMCBR_IVAR_OCR:
1189 sc->aw_host.ocr = value;
1191 case MMCBR_IVAR_POWER_MODE:
1192 sc->aw_host.ios.power_mode = value;
1194 case MMCBR_IVAR_VDD:
1195 sc->aw_host.ios.vdd = value;
1197 case MMCBR_IVAR_VCCQ:
1198 sc->aw_host.ios.vccq = value;
1200 case MMCBR_IVAR_TIMING:
1201 sc->aw_host.ios.timing = value;
1203 /* These are read-only */
1204 case MMCBR_IVAR_CAPS:
1205 case MMCBR_IVAR_HOST_OCR:
1206 case MMCBR_IVAR_F_MIN:
1207 case MMCBR_IVAR_F_MAX:
1208 case MMCBR_IVAR_MAX_DATA:
/*
 * Gate the card clock on or off (clkon != 0 enables it) and issue the
 * "program clock" command (CMDR_LOAD | PRG_CLK) that latches the change,
 * polling for the LOAD bit to self-clear.  On controllers with mask_data0,
 * DAT0 is masked during the update and unmasked afterwards.
 */
1216 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
1221 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1222 reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
1223 AW_MMC_CKCR_MASK_DATA0);
1226 reg |= AW_MMC_CKCR_ENB;
1227 if (sc->aw_mmc_conf->mask_data0)
1228 reg |= AW_MMC_CKCR_MASK_DATA0;
1230 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1232 reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
1233 AW_MMC_CMDR_WAIT_PRE_OVER;
1234 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
/* Wait for the controller to consume the clock-programming command. */
1237 while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
1238 reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
/* Clear any status raised by the programming command. */
1241 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1243 if (reg & AW_MMC_CMDR_LOAD) {
1244 device_printf(sc->aw_dev, "timeout updating clock\n");
1248 if (sc->aw_mmc_conf->mask_data0) {
1249 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1250 reg &= ~AW_MMC_CKCR_MASK_DATA0;
1251 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
/*
 * mmcbr_switch_vccq: change the I/O-line voltage via the vqmmc regulator.
 * A no-op when the FDT provides no vqmmc supply.  NOTE(review): the switch
 * arms mapping ios.vccq values to microvolts (uvolt) are elided in this
 * extraction.
 */
1259 aw_mmc_switch_vccq(device_t bus, device_t child)
1261 struct aw_mmc_softc *sc;
1264 sc = device_get_softc(bus);
1266 if (sc->mmc_helper.vqmmc_supply == NULL)
1269 switch (sc->aw_host.ios.vccq) {
1280 err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt);
1282 device_printf(sc->aw_dev,
1283 "Cannot set vqmmc to %d<->%d\n",
/*
 * mmcbr_update_ios: push the host's ios state to the hardware.  Sets bus
 * width, drives the vmmc/vqmmc regulators and the pwrseq for power up/down
 * transitions, selects DDR mode in GCTL for ddr50/ddr52 timings, and — when
 * the requested clock changed — gates the card clock off, reprograms the
 * divider and (optionally) the new-timing mode, re-rates the module clock
 * via clk_set_freq, re-enables it, applies sample-delay calibration where
 * supported, and gates the card clock back on.
 * NOTE(review): the DDR divider/clock doubling logic between lines 1374 and
 * 1395 is partially elided in this extraction — 'clock' may be scaled for
 * DDR before being handed to clk_set_freq; confirm against the full source.
 */
1294 aw_mmc_update_ios(device_t bus, device_t child)
1297 struct aw_mmc_softc *sc;
1298 struct mmc_ios *ios;
1300 uint32_t reg, div = 1;
1304 sc = device_get_softc(bus);
1306 ios = &sc->aw_host.ios;
1308 /* Set the bus width. */
1309 switch (ios->bus_width) {
1311 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
1314 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
1317 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
1321 switch (ios->power_mode) {
/* Power down: disable any enabled supplies, then the pwrseq. */
1325 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1326 device_printf(sc->aw_dev, "Powering down sd/mmc\n");
1328 if (sc->mmc_helper.vmmc_supply) {
1329 rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
1330 if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
1331 regulator_disable(sc->mmc_helper.vmmc_supply);
1333 if (sc->mmc_helper.vqmmc_supply) {
1334 rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
1335 if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
1336 regulator_disable(sc->mmc_helper.vqmmc_supply);
1339 if (sc->mmc_helper.mmc_pwrseq)
1340 MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, false);
/* Power up: mirror image of the above. */
1345 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1346 device_printf(sc->aw_dev, "Powering up sd/mmc\n");
1348 if (sc->mmc_helper.vmmc_supply) {
1349 rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
1350 if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
1351 regulator_enable(sc->mmc_helper.vmmc_supply);
1353 if (sc->mmc_helper.vqmmc_supply) {
1354 rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
1355 if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
1356 regulator_enable(sc->mmc_helper.vqmmc_supply);
1359 if (sc->mmc_helper.mmc_pwrseq)
1360 MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, true);
1365 /* Enable ddr mode if needed */
1366 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1367 if (ios->timing == bus_timing_uhs_ddr50 ||
1368 ios->timing == bus_timing_mmc_ddr52)
1369 reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1371 reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1372 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
/* Reprogram the clock only when the requested rate actually changed. */
1374 if (ios->clock && ios->clock != sc->aw_clock) {
1375 sc->aw_clock = clock = ios->clock;
/* Gate the card clock off before touching divider/rate. */
1378 error = aw_mmc_update_clock(sc, 0);
1382 if (ios->timing == bus_timing_mmc_ddr52 &&
1383 (sc->aw_mmc_conf->new_timing ||
1384 ios->bus_width == bus_width_8)) {
1389 /* Reset the divider. */
1390 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1391 reg &= ~AW_MMC_CKCR_DIV;
1393 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1395 /* New timing mode if needed */
1396 if (sc->aw_mmc_conf->new_timing) {
1397 reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1398 reg |= AW_MMC_NTSR_MODE_SELECT;
1399 AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1402 /* Set the MMC clock. */
1403 error = clk_disable(sc->aw_clk_mmc);
1404 if (error != 0 && bootverbose)
1405 device_printf(sc->aw_dev,
1406 "failed to disable mmc clock: %d\n", error);
1407 error = clk_set_freq(sc->aw_clk_mmc, clock,
1408 CLK_SET_ROUND_DOWN);
1410 device_printf(sc->aw_dev,
1411 "failed to set frequency to %u Hz: %d\n",
1415 error = clk_enable(sc->aw_clk_mmc);
1416 if (error != 0 && bootverbose)
1417 device_printf(sc->aw_dev,
1418 "failed to re-enable mmc clock: %d\n", error);
1420 if (sc->aw_mmc_conf->can_calibrate)
1421 AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
/* Gate the card clock back on at the new rate. */
1424 error = aw_mmc_update_clock(sc, 1);
/*
 * mmcbr_get_ro: report the write-protect state via the FDT wp-gpio helper.
 */
1434 aw_mmc_get_ro(device_t bus, device_t child)
1436 struct aw_mmc_softc *sc;
1438 sc = device_get_softc(bus);
1440 return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
/*
 * mmcbr_acquire_host: sleep (interruptibly) until the host is free, then
 * mark it busy.  NOTE(review): the aw_bus_busy increment and locking lines
 * are elided in this extraction.
 */
1444 aw_mmc_acquire_host(device_t bus, device_t child)
1446 struct aw_mmc_softc *sc;
1449 sc = device_get_softc(bus);
1451 while (sc->aw_bus_busy) {
1452 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
/*
 * mmcbr_release_host: clear the busy flag and wake any waiters.
 * NOTE(review): the decrement/wakeup lines are elided in this extraction.
 */
1465 aw_mmc_release_host(device_t bus, device_t child)
1467 struct aw_mmc_softc *sc;
1469 sc = device_get_softc(bus);
/*
 * Newbus method table and driver registration.  Device, bus, MMC-bridge and
 * (when built with MMCCAM) mmc_sim interfaces are provided; the driver hangs
 * off simplebus and announces PNP info from compat_data.
 */
1479 static device_method_t aw_mmc_methods[] = {
1480 /* Device interface */
1481 DEVMETHOD(device_probe, aw_mmc_probe),
1482 DEVMETHOD(device_attach, aw_mmc_attach),
1483 DEVMETHOD(device_detach, aw_mmc_detach),
1486 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
1487 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
1488 DEVMETHOD(bus_add_child, bus_generic_add_child),
1491 /* MMC bridge interface */
1492 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
1493 DEVMETHOD(mmcbr_request, aw_mmc_request),
1494 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
1495 DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq),
1496 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
1497 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
1501 /* MMCCAM interface */
1502 DEVMETHOD(mmc_sim_get_tran_settings, aw_mmc_get_tran_settings),
1503 DEVMETHOD(mmc_sim_set_tran_settings, aw_mmc_set_tran_settings),
1504 DEVMETHOD(mmc_sim_cam_request, aw_mmc_cam_request),
1505 DEVMETHOD(mmc_sim_cam_poll, aw_mmc_cam_poll),
1511 static driver_t aw_mmc_driver = {
1514 sizeof(struct aw_mmc_softc),
1517 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, NULL, NULL);
1519 MMC_DECLARE_BRIDGE(aw_mmc);
1521 SIMPLEBUS_PNP_INFO(compat_data);