2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
5 * Copyright (c) 2013 Alexander Fedorov
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/resource.h>
43 #include <sys/sysctl.h>
44 #include <sys/queue.h>
45 #include <sys/taskqueue.h>
47 #include <machine/bus.h>
49 #include <dev/ofw/ofw_bus.h>
50 #include <dev/ofw/ofw_bus_subr.h>
52 #include <dev/mmc/bridge.h>
53 #include <dev/mmc/mmcbrvar.h>
54 #include <dev/mmc/mmc_fdt_helpers.h>
56 #include <arm/allwinner/aw_mmc.h>
57 #include <dev/extres/clk/clk.h>
58 #include <dev/extres/hwreset/hwreset.h>
59 #include <dev/extres/regulator/regulator.h>
61 #include "opt_mmccam.h"
65 #include <cam/cam_ccb.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
/* Indices of the memory and IRQ entries in sc->aw_res[]. */
71 #define AW_MMC_MEMRES 0
72 #define AW_MMC_IRQRES 1
73 #define AW_MMC_RESSZ 2
/* One page worth of hardware DMA descriptors; a transfer uses at most this many. */
74 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
75 #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
/* FIFO watermark value written to AW_MMC_FWLR in aw_mmc_prepare_dma(); magic
 * constant -- presumably taken from vendor code, not documented here. */
76 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
/* Spin count used when polling for controller soft-reset completion. */
78 #define AW_MMC_RESET_RETRY 1000
/* 400 kHz card-identification clock programmed during attach. */
80 #define CARD_ID_FREQUENCY 400000
/*
 * Per-SoC controller configuration.  dma_xferlen is the maximum length of a
 * single DMA segment (it bounds max_data reported to MMC/CAM and sizes the
 * data bus_dma tag); can_calibrate gates the AW_MMC_SAMP_DL write in
 * aw_mmc_update_ios().
 * NOTE(review): the initializers below look truncated in this copy of the
 * file (closing braces / additional fields appear to be missing).
 */
89 static const struct aw_mmc_conf a10_mmc_conf = {
90 .dma_xferlen = 0x2000,
93 static const struct aw_mmc_conf a13_mmc_conf = {
94 .dma_xferlen = 0x10000,
97 static const struct aw_mmc_conf a64_mmc_conf = {
98 .dma_xferlen = 0x10000,
100 .can_calibrate = true,
104 static const struct aw_mmc_conf a64_emmc_conf = {
105 .dma_xferlen = 0x2000,
106 .can_calibrate = true,
/* FDT compatible strings mapped to the per-SoC config; also used for PNP info. */
109 static struct ofw_compat_data compat_data[] = {
110 {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
111 {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
112 {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
113 {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
114 {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
/*
 * Per-instance softc.  NOTE(review): several fields referenced elsewhere in
 * this file (aw_dev, aw_mtx, aw_intrhand, aw_clk_ahb, aw_clk_mmc, aw_timeout,
 * aw_intr, aw_resid, aw_bus_busy, ccb, child, sim_mtx, aw_dma_desc,
 * aw_dma_map_err) are not visible in this truncated view of the declaration.
 */
118 struct aw_mmc_softc {
122 hwreset_t aw_rst_ahb;           /* "ahb" reset line, de-asserted at attach */
126 struct callout aw_timeoutc;     /* request-timeout callout (aw_mmc_timeout) */
127 struct mmc_host aw_host;        /* bridge host state exposed via ivars */
128 struct mmc_fdt_helper mmc_helper; /* FDT cd/wp gpio + regulator helper */
/* MMCCAM support (only meaningful when built with opt_mmccam.h enabled). */
131 struct cam_devq * devq;
132 struct cam_sim * sim;
135 struct mmc_request * aw_req;    /* in-flight request (non-MMCCAM path) */
138 struct resource * aw_res[AW_MMC_RESSZ]; /* MEM + IRQ, see aw_mmc_res_spec */
139 struct aw_mmc_conf * aw_mmc_conf; /* per-SoC config from compat_data */
141 uint32_t aw_intr_wait;          /* RISR bits we must see before completing */
143 unsigned int aw_clock;          /* last clock programmed, to skip rework */
146 /* Fields required for DMA access. */
147 bus_addr_t aw_dma_desc_phys;    /* bus address of the descriptor ring */
148 bus_dmamap_t aw_dma_map;        /* map for the descriptor ring */
149 bus_dma_tag_t aw_dma_tag;       /* tag for the descriptor ring */
/* Tag/map pair for the actual data buffers. */
151 bus_dmamap_t aw_dma_buf_map;
152 bus_dma_tag_t aw_dma_buf_tag;
/* Resources allocated at attach: one memory window and one (shareable) IRQ. */
156 static struct resource_spec aw_mmc_res_spec[] = {
157 { SYS_RES_MEMORY, 0, RF_ACTIVE },
158 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/* Forward declarations. */
162 static int aw_mmc_probe(device_t);
163 static int aw_mmc_attach(device_t);
164 static int aw_mmc_detach(device_t);
165 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
166 static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc);
167 static int aw_mmc_reset(struct aw_mmc_softc *);
168 static int aw_mmc_init(struct aw_mmc_softc *);
169 static void aw_mmc_intr(void *);
170 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
171 static void aw_mmc_helper_cd_handler(device_t, bool);
173 static void aw_mmc_print_error(uint32_t);
174 static int aw_mmc_update_ios(device_t, device_t);
175 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
176 static int aw_mmc_get_ro(device_t, device_t);
177 static int aw_mmc_acquire_host(device_t, device_t);
178 static int aw_mmc_release_host(device_t, device_t);
/* MMCCAM SIM entry points. */
180 static void aw_mmc_cam_action(struct cam_sim *, union ccb *);
181 static void aw_mmc_cam_poll(struct cam_sim *);
182 static int aw_mmc_cam_settran_settings(struct aw_mmc_softc *, union ccb *);
183 static int aw_mmc_cam_request(struct aw_mmc_softc *, union ccb *);
184 static void aw_mmc_cam_handle_mmcio(struct cam_sim *, union ccb *);
/* Softc lock and MMIO accessors for the controller register window. */
187 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
188 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
189 #define AW_MMC_READ_4(_sc, _reg) \
190 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
191 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
192 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
/*
 * Forward an XPT_MMC_IO CCB from the SIM to the controller request path.
 * Thin wrapper so aw_mmc_cam_action() stays readable.
 */
196 aw_mmc_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb)
198 struct aw_mmc_softc *sc;
200 sc = cam_sim_softc(sim);
202 aw_mmc_cam_request(sc, ccb);
/*
 * MMCCAM SIM action routine: dispatch on the CCB function code.
 * Status defaults to CAM_SEL_TIMEOUT and is overwritten per-case; called
 * with sim_mtx held (asserted below).
 * NOTE(review): several case labels / break statements are missing from
 * this truncated view of the function.
 */
206 aw_mmc_cam_action(struct cam_sim *sim, union ccb *ccb)
208 struct aw_mmc_softc *sc;
210 sc = cam_sim_softc(sim);
212 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
217 mtx_assert(&sc->sim_mtx, MA_OWNED);
219 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report capabilities; max I/O derived from DMA geometry. */
221 mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim,
222 (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) /
226 case XPT_GET_TRAN_SETTINGS:
228 struct ccb_trans_settings *cts = &ccb->cts;
231 device_printf(sc->aw_dev, "Got XPT_GET_TRAN_SETTINGS\n");
233 cts->protocol = PROTO_MMCSD;
234 cts->protocol_version = 1;
235 cts->transport = XPORT_MMCSD;
236 cts->transport_version = 1;
237 cts->xport_specific.valid = 0;
/* Copy out host capabilities and current ios for the MMC transport. */
238 cts->proto_specific.mmc.host_ocr = sc->aw_host.host_ocr;
239 cts->proto_specific.mmc.host_f_min = sc->aw_host.f_min;
240 cts->proto_specific.mmc.host_f_max = sc->aw_host.f_max;
241 cts->proto_specific.mmc.host_caps = sc->aw_host.caps;
242 cts->proto_specific.mmc.host_max_data = (sc->aw_mmc_conf->dma_xferlen *
243 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
244 memcpy(&cts->proto_specific.mmc.ios, &sc->aw_host.ios, sizeof(struct mmc_ios));
245 ccb->ccb_h.status = CAM_REQ_CMP;
248 case XPT_SET_TRAN_SETTINGS:
251 device_printf(sc->aw_dev, "Got XPT_SET_TRAN_SETTINGS\n");
252 aw_mmc_cam_settran_settings(sc, ccb);
253 ccb->ccb_h.status = CAM_REQ_CMP;
/* XPT_RESET_BUS: no hardware action here, just acknowledge. */
258 device_printf(sc->aw_dev, "Got XPT_RESET_BUS, ACK it...\n");
259 ccb->ccb_h.status = CAM_REQ_CMP;
263 * Here is the HW-dependent part of
264 * sending the command to the underlying h/w
265 * At some point in the future an interrupt comes.
266 * Then the request will be marked as completed.
268 ccb->ccb_h.status = CAM_REQ_INPROG;
270 aw_mmc_cam_handle_mmcio(sim, ccb);
275 ccb->ccb_h.status = CAM_REQ_INVALID;
283 aw_mmc_cam_poll(struct cam_sim *sim)
/*
 * Apply XPT_SET_TRAN_SETTINGS: copy only the ios fields the caller marked
 * valid into the host's ios, then push them to hardware via
 * aw_mmc_update_ios().  Returns the update_ios() result.
 * NOTE(review): the assignment initializing new_ios (presumably
 * &cts->ios) is missing from this truncated view.
 */
289 aw_mmc_cam_settran_settings(struct aw_mmc_softc *sc, union ccb *ccb)
292 struct mmc_ios *new_ios;
293 struct ccb_trans_settings_mmc *cts;
295 ios = &sc->aw_host.ios;
297 cts = &ccb->cts.proto_specific.mmc;
300 /* Update only requested fields */
301 if (cts->ios_valid & MMC_CLK) {
302 ios->clock = new_ios->clock;
303 device_printf(sc->aw_dev, "Clock => %d\n", ios->clock);
305 if (cts->ios_valid & MMC_VDD) {
306 ios->vdd = new_ios->vdd;
307 device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd);
309 if (cts->ios_valid & MMC_CS) {
310 ios->chip_select = new_ios->chip_select;
311 device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select);
313 if (cts->ios_valid & MMC_BW) {
314 ios->bus_width = new_ios->bus_width;
315 device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width);
317 if (cts->ios_valid & MMC_PM) {
318 ios->power_mode = new_ios->power_mode;
319 device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode);
321 if (cts->ios_valid & MMC_BT) {
322 ios->timing = new_ios->timing;
323 device_printf(sc->aw_dev, "Timing => %d\n", ios->timing);
325 if (cts->ios_valid & MMC_BM) {
326 ios->bus_mode = new_ios->bus_mode;
327 device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode);
330 return (aw_mmc_update_ios(sc->aw_dev, NULL));
/*
 * Accept an MMCIO CCB: sanity-check the data phase, refuse overlap with an
 * in-flight command, record the CCB in sc->ccb, then enter the common
 * aw_mmc_request() path (which re-reads sc->ccb; req arg is NULL here).
 * NOTE(review): the sc->ccb assignment and lock handling are not visible
 * in this truncated view.
 */
334 aw_mmc_cam_request(struct aw_mmc_softc *sc, union ccb *ccb)
336 struct ccb_mmcio *mmcio;
343 if (__predict_false(bootverbose)) {
344 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
345 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
346 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
347 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
350 if (mmcio->cmd.data != NULL) {
/* A data phase with no length or direction flags is a driver bug upstream. */
351 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
352 panic("data->len = %d, data->flags = %d -- something is b0rked",
353 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
355 if (sc->ccb != NULL) {
356 device_printf(sc->aw_dev, "Controller still has an active command\n");
360 /* aw_mmc_request locks again */
362 aw_mmc_request(sc->aw_dev, NULL, NULL);
/*
 * Card-detect callback from mmc_fdt_gpio_setup(): attach a child "mmc"
 * device on insertion, detach it on removal (non-MMCCAM child path).
 */
369 aw_mmc_helper_cd_handler(device_t dev, bool present)
371 struct aw_mmc_softc *sc;
373 sc = device_get_softc(dev);
376 if (sc->child == NULL) {
378 device_printf(sc->aw_dev, "Card inserted\n");
380 sc->child = device_add_child(sc->aw_dev, "mmc", -1);
383 device_set_ivars(sc->child, sc);
384 (void)device_probe_and_attach(sc->child);
389 /* Card isn't present, detach if necessary */
390 if (sc->child != NULL) {
392 device_printf(sc->aw_dev, "Card removed\n");
395 device_delete_child(sc->aw_dev, sc->child);
/*
 * Newbus probe: match on FDT status and one of the compat_data
 * compatible strings (ocd_data carries the per-SoC conf pointer).
 */
403 aw_mmc_probe(device_t dev)
406 if (!ofw_bus_status_okay(dev))
408 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
411 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
413 return (BUS_PROBE_DEFAULT);
/*
 * Newbus attach.  In order: look up the per-SoC conf, allocate MEM+IRQ
 * resources, hook the interrupt, init the softc mutex and timeout callout,
 * de-assert the "ahb" reset, enable the "ahb" and "mmc" clocks (mmc clock
 * pre-set to the 400 kHz ID frequency), expose a req_timeout sysctl,
 * soft-reset the controller, set up DMA, seed default host caps, parse the
 * FDT helper properties, and (MMCCAM) create/register the CAM SIM.
 * The trailing labels unwind in reverse on failure.
 * NOTE(review): many error gotos/returns are missing from this truncated
 * view of the function.
 */
417 aw_mmc_attach(device_t dev)
419 struct aw_mmc_softc *sc;
420 struct sysctl_ctx_list *ctx;
421 struct sysctl_oid_list *tree;
424 sc = device_get_softc(dev);
427 sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
432 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
433 device_printf(dev, "cannot allocate device resources\n");
436 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
437 INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
439 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
440 device_printf(dev, "cannot setup interrupt handler\n");
443 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
445 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
447 /* De-assert reset */
448 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
449 error = hwreset_deassert(sc->aw_rst_ahb);
451 device_printf(dev, "cannot de-assert reset\n");
456 /* Activate the module clock. */
457 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
459 device_printf(dev, "cannot get ahb clock\n");
462 error = clk_enable(sc->aw_clk_ahb);
464 device_printf(dev, "cannot enable ahb clock\n");
467 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
469 device_printf(dev, "cannot get mmc clock\n");
/* Start the module clock at the 400 kHz identification frequency. */
472 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
475 device_printf(dev, "cannot init mmc clock\n");
478 error = clk_enable(sc->aw_clk_mmc);
480 device_printf(dev, "cannot enable mmc clock\n");
/* Tunable request timeout, consumed by aw_mmc_request()'s callout. */
485 ctx = device_get_sysctl_ctx(dev);
486 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
487 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
488 &sc->aw_timeout, 0, "Request timeout in seconds");
490 /* Soft Reset controller. */
491 if (aw_mmc_reset(sc) != 0) {
492 device_printf(dev, "cannot reset the controller\n");
496 if (aw_mmc_setup_dma(sc) != 0) {
497 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
501 /* Set some defaults for freq and supported mode */
502 sc->aw_host.f_min = 400000;
503 sc->aw_host.f_max = 52000000;
504 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
505 sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
506 mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host);
507 mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler);
/* MMCCAM: single-entry devq, SIM with its own mutex, then bus registration. */
511 if ((sc->devq = cam_simq_alloc(1)) == NULL) {
515 mtx_init(&sc->sim_mtx, "awmmcsim", NULL, MTX_DEF);
516 sc->sim = cam_sim_alloc_dev(aw_mmc_cam_action, aw_mmc_cam_poll,
517 "aw_mmc_sim", sc, dev,
518 &sc->sim_mtx, 1, 1, sc->devq);
520 if (sc->sim == NULL) {
521 cam_simq_free(sc->devq);
522 device_printf(dev, "cannot allocate CAM SIM\n");
526 mtx_lock(&sc->sim_mtx);
527 if (xpt_bus_register(sc->sim, sc->aw_dev, 0) != 0) {
528 device_printf(dev, "cannot register SCSI pass-through bus\n");
529 cam_sim_free(sc->sim, FALSE);
530 cam_simq_free(sc->devq);
531 mtx_unlock(&sc->sim_mtx);
535 mtx_unlock(&sc->sim_mtx);
/* Error unwinding: undo callout/mutex/interrupt/resources, then CAM state. */
541 callout_drain(&sc->aw_timeoutc);
542 mtx_destroy(&sc->aw_mtx);
543 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
544 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
547 if (sc->sim != NULL) {
548 mtx_lock(&sc->sim_mtx);
549 xpt_bus_deregister(cam_sim_path(sc->sim));
550 cam_sim_free(sc->sim, FALSE);
551 mtx_unlock(&sc->sim_mtx);
554 if (sc->devq != NULL)
555 cam_simq_free(sc->devq);
/*
 * Newbus detach: disable clocks, assert reset, tear down the FDT gpio
 * helper, stop the timeout callout, delete children, free DMA state,
 * release interrupt/resources, and unwind the CAM SIM if present.
 * NOTE(review): the child-enumeration loop providing 'd' is missing from
 * this truncated view.
 */
561 aw_mmc_detach(device_t dev)
563 struct aw_mmc_softc *sc;
566 sc = device_get_softc(dev);
568 clk_disable(sc->aw_clk_mmc);
569 clk_disable(sc->aw_clk_ahb);
570 hwreset_assert(sc->aw_rst_ahb);
572 mmc_fdt_gpio_teardown(&sc->mmc_helper);
574 callout_drain(&sc->aw_timeoutc);
581 device_delete_child(sc->aw_dev, d);
583 aw_mmc_teardown_dma(sc);
585 mtx_destroy(&sc->aw_mtx);
587 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
588 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
591 if (sc->sim != NULL) {
592 mtx_lock(&sc->sim_mtx);
593 xpt_bus_deregister(cam_sim_path(sc->sim));
594 cam_sim_free(sc->sim, FALSE);
595 mtx_unlock(&sc->sim_mtx);
598 if (sc->devq != NULL)
599 cam_simq_free(sc->devq);
/*
 * bus_dmamap_load() callback for the descriptor ring: record the load
 * error and stash the ring's single-segment bus address in the softc.
 */
606 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
608 struct aw_mmc_softc *sc;
610 sc = (struct aw_mmc_softc *)arg;
612 sc->aw_dma_map_err = err;
615 sc->aw_dma_desc_phys = segs[0].ds_addr;
/*
 * Create the two bus_dma tag/map pairs the controller needs:
 *  1) a single-segment, 32-bit-addressable ring of AW_MMC_DMA_SEGS hardware
 *     descriptors (allocated coherent and zeroed, loaded via
 *     aw_dma_desc_cb() to capture its bus address), and
 *  2) a data-buffer tag of up to AW_MMC_DMA_SEGS segments of at most
 *     dma_xferlen bytes each, with one map created up front.
 * Returns 0 on success or the bus_dma error.
 */
619 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
623 /* Allocate the DMA descriptor memory. */
624 error = bus_dma_tag_create(
625 bus_get_dma_tag(sc->aw_dev), /* parent */
626 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
627 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
628 BUS_SPACE_MAXADDR, /* highaddr */
629 NULL, NULL, /* filter, filterarg*/
630 AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */
631 AW_MMC_DMA_DESC_SIZE, /* maxsegsize */
633 NULL, NULL, /* lock, lockarg*/
638 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
639 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
644 error = bus_dmamap_load(sc->aw_dma_tag,
646 sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
647 aw_dma_desc_cb, sc, 0);
/* The callback reports asynchronous load failures through the softc. */
650 if (sc->aw_dma_map_err)
651 return (sc->aw_dma_map_err);
653 /* Create the DMA map for data transfers. */
654 error = bus_dma_tag_create(
655 bus_get_dma_tag(sc->aw_dev), /* parent */
656 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
657 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
658 BUS_SPACE_MAXADDR, /* highaddr */
659 NULL, NULL, /* filter, filterarg*/
660 sc->aw_mmc_conf->dma_xferlen *
661 AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */
662 sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */
663 BUS_DMA_ALLOCNOW, /* flags */
664 NULL, NULL, /* lock, lockarg*/
665 &sc->aw_dma_buf_tag);
668 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
669 &sc->aw_dma_buf_map);
/*
 * Release both DMA tag/map pairs set up by aw_mmc_setup_dma():
 * descriptor ring first, then the data-buffer map and tag.
 */
677 aw_mmc_teardown_dma(struct aw_mmc_softc *sc)
680 bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map);
681 bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map);
682 if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0)
683 device_printf(sc->aw_dev, "Cannot destroy the dma tag\n");
685 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
686 bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
687 if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0)
688 device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n");
/*
 * bus_dmamap_load() callback for a data transfer: fill the hardware
 * descriptor chain.  Each descriptor gets the segment address/length
 * (length 0 encodes "max xferlen" per the hardware convention) and a
 * physical link to the next descriptor; the first/last descriptors get
 * the FD/LD+ER flags and the last drops DIC so only the final descriptor
 * interrupts.
 */
692 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
695 struct aw_mmc_dma_desc *dma_desc;
696 struct aw_mmc_softc *sc;
698 sc = (struct aw_mmc_softc *)arg;
699 sc->aw_dma_map_err = err;
704 dma_desc = sc->aw_dma_desc;
705 for (i = 0; i < nsegs; i++) {
706 if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen)
707 dma_desc[i].buf_size = 0; /* Size of 0 indicate max len */
709 dma_desc[i].buf_size = segs[i].ds_len;
710 dma_desc[i].buf_addr = segs[i].ds_addr;
711 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
712 AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;
/* Physical (bus) address of the next descriptor in the ring. */
714 dma_desc[i].next = sc->aw_dma_desc_phys +
715 ((i + 1) * sizeof(struct aw_mmc_dma_desc));
718 dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
719 dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
720 AW_MMC_DMA_CONFIG_ER;
721 dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
722 dma_desc[nsegs - 1].next = 0;
/*
 * Map the current command's data buffer for DMA and program the
 * controller's IDMA engine: load the buffer map (aw_dma_cb builds the
 * descriptor chain), sync buffer and ring, enable/reset DMA in GCTL,
 * soft-reset and start the IDMAC, unmask the RX or TX IDMA interrupt,
 * and point the hardware at the descriptor list + FIFO watermark.
 * The command source is sc->ccb (MMCCAM) or sc->aw_req.
 */
726 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
728 bus_dmasync_op_t sync_op;
730 struct mmc_command *cmd;
734 cmd = &sc->ccb->mmcio.cmd;
736 cmd = sc->aw_req->cmd;
/* Refuse transfers larger than the descriptor ring can describe. */
738 if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
740 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
741 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
744 if (sc->aw_dma_map_err)
745 return (sc->aw_dma_map_err);
747 if (cmd->data->flags & MMC_DATA_WRITE)
748 sync_op = BUS_DMASYNC_PREWRITE;
750 sync_op = BUS_DMASYNC_PREREAD;
751 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
/* The CPU wrote the descriptor ring; push it out before the device reads it. */
752 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
755 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
756 val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
757 val |= AW_MMC_GCTL_DMA_ENB;
758 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
761 val |= AW_MMC_GCTL_DMA_RST;
762 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
764 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
765 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
766 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
768 /* Enable RX or TX DMA interrupt */
769 val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
770 if (cmd->data->flags & MMC_DATA_WRITE)
771 val |= AW_MMC_IDST_TX_INT;
773 val |= AW_MMC_IDST_RX_INT;
774 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
776 /* Set DMA descritptor list address */
777 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
779 /* FIFO trigger level */
780 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
/*
 * Soft-reset the controller: set GCTL_RESET and poll up to
 * AW_MMC_RESET_RETRY times for the bit to self-clear.
 * NOTE(review): the return statements are missing from this truncated view;
 * callers treat non-zero as failure.
 */
786 aw_mmc_reset(struct aw_mmc_softc *sc)
791 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
792 reg |= AW_MMC_GCTL_RESET;
793 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
794 timeout = AW_MMC_RESET_RETRY;
795 while (--timeout > 0) {
796 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
/*
 * Bring the controller to a known state: soft-reset, program maximum
 * data/response timeouts, mask and clear all interrupts, poke the
 * undocumented debug and function-select registers, clear IDMA status,
 * and enable global interrupts with AHB access mode disabled.
 */
807 aw_mmc_init(struct aw_mmc_softc *sc)
812 ret = aw_mmc_reset(sc);
816 /* Set the timeout. */
817 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
818 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
819 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
821 /* Unmask interrupts. */
822 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
824 /* Clear pending interrupts. */
825 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
827 /* Debug register, undocumented */
828 AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb)
830 /* Function select register */
831 AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
/* Clear any stale IDMA interrupt status. */
833 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
835 /* Enable interrupts and disable AHB access. */
836 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
837 reg |= AW_MMC_GCTL_INT_ENB;
838 reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
839 reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
840 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
/*
 * Complete the in-flight command.  On error, reset the FIFO and DMA
 * engines (polled, like aw_mmc_reset) and re-kick the card clock.
 * Then stop the timeout callout, clear per-request softc state, and
 * complete the CCB (MMCCAM) or mmc_request.
 * NOTE(review): the ccb/req retrieval and xpt_done()/req->done() calls
 * are partially missing from this truncated view.
 */
846 aw_mmc_req_done(struct aw_mmc_softc *sc)
848 struct mmc_command *cmd;
852 struct mmc_request *req;
859 cmd = &ccb->mmcio.cmd;
861 cmd = sc->aw_req->cmd;
865 device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error);
868 if (cmd->error != MMC_ERR_NONE) {
869 /* Reset the FIFO and DMA engines. */
870 mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
871 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
872 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
874 retry = AW_MMC_RESET_RETRY;
875 while (--retry > 0) {
876 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
877 AW_MMC_GCTL_RESET) == 0)
882 device_printf(sc->aw_dev,
883 "timeout resetting DMA/FIFO\n");
884 aw_mmc_update_clock(sc, 1);
887 callout_stop(&sc->aw_timeoutc);
/* Reset per-request bookkeeping for the next command. */
890 sc->aw_dma_map_err = 0;
891 sc->aw_intr_wait = 0;
895 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
/*
 * Successful-completion path: wait (polled) for the card to stop
 * signalling busy, copy out the response registers (RESP3..RESP0 are
 * reversed into resp[0..3] for 136-bit responses), and verify that the
 * whole data phase transferred (aw_resid counts 32-bit words).
 */
905 aw_mmc_req_ok(struct aw_mmc_softc *sc)
908 struct mmc_command *cmd;
912 while (--timeout > 0) {
913 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
914 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
919 cmd = &sc->ccb->mmcio.cmd;
921 cmd = sc->aw_req->cmd;
/* Busy never cleared: fail the command. */
924 cmd->error = MMC_ERR_FAILED;
928 if (cmd->flags & MMC_RSP_PRESENT) {
929 if (cmd->flags & MMC_RSP_136) {
930 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
931 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
932 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
933 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
935 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
937 /* All data has been transferred ? */
938 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
939 cmd->error = MMC_ERR_FAILED;
/* Record an MMC error code on the active command (CCB or mmc_request). */
945 set_mmc_error(struct aw_mmc_softc *sc, int error_code)
948 sc->ccb->mmcio.cmd.error = error_code;
950 sc->aw_req->cmd->error = error_code;
/*
 * Request-timeout callout handler: if a command is still pending
 * (sc->ccb under MMCCAM, sc->aw_req otherwise) mark it MMC_ERR_TIMEOUT;
 * otherwise log a spurious timeout.
 */
955 aw_mmc_timeout(void *arg)
957 struct aw_mmc_softc *sc;
959 sc = (struct aw_mmc_softc *)arg;
961 if (sc->ccb != NULL) {
963 if (sc->aw_req != NULL) {
965 device_printf(sc->aw_dev, "controller timeout\n");
966 set_mmc_error(sc, MMC_ERR_TIMEOUT);
969 device_printf(sc->aw_dev,
970 "Spurious timeout - no active request\n");
/* Decode the RISR error bits into human-readable names on the console. */
974 aw_mmc_print_error(uint32_t err)
976 if(err & AW_MMC_INT_RESP_ERR)
977 printf("AW_MMC_INT_RESP_ERR ");
978 if (err & AW_MMC_INT_RESP_CRC_ERR)
979 printf("AW_MMC_INT_RESP_CRC_ERR ");
980 if (err & AW_MMC_INT_DATA_CRC_ERR)
981 printf("AW_MMC_INT_DATA_CRC_ERR ");
982 if (err & AW_MMC_INT_RESP_TIMEOUT)
983 printf("AW_MMC_INT_RESP_TIMEOUT ");
984 if (err & AW_MMC_INT_FIFO_RUN_ERR)
985 printf("AW_MMC_INT_FIFO_RUN_ERR ");
986 if (err & AW_MMC_INT_CMD_BUSY)
987 printf("AW_MMC_INT_CMD_BUSY ");
988 if (err & AW_MMC_INT_DATA_START_ERR)
989 printf("AW_MMC_INT_DATA_START_ERR ");
990 if (err & AW_MMC_INT_DATA_END_BIT_ERR)
991 printf("AW_MMC_INT_DATA_END_BIT_ERR");
/*
 * Interrupt handler.  Read RISR (raw interrupt status), IDST (IDMA
 * status) and IMKR (mask); bail on fully-spurious interrupts.  With no
 * active request, just log.  Command errors (AW_MMC_INT_ERR_BIT) or IDMA
 * errors finish the request immediately with TIMEOUT/FAILED.  Otherwise
 * accumulate RISR bits; when a DMA-complete IDST arrives, post-sync and
 * unload the data map and record the word count in aw_resid.  The request
 * completes once all bits in aw_intr_wait have been seen.  Both status
 * registers are acked (write-1-to-clear) before return.
 */
996 aw_mmc_intr(void *arg)
998 bus_dmasync_op_t sync_op;
999 struct aw_mmc_softc *sc;
1000 struct mmc_data *data;
1001 uint32_t idst, imask, rint;
1003 sc = (struct aw_mmc_softc *)arg;
1005 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
1006 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
1007 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
1008 if (idst == 0 && imask == 0 && rint == 0) {
1013 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
1017 if (sc->ccb == NULL) {
1019 if (sc->aw_req == NULL) {
1021 device_printf(sc->aw_dev,
1022 "Spurious interrupt - no active request, rint: 0x%08X\n",
1024 aw_mmc_print_error(rint);
1027 if (rint & AW_MMC_INT_ERR_BIT) {
1029 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
1030 aw_mmc_print_error(rint);
1031 if (rint & AW_MMC_INT_RESP_TIMEOUT)
1032 set_mmc_error(sc, MMC_ERR_TIMEOUT);
1034 set_mmc_error(sc, MMC_ERR_FAILED);
1035 aw_mmc_req_done(sc);
1038 if (idst & AW_MMC_IDST_ERROR) {
1039 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
1040 set_mmc_error(sc, MMC_ERR_FAILED);
1041 aw_mmc_req_done(sc);
1045 sc->aw_intr |= rint;
1047 data = sc->ccb->mmcio.cmd.data;
1049 data = sc->aw_req->cmd->data;
1051 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
1052 if (data->flags & MMC_DATA_WRITE)
1053 sync_op = BUS_DMASYNC_POSTWRITE;
1055 sync_op = BUS_DMASYNC_POSTREAD;
1056 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
1058 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
1059 BUS_DMASYNC_POSTWRITE);
1060 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
/* aw_resid is in 32-bit words; compared against data->len in req_ok(). */
1061 sc->aw_resid = data->len >> 2;
1063 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
/* Ack both interrupt status registers (write-1-to-clear). */
1067 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
1068 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
/*
 * mmcbr request entry point (also reused by the MMCCAM path with
 * req == NULL, taking the command from sc->ccb).  Builds the CMDR word
 * and interrupt mask from the command flags, programs block size/count
 * and auto-stop, writes the argument, then either fires the command
 * directly (no data) or runs aw_mmc_prepare_dma() first.  Finally arms
 * the aw_timeout-second watchdog callout.
 * NOTE(review): lock acquisition, busy-check and several else-branches
 * are missing from this truncated view.
 */
1073 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
1076 struct aw_mmc_softc *sc;
1077 struct mmc_command *cmd;
1078 uint32_t cmdreg, imask;
1081 sc = device_get_softc(bus);
1085 KASSERT(req == NULL, ("req should be NULL in MMCCAM case!"));
1087 * For MMCCAM, sc->ccb has been NULL-checked and populated
1088 * by aw_mmc_cam_request() already.
1090 cmd = &sc->ccb->mmcio.cmd;
1101 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1102 cmd->opcode, cmd->arg, cmd->flags,
1103 cmd->data != NULL ? (unsigned int)cmd->data->len : 0,
1104 cmd->data != NULL ? cmd->data->flags: 0);
1107 cmdreg = AW_MMC_CMDR_LOAD;
1108 imask = AW_MMC_INT_ERR_BIT;
1109 sc->aw_intr_wait = 0;
1112 cmd->error = MMC_ERR_NONE;
/* CMD0 needs the 80-clock init sequence. */
1114 if (cmd->opcode == MMC_GO_IDLE_STATE)
1115 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
1117 if (cmd->flags & MMC_RSP_PRESENT)
1118 cmdreg |= AW_MMC_CMDR_RESP_RCV;
1119 if (cmd->flags & MMC_RSP_136)
1120 cmdreg |= AW_MMC_CMDR_LONG_RESP;
1121 if (cmd->flags & MMC_RSP_CRC)
1122 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
/* Data phase: pick auto-stop vs. data-over completion condition. */
1125 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
1127 if (cmd->data->flags & MMC_DATA_MULTI) {
1128 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
1129 imask |= AW_MMC_INT_AUTO_STOP_DONE;
1130 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
1132 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
1133 imask |= AW_MMC_INT_DATA_OVER;
1135 if (cmd->data->flags & MMC_DATA_WRITE)
1136 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
1138 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1139 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size);
1140 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1144 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
1145 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
1146 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
/* No data phase: command-done is the completion interrupt. */
1149 imask |= AW_MMC_INT_CMD_DONE;
1152 /* Enable the interrupts we are interested in */
1153 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
1154 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1156 /* Enable auto stop if needed */
1157 AW_MMC_WRITE_4(sc, AW_MMC_A12A,
1158 cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
1160 /* Write the command argument */
1161 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
1164 * If we don't have data start the request
1165 * if we do prepare the dma request and start the request
1167 if (cmd->data == NULL) {
1168 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1170 err = aw_mmc_prepare_dma(sc);
1172 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
1174 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
/* Watchdog: aw_mmc_timeout() fires if no completion within aw_timeout s. */
1177 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
1178 aw_mmc_timeout, sc);
/*
 * mmcbr ivar read accessor: export host/ios fields to the mmc child.
 * MAX_DATA is derived from the DMA geometry in sectors; retuning is
 * never requested.
 */
1185 aw_mmc_read_ivar(device_t bus, device_t child, int which,
1188 struct aw_mmc_softc *sc;
1190 sc = device_get_softc(bus);
1194 case MMCBR_IVAR_BUS_MODE:
1195 *(int *)result = sc->aw_host.ios.bus_mode;
1197 case MMCBR_IVAR_BUS_WIDTH:
1198 *(int *)result = sc->aw_host.ios.bus_width;
1200 case MMCBR_IVAR_CHIP_SELECT:
1201 *(int *)result = sc->aw_host.ios.chip_select;
1203 case MMCBR_IVAR_CLOCK:
1204 *(int *)result = sc->aw_host.ios.clock;
1206 case MMCBR_IVAR_F_MIN:
1207 *(int *)result = sc->aw_host.f_min;
1209 case MMCBR_IVAR_F_MAX:
1210 *(int *)result = sc->aw_host.f_max;
1212 case MMCBR_IVAR_HOST_OCR:
1213 *(int *)result = sc->aw_host.host_ocr;
1215 case MMCBR_IVAR_MODE:
1216 *(int *)result = sc->aw_host.mode;
1218 case MMCBR_IVAR_OCR:
1219 *(int *)result = sc->aw_host.ocr;
1221 case MMCBR_IVAR_POWER_MODE:
1222 *(int *)result = sc->aw_host.ios.power_mode;
1224 case MMCBR_IVAR_VDD:
1225 *(int *)result = sc->aw_host.ios.vdd;
1227 case MMCBR_IVAR_VCCQ:
1228 *(int *)result = sc->aw_host.ios.vccq;
1230 case MMCBR_IVAR_CAPS:
1231 *(int *)result = sc->aw_host.caps;
1233 case MMCBR_IVAR_TIMING:
1234 *(int *)result = sc->aw_host.ios.timing;
1236 case MMCBR_IVAR_MAX_DATA:
1237 *(int *)result = (sc->aw_mmc_conf->dma_xferlen *
1238 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
1240 case MMCBR_IVAR_RETUNE_REQ:
1241 *(int *)result = retune_req_none;
/*
 * mmcbr ivar write accessor: let the mmc child update host/ios fields.
 * Capability and frequency-limit ivars are rejected as read-only.
 */
1249 aw_mmc_write_ivar(device_t bus, device_t child, int which,
1252 struct aw_mmc_softc *sc;
1254 sc = device_get_softc(bus);
1258 case MMCBR_IVAR_BUS_MODE:
1259 sc->aw_host.ios.bus_mode = value;
1261 case MMCBR_IVAR_BUS_WIDTH:
1262 sc->aw_host.ios.bus_width = value;
1264 case MMCBR_IVAR_CHIP_SELECT:
1265 sc->aw_host.ios.chip_select = value;
1267 case MMCBR_IVAR_CLOCK:
1268 sc->aw_host.ios.clock = value;
1270 case MMCBR_IVAR_MODE:
1271 sc->aw_host.mode = value;
1273 case MMCBR_IVAR_OCR:
1274 sc->aw_host.ocr = value;
1276 case MMCBR_IVAR_POWER_MODE:
1277 sc->aw_host.ios.power_mode = value;
1279 case MMCBR_IVAR_VDD:
1280 sc->aw_host.ios.vdd = value;
1282 case MMCBR_IVAR_VCCQ:
1283 sc->aw_host.ios.vccq = value;
1285 case MMCBR_IVAR_TIMING:
1286 sc->aw_host.ios.timing = value;
1288 /* These are read-only */
1289 case MMCBR_IVAR_CAPS:
1290 case MMCBR_IVAR_HOST_OCR:
1291 case MMCBR_IVAR_F_MIN:
1292 case MMCBR_IVAR_F_MAX:
1293 case MMCBR_IVAR_MAX_DATA:
/*
 * Gate the card clock on (clkon != 0) or off.  After writing CKCR the new
 * setting is latched into the card-clock domain by issuing a PRG_CLK
 * "command" through CMDR and polling for the LOAD bit to clear; a
 * timeout is an error.  On parts with mask_data0, DATA0 is masked around
 * the update and unmasked afterwards.
 */
1301 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
1306 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1307 reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
1308 AW_MMC_CKCR_MASK_DATA0);
1311 reg |= AW_MMC_CKCR_ENB;
1312 if (sc->aw_mmc_conf->mask_data0)
1313 reg |= AW_MMC_CKCR_MASK_DATA0;
1315 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
/* Latch the clock change via a no-op "program clock" command. */
1317 reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
1318 AW_MMC_CMDR_WAIT_PRE_OVER;
1319 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
1322 while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
1323 reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
1326 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1328 if (reg & AW_MMC_CMDR_LOAD) {
1329 device_printf(sc->aw_dev, "timeout updating clock\n");
1333 if (sc->aw_mmc_conf->mask_data0) {
1334 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1335 reg &= ~AW_MMC_CKCR_MASK_DATA0;
1336 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
/*
 * mmcbr_switch_vccq: drive the vqmmc regulator to the voltage implied by
 * ios.vccq.  No-op (success) when no vqmmc supply exists in the FDT.
 * NOTE(review): the vccq->uvolt case mapping is missing from this
 * truncated view.
 */
1343 aw_mmc_switch_vccq(device_t bus, device_t child)
1345 struct aw_mmc_softc *sc;
1348 sc = device_get_softc(bus);
1350 if (sc->mmc_helper.vqmmc_supply == NULL)
1353 switch (sc->aw_host.ios.vccq) {
1364 err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt);
1366 device_printf(sc->aw_dev,
1367 "Cannot set vqmmc to %d<->%d\n",
/*
 * mmcbr_update_ios: push the host's ios to hardware.  Sets bus width
 * (BWDR), handles power up/down via the vmmc/vqmmc regulators, toggles
 * DDR mode in GCTL for ddr50/ddr52 timing, and -- when the clock changed --
 * gates the card clock off, reprograms the divider and the module clock
 * (with the DDR /2 special case for new-timing or 8-bit parts), enables
 * new-timing mode and optional sample-delay calibration, then gates the
 * clock back on.
 * NOTE(review): the DDR divider/clock adjustment under the ddr52 branch is
 * partially missing from this truncated view.
 */
1377 aw_mmc_update_ios(device_t bus, device_t child)
1380 struct aw_mmc_softc *sc;
1381 struct mmc_ios *ios;
1383 uint32_t reg, div = 1;
1385 sc = device_get_softc(bus);
1387 ios = &sc->aw_host.ios;
1389 /* Set the bus width. */
1390 switch (ios->bus_width) {
1392 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
1395 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
1398 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
1402 switch (ios->power_mode) {
1407 device_printf(sc->aw_dev, "Powering down sd/mmc\n");
1409 if (sc->mmc_helper.vmmc_supply)
1410 regulator_disable(sc->mmc_helper.vmmc_supply);
1411 if (sc->mmc_helper.vqmmc_supply)
1412 regulator_disable(sc->mmc_helper.vqmmc_supply);
1418 device_printf(sc->aw_dev, "Powering up sd/mmc\n");
1420 if (sc->mmc_helper.vmmc_supply)
1421 regulator_enable(sc->mmc_helper.vmmc_supply);
1422 if (sc->mmc_helper.vqmmc_supply)
1423 regulator_enable(sc->mmc_helper.vqmmc_supply);
1428 /* Enable ddr mode if needed */
1429 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1430 if (ios->timing == bus_timing_uhs_ddr50 ||
1431 ios->timing == bus_timing_mmc_ddr52)
1432 reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1434 reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1435 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
/* Only touch the clock tree when the requested frequency changed. */
1437 if (ios->clock && ios->clock != sc->aw_clock) {
1438 sc->aw_clock = clock = ios->clock;
/* Gate the card clock off while reprogramming. */
1441 error = aw_mmc_update_clock(sc, 0);
1445 if (ios->timing == bus_timing_mmc_ddr52 &&
1446 (sc->aw_mmc_conf->new_timing ||
1447 ios->bus_width == bus_width_8)) {
1452 /* Reset the divider. */
1453 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1454 reg &= ~AW_MMC_CKCR_DIV;
1456 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1458 /* New timing mode if needed */
1459 if (sc->aw_mmc_conf->new_timing) {
1460 reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1461 reg |= AW_MMC_NTSR_MODE_SELECT;
1462 AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1465 /* Set the MMC clock. */
1466 error = clk_disable(sc->aw_clk_mmc);
1467 if (error != 0 && bootverbose)
1468 device_printf(sc->aw_dev,
1469 "failed to disable mmc clock: %d\n", error);
1470 error = clk_set_freq(sc->aw_clk_mmc, clock,
1471 CLK_SET_ROUND_DOWN);
1473 device_printf(sc->aw_dev,
1474 "failed to set frequency to %u Hz: %d\n",
1478 error = clk_enable(sc->aw_clk_mmc);
1479 if (error != 0 && bootverbose)
1480 device_printf(sc->aw_dev,
1481 "failed to re-enable mmc clock: %d\n", error);
/* Enable hardware sample-delay calibration on parts that support it. */
1483 if (sc->aw_mmc_conf->can_calibrate)
1484 AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
/* Gate the card clock back on. */
1487 error = aw_mmc_update_clock(sc, 1);
/* mmcbr_get_ro: query the FDT write-protect GPIO via the helper. */
1497 aw_mmc_get_ro(device_t bus, device_t child)
1499 struct aw_mmc_softc *sc;
1501 sc = device_get_softc(bus);
1503 return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
/*
 * mmcbr_acquire_host: sleep (interruptibly, under aw_mtx) until the bus
 * is free; the matching busy-flag set/clear and wakeup are not visible in
 * this truncated view.
 */
1507 aw_mmc_acquire_host(device_t bus, device_t child)
1509 struct aw_mmc_softc *sc;
1512 sc = device_get_softc(bus);
1514 while (sc->aw_bus_busy) {
1515 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
/* mmcbr_release_host: counterpart that clears aw_bus_busy and wakes waiters
 * (body truncated in this view). */
1528 aw_mmc_release_host(device_t bus, device_t child)
1530 struct aw_mmc_softc *sc;
1532 sc = device_get_softc(bus);
/* Newbus method table: device, bus-ivar, and MMC bridge interfaces. */
1541 static device_method_t aw_mmc_methods[] = {
1542 /* Device interface */
1543 DEVMETHOD(device_probe, aw_mmc_probe),
1544 DEVMETHOD(device_attach, aw_mmc_attach),
1545 DEVMETHOD(device_detach, aw_mmc_detach),
1548 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
1549 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
1550 DEVMETHOD(bus_add_child, bus_generic_add_child),
1552 /* MMC bridge interface */
1553 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
1554 DEVMETHOD(mmcbr_request, aw_mmc_request),
1555 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
1556 DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq),
1557 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
1558 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
/* Driver/devclass registration on simplebus, plus MMC bridge + PNP info. */
1563 static devclass_t aw_mmc_devclass;
1565 static driver_t aw_mmc_driver = {
1568 sizeof(struct aw_mmc_softc),
1571 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
1574 MMC_DECLARE_BRIDGE(aw_mmc);
1576 SIMPLEBUS_PNP_INFO(compat_data);