2 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Synopsys DesignWare Mobile Storage Host Controller
33 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
40 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/module.h>
44 #include <sys/malloc.h>
46 #include <sys/timeet.h>
47 #include <sys/timetc.h>
49 #include <dev/mmc/bridge.h>
50 #include <dev/mmc/mmcreg.h>
51 #include <dev/mmc/mmcbrvar.h>
53 #include <dev/fdt/fdt_common.h>
54 #include <dev/ofw/openfirm.h>
55 #include <dev/ofw/ofw_bus.h>
56 #include <dev/ofw/ofw_bus_subr.h>
58 #include <machine/bus.h>
59 #include <machine/fdt.h>
60 #include <machine/cpu.h>
61 #include <machine/intr.h>
63 #include <dev/mmc/host/dwmmc.h>
/* Debug printf: compiled out by default; make body non-empty to trace. */
#define	dprintf(x, arg...)

/* Register accessors for BAR 0 (the controller register window). */
#define	READ4(_sc, _reg)		\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)		\
	bus_write_4((_sc)->res[0], _reg, _val)

/* Integer ceiling division; used for the card clock divider. */
#define	DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

/* Softc mutex helpers. */
#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc)					\
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->dev),	\
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&(_sc)->sc_mtx)
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
85 #define PENDING_CMD 0x01
86 #define PENDING_STOP 0x02
87 #define CARD_INIT_DONE 0x04
89 #define DWMMC_DATA_ERR_FLAGS (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
90 |SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
92 #define DWMMC_CMD_ERR_FLAGS (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
94 #define DWMMC_ERR_FLAGS (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
97 #define DES0_DIC (1 << 1)
98 #define DES0_LD (1 << 2)
99 #define DES0_FS (1 << 3)
100 #define DES0_CH (1 << 4)
101 #define DES0_ER (1 << 5)
102 #define DES0_CES (1 << 30)
103 #define DES0_OWN (1 << 31)
105 #define DES1_BS1_MASK 0xfff
106 #define DES1_BS1_SHIFT 0
109 uint32_t des0; /* control */
110 uint32_t des1; /* bufsize */
111 uint32_t des2; /* buf1 phys addr */
112 uint32_t des3; /* buf2 phys addr or next descr */
115 #define DESC_COUNT 256
116 #define DESC_SIZE (sizeof(struct idmac_desc) * DESC_COUNT)
117 #define DEF_MSIZE 0x2 /* Burst size of multiple transaction */
120 struct resource *res[2];
122 bus_space_handle_t bsh;
125 struct mmc_host host;
127 struct mmc_request *req;
128 struct mmc_command *curcmd;
131 uint32_t use_auto_stop;
133 uint32_t pwren_inverted;
135 bus_dma_tag_t desc_tag;
136 bus_dmamap_t desc_map;
137 struct idmac_desc *desc_ring;
138 bus_addr_t desc_ring_paddr;
139 bus_dma_tag_t buf_tag;
140 bus_dmamap_t buf_map;
/* Forward declarations for handlers referenced before their definitions. */
static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);
160 static struct resource_spec dwmmc_spec[] = {
161 { SYS_RES_MEMORY, 0, RF_ACTIVE },
162 { SYS_RES_IRQ, 0, RF_ACTIVE },
173 #define HWTYPE_MASK (0x0000ffff)
174 #define HWFLAG_MASK (0xffff << 16)
176 static struct ofw_compat_data compat_data[] = {
177 {"altr,socfpga-dw-mshc", HWTYPE_ALTERA},
178 {"samsung,exynos5420-dw-mshc", HWTYPE_EXYNOS},
179 {"rockchip,rk2928-dw-mshc", HWTYPE_ROCKCHIP},
184 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
189 *(bus_addr_t *)arg = segs[0].ds_addr;
193 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
195 struct dwmmc_softc *sc;
203 dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
205 for (idx = 0; idx < nsegs; idx++) {
206 sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
207 sc->desc_ring[idx].des1 = segs[idx].ds_len;
208 sc->desc_ring[idx].des2 = segs[idx].ds_addr;
211 sc->desc_ring[idx].des0 |= DES0_FS;
213 if (idx == (nsegs - 1)) {
214 sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
215 sc->desc_ring[idx].des0 |= DES0_LD;
221 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
226 reg = READ4(sc, SDMMC_CTRL);
228 WRITE4(sc, SDMMC_CTRL, reg);
230 /* Wait reset done */
231 for (i = 0; i < 100; i++) {
232 if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
237 device_printf(sc->dev, "Reset failed\n");
243 dma_setup(struct dwmmc_softc *sc)
250 * Set up TX descriptor ring, descriptors, and dma maps.
252 error = bus_dma_tag_create(
253 bus_get_dma_tag(sc->dev), /* Parent tag. */
254 4096, 0, /* alignment, boundary */
255 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
256 BUS_SPACE_MAXADDR, /* highaddr */
257 NULL, NULL, /* filter, filterarg */
258 DESC_SIZE, 1, /* maxsize, nsegments */
259 DESC_SIZE, /* maxsegsize */
261 NULL, NULL, /* lockfunc, lockarg */
264 device_printf(sc->dev,
265 "could not create ring DMA tag.\n");
269 error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
270 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
273 device_printf(sc->dev,
274 "could not allocate descriptor ring.\n");
278 error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
279 sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
280 &sc->desc_ring_paddr, 0);
282 device_printf(sc->dev,
283 "could not load descriptor ring map.\n");
287 for (idx = 0; idx < DESC_COUNT; idx++) {
288 sc->desc_ring[idx].des0 = DES0_CH;
289 sc->desc_ring[idx].des1 = 0;
290 nidx = (idx + 1) % DESC_COUNT;
291 sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
292 (nidx * sizeof(struct idmac_desc));
295 error = bus_dma_tag_create(
296 bus_get_dma_tag(sc->dev), /* Parent tag. */
297 4096, 0, /* alignment, boundary */
298 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
299 BUS_SPACE_MAXADDR, /* highaddr */
300 NULL, NULL, /* filter, filterarg */
301 DESC_COUNT*MMC_SECTOR_SIZE, /* maxsize */
302 DESC_COUNT, /* nsegments */
303 MMC_SECTOR_SIZE, /* maxsegsize */
305 NULL, NULL, /* lockfunc, lockarg */
308 device_printf(sc->dev,
309 "could not create ring DMA tag.\n");
313 error = bus_dmamap_create(sc->buf_tag, 0,
316 device_printf(sc->dev,
317 "could not create TX buffer DMA map.\n");
325 dwmmc_cmd_done(struct dwmmc_softc *sc)
327 struct mmc_command *cmd;
333 if (cmd->flags & MMC_RSP_PRESENT) {
334 if (cmd->flags & MMC_RSP_136) {
335 cmd->resp[3] = READ4(sc, SDMMC_RESP0);
336 cmd->resp[2] = READ4(sc, SDMMC_RESP1);
337 cmd->resp[1] = READ4(sc, SDMMC_RESP2);
338 cmd->resp[0] = READ4(sc, SDMMC_RESP3);
343 cmd->resp[0] = READ4(sc, SDMMC_RESP0);
349 dwmmc_tasklet(struct dwmmc_softc *sc)
351 struct mmc_command *cmd;
360 if (cmd->error != MMC_ERR_NONE || !cmd->data) {
361 dwmmc_next_operation(sc);
362 } else if (cmd->data && sc->dto_rcvd) {
363 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
364 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
367 dwmmc_next_operation(sc);
369 dwmmc_next_operation(sc);
375 dwmmc_intr(void *arg)
377 struct mmc_command *cmd;
378 struct dwmmc_softc *sc;
387 /* First handle SDMMC controller interrupts */
388 reg = READ4(sc, SDMMC_MINTSTS);
390 dprintf("%s 0x%08x\n", __func__, reg);
392 if (reg & DWMMC_CMD_ERR_FLAGS) {
393 WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
394 dprintf("cmd err 0x%08x cmd 0x%08x\n",
396 cmd->error = MMC_ERR_TIMEOUT;
399 if (reg & DWMMC_DATA_ERR_FLAGS) {
400 WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
401 dprintf("data err 0x%08x cmd 0x%08x\n",
403 cmd->error = MMC_ERR_FAILED;
410 if (reg & SDMMC_INTMASK_CMD_DONE) {
413 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
416 if (reg & SDMMC_INTMASK_ACD) {
418 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
421 if (reg & SDMMC_INTMASK_DTO) {
423 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
426 if (reg & SDMMC_INTMASK_CD) {
427 /* XXX: Handle card detect */
428 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
433 if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
436 if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
440 /* Now handle DMA interrupts */
441 reg = READ4(sc, SDMMC_IDSTS);
443 dprintf("dma intr 0x%08x\n", reg);
444 if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
445 WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
447 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
459 parse_fdt(struct dwmmc_softc *sc)
461 pcell_t dts_value[3];
465 if ((node = ofw_bus_get_node(sc->dev)) == -1)
469 if ((len = OF_getproplen(node, "fifo-depth")) <= 0)
471 OF_getencprop(node, "fifo-depth", dts_value, len);
472 sc->fifo_depth = dts_value[0];
475 if ((len = OF_getproplen(node, "num-slots")) <= 0)
477 OF_getencprop(node, "num-slots", dts_value, len);
478 sc->num_slots = dts_value[0];
481 * We need some platform-specific code to know
482 * what the clock is supplied for our device.
483 * For now rely on the value specified in FDT.
485 if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
487 OF_getencprop(node, "bus-frequency", dts_value, len);
488 sc->bus_hz = dts_value[0];
491 * Platform-specific stuff
492 * XXX: Move to separate file
495 if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
498 if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
500 OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
501 sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
502 sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
504 if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
506 OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
507 sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
508 (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));
510 if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
512 OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
513 sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
514 (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));
520 dwmmc_probe(device_t dev)
524 if (!ofw_bus_status_okay(dev))
527 hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
528 if (hwtype == HWTYPE_NONE)
531 device_set_desc(dev, "Synopsys DesignWare Mobile "
532 "Storage Host Controller");
533 return (BUS_PROBE_DEFAULT);
537 dwmmc_attach(device_t dev)
539 struct dwmmc_softc *sc;
544 sc = device_get_softc(dev);
547 sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
549 /* Why not to use Auto Stop? It save a hundred of irq per second */
550 sc->use_auto_stop = 1;
552 error = parse_fdt(sc);
554 device_printf(dev, "Can't get FDT property.\n");
560 if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
561 device_printf(dev, "could not allocate resources\n");
565 /* Memory interface */
566 sc->bst = rman_get_bustag(sc->res[0]);
567 sc->bsh = rman_get_bushandle(sc->res[0]);
569 /* Setup interrupt handler. */
570 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
571 NULL, dwmmc_intr, sc, &sc->intr_cookie);
573 device_printf(dev, "could not setup interrupt handler.\n");
577 device_printf(dev, "Hardware version ID is %04x\n",
578 READ4(sc, SDMMC_VERID) & 0xffff);
581 sc->pwren_inverted = 0;
583 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
585 sc->pwren_inverted = 1;
587 WRITE4(sc, EMMCP_MPSBEGIN0, 0);
588 WRITE4(sc, EMMCP_SEND0, 0);
589 WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
590 MPSCTRL_SECURE_WRITE_BIT |
591 MPSCTRL_NON_SECURE_READ_BIT |
592 MPSCTRL_NON_SECURE_WRITE_BIT |
596 /* XXX: we support operation for slot index 0 only */
598 if (sc->pwren_inverted) {
599 WRITE4(sc, SDMMC_PWREN, (0 << slot));
601 WRITE4(sc, SDMMC_PWREN, (1 << slot));
605 if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
606 SDMMC_CTRL_FIFO_RESET |
607 SDMMC_CTRL_DMA_RESET)))
610 dwmmc_setup_bus(sc, sc->host.f_min);
616 /* Install desc base */
617 WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
619 /* Enable DMA interrupts */
620 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
621 WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
626 /* Clear and disable interrups for a while */
627 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
628 WRITE4(sc, SDMMC_INTMASK, 0);
630 /* Maximum timeout */
631 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
633 /* Enable interrupts */
634 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
635 WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
642 WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
644 sc->host.f_min = 400000;
645 sc->host.f_max = 200000000;
646 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
647 sc->host.caps = MMC_CAP_4_BIT_DATA;
649 child = device_add_child(dev, "mmc", 0);
650 return (bus_generic_attach(dev));
654 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
660 WRITE4(sc, SDMMC_CLKENA, 0);
661 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
662 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
667 device_printf(sc->dev, "Failed update clk\n");
670 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
675 WRITE4(sc, SDMMC_CLKENA, 0);
676 WRITE4(sc, SDMMC_CLKSRC, 0);
678 div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
680 WRITE4(sc, SDMMC_CLKDIV, div);
681 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
682 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
687 device_printf(sc->dev, "Failed to update clk");
690 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
692 WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
693 WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
694 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
699 device_printf(sc->dev, "Failed to enable clk\n");
702 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
708 dwmmc_update_ios(device_t brdev, device_t reqdev)
710 struct dwmmc_softc *sc;
713 sc = device_get_softc(brdev);
716 dprintf("Setting up clk %u bus_width %d\n",
717 ios->clock, ios->bus_width);
719 dwmmc_setup_bus(sc, ios->clock);
721 if (ios->bus_width == bus_width_8)
722 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
723 else if (ios->bus_width == bus_width_4)
724 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
726 WRITE4(sc, SDMMC_CTYPE, 0);
728 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
729 /* XXX: take care about DDR or SDR use here */
730 WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
734 * XXX: take care about DDR bit
736 * reg = READ4(sc, SDMMC_UHS_REG);
737 * reg |= (SDMMC_UHS_REG_DDR);
738 * WRITE4(sc, SDMMC_UHS_REG, reg);
745 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
747 struct mmc_data *data;
751 if (data->flags & MMC_DATA_WRITE)
752 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
753 BUS_DMASYNC_POSTWRITE);
755 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
756 BUS_DMASYNC_POSTREAD);
758 bus_dmamap_unload(sc->buf_tag, sc->buf_map);
764 dma_stop(struct dwmmc_softc *sc)
768 reg = READ4(sc, SDMMC_CTRL);
769 reg &= ~(SDMMC_CTRL_USE_IDMAC);
770 reg |= (SDMMC_CTRL_DMA_RESET);
771 WRITE4(sc, SDMMC_CTRL, reg);
773 reg = READ4(sc, SDMMC_BMOD);
774 reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
775 reg |= (SDMMC_BMOD_SWR);
776 WRITE4(sc, SDMMC_BMOD, reg);
782 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
784 struct mmc_data *data;
792 reg = READ4(sc, SDMMC_INTMASK);
793 reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
794 WRITE4(sc, SDMMC_INTMASK, reg);
796 err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
797 data->data, data->len, dwmmc_ring_setup,
800 panic("dmamap_load failed\n");
802 if (data->flags & MMC_DATA_WRITE)
803 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
804 BUS_DMASYNC_PREWRITE);
806 bus_dmamap_sync(sc->buf_tag, sc->buf_map,
807 BUS_DMASYNC_PREREAD);
809 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
810 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
811 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
813 WRITE4(sc, SDMMC_FIFOTH, reg);
816 reg = READ4(sc, SDMMC_CTRL);
817 reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
818 WRITE4(sc, SDMMC_CTRL, reg);
821 reg = READ4(sc, SDMMC_BMOD);
822 reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
823 WRITE4(sc, SDMMC_BMOD, reg);
826 WRITE4(sc, SDMMC_PLDMND, 1);
832 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
834 struct mmc_data *data;
840 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
841 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
842 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
844 WRITE4(sc, SDMMC_FIFOTH, reg);
851 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
853 struct mmc_data *data;
856 if (cmd == NULL || cmd->data == NULL)
860 if ((data->flags & MMC_DATA_READ) == 0)
863 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
864 p = (uint32_t *)data->data + (data->xfer_len >> 2);
866 while (data->xfer_len < data->len) {
867 status = READ4(sc, SDMMC_STATUS);
868 if (status & SDMMC_STATUS_FIFO_EMPTY)
870 *p++ = READ4(sc, SDMMC_DATA);
874 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
878 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
880 struct mmc_data *data;
883 if (cmd == NULL || cmd->data == NULL)
887 if ((data->flags & MMC_DATA_WRITE) == 0)
890 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
891 p = (uint32_t *)data->data + (data->xfer_len >> 2);
893 while (data->xfer_len < data->len) {
894 status = READ4(sc, SDMMC_STATUS);
895 if (status & SDMMC_STATUS_FIFO_FULL)
897 WRITE4(sc, SDMMC_DATA, *p++);
901 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
905 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
907 struct mmc_data *data;
914 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
915 dwmmc_setup_bus(sc, sc->host.ios.clock);
917 /* XXX Upper layers don't always set this */
920 /* Begin setting up command register. */
924 dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
926 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
927 cmd->opcode == MMC_GO_IDLE_STATE ||
928 cmd->opcode == MMC_GO_INACTIVE_STATE)
929 cmdr |= SDMMC_CMD_STOP_ABORT;
930 else if (cmd->opcode != MMC_SEND_STATUS && data)
931 cmdr |= SDMMC_CMD_WAIT_PRVDATA;
933 /* Set up response handling. */
934 if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
935 cmdr |= SDMMC_CMD_RESP_EXP;
936 if (cmd->flags & MMC_RSP_136)
937 cmdr |= SDMMC_CMD_RESP_LONG;
940 if (cmd->flags & MMC_RSP_CRC)
941 cmdr |= SDMMC_CMD_RESP_CRC;
944 * XXX: Not all platforms want this.
946 cmdr |= SDMMC_CMD_USE_HOLD_REG;
948 if ((sc->flags & CARD_INIT_DONE) == 0) {
949 sc->flags |= (CARD_INIT_DONE);
950 cmdr |= SDMMC_CMD_SEND_INIT;
954 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
955 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
957 cmdr |= SDMMC_CMD_SEND_ASTOP;
959 cmdr |= SDMMC_CMD_DATA_EXP;
960 if (data->flags & MMC_DATA_STREAM)
961 cmdr |= SDMMC_CMD_MODE_STREAM;
962 if (data->flags & MMC_DATA_WRITE)
963 cmdr |= SDMMC_CMD_DATA_WRITE;
965 WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
966 WRITE4(sc, SDMMC_BYTCNT, data->len);
967 blksz = (data->len < MMC_SECTOR_SIZE) ? \
968 data->len : MMC_SECTOR_SIZE;
969 WRITE4(sc, SDMMC_BLKSIZ, blksz);
972 pio_prepare(sc, cmd);
974 dma_prepare(sc, cmd);
979 dprintf("cmdr 0x%08x\n", cmdr);
981 WRITE4(sc, SDMMC_CMDARG, cmd->arg);
983 WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
987 dwmmc_next_operation(struct dwmmc_softc *sc)
989 struct mmc_request *req;
1000 * XXX: Wait until card is still busy.
1001 * We do need this to prevent data timeouts,
1002 * mostly caused by multi-block write command
1003 * followed by single-read.
1005 while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1008 if (sc->flags & PENDING_CMD) {
1009 sc->flags &= ~PENDING_CMD;
1010 dwmmc_start_cmd(sc, req->cmd);
1012 } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1013 sc->flags &= ~PENDING_STOP;
1014 dwmmc_start_cmd(sc, req->stop);
1024 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1026 struct dwmmc_softc *sc;
1028 sc = device_get_softc(brdev);
1030 dprintf("%s\n", __func__);
1034 if (sc->req != NULL) {
1040 sc->flags |= PENDING_CMD;
1042 sc->flags |= PENDING_STOP;
1043 dwmmc_next_operation(sc);
1050 dwmmc_get_ro(device_t brdev, device_t reqdev)
1053 dprintf("%s\n", __func__);
1059 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1061 struct dwmmc_softc *sc;
1063 sc = device_get_softc(brdev);
1066 while (sc->bus_busy)
1067 msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1074 dwmmc_release_host(device_t brdev, device_t reqdev)
1076 struct dwmmc_softc *sc;
1078 sc = device_get_softc(brdev);
1088 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1090 struct dwmmc_softc *sc;
1092 sc = device_get_softc(bus);
1097 case MMCBR_IVAR_BUS_MODE:
1098 *(int *)result = sc->host.ios.bus_mode;
1100 case MMCBR_IVAR_BUS_WIDTH:
1101 *(int *)result = sc->host.ios.bus_width;
1103 case MMCBR_IVAR_CHIP_SELECT:
1104 *(int *)result = sc->host.ios.chip_select;
1106 case MMCBR_IVAR_CLOCK:
1107 *(int *)result = sc->host.ios.clock;
1109 case MMCBR_IVAR_F_MIN:
1110 *(int *)result = sc->host.f_min;
1112 case MMCBR_IVAR_F_MAX:
1113 *(int *)result = sc->host.f_max;
1115 case MMCBR_IVAR_HOST_OCR:
1116 *(int *)result = sc->host.host_ocr;
1118 case MMCBR_IVAR_MODE:
1119 *(int *)result = sc->host.mode;
1121 case MMCBR_IVAR_OCR:
1122 *(int *)result = sc->host.ocr;
1124 case MMCBR_IVAR_POWER_MODE:
1125 *(int *)result = sc->host.ios.power_mode;
1127 case MMCBR_IVAR_VDD:
1128 *(int *)result = sc->host.ios.vdd;
1130 case MMCBR_IVAR_CAPS:
1131 sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
1132 *(int *)result = sc->host.caps;
1134 case MMCBR_IVAR_MAX_DATA:
1135 *(int *)result = DESC_COUNT;
1141 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1143 struct dwmmc_softc *sc;
1145 sc = device_get_softc(bus);
1150 case MMCBR_IVAR_BUS_MODE:
1151 sc->host.ios.bus_mode = value;
1153 case MMCBR_IVAR_BUS_WIDTH:
1154 sc->host.ios.bus_width = value;
1156 case MMCBR_IVAR_CHIP_SELECT:
1157 sc->host.ios.chip_select = value;
1159 case MMCBR_IVAR_CLOCK:
1160 sc->host.ios.clock = value;
1162 case MMCBR_IVAR_MODE:
1163 sc->host.mode = value;
1165 case MMCBR_IVAR_OCR:
1166 sc->host.ocr = value;
1168 case MMCBR_IVAR_POWER_MODE:
1169 sc->host.ios.power_mode = value;
1171 case MMCBR_IVAR_VDD:
1172 sc->host.ios.vdd = value;
1174 /* These are read-only */
1175 case MMCBR_IVAR_CAPS:
1176 case MMCBR_IVAR_HOST_OCR:
1177 case MMCBR_IVAR_F_MIN:
1178 case MMCBR_IVAR_F_MAX:
1179 case MMCBR_IVAR_MAX_DATA:
1185 static device_method_t dwmmc_methods[] = {
1186 DEVMETHOD(device_probe, dwmmc_probe),
1187 DEVMETHOD(device_attach, dwmmc_attach),
1190 DEVMETHOD(bus_read_ivar, dwmmc_read_ivar),
1191 DEVMETHOD(bus_write_ivar, dwmmc_write_ivar),
1194 DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios),
1195 DEVMETHOD(mmcbr_request, dwmmc_request),
1196 DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro),
1197 DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host),
1198 DEVMETHOD(mmcbr_release_host, dwmmc_release_host),
1203 static driver_t dwmmc_driver = {
1206 sizeof(struct dwmmc_softc),
1209 static devclass_t dwmmc_devclass;
1211 DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0);