]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/allwinner/a10_mmc.c
Add workaround for CESA MBUS windows with 4GB DRAM
[FreeBSD/FreeBSD.git] / sys / arm / allwinner / a10_mmc.c
1 /*-
2  * Copyright (c) 2013 Alexander Fedorov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/resource.h>
39 #include <sys/rman.h>
40 #include <sys/sysctl.h>
41
42 #include <machine/bus.h>
43
44 #include <dev/ofw/ofw_bus.h>
45 #include <dev/ofw/ofw_bus_subr.h>
46
47 #include <dev/mmc/bridge.h>
48 #include <dev/mmc/mmcbrvar.h>
49
50 #include <arm/allwinner/a10_mmc.h>
51 #include <dev/extres/clk/clk.h>
52 #include <dev/extres/hwreset/hwreset.h>
53
/* Indices into a10_res[] for the resources allocated at attach time. */
#define A10_MMC_MEMRES          0
#define A10_MMC_IRQRES          1
#define A10_MMC_RESSZ           2
/* Maximum DMA segments per transfer: one per page of MAXPHYS plus a spare. */
#define A10_MMC_DMA_SEGS                ((MAXPHYS / PAGE_SIZE) + 1)
/* Maximum byte count a single IDMA descriptor may cover. */
#define A10_MMC_DMA_MAX_SIZE    0x2000
/* FIFO water level register value written to A10_MMC_FWLR. */
#define A10_MMC_DMA_FTRGLEVEL   0x20070008
/* Polling iterations used when waiting for FIFO/DMA reset bits to clear. */
#define A10_MMC_RESET_RETRY     1000

/* Card identification phase clock rate (per the MMC/SD specifications). */
#define CARD_ID_FREQUENCY       400000

/* FDT compatible strings handled by this driver (ocd_data != 0 => match). */
static struct ofw_compat_data compat_data[] = {
        {"allwinner,sun4i-a10-mmc", 1},
        {"allwinner,sun5i-a13-mmc", 1},
        {"allwinner,sun7i-a20-mmc", 1},
        {NULL,             0}
};
70
/*
 * Per-instance driver state, allocated by newbus as the device softc.
 * Mutable fields are protected by a10_mtx unless set once at attach.
 */
struct a10_mmc_softc {
        device_t                a10_dev;        /* Our newbus device. */
        clk_t                   a10_clk_ahb;    /* AHB (bus interface) clock. */
        clk_t                   a10_clk_mmc;    /* Module (card) clock. */
        hwreset_t               a10_rst_ahb;    /* AHB reset line (optional). */
        int                     a10_bus_busy;   /* Host owned by a client. */
        int                     a10_resid;      /* 32-bit words transferred. */
        int                     a10_timeout;    /* Request timeout, seconds. */
        struct callout          a10_timeoutc;   /* Request watchdog callout. */
        struct mmc_host         a10_host;       /* State shared with mmc(4). */
        struct mmc_request *    a10_req;        /* In-flight request or NULL. */
        struct mtx              a10_mtx;        /* Protects softc + hardware. */
        struct resource *       a10_res[A10_MMC_RESSZ]; /* Memory and IRQ. */
        uint32_t                a10_intr;       /* Accumulated RISR status. */
        uint32_t                a10_intr_wait;  /* RISR bits ending a request. */
        void *                  a10_intrhand;   /* Interrupt handler cookie. */

        /* Fields required for DMA access. */
        bus_addr_t              a10_dma_desc_phys;      /* Descriptor ring PA. */
        bus_dmamap_t            a10_dma_map;            /* Descriptor ring map. */
        bus_dma_tag_t           a10_dma_tag;            /* Descriptor ring tag. */
        void *                  a10_dma_desc;           /* Descriptor ring VA. */
        bus_dmamap_t            a10_dma_buf_map;        /* Data buffer map. */
        bus_dma_tag_t           a10_dma_buf_tag;        /* Data buffer tag. */
        int                     a10_dma_map_err;        /* Deferred load error. */
};
97
/* Resource layout matching the A10_MMC_MEMRES/A10_MMC_IRQRES indices. */
static struct resource_spec a10_mmc_res_spec[] = {
        { SYS_RES_MEMORY,       0,      RF_ACTIVE },
        { SYS_RES_IRQ,          0,      RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,      0 }
};
103
/* Device interface. */
static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
/* Internal helpers. */
static int a10_mmc_setup_dma(struct a10_mmc_softc *);
static int a10_mmc_reset(struct a10_mmc_softc *);
static void a10_mmc_intr(void *);
static int a10_mmc_update_clock(struct a10_mmc_softc *, uint32_t);

/* mmcbr bridge interface. */
static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);

/* Locking and register access helpers. */
#define A10_MMC_LOCK(_sc)       mtx_lock(&(_sc)->a10_mtx)
#define A10_MMC_UNLOCK(_sc)     mtx_unlock(&(_sc)->a10_mtx)
#define A10_MMC_READ_4(_sc, _reg)                                       \
        bus_read_4((_sc)->a10_res[A10_MMC_MEMRES], _reg)
#define A10_MMC_WRITE_4(_sc, _reg, _value)                              \
        bus_write_4((_sc)->a10_res[A10_MMC_MEMRES], _reg, _value)
124
125 static int
126 a10_mmc_probe(device_t dev)
127 {
128
129         if (!ofw_bus_status_okay(dev))
130                 return (ENXIO);
131         if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
132                 return (ENXIO);
133
134         device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
135
136         return (BUS_PROBE_DEFAULT);
137 }
138
/*
 * Attach: allocate bus resources, install the interrupt handler, bring
 * the controller out of reset, enable its clocks, initialize the DMA
 * machinery and register the mmc(4) child bus.  Returns ENXIO on any
 * failure; the fail: path tears down everything set up before it.
 */
static int
a10_mmc_attach(device_t dev)
{
        device_t child;
        struct a10_mmc_softc *sc;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid_list *tree;
        uint32_t bus_width;
        phandle_t node;
        int error;

        node = ofw_bus_get_node(dev);
        sc = device_get_softc(dev);
        sc->a10_dev = dev;
        sc->a10_req = NULL;
        /* Register window and interrupt line. */
        if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
                device_printf(dev, "cannot allocate device resources\n");
                return (ENXIO);
        }
        if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
            &sc->a10_intrhand)) {
                bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
                device_printf(dev, "cannot setup interrupt handler\n");
                return (ENXIO);
        }
        /* The callout shares the driver mutex so the timeout handler
         * runs with the lock already held. */
        mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
            MTX_DEF);
        callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);

        /* De-assert reset (the "ahb" reset line is optional in the FDT). */
        if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->a10_rst_ahb) == 0) {
                error = hwreset_deassert(sc->a10_rst_ahb);
                if (error != 0) {
                        device_printf(dev, "cannot de-assert reset\n");
                        goto fail;
                }
        }

        /* Activate the module clock. */
        error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->a10_clk_ahb);
        if (error != 0) {
                device_printf(dev, "cannot get ahb clock\n");
                goto fail;
        }
        error = clk_enable(sc->a10_clk_ahb);
        if (error != 0) {
                device_printf(dev, "cannot enable ahb clock\n");
                goto fail;
        }
        error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->a10_clk_mmc);
        if (error != 0) {
                device_printf(dev, "cannot get mmc clock\n");
                goto fail;
        }
        /* Start at the 400 kHz card-identification rate. */
        error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
            CLK_SET_ROUND_DOWN);
        if (error != 0) {
                device_printf(dev, "cannot init mmc clock\n");
                goto fail;
        }
        error = clk_enable(sc->a10_clk_mmc);
        if (error != 0) {
                device_printf(dev, "cannot enable mmc clock\n");
                goto fail;
        }

        /* Export the request watchdog timeout as a tunable sysctl. */
        sc->a10_timeout = 10;
        ctx = device_get_sysctl_ctx(dev);
        tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
        SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
            &sc->a10_timeout, 0, "Request timeout in seconds");

        /* Hardware reset */
        A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 1);
        DELAY(100);
        A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 0);
        DELAY(500);

        /* Soft Reset controller. */
        if (a10_mmc_reset(sc) != 0) {
                device_printf(dev, "cannot reset the controller\n");
                goto fail;
        }

        if (a10_mmc_setup_dma(sc) != 0) {
                device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
                goto fail;
        }

        /* Default to a 4-bit bus when the FDT does not say otherwise. */
        if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
                bus_width = 4;

        /* Advertise host capabilities to the mmc(4) stack. */
        sc->a10_host.f_min = 400000;
        sc->a10_host.f_max = 52000000;
        sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
        sc->a10_host.mode = mode_sd;
        sc->a10_host.caps = MMC_CAP_HSPEED;
        if (bus_width >= 4)
                sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
        if (bus_width >= 8)
                sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;

        child = device_add_child(dev, "mmc", -1);
        if (child == NULL) {
                device_printf(dev, "attaching MMC bus failed!\n");
                goto fail;
        }
        if (device_probe_and_attach(child) != 0) {
                device_printf(dev, "attaching MMC child failed!\n");
                device_delete_child(dev, child);
                goto fail;
        }

        return (0);

fail:
        /* Undo everything done before the first goto fail above. */
        callout_drain(&sc->a10_timeoutc);
        mtx_destroy(&sc->a10_mtx);
        bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
        bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);

        return (ENXIO);
}
263
/*
 * Detach is not supported: DMA resources and the child mmc bus are never
 * torn down, so refuse with EBUSY to keep the driver loaded.
 */
static int
a10_mmc_detach(device_t dev)
{

        return (EBUSY);
}
270
271 static void
272 a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
273 {
274         struct a10_mmc_softc *sc;
275
276         sc = (struct a10_mmc_softc *)arg;
277         if (err) {
278                 sc->a10_dma_map_err = err;
279                 return;
280         }
281         sc->a10_dma_desc_phys = segs[0].ds_addr;
282 }
283
/*
 * Create the DMA tags and maps used for data transfers: a small ring of
 * IDMA descriptors (kept loaded for the driver's lifetime) plus a
 * reusable map for the data buffers of individual requests.
 * Returns 0 or a bus_dma error code.
 */
static int
a10_mmc_setup_dma(struct a10_mmc_softc *sc)
{
        int dma_desc_size, error;

        /* Allocate the DMA descriptor memory. */
        dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
        /* The controller is a 32-bit DMA master, hence the 32-bit limit. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
            A10_MMC_DMA_ALIGN, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
        if (error)
                return (error);
        error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
            BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
        if (error)
                return (error);

        /* a10_dma_desc_cb() captures the ring's physical address. */
        error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
            sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
        if (error)
                return (error);
        if (sc->a10_dma_map_err)
                return (sc->a10_dma_map_err);

        /* Create the DMA map for data transfers. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
            A10_MMC_DMA_ALIGN, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
            A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
            &sc->a10_dma_buf_tag);
        if (error)
                return (error);
        error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
            &sc->a10_dma_buf_map);
        if (error)
                return (error);

        return (0);
}
325
326 static void
327 a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
328 {
329         int i;
330         struct a10_mmc_dma_desc *dma_desc;
331         struct a10_mmc_softc *sc;
332
333         sc = (struct a10_mmc_softc *)arg;
334         sc->a10_dma_map_err = err;
335
336         if (err)
337                 return;
338
339         dma_desc = sc->a10_dma_desc;
340         for (i = 0; i < nsegs; i++) {
341                 dma_desc[i].buf_size = segs[i].ds_len;
342                 dma_desc[i].buf_addr = segs[i].ds_addr;
343                 dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
344                     A10_MMC_DMA_CONFIG_OWN;
345                 if (i == 0)
346                         dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
347                 if (i < (nsegs - 1)) {
348                         dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
349                         dma_desc[i].next = sc->a10_dma_desc_phys +
350                             ((i + 1) * sizeof(struct a10_mmc_dma_desc));
351                 } else {
352                         dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
353                             A10_MMC_DMA_CONFIG_ER;
354                         dma_desc[i].next = 0;
355                 }
356         }
357 }
358
359 static int
360 a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
361 {
362         bus_dmasync_op_t sync_op;
363         int error;
364         struct mmc_command *cmd;
365         uint32_t val;
366
367         cmd = sc->a10_req->cmd;
368         if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
369                 return (EFBIG);
370         error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
371             cmd->data->data, cmd->data->len, a10_dma_cb, sc, 0);
372         if (error)
373                 return (error);
374         if (sc->a10_dma_map_err)
375                 return (sc->a10_dma_map_err);
376
377         if (cmd->data->flags & MMC_DATA_WRITE)
378                 sync_op = BUS_DMASYNC_PREWRITE;
379         else
380                 sync_op = BUS_DMASYNC_PREREAD;
381         bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
382         bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);
383
384         /* Enable DMA */
385         val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
386         val &= ~A10_MMC_CTRL_FIFO_AC_MOD;
387         val |= A10_MMC_CTRL_DMA_ENB;
388         A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
389
390         /* Reset DMA */
391         val |= A10_MMC_CTRL_DMA_RST;
392         A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
393
394         A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_DMAC_IDMAC_SOFT_RST);
395         A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
396             A10_MMC_DMAC_IDMAC_IDMA_ON | A10_MMC_DMAC_IDMAC_FIX_BURST);
397
398         /* Enable RX or TX DMA interrupt */
399         if (cmd->data->flags & MMC_DATA_WRITE)
400                 val |= A10_MMC_IDST_TX_INT;
401         else
402                 val |= A10_MMC_IDST_RX_INT;
403         A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
404
405         /* Set DMA descritptor list address */
406         A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
407
408         /* FIFO trigger level */
409         A10_MMC_WRITE_4(sc, A10_MMC_FWLR, A10_MMC_DMA_FTRGLEVEL);
410
411         return (0);
412 }
413
414 static int
415 a10_mmc_reset(struct a10_mmc_softc *sc)
416 {
417         int timeout;
418
419         A10_MMC_WRITE_4(sc, A10_MMC_GCTL, A10_MMC_RESET);
420         timeout = 1000;
421         while (--timeout > 0) {
422                 if ((A10_MMC_READ_4(sc, A10_MMC_GCTL) & A10_MMC_RESET) == 0)
423                         break;
424                 DELAY(100);
425         }
426         if (timeout == 0)
427                 return (ETIMEDOUT);
428
429         /* Set the timeout. */
430         A10_MMC_WRITE_4(sc, A10_MMC_TMOR,
431             A10_MMC_TMOR_DTO_LMT_SHIFT(A10_MMC_TMOR_DTO_LMT_MASK) |
432             A10_MMC_TMOR_RTO_LMT_SHIFT(A10_MMC_TMOR_RTO_LMT_MASK));
433
434         /* Clear pending interrupts. */
435         A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
436         A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
437         /* Unmask interrupts. */
438         A10_MMC_WRITE_4(sc, A10_MMC_IMKR,
439             A10_MMC_INT_CMD_DONE | A10_MMC_INT_ERR_BIT |
440             A10_MMC_INT_DATA_OVER | A10_MMC_INT_AUTO_STOP_DONE);
441         /* Enable interrupts and AHB access. */
442         A10_MMC_WRITE_4(sc, A10_MMC_GCTL,
443             A10_MMC_READ_4(sc, A10_MMC_GCTL) | A10_MMC_CTRL_INT_ENB);
444
445         return (0);
446 }
447
448 static void
449 a10_mmc_req_done(struct a10_mmc_softc *sc)
450 {
451         struct mmc_command *cmd;
452         struct mmc_request *req;
453         uint32_t val, mask;
454         int retry;
455
456         cmd = sc->a10_req->cmd;
457         if (cmd->error != MMC_ERR_NONE) {
458                 /* Reset the FIFO and DMA engines. */
459                 mask = A10_MMC_CTRL_FIFO_RST | A10_MMC_CTRL_DMA_RST;
460                 val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
461                 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val | mask);
462
463                 retry = A10_MMC_RESET_RETRY;
464                 while (--retry > 0) {
465                         val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
466                         if ((val & mask) == 0)
467                                 break;
468                         DELAY(10);
469                 }
470                 if (retry == 0)
471                         device_printf(sc->a10_dev,
472                             "timeout resetting DMA/FIFO\n");
473                 a10_mmc_update_clock(sc, 1);
474         }
475
476         req = sc->a10_req;
477         callout_stop(&sc->a10_timeoutc);
478         sc->a10_req = NULL;
479         sc->a10_intr = 0;
480         sc->a10_resid = 0;
481         sc->a10_dma_map_err = 0;
482         sc->a10_intr_wait = 0;
483         req->done(req);
484 }
485
486 static void
487 a10_mmc_req_ok(struct a10_mmc_softc *sc)
488 {
489         int timeout;
490         struct mmc_command *cmd;
491         uint32_t status;
492
493         timeout = 1000;
494         while (--timeout > 0) {
495                 status = A10_MMC_READ_4(sc, A10_MMC_STAR);
496                 if ((status & A10_MMC_STAR_CARD_BUSY) == 0)
497                         break;
498                 DELAY(1000);
499         }
500         cmd = sc->a10_req->cmd;
501         if (timeout == 0) {
502                 cmd->error = MMC_ERR_FAILED;
503                 a10_mmc_req_done(sc);
504                 return;
505         }
506         if (cmd->flags & MMC_RSP_PRESENT) {
507                 if (cmd->flags & MMC_RSP_136) {
508                         cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
509                         cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
510                         cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
511                         cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
512                 } else
513                         cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
514         }
515         /* All data has been transferred ? */
516         if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
517                 cmd->error = MMC_ERR_FAILED;
518         a10_mmc_req_done(sc);
519 }
520
521 static void
522 a10_mmc_timeout(void *arg)
523 {
524         struct a10_mmc_softc *sc;
525
526         sc = (struct a10_mmc_softc *)arg;
527         if (sc->a10_req != NULL) {
528                 device_printf(sc->a10_dev, "controller timeout\n");
529                 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
530                 a10_mmc_req_done(sc);
531         } else
532                 device_printf(sc->a10_dev,
533                     "Spurious timeout - no active request\n");
534 }
535
/*
 * Interrupt handler: dispatch command/DMA status.  Error bits fail the
 * request immediately; otherwise raw status is accumulated in a10_intr
 * until all bits in a10_intr_wait are seen, at which point the request
 * is completed.  All handled status is acknowledged on the way out.
 */
static void
a10_mmc_intr(void *arg)
{
        bus_dmasync_op_t sync_op;
        struct a10_mmc_softc *sc;
        struct mmc_data *data;
        uint32_t idst, imask, rint;

        sc = (struct a10_mmc_softc *)arg;
        A10_MMC_LOCK(sc);
        rint = A10_MMC_READ_4(sc, A10_MMC_RISR);
        idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
        imask = A10_MMC_READ_4(sc, A10_MMC_IMKR);
        /* Nothing pending; the IRQ line is shared, so just bail out. */
        if (idst == 0 && imask == 0 && rint == 0) {
                A10_MMC_UNLOCK(sc);
                return;
        }
#ifdef DEBUG
        device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
            idst, imask, rint);
#endif
        if (sc->a10_req == NULL) {
                device_printf(sc->a10_dev,
                    "Spurious interrupt - no active request, rint: 0x%08X\n",
                    rint);
                goto end;
        }
        /* Command/response error: fail the request. */
        if (rint & A10_MMC_INT_ERR_BIT) {
                device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
                if (rint & A10_MMC_INT_RESP_TIMEOUT)
                        sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
                else
                        sc->a10_req->cmd->error = MMC_ERR_FAILED;
                a10_mmc_req_done(sc);
                goto end;
        }
        /* Internal DMA engine error: fail the request. */
        if (idst & A10_MMC_IDST_ERROR) {
                device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
                sc->a10_req->cmd->error = MMC_ERR_FAILED;
                a10_mmc_req_done(sc);
                goto end;
        }

        sc->a10_intr |= rint;
        data = sc->a10_req->cmd->data;
        /* DMA finished: sync and unload the buffer, record words moved. */
        if (data != NULL && (idst & A10_MMC_IDST_COMPLETE) != 0) {
                if (data->flags & MMC_DATA_WRITE)
                        sync_op = BUS_DMASYNC_POSTWRITE;
                else
                        sync_op = BUS_DMASYNC_POSTREAD;
                bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
                    sync_op);
                bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
                sc->a10_resid = data->len >> 2;
        }
        /* Request finishes once every awaited status bit has arrived. */
        if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
                a10_mmc_req_ok(sc);

end:
        /* Acknowledge everything we observed this pass. */
        A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
        A10_MMC_WRITE_4(sc, A10_MMC_RISR, rint);
        A10_MMC_UNLOCK(sc);
}
601
602 static int
603 a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
604 {
605         int blksz;
606         struct a10_mmc_softc *sc;
607         struct mmc_command *cmd;
608         uint32_t cmdreg;
609         int err;
610
611         sc = device_get_softc(bus);
612         A10_MMC_LOCK(sc);
613         if (sc->a10_req) {
614                 A10_MMC_UNLOCK(sc);
615                 return (EBUSY);
616         }
617         sc->a10_req = req;
618         cmd = req->cmd;
619         cmdreg = A10_MMC_CMDR_LOAD;
620         if (cmd->opcode == MMC_GO_IDLE_STATE)
621                 cmdreg |= A10_MMC_CMDR_SEND_INIT_SEQ;
622         if (cmd->flags & MMC_RSP_PRESENT)
623                 cmdreg |= A10_MMC_CMDR_RESP_RCV;
624         if (cmd->flags & MMC_RSP_136)
625                 cmdreg |= A10_MMC_CMDR_LONG_RESP;
626         if (cmd->flags & MMC_RSP_CRC)
627                 cmdreg |= A10_MMC_CMDR_CHK_RESP_CRC;
628
629         sc->a10_intr = 0;
630         sc->a10_resid = 0;
631         sc->a10_intr_wait = A10_MMC_INT_CMD_DONE;
632         cmd->error = MMC_ERR_NONE;
633         if (cmd->data != NULL) {
634                 sc->a10_intr_wait |= A10_MMC_INT_DATA_OVER;
635                 cmdreg |= A10_MMC_CMDR_DATA_TRANS | A10_MMC_CMDR_WAIT_PRE_OVER;
636                 if (cmd->data->flags & MMC_DATA_MULTI) {
637                         cmdreg |= A10_MMC_CMDR_STOP_CMD_FLAG;
638                         sc->a10_intr_wait |= A10_MMC_INT_AUTO_STOP_DONE;
639                 }
640                 if (cmd->data->flags & MMC_DATA_WRITE)
641                         cmdreg |= A10_MMC_CMDR_DIR_WRITE;
642                 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
643                 A10_MMC_WRITE_4(sc, A10_MMC_BKSR, blksz);
644                 A10_MMC_WRITE_4(sc, A10_MMC_BYCR, cmd->data->len);
645
646                 err = a10_mmc_prepare_dma(sc);
647                 if (err != 0)
648                         device_printf(sc->a10_dev, "prepare_dma failed: %d\n", err);
649         }
650
651         A10_MMC_WRITE_4(sc, A10_MMC_CAGR, cmd->arg);
652         A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
653         callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
654             a10_mmc_timeout, sc);
655         A10_MMC_UNLOCK(sc);
656
657         return (0);
658 }
659
660 static int
661 a10_mmc_read_ivar(device_t bus, device_t child, int which,
662     uintptr_t *result)
663 {
664         struct a10_mmc_softc *sc;
665
666         sc = device_get_softc(bus);
667         switch (which) {
668         default:
669                 return (EINVAL);
670         case MMCBR_IVAR_BUS_MODE:
671                 *(int *)result = sc->a10_host.ios.bus_mode;
672                 break;
673         case MMCBR_IVAR_BUS_WIDTH:
674                 *(int *)result = sc->a10_host.ios.bus_width;
675                 break;
676         case MMCBR_IVAR_CHIP_SELECT:
677                 *(int *)result = sc->a10_host.ios.chip_select;
678                 break;
679         case MMCBR_IVAR_CLOCK:
680                 *(int *)result = sc->a10_host.ios.clock;
681                 break;
682         case MMCBR_IVAR_F_MIN:
683                 *(int *)result = sc->a10_host.f_min;
684                 break;
685         case MMCBR_IVAR_F_MAX:
686                 *(int *)result = sc->a10_host.f_max;
687                 break;
688         case MMCBR_IVAR_HOST_OCR:
689                 *(int *)result = sc->a10_host.host_ocr;
690                 break;
691         case MMCBR_IVAR_MODE:
692                 *(int *)result = sc->a10_host.mode;
693                 break;
694         case MMCBR_IVAR_OCR:
695                 *(int *)result = sc->a10_host.ocr;
696                 break;
697         case MMCBR_IVAR_POWER_MODE:
698                 *(int *)result = sc->a10_host.ios.power_mode;
699                 break;
700         case MMCBR_IVAR_VDD:
701                 *(int *)result = sc->a10_host.ios.vdd;
702                 break;
703         case MMCBR_IVAR_CAPS:
704                 *(int *)result = sc->a10_host.caps;
705                 break;
706         case MMCBR_IVAR_MAX_DATA:
707                 *(int *)result = 65535;
708                 break;
709         }
710
711         return (0);
712 }
713
714 static int
715 a10_mmc_write_ivar(device_t bus, device_t child, int which,
716     uintptr_t value)
717 {
718         struct a10_mmc_softc *sc;
719
720         sc = device_get_softc(bus);
721         switch (which) {
722         default:
723                 return (EINVAL);
724         case MMCBR_IVAR_BUS_MODE:
725                 sc->a10_host.ios.bus_mode = value;
726                 break;
727         case MMCBR_IVAR_BUS_WIDTH:
728                 sc->a10_host.ios.bus_width = value;
729                 break;
730         case MMCBR_IVAR_CHIP_SELECT:
731                 sc->a10_host.ios.chip_select = value;
732                 break;
733         case MMCBR_IVAR_CLOCK:
734                 sc->a10_host.ios.clock = value;
735                 break;
736         case MMCBR_IVAR_MODE:
737                 sc->a10_host.mode = value;
738                 break;
739         case MMCBR_IVAR_OCR:
740                 sc->a10_host.ocr = value;
741                 break;
742         case MMCBR_IVAR_POWER_MODE:
743                 sc->a10_host.ios.power_mode = value;
744                 break;
745         case MMCBR_IVAR_VDD:
746                 sc->a10_host.ios.vdd = value;
747                 break;
748         /* These are read-only */
749         case MMCBR_IVAR_CAPS:
750         case MMCBR_IVAR_HOST_OCR:
751         case MMCBR_IVAR_F_MIN:
752         case MMCBR_IVAR_F_MAX:
753         case MMCBR_IVAR_MAX_DATA:
754                 return (EINVAL);
755         }
756
757         return (0);
758 }
759
760 static int
761 a10_mmc_update_clock(struct a10_mmc_softc *sc, uint32_t clkon)
762 {
763         uint32_t cmdreg;
764         int retry;
765         uint32_t ckcr;
766
767         ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
768         ckcr &= ~(A10_MMC_CKCR_CCLK_ENB | A10_MMC_CKCR_CCLK_CTRL);
769
770         if (clkon)
771                 ckcr |= A10_MMC_CKCR_CCLK_ENB;
772
773         A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);
774
775         cmdreg = A10_MMC_CMDR_LOAD | A10_MMC_CMDR_PRG_CLK |
776             A10_MMC_CMDR_WAIT_PRE_OVER;
777         A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
778         retry = 0xfffff;
779         while (--retry > 0) {
780                 if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_CMDR_LOAD) == 0) {
781                         A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
782                         return (0);
783                 }
784                 DELAY(10);
785         }
786         A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
787         device_printf(sc->a10_dev, "timeout updating clock\n");
788
789         return (ETIMEDOUT);
790 }
791
792 static int
793 a10_mmc_update_ios(device_t bus, device_t child)
794 {
795         int error;
796         struct a10_mmc_softc *sc;
797         struct mmc_ios *ios;
798         uint32_t ckcr;
799
800         sc = device_get_softc(bus);
801
802         ios = &sc->a10_host.ios;
803
804         /* Set the bus width. */
805         switch (ios->bus_width) {
806         case bus_width_1:
807                 A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR1);
808                 break;
809         case bus_width_4:
810                 A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR4);
811                 break;
812         case bus_width_8:
813                 A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR8);
814                 break;
815         }
816
817         if (ios->clock) {
818
819                 /* Disable clock */
820                 error = a10_mmc_update_clock(sc, 0);
821                 if (error != 0)
822                         return (error);
823
824                 /* Reset the divider. */
825                 ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
826                 ckcr &= ~A10_MMC_CKCR_CCLK_DIV;
827                 A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);
828
829                 /* Set the MMC clock. */
830                 error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
831                     CLK_SET_ROUND_DOWN);
832                 if (error != 0) {
833                         device_printf(sc->a10_dev,
834                             "failed to set frequency to %u Hz: %d\n",
835                             ios->clock, error);
836                         return (error);
837                 }
838
839                 /* Enable clock. */
840                 error = a10_mmc_update_clock(sc, 1);
841                 if (error != 0)
842                         return (error);
843         }
844
845
846         return (0);
847 }
848
/*
 * mmcbr_get_ro method: no write-protect detection is wired up, so the
 * card is always reported writable.
 */
static int
a10_mmc_get_ro(device_t bus, device_t child)
{

        return (0);
}
855
856 static int
857 a10_mmc_acquire_host(device_t bus, device_t child)
858 {
859         struct a10_mmc_softc *sc;
860         int error;
861
862         sc = device_get_softc(bus);
863         A10_MMC_LOCK(sc);
864         while (sc->a10_bus_busy) {
865                 error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
866                 if (error != 0) {
867                         A10_MMC_UNLOCK(sc);
868                         return (error);
869                 }
870         }
871         sc->a10_bus_busy++;
872         A10_MMC_UNLOCK(sc);
873
874         return (0);
875 }
876
877 static int
878 a10_mmc_release_host(device_t bus, device_t child)
879 {
880         struct a10_mmc_softc *sc;
881
882         sc = device_get_softc(bus);
883         A10_MMC_LOCK(sc);
884         sc->a10_bus_busy--;
885         wakeup(sc);
886         A10_MMC_UNLOCK(sc);
887
888         return (0);
889 }
890
/* newbus method table binding this driver's entry points. */
static device_method_t a10_mmc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         a10_mmc_probe),
        DEVMETHOD(device_attach,        a10_mmc_attach),
        DEVMETHOD(device_detach,        a10_mmc_detach),

        /* Bus interface */
        DEVMETHOD(bus_read_ivar,        a10_mmc_read_ivar),
        DEVMETHOD(bus_write_ivar,       a10_mmc_write_ivar),

        /* MMC bridge interface */
        DEVMETHOD(mmcbr_update_ios,     a10_mmc_update_ios),
        DEVMETHOD(mmcbr_request,        a10_mmc_request),
        DEVMETHOD(mmcbr_get_ro,         a10_mmc_get_ro),
        DEVMETHOD(mmcbr_acquire_host,   a10_mmc_acquire_host),
        DEVMETHOD(mmcbr_release_host,   a10_mmc_release_host),

        DEVMETHOD_END
};

static devclass_t a10_mmc_devclass;

static driver_t a10_mmc_driver = {
        "a10_mmc",
        a10_mmc_methods,
        sizeof(struct a10_mmc_softc),
};

/* Attach below simplebus; the mmc(4) stack probes as our child bus. */
DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, NULL,
    NULL);
MMC_DECLARE_BRIDGE(a10_mmc);