/*-
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <arm/allwinner/a10_clk.h>
#include <arm/allwinner/a10_mmc.h>

#define A10_MMC_MEMRES          0
#define A10_MMC_IRQRES          1
#define A10_MMC_RESSZ           2
#define A10_MMC_DMA_SEGS        16
#define A10_MMC_DMA_MAX_SIZE    0x2000
#define A10_MMC_DMA_FTRGLEVEL   0x20070008

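/*
 * Data transfers use the controller's internal DMA engine by default.
 * Setting the hw.a10.mmc.pio_mode tunable to a non-zero value forces PIO
 * transfers through the FIFO instead; the driver also falls back to PIO
 * on its own if DMA setup fails at attach time.
 */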
static int a10_mmc_pio_mode = 0;

TUNABLE_INT("hw.a10.mmc.pio_mode", &a10_mmc_pio_mode);

struct a10_mmc_softc {
        bus_space_handle_t      a10_bsh;
        bus_space_tag_t         a10_bst;
        device_t                a10_dev;
        int                     a10_bus_busy;
        int                     a10_id;
        int                     a10_resid;
        int                     a10_timeout;
        struct callout          a10_timeoutc;
        struct mmc_host         a10_host;
        struct mmc_request *    a10_req;
        struct mtx              a10_mtx;
        struct resource *       a10_res[A10_MMC_RESSZ];
        uint32_t                a10_intr;
        uint32_t                a10_intr_wait;
        void *                  a10_intrhand;

        /* Fields required for DMA access. */
        bus_addr_t              a10_dma_desc_phys;
        bus_dmamap_t            a10_dma_map;
        bus_dma_tag_t           a10_dma_tag;
        void *                  a10_dma_desc;
        bus_dmamap_t            a10_dma_buf_map;
        bus_dma_tag_t           a10_dma_buf_tag;
        int                     a10_dma_inuse;
        int                     a10_dma_map_err;
};

static struct resource_spec a10_mmc_res_spec[] = {
        { SYS_RES_MEMORY,       0,      RF_ACTIVE },
        { SYS_RES_IRQ,          0,      RF_ACTIVE | RF_SHAREABLE },
        { -1,                   0,      0 }
};

static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
static int a10_mmc_setup_dma(struct a10_mmc_softc *);
static int a10_mmc_reset(struct a10_mmc_softc *);
static void a10_mmc_intr(void *);
static int a10_mmc_update_clock(struct a10_mmc_softc *);

static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);

#define A10_MMC_LOCK(_sc)       mtx_lock(&(_sc)->a10_mtx)
#define A10_MMC_UNLOCK(_sc)     mtx_unlock(&(_sc)->a10_mtx)
#define A10_MMC_READ_4(_sc, _reg)                                       \
        bus_space_read_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg)
#define A10_MMC_WRITE_4(_sc, _reg, _value)                              \
        bus_space_write_4((_sc)->a10_bst, (_sc)->a10_bsh, _reg, _value)

static int
a10_mmc_probe(device_t dev)
{

        if (!ofw_bus_status_okay(dev))
                return (ENXIO);
        if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-mmc"))
                return (ENXIO);
        device_set_desc(dev, "Allwinner Integrated MMC/SD controller");

        return (BUS_PROBE_DEFAULT);
}

static int
a10_mmc_attach(device_t dev)
{
        device_t child;
        struct a10_mmc_softc *sc;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid_list *tree;

        sc = device_get_softc(dev);
        sc->a10_dev = dev;
        sc->a10_req = NULL;
        sc->a10_id = device_get_unit(dev);
        if (sc->a10_id > 3) {
                device_printf(dev, "only 4 hosts are supported (0-3)\n");
                return (ENXIO);
        }
        if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
                device_printf(dev, "cannot allocate device resources\n");
                return (ENXIO);
        }
        sc->a10_bst = rman_get_bustag(sc->a10_res[A10_MMC_MEMRES]);
        sc->a10_bsh = rman_get_bushandle(sc->a10_res[A10_MMC_MEMRES]);
        if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
            &sc->a10_intrhand)) {
                bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
                device_printf(dev, "cannot setup interrupt handler\n");
                return (ENXIO);
        }

        /* Activate the module clock. */
        if (a10_clk_mmc_activate(sc->a10_id) != 0) {
                bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES],
                    sc->a10_intrhand);
                bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
                device_printf(dev, "cannot activate mmc clock\n");
                return (ENXIO);
        }

        sc->a10_timeout = 10;
        ctx = device_get_sysctl_ctx(dev);
        tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
        SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
            &sc->a10_timeout, 0, "Request timeout in seconds");
        mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
            MTX_DEF);
        callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);

        /* Reset controller. */
        if (a10_mmc_reset(sc) != 0) {
                device_printf(dev, "cannot reset the controller\n");
                goto fail;
        }

        if (a10_mmc_pio_mode == 0 && a10_mmc_setup_dma(sc) != 0) {
                device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
                a10_mmc_pio_mode = 1;
        }
        if (bootverbose)
                device_printf(sc->a10_dev, "DMA status: %s\n",
                    a10_mmc_pio_mode ? "disabled" : "enabled");

        sc->a10_host.f_min = 400000;
        sc->a10_host.f_max = 52000000;
        sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
        sc->a10_host.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_HSPEED;
        sc->a10_host.mode = mode_sd;

        child = device_add_child(dev, "mmc", -1);
        if (child == NULL) {
                device_printf(dev, "attaching MMC bus failed!\n");
                goto fail;
        }
        if (device_probe_and_attach(child) != 0) {
                device_printf(dev, "attaching MMC child failed!\n");
                device_delete_child(dev, child);
                goto fail;
        }

        return (0);

fail:
        callout_drain(&sc->a10_timeoutc);
        mtx_destroy(&sc->a10_mtx);
        bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
        bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);

        return (ENXIO);
}

static int
a10_mmc_detach(device_t dev)
{

        return (EBUSY);
}

static void
a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
        struct a10_mmc_softc *sc;

        sc = (struct a10_mmc_softc *)arg;
        if (err) {
                sc->a10_dma_map_err = err;
                return;
        }
        sc->a10_dma_desc_phys = segs[0].ds_addr;
}

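/*
 * Allocate the bus_dma(9) resources needed for DMA transfers: a coherent
 * buffer holding the chain of A10_MMC_DMA_SEGS hardware descriptors, loaded
 * once here so its physical address can later be programmed into the DLBA
 * register, and a separate tag/map pair used to load the data buffer of
 * each request (at most A10_MMC_DMA_MAX_SIZE bytes per segment).
 */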
static int
a10_mmc_setup_dma(struct a10_mmc_softc *sc)
{
        int dma_desc_size, error;

        /* Allocate the DMA descriptor memory. */
        dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
        error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
        if (error)
                return (error);
        error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
            BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
        if (error)
                return (error);

        error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
            sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
        if (error)
                return (error);
        if (sc->a10_dma_map_err)
                return (sc->a10_dma_map_err);

        /* Create the DMA map for data transfers. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev), 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
            A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
            &sc->a10_dma_buf_tag);
        if (error)
                return (error);
        error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
            &sc->a10_dma_buf_map);
        if (error)
                return (error);

        return (0);
}

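/*
 * bus_dmamap_load(9) callback for data buffers: translate the segment list
 * into the hardware descriptor chain.  Every descriptor is handed to the
 * IDMA engine (OWN); the first one is marked first (FD), the last one is
 * marked last/end-of-ring (LD | ER), and intermediate descriptors suppress
 * their completion interrupt (DIC) and link to the physical address of the
 * next descriptor.
 */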
static void
a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
        int i;
        struct a10_mmc_dma_desc *dma_desc;
        struct a10_mmc_softc *sc;

        sc = (struct a10_mmc_softc *)arg;
        sc->a10_dma_map_err = err;
        dma_desc = sc->a10_dma_desc;
        /* Note nsegs is guaranteed to be zero if err is non-zero. */
        for (i = 0; i < nsegs; i++) {
                dma_desc[i].buf_size = segs[i].ds_len;
                dma_desc[i].buf_addr = segs[i].ds_addr;
                dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
                    A10_MMC_DMA_CONFIG_OWN;
                if (i == 0)
                        dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
                if (i < (nsegs - 1)) {
                        dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
                        dma_desc[i].next = sc->a10_dma_desc_phys +
                            ((i + 1) * sizeof(struct a10_mmc_dma_desc));
                } else {
                        dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
                            A10_MMC_DMA_CONFIG_ER;
                        dma_desc[i].next = 0;
                }
        }
}

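/*
 * Prepare a request for a DMA transfer: load the data buffer, sync the
 * maps, mask the PIO FIFO interrupts, switch the controller from AHB (PIO)
 * access to internal DMA, reset and start the IDMA engine, unmask the
 * receive or transmit descriptor interrupt and point the controller at the
 * descriptor chain and FIFO threshold.
 */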
static int
a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
{
        bus_dmasync_op_t sync_op;
        int error;
        struct mmc_command *cmd;
        uint32_t val;

        cmd = sc->a10_req->cmd;
        if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
                return (EFBIG);
        error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
            cmd->data->data, cmd->data->len, a10_dma_cb, sc, BUS_DMA_NOWAIT);
        if (error)
                return (error);
        if (sc->a10_dma_map_err)
                return (sc->a10_dma_map_err);

        sc->a10_dma_inuse = 1;
        if (cmd->data->flags & MMC_DATA_WRITE)
                sync_op = BUS_DMASYNC_PREWRITE;
        else
                sync_op = BUS_DMASYNC_PREREAD;
        bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
        bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);

        val = A10_MMC_READ_4(sc, A10_MMC_IMASK);
        val &= ~(A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ);
        A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val);
        val = A10_MMC_READ_4(sc, A10_MMC_GCTRL);
        val &= ~A10_MMC_ACCESS_BY_AHB;
        val |= A10_MMC_DMA_ENABLE;
        A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
        val |= A10_MMC_DMA_RESET;
        A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
        A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_IDMAC_SOFT_RST);
        A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
            A10_MMC_IDMAC_IDMA_ON | A10_MMC_IDMAC_FIX_BURST);
        val = A10_MMC_READ_4(sc, A10_MMC_IDIE);
        val &= ~(A10_MMC_IDMAC_RECEIVE_INT | A10_MMC_IDMAC_TRANSMIT_INT);
        if (cmd->data->flags & MMC_DATA_WRITE)
                val |= A10_MMC_IDMAC_TRANSMIT_INT;
        else
                val |= A10_MMC_IDMAC_RECEIVE_INT;
        A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
        A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
        A10_MMC_WRITE_4(sc, A10_MMC_FTRGL, A10_MMC_DMA_FTRGLEVEL);

        return (0);
}

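/*
 * Soft-reset the controller and restore the baseline setup: a maximal
 * data/response timeout, cleared interrupt status, the interrupt mask used
 * for command/data completion and error reporting, and the global
 * interrupt enable bit.
 */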
static int
a10_mmc_reset(struct a10_mmc_softc *sc)
{
        int timeout;

        A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
            A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_RESET);
        timeout = 1000;
        while (--timeout > 0) {
                if ((A10_MMC_READ_4(sc, A10_MMC_GCTRL) & A10_MMC_RESET) == 0)
                        break;
                DELAY(100);
        }
        if (timeout == 0)
                return (ETIMEDOUT);

        /* Set the timeout. */
        A10_MMC_WRITE_4(sc, A10_MMC_TIMEOUT, 0xffffffff);

        /* Clear pending interrupts. */
        A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
        A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
        /* Unmask interrupts. */
        A10_MMC_WRITE_4(sc, A10_MMC_IMASK,
            A10_MMC_CMD_DONE | A10_MMC_INT_ERR_BIT |
            A10_MMC_DATA_OVER | A10_MMC_AUTOCMD_DONE);
        /* Enable interrupts and AHB access. */
        A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
            A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_INT_ENABLE);

        return (0);
}

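/*
 * Finish the current request.  On error the controller is reset and the
 * card clock reprogrammed; when the transfer did not use DMA the FIFO is
 * reset as well.  The timeout callout is stopped and the per-request state
 * cleared before the MMC layer's completion callback runs.
 */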
static void
a10_mmc_req_done(struct a10_mmc_softc *sc)
{
        struct mmc_command *cmd;
        struct mmc_request *req;

        cmd = sc->a10_req->cmd;
        if (cmd->error != MMC_ERR_NONE) {
                /* Reset the controller. */
                a10_mmc_reset(sc);
                a10_mmc_update_clock(sc);
        }
        if (sc->a10_dma_inuse == 0) {
                /* Reset the FIFO. */
                A10_MMC_WRITE_4(sc, A10_MMC_GCTRL,
                    A10_MMC_READ_4(sc, A10_MMC_GCTRL) | A10_MMC_FIFO_RESET);
        }

        req = sc->a10_req;
        callout_stop(&sc->a10_timeoutc);
        sc->a10_req = NULL;
        sc->a10_intr = 0;
        sc->a10_resid = 0;
        sc->a10_dma_inuse = 0;
        sc->a10_dma_map_err = 0;
        sc->a10_intr_wait = 0;
        req->done(req);
}

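/*
 * Complete a successful request: wait for the card to deassert its busy
 * indication, copy the response registers into the command (the four words
 * of a 136-bit response are read in reverse register order) and flag an
 * error if a data transfer finished short of the requested length.
 */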
static void
a10_mmc_req_ok(struct a10_mmc_softc *sc)
{
        int timeout;
        struct mmc_command *cmd;
        uint32_t status;

        timeout = 1000;
        while (--timeout > 0) {
                status = A10_MMC_READ_4(sc, A10_MMC_STAS);
                if ((status & A10_MMC_CARD_DATA_BUSY) == 0)
                        break;
                DELAY(1000);
        }
        cmd = sc->a10_req->cmd;
        if (timeout == 0) {
                cmd->error = MMC_ERR_FAILED;
                a10_mmc_req_done(sc);
                return;
        }
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
                        cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
                        cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
                        cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
                } else
                        cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
        }
        /* Has all the data been transferred? */
        if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
                cmd->error = MMC_ERR_FAILED;
        a10_mmc_req_done(sc);
}

static void
a10_mmc_timeout(void *arg)
{
        struct a10_mmc_softc *sc;

        sc = (struct a10_mmc_softc *)arg;
        if (sc->a10_req != NULL) {
                device_printf(sc->a10_dev, "controller timeout\n");
                sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
                a10_mmc_req_done(sc);
        } else
                device_printf(sc->a10_dev,
                    "Spurious timeout - no active request\n");
}

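/*
 * PIO transfer through the controller FIFO.  sc->a10_resid counts the
 * 32-bit words already transferred, so the transfer resumes where the
 * previous FIFO interrupt left off; returns non-zero when the FIFO becomes
 * full (write) or empty (read) before the whole buffer has been moved.
 */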
static int
a10_mmc_pio_transfer(struct a10_mmc_softc *sc, struct mmc_data *data)
{
        int i, write;
        uint32_t bit, *buf;

        buf = (uint32_t *)data->data;
        write = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
        bit = write ? A10_MMC_FIFO_FULL : A10_MMC_FIFO_EMPTY;
        for (i = sc->a10_resid; i < (data->len >> 2); i++) {
                if ((A10_MMC_READ_4(sc, A10_MMC_STAS) & bit))
                        return (1);
                if (write)
                        A10_MMC_WRITE_4(sc, A10_MMC_FIFO, buf[i]);
                else
                        buf[i] = A10_MMC_READ_4(sc, A10_MMC_FIFO);
                sc->a10_resid = i + 1;
        }

        return (0);
}

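/*
 * Interrupt handler.  Reads and later acknowledges both the controller
 * (RINTR) and IDMA (IDST) status, completes the request with an error on
 * command or DMA failures, otherwise finishes the DMA data phase (syncing
 * and unloading the buffer map) or continues the PIO transfer, and calls
 * a10_mmc_req_ok() once every interrupt the request is waiting for has
 * been accumulated in sc->a10_intr.
 */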
static void
a10_mmc_intr(void *arg)
{
        bus_dmasync_op_t sync_op;
        struct a10_mmc_softc *sc;
        struct mmc_data *data;
        uint32_t idst, imask, rint;

        sc = (struct a10_mmc_softc *)arg;
        A10_MMC_LOCK(sc);
        rint = A10_MMC_READ_4(sc, A10_MMC_RINTR);
        idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
        imask = A10_MMC_READ_4(sc, A10_MMC_IMASK);
        if (idst == 0 && imask == 0 && rint == 0) {
                A10_MMC_UNLOCK(sc);
                return;
        }
#ifdef DEBUG
        device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
            idst, imask, rint);
#endif
        if (sc->a10_req == NULL) {
                device_printf(sc->a10_dev,
                    "Spurious interrupt - no active request, rint: 0x%08X\n",
                    rint);
                goto end;
        }
        if (rint & A10_MMC_INT_ERR_BIT) {
                device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
                if (rint & A10_MMC_RESP_TIMEOUT)
                        sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
                else
                        sc->a10_req->cmd->error = MMC_ERR_FAILED;
                a10_mmc_req_done(sc);
                goto end;
        }
        if (idst & A10_MMC_IDMAC_ERROR) {
                device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
                sc->a10_req->cmd->error = MMC_ERR_FAILED;
                a10_mmc_req_done(sc);
                goto end;
        }

        sc->a10_intr |= rint;
        data = sc->a10_req->cmd->data;
        if (data != NULL && sc->a10_dma_inuse == 1 &&
            (idst & A10_MMC_IDMAC_COMPLETE)) {
                if (data->flags & MMC_DATA_WRITE)
                        sync_op = BUS_DMASYNC_POSTWRITE;
                else
                        sync_op = BUS_DMASYNC_POSTREAD;
                bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
                    sync_op);
                bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
                sc->a10_resid = data->len >> 2;
        } else if (data != NULL && sc->a10_dma_inuse == 0 &&
            (rint & (A10_MMC_DATA_OVER | A10_MMC_RX_DATA_REQ |
            A10_MMC_TX_DATA_REQ)) != 0)
                a10_mmc_pio_transfer(sc, data);
        if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
                a10_mmc_req_ok(sc);

end:
        A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
        A10_MMC_WRITE_4(sc, A10_MMC_RINTR, rint);
        A10_MMC_UNLOCK(sc);
}

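/*
 * Submit a request from the MMC layer.  Build the command register flags
 * from the mmc_command, program the block size and byte count for data
 * transfers, set up DMA (or fall back to PIO with the FIFO interrupts
 * unmasked), write the argument and command registers to start the
 * transfer and arm the request timeout callout.
 */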
static int
a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
        int blksz;
        struct a10_mmc_softc *sc;
        struct mmc_command *cmd;
        uint32_t cmdreg, val;

        sc = device_get_softc(bus);
        A10_MMC_LOCK(sc);
        if (sc->a10_req) {
                A10_MMC_UNLOCK(sc);
                return (EBUSY);
        }
        sc->a10_req = req;
        cmd = req->cmd;
        cmdreg = A10_MMC_START;
        if (cmd->opcode == MMC_GO_IDLE_STATE)
                cmdreg |= A10_MMC_SEND_INIT_SEQ;
        if (cmd->flags & MMC_RSP_PRESENT)
                cmdreg |= A10_MMC_RESP_EXP;
        if (cmd->flags & MMC_RSP_136)
                cmdreg |= A10_MMC_LONG_RESP;
        if (cmd->flags & MMC_RSP_CRC)
                cmdreg |= A10_MMC_CHECK_RESP_CRC;

        sc->a10_intr = 0;
        sc->a10_resid = 0;
        sc->a10_intr_wait = A10_MMC_CMD_DONE;
        cmd->error = MMC_ERR_NONE;
        if (cmd->data != NULL) {
                sc->a10_intr_wait |= A10_MMC_DATA_OVER;
                cmdreg |= A10_MMC_DATA_EXP | A10_MMC_WAIT_PREOVER;
                if (cmd->data->flags & MMC_DATA_MULTI) {
                        cmdreg |= A10_MMC_SEND_AUTOSTOP;
                        sc->a10_intr_wait |= A10_MMC_AUTOCMD_DONE;
                }
                if (cmd->data->flags & MMC_DATA_WRITE)
                        cmdreg |= A10_MMC_WRITE;
                blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
                A10_MMC_WRITE_4(sc, A10_MMC_BLKSZ, blksz);
                A10_MMC_WRITE_4(sc, A10_MMC_BCNTR, cmd->data->len);

                if (a10_mmc_pio_mode == 0)
                        a10_mmc_prepare_dma(sc);
                /* Enable PIO access if sc->a10_dma_inuse is not set. */
                if (sc->a10_dma_inuse == 0) {
                        val = A10_MMC_READ_4(sc, A10_MMC_GCTRL);
                        val &= ~A10_MMC_DMA_ENABLE;
                        val |= A10_MMC_ACCESS_BY_AHB;
                        A10_MMC_WRITE_4(sc, A10_MMC_GCTRL, val);
                        val = A10_MMC_READ_4(sc, A10_MMC_IMASK);
                        val |= A10_MMC_RX_DATA_REQ | A10_MMC_TX_DATA_REQ;
                        A10_MMC_WRITE_4(sc, A10_MMC_IMASK, val);
                }
        }

        A10_MMC_WRITE_4(sc, A10_MMC_CARG, cmd->arg);
        A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
        callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
            a10_mmc_timeout, sc);
        A10_MMC_UNLOCK(sc);

        return (0);
}

static int
a10_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
        struct a10_mmc_softc *sc;

        sc = device_get_softc(bus);
        switch (which) {
        default:
                return (EINVAL);
        case MMCBR_IVAR_BUS_MODE:
                *(int *)result = sc->a10_host.ios.bus_mode;
                break;
        case MMCBR_IVAR_BUS_WIDTH:
                *(int *)result = sc->a10_host.ios.bus_width;
                break;
        case MMCBR_IVAR_CHIP_SELECT:
                *(int *)result = sc->a10_host.ios.chip_select;
                break;
        case MMCBR_IVAR_CLOCK:
                *(int *)result = sc->a10_host.ios.clock;
                break;
        case MMCBR_IVAR_F_MIN:
                *(int *)result = sc->a10_host.f_min;
                break;
        case MMCBR_IVAR_F_MAX:
                *(int *)result = sc->a10_host.f_max;
                break;
        case MMCBR_IVAR_HOST_OCR:
                *(int *)result = sc->a10_host.host_ocr;
                break;
        case MMCBR_IVAR_MODE:
                *(int *)result = sc->a10_host.mode;
                break;
        case MMCBR_IVAR_OCR:
                *(int *)result = sc->a10_host.ocr;
                break;
        case MMCBR_IVAR_POWER_MODE:
                *(int *)result = sc->a10_host.ios.power_mode;
                break;
        case MMCBR_IVAR_VDD:
                *(int *)result = sc->a10_host.ios.vdd;
                break;
        case MMCBR_IVAR_CAPS:
                *(int *)result = sc->a10_host.caps;
                break;
        case MMCBR_IVAR_MAX_DATA:
                *(int *)result = 65535;
                break;
        }

        return (0);
}

static int
a10_mmc_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
        struct a10_mmc_softc *sc;

        sc = device_get_softc(bus);
        switch (which) {
        default:
                return (EINVAL);
        case MMCBR_IVAR_BUS_MODE:
                sc->a10_host.ios.bus_mode = value;
                break;
        case MMCBR_IVAR_BUS_WIDTH:
                sc->a10_host.ios.bus_width = value;
                break;
        case MMCBR_IVAR_CHIP_SELECT:
                sc->a10_host.ios.chip_select = value;
                break;
        case MMCBR_IVAR_CLOCK:
                sc->a10_host.ios.clock = value;
                break;
        case MMCBR_IVAR_MODE:
                sc->a10_host.mode = value;
                break;
        case MMCBR_IVAR_OCR:
                sc->a10_host.ocr = value;
                break;
        case MMCBR_IVAR_POWER_MODE:
                sc->a10_host.ios.power_mode = value;
                break;
        case MMCBR_IVAR_VDD:
                sc->a10_host.ios.vdd = value;
                break;
        /* These are read-only */
        case MMCBR_IVAR_CAPS:
        case MMCBR_IVAR_HOST_OCR:
        case MMCBR_IVAR_F_MIN:
        case MMCBR_IVAR_F_MAX:
        case MMCBR_IVAR_MAX_DATA:
                return (EINVAL);
        }

        return (0);
}

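/*
 * Issue an internal "update clock" command (A10_MMC_START |
 * A10_MMC_UPCLK_ONLY) and poll until the controller clears the start bit,
 * then acknowledge any raw interrupt status the command raised.
 */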
static int
a10_mmc_update_clock(struct a10_mmc_softc *sc)
{
        uint32_t cmdreg;
        int retry;

        cmdreg = A10_MMC_START | A10_MMC_UPCLK_ONLY |
            A10_MMC_WAIT_PREOVER;
        A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
        retry = 0xfffff;
        while (--retry > 0) {
                if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_START) == 0) {
                        A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
                        return (0);
                }
                DELAY(10);
        }
        A10_MMC_WRITE_4(sc, A10_MMC_RINTR, 0xffffffff);
        device_printf(sc->a10_dev, "timeout updating clock\n");

        return (ETIMEDOUT);
}

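/*
 * Apply the requested I/O settings.  The card clock is gated off if it was
 * running, the internal divider is cleared and the module clock
 * reprogrammed through a10_clk_mmc_cfg() for the requested frequency, the
 * card clock is enabled again (each step followed by an update-clock
 * command), and finally the bus width register is set to 1, 4 or 8 bits.
 */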
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
        int error;
        struct a10_mmc_softc *sc;
        struct mmc_ios *ios;
        uint32_t clkcr;

        sc = device_get_softc(bus);
        clkcr = A10_MMC_READ_4(sc, A10_MMC_CLKCR);
        if (clkcr & A10_MMC_CARD_CLK_ON) {
                /* Disable clock. */
                clkcr &= ~A10_MMC_CARD_CLK_ON;
                A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
                error = a10_mmc_update_clock(sc);
                if (error != 0)
                        return (error);
        }

        ios = &sc->a10_host.ios;
        if (ios->clock) {
                /* Reset the divider. */
                clkcr &= ~A10_MMC_CLKCR_DIV;
                A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
                error = a10_mmc_update_clock(sc);
                if (error != 0)
                        return (error);

                /* Set the MMC clock. */
                error = a10_clk_mmc_cfg(sc->a10_id, ios->clock);
                if (error != 0)
                        return (error);

                /* Enable clock. */
                clkcr |= A10_MMC_CARD_CLK_ON;
                A10_MMC_WRITE_4(sc, A10_MMC_CLKCR, clkcr);
                error = a10_mmc_update_clock(sc);
                if (error != 0)
                        return (error);
        }

        /* Set the bus width. */
        switch (ios->bus_width) {
        case bus_width_1:
                A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH1);
                break;
        case bus_width_4:
                A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH4);
                break;
        case bus_width_8:
                A10_MMC_WRITE_4(sc, A10_MMC_WIDTH, A10_MMC_WIDTH8);
                break;
        }

        return (0);
}

static int
a10_mmc_get_ro(device_t bus, device_t child)
{

        return (0);
}

static int
a10_mmc_acquire_host(device_t bus, device_t child)
{
        struct a10_mmc_softc *sc;
        int error;

        sc = device_get_softc(bus);
        A10_MMC_LOCK(sc);
        while (sc->a10_bus_busy) {
                error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
                if (error != 0) {
                        A10_MMC_UNLOCK(sc);
                        return (error);
                }
        }
        sc->a10_bus_busy++;
        A10_MMC_UNLOCK(sc);

        return (0);
}

static int
a10_mmc_release_host(device_t bus, device_t child)
{
        struct a10_mmc_softc *sc;

        sc = device_get_softc(bus);
        A10_MMC_LOCK(sc);
        sc->a10_bus_busy--;
        wakeup(sc);
        A10_MMC_UNLOCK(sc);

        return (0);
}

static device_method_t a10_mmc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         a10_mmc_probe),
        DEVMETHOD(device_attach,        a10_mmc_attach),
        DEVMETHOD(device_detach,        a10_mmc_detach),

        /* Bus interface */
        DEVMETHOD(bus_read_ivar,        a10_mmc_read_ivar),
        DEVMETHOD(bus_write_ivar,       a10_mmc_write_ivar),
        DEVMETHOD(bus_print_child,      bus_generic_print_child),

        /* MMC bridge interface */
        DEVMETHOD(mmcbr_update_ios,     a10_mmc_update_ios),
        DEVMETHOD(mmcbr_request,        a10_mmc_request),
        DEVMETHOD(mmcbr_get_ro,         a10_mmc_get_ro),
        DEVMETHOD(mmcbr_acquire_host,   a10_mmc_acquire_host),
        DEVMETHOD(mmcbr_release_host,   a10_mmc_release_host),

        DEVMETHOD_END
};

static devclass_t a10_mmc_devclass;

static driver_t a10_mmc_driver = {
        "a10_mmc",
        a10_mmc_methods,
        sizeof(struct a10_mmc_softc),
};

DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);
DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL);