/* $FreeBSD$ */
/*-
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>
#endif                  /* USB_GLOBAL_INCLUDE_FILE */

#if USB_HAVE_BUSDMA
static void     usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void     usb_dma_tag_destroy(struct usb_dma_tag *);
static void     usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void     usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void     usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void     usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
                    uint8_t);
#endif

/*------------------------------------------------------------------------*
 *  usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized!
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
        struct usb_page *page;

        if (pc->page_start) {

                /* Case 1 - something has been loaded into DMA */

                if (pc->buffer) {

                        /* Case 1a - Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
                }
                offset += pc->page_offset_buf;

                /* compute destination page */

                page = pc->page_start;

                if (pc->ismultiseg) {

                        page += (offset / USB_PAGE_SIZE);

                        offset %= USB_PAGE_SIZE;

                        res->length = USB_PAGE_SIZE - offset;
                        res->physaddr = page->physaddr + offset;
                } else {
                        res->length = (usb_size_t)-1;
                        res->physaddr = page->physaddr + offset;
                }
                if (!pc->buffer) {

                        /* Case 1b - Non Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(page->buffer, offset);
                }
                return;
        }
#endif
        /* Case 2 - Plain PIO */

        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
        res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
        res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 *  usb_pc_buffer_is_aligned - verify alignment
 *
 * This function is used to check if a page cache buffer is properly
 * aligned to reduce the use of bounce buffers in PIO mode.
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len, usb_frlength_t mask)
{
        struct usb_page_search buf_res;

        while (len != 0) {

                usbd_get_page(pc, offset, &buf_res);

                if (buf_res.length > len)
                        buf_res.length = len;
                if (USB_P2U(buf_res.buffer) & mask)
                        return (0);
                if (buf_res.length & mask)
                        return (0);

                offset += buf_res.length;
                len -= buf_res.length;
        }
        return (1);
}
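
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): a hypothetical controller helper
 * that uses "usb_pc_buffer_is_aligned()" to choose between word-wide
 * PIO and a byte-wise bounce copy. The name "example_pio_out" and the
 * FIFO write step are made up for illustration.
 *------------------------------------------------------------------------*/
#if 0
static void
example_pio_out(struct usb_page_cache *pc, usb_frlength_t off,
    usb_frlength_t len)
{
        uint8_t temp[4] __aligned(4);

        if (usb_pc_buffer_is_aligned(pc, off, len, 3)) {
                /* buffer and length are 32-bit aligned - PIO directly */
        } else {
                /* bounce through a small aligned buffer, 4 bytes at a time */
                while (len >= 4) {
                        usbd_copy_out(pc, off, temp, 4);
                        /* ... write "temp" to the FIFO register here ... */
                        off += 4;
                        len -= 4;
                }
                /* any remaining tail bytes would be handled separately */
        }
}
#endif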

/*------------------------------------------------------------------------*
 *  usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
        struct usb_page_search buf_res;

        while (len != 0) {

                usbd_get_page(cache, offset, &buf_res);

                if (buf_res.length > len) {
                        buf_res.length = len;
                }
                memcpy(buf_res.buffer, ptr, buf_res.length);

                offset += buf_res.length;
                len -= buf_res.length;
                ptr = USB_ADD_BYTES(ptr, buf_res.length);
        }
}
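
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): filling the first frame buffer of
 * a control transfer with an 8-byte SETUP packet before submitting it.
 * "example_fill_setup" and the vendor request value are hypothetical.
 *------------------------------------------------------------------------*/
#if 0
static void
example_fill_setup(struct usb_xfer *xfer)
{
        struct usb_device_request req;

        req.bmRequestType = UT_READ_VENDOR_DEVICE;
        req.bRequest = 0x01;            /* hypothetical vendor request */
        USETW(req.wValue, 0);
        USETW(req.wIndex, 0);
        USETW(req.wLength, 0);

        /* copy the request into frame buffer zero, DMA-able or not */
        usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
        usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
}
#endif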

/*------------------------------------------------------------------------*
 *  usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
        struct usb_page_search buf_res;
        int error;

        while (len != 0) {

                usbd_get_page(cache, offset, &buf_res);

                if (buf_res.length > len) {
                        buf_res.length = len;
                }
                error = copyin(ptr, buf_res.buffer, buf_res.length);
                if (error)
                        return (error);

                offset += buf_res.length;
                len -= buf_res.length;
                ptr = USB_ADD_BYTES(ptr, buf_res.length);
        }
        return (0);                     /* success */
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
        struct usb_page_cache *cache;
        usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
        register struct usb_m_copy_in_arg *ua = arg;

        usbd_copy_in(ua->cache, ua->dst_offset, src, count);
        ua->dst_offset += count;
        return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
        struct usb_m_copy_in_arg arg = {cache, dst_offset};

        (void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *  usb_uiomove - factored out code
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
        struct usb_page_search res;
        int error = 0;

        while (len != 0) {

                usbd_get_page(pc, pc_offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                /*
                 * "uiomove()" can sleep, so the caller needs a wrapper
                 * that exits the mutex and re-checks state.
                 */
                error = uiomove(res.buffer, res.length, uio);

                if (error) {
                        break;
                }
                pc_offset += res.length;
                len -= res.length;
        }
        return (error);
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memcpy(ptr, res.buffer, res.length);

                offset += res.length;
                len -= res.length;
                ptr = USB_ADD_BYTES(ptr, res.length);
        }
}
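
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): copying the data received by a
 * completed IN transfer out of frame buffer zero into a driver-local
 * buffer. "example_read_done" and "sc_buf" are hypothetical names.
 *------------------------------------------------------------------------*/
#if 0
static void
example_read_done(struct usb_xfer *xfer, uint8_t *sc_buf, int sc_buf_size)
{
        int actlen;

        usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

        if (actlen > sc_buf_size)
                actlen = sc_buf_size;

        /* copy out of the (possibly multi-page) frame buffer */
        usbd_copy_out(xfer->frbuffers, 0, sc_buf, actlen);
}
#endif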

/*------------------------------------------------------------------------*
 *  usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
        struct usb_page_search res;
        int error;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                error = copyout(res.buffer, ptr, res.length);
                if (error)
                        return (error);

                offset += res.length;
                len -= res.length;
                ptr = USB_ADD_BYTES(ptr, res.length);
        }
        return (0);                     /* success */
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memset(res.buffer, 0, res.length);

                offset += res.length;
                len -= res.length;
        }
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *      usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
        /* we use "mtx_owned()" instead of this function */
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
        bus_dma_tag_t tag;

        if (bus_dma_tag_create
            ( /* parent    */ udt->tag_parent->tag,
             /* alignment */ align,
             /* boundary  */ 0,
             /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
             /* highaddr  */ BUS_SPACE_MAXADDR,
             /* filter    */ NULL,
             /* filterarg */ NULL,
             /* maxsize   */ size,
             /* nsegments */ (align == 1 && size > 1) ?
            (2 + (size / USB_PAGE_SIZE)) : 1,
             /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
            USB_PAGE_SIZE : size,
             /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
             /* lockfn    */ &usb_dma_lock_cb,
             /* lockarg   */ NULL,
            &tag)) {
                tag = NULL;
        }
        udt->tag = tag;
}
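
/*------------------------------------------------------------------------*
 * Worked example for the "lowaddr" expression above, assuming a 32-bit
 * DMA width:
 *
 *      (2ULL << (32 - 1)) - 1 = 0x100000000 - 1 = 0xFFFFFFFF
 *
 * Writing "2ULL << (dma_bits - 1)" instead of "1ULL << dma_bits"
 * presumably keeps the shift count below 64 when "dma_bits" is 64,
 * where the latter form would be undefined.
 *------------------------------------------------------------------------*/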

/*------------------------------------------------------------------------*
 *      usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
        bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *      usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
        usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *      usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
        usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 *      usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
        struct usb_dma_parent_tag *uptag;
        struct usb_page_cache *pc;
        struct usb_page *pg;
        usb_size_t rem;
        bus_size_t off;
        uint8_t owned;

        pc = arg;
        uptag = pc->tag_parent;

        /*
         * XXX There is sometimes recursive locking here.
         * XXX We should try to find a better solution.
         * XXX Until then the "owned" variable does the trick.
         */

        if (error) {
                goto done;
        }

        off = 0;
        pg = pc->page_start;
        pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
        rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
        pc->page_offset_buf = rem;
        pc->page_offset_end += rem;
#ifdef USB_DEBUG
        if (nseg > 1 &&
            ((segs->ds_addr + segs->ds_len) & (USB_PAGE_SIZE - 1)) !=
            ((segs + 1)->ds_addr & (USB_PAGE_SIZE - 1))) {
                /*
                 * This check verifies there is no page offset hole
                 * between the first and second segment. See the
                 * BUS_DMA_KEEP_PG_OFFSET flag.
                 */
                DPRINTFN(0, "Page offset was not preserved\n");
                error = 1;
                goto done;
        }
#endif
        while (pc->ismultiseg) {
                off += USB_PAGE_SIZE;
                if (off >= (segs->ds_len + rem)) {
                        /* page crossing */
                        nseg--;
                        segs++;
                        off = 0;
                        rem = 0;
                        if (nseg == 0)
                                break;
                }
                pg++;
                pg->physaddr = (segs->ds_addr + off) & ~(USB_PAGE_SIZE - 1);
        }

done:
        owned = mtx_owned(uptag->mtx);
        if (!owned)
                mtx_lock(uptag->mtx);

        uptag->dma_error = (error ? 1 : 0);
        if (isload) {
                (uptag->func) (uptag);
        } else {
                cv_broadcast(uptag->cv);
        }
        if (!owned)
                mtx_unlock(uptag->mtx);
}

/*------------------------------------------------------------------------*
 *      usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
        struct usb_dma_parent_tag *uptag;
        struct usb_dma_tag *utag;
        bus_dmamap_t map;
        void *ptr;
        int err;

        uptag = pc->tag_parent;

        if (align != 1) {
                /*
                 * The alignment must be greater or equal to the
                 * "size" else the object can be split between two
                 * memory pages and we get a problem!
                 */
                while (align < size) {
                        align *= 2;
                        if (align == 0) {
                                goto error;
                        }
                }
#if 1
                /*
                 * XXX BUS-DMA workaround - FIXME later:
                 *
                 * We assume that the alignment at this point of the
                 * code is greater than or equal to the size and less
                 * than two times the size, so that if we double the
                 * size, the size will be greater than the alignment.
                 *
                 * The bus-dma system has a check for "alignment"
                 * being less than "size". If that check fails we end
                 * up using contigmalloc which is page based even for
                 * small allocations. Try to avoid that to save
                 * memory, since we sometimes do a large number of
                 * small allocations!
                 */
                if (size <= (USB_PAGE_SIZE / 2)) {
                        size *= 2;
                }
#endif
        }
        /* get the correct DMA tag */
        utag = usb_dma_tag_find(uptag, size, align);
        if (utag == NULL) {
                goto error;
        }
        /* allocate memory */
        if (bus_dmamem_alloc(
            utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
                goto error;
        }
        /* setup page cache */
        pc->buffer = ptr;
        pc->page_start = pg;
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->map = map;
        pc->tag = utag->tag;
        pc->ismultiseg = (align == 1);

        mtx_lock(uptag->mtx);

        /* load memory into DMA */
        err = bus_dmamap_load(
            utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
            pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

        if (err == EINPROGRESS) {
                cv_wait(uptag->cv, uptag->mtx);
                err = 0;
        }
        mtx_unlock(uptag->mtx);

        if (err || uptag->dma_error) {
                bus_dmamem_free(utag->tag, ptr, map);
                goto error;
        }
        memset(ptr, 0, size);

        usb_pc_cpu_flush(pc);

        return (0);

error:
        /* reset most of the page cache */
        pc->buffer = NULL;
        pc->page_start = NULL;
        pc->page_offset_buf = 0;
        pc->page_offset_end = 0;
        pc->map = NULL;
        pc->tag = NULL;
        return (1);
}

/*------------------------------------------------------------------------*
 *      usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
        if (pc && pc->buffer) {

                bus_dmamap_unload(pc->tag, pc->map);

                bus_dmamem_free(pc->tag, pc->buffer, pc->map);

                pc->buffer = NULL;
        }
}
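
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): how a host controller driver
 * might allocate a small DMA-able descriptor with "usb_pc_alloc_mem()"
 * and look up its bus address with "usbd_get_page()". The name
 * "example_alloc_td" and the 32-byte size are hypothetical.
 *------------------------------------------------------------------------*/
#if 0
static uint8_t
example_alloc_td(struct usb_page_cache *pc, struct usb_page *pg,
    bus_size_t *pphysaddr)
{
        struct usb_page_search res;

        /* 32 bytes, aligned to its own size so it cannot cross a page */
        if (usb_pc_alloc_mem(pc, pg, 32, 32))
                return (1);             /* failure */

        usbd_get_page(pc, 0, &res);
        *pphysaddr = res.physaddr;      /* bus address of the descriptor */

        /* "usb_pc_free_mem(pc)" releases the memory again */
        return (0);                     /* success */
}
#endif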

/*------------------------------------------------------------------------*
 *      usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 * 0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
        /* setup page cache */
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->ismultiseg = 1;

        mtx_assert(pc->tag_parent->mtx, MA_OWNED);

        if (size > 0) {
                if (sync) {
                        struct usb_dma_parent_tag *uptag;
                        int err;

                        uptag = pc->tag_parent;

                        /*
                         * We have to unload the previously loaded DMA
                         * pages before trying to load a new one!
                         */
                        bus_dmamap_unload(pc->tag, pc->map);

                        /*
                         * Try to load memory into DMA.
                         */
                        err = bus_dmamap_load(
                            pc->tag, pc->map, pc->buffer, size,
                            &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
                        if (err == EINPROGRESS) {
                                cv_wait(uptag->cv, uptag->mtx);
                                err = 0;
                        }
                        if (err || uptag->dma_error) {
                                return (1);
                        }
                } else {

                        /*
                         * We have to unload the previously loaded DMA
                         * pages before trying to load a new one!
                         */
                        bus_dmamap_unload(pc->tag, pc->map);

                        /*
                         * Try to load memory into DMA. The callback
                         * will be called in all cases:
                         */
                        if (bus_dmamap_load(
                            pc->tag, pc->map, pc->buffer, size,
                            &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
                        }
                }
        } else {
                if (!sync) {
                        /*
                         * Call callback so that refcount is decremented
                         * properly:
                         */
                        pc->tag_parent->dma_error = 0;
                        (pc->tag_parent->func) (pc->tag_parent);
                }
        }
        return (0);
}

/*------------------------------------------------------------------------*
 *      usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }

        /*
         * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
         * same time, but in the future we should try to isolate the
         * different cases to optimise the code. --HPS
         */
        bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
        bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *      usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }
        bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}
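
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): reading back a descriptor word
 * that the hardware may have updated. The CPU cache is invalidated
 * first so that the copy observes the device's writes.
 * "example_read_status" and the 32-bit status word are hypothetical.
 *------------------------------------------------------------------------*/
#if 0
static uint32_t
example_read_status(struct usb_page_cache *pc)
{
        uint32_t status;

        usb_pc_cpu_invalidate(pc);
        usbd_copy_out(pc, 0, &status, sizeof(status));
        return (status);
}
#endif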

/*------------------------------------------------------------------------*
 *      usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
        struct usb_xfer_root *info;
        struct usb_dma_tag *utag;

        /* get info */
        info = USB_DMATAG_TO_XROOT(pc->tag_parent);

        /* sanity check */
        if (info == NULL) {
                goto error;
        }
        utag = usb_dma_tag_find(pc->tag_parent, size, 1);
        if (utag == NULL) {
                goto error;
        }
        /* create DMA map */
        if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
                goto error;
        }
        pc->tag = utag->tag;
        return 0;                       /* success */

error:
        pc->map = NULL;
        pc->tag = NULL;
        return 1;                       /* failure */
}

/*------------------------------------------------------------------------*
 *      usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
        if (pc && pc->tag) {
                bus_dmamap_destroy(pc->tag, pc->map);
                pc->tag = NULL;
                pc->map = NULL;
        }
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_find - factored out code
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
        struct usb_dma_tag *udt;
        uint8_t nudt;

        USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
        USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

        udt = udpt->utag_first;
        nudt = udpt->utag_max;

        while (nudt--) {

                if (udt->align == 0) {
                        usb_dma_tag_create(udt, size, align);
                        if (udt->tag == NULL) {
                                return (NULL);
                        }
                        udt->align = align;
                        udt->size = size;
                        return (udt);
                }
                if ((udt->align == align) && (udt->size == size)) {
                        return (udt);
                }
                udt++;
        }
        return (NULL);
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
        memset(udpt, 0, sizeof(*udpt));

        /* sanity checking */
        if ((nudt == 0) ||
            (ndmabits == 0) ||
            (mtx == NULL)) {
                /* something is corrupt */
                return;
        }
        /* initialise condition variable */
        cv_init(udpt->cv, "USB DMA CV");

        /* store some information */
        udpt->mtx = mtx;
        udpt->func = func;
        udpt->tag = dmat;
        udpt->utag_first = udt;
        udpt->utag_max = nudt;
        udpt->dma_bits = ndmabits;

        while (nudt--) {
                memset(udt, 0, sizeof(*udt));
                udt->tag_parent = udpt;
                udt++;
        }
}
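
/*------------------------------------------------------------------------*
 * Example (illustrative sketch only): how a caller might initialise a
 * parent DMA tag with room for a few child tags. "example_softc",
 * "example_dma_done", the 32-bit DMA width and the tag count are
 * hypothetical; the USB transfer setup code does the equivalent with
 * "usb_bdma_done_event()" (below) as its callback.
 *------------------------------------------------------------------------*/
#if 0
struct example_softc {
        struct mtx sc_mtx;
        struct usb_dma_parent_tag sc_dma_parent;
        struct usb_dma_tag sc_dma_tags[8];
};

static void
example_dma_done(struct usb_dma_parent_tag *udpt)
{
        /* called when an asynchronous bus_dmamap_load() has completed */
}

static void
example_dma_setup(struct example_softc *sc, device_t dev)
{
        usb_dma_tag_setup(&sc->sc_dma_parent, sc->sc_dma_tags,
            bus_get_dma_tag(dev), &sc->sc_mtx, &example_dma_done,
            32 /* DMA address bits */, 8 /* number of child tags */);
}
#endif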

/*------------------------------------------------------------------------*
 *      usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
        struct usb_dma_tag *udt;
        uint8_t nudt;

        udt = udpt->utag_first;
        nudt = udpt->utag_max;

        while (nudt--) {

                if (udt->align) {
                        /* destroy the USB DMA tag */
                        usb_dma_tag_destroy(udt);
                        udt->align = 0;
                }
                udt++;
        }

        if (udpt->utag_max) {
                /* destroy the condition variable */
                cv_destroy(udpt->cv);
        }
}

/*------------------------------------------------------------------------*
 *      usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
        struct usb_xfer_root *info;
        struct usb_xfer *xfer;
        usb_frcount_t nframes;

        xfer = pq->curr;
        info = xfer->xroot;

        mtx_assert(info->xfer_mtx, MA_OWNED);

        if (xfer->error) {
                /* some error happened */
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, 0);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (!xfer->flags_int.bdma_setup) {
                struct usb_page *pg;
                usb_frlength_t frlength_0;
                uint8_t isread;

                xfer->flags_int.bdma_setup = 1;

                /* reset BUS-DMA load state */

                info->dma_error = 0;

                if (xfer->flags_int.isochronous_xfr) {
                        /* only one frame buffer */
                        nframes = 1;
                        frlength_0 = xfer->sumlen;
                } else {
                        /* can be multiple frame buffers */
                        nframes = xfer->nframes;
                        frlength_0 = xfer->frlengths[0];
                }

                /*
                 * Set DMA direction first. This is needed to
                 * select the correct cache invalidate and cache
                 * flush operations.
                 */
                isread = USB_GET_DATA_ISREAD(xfer);
                pg = xfer->dma_page_ptr;

                if (xfer->flags_int.control_xfr &&
                    xfer->flags_int.control_hdr) {
                        /* special case */
                        if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
                                /* The device controller writes to memory */
                                xfer->frbuffers[0].isread = 1;
                        } else {
                                /* The host controller reads from memory */
                                xfer->frbuffers[0].isread = 0;
                        }
                } else {
                        /* default case */
                        xfer->frbuffers[0].isread = isread;
                }

                /*
                 * Setup the "page_start" pointer which points to an array of
                 * USB pages where information about the physical address of a
                 * page will be stored. Also initialise the "isread" field of
                 * the USB page caches.
                 */
                xfer->frbuffers[0].page_start = pg;

                info->dma_nframes = nframes;
                info->dma_currframe = 0;
                info->dma_frlength_0 = frlength_0;

                pg += (frlength_0 / USB_PAGE_SIZE);
                pg += 2;

                while (--nframes > 0) {
                        xfer->frbuffers[nframes].isread = isread;
                        xfer->frbuffers[nframes].page_start = pg;

                        pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
                        pg += 2;
                }

        }
        if (info->dma_error) {
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (info->dma_currframe != info->dma_nframes) {

                if (info->dma_currframe == 0) {
                        /* special case */
                        usb_pc_load_mem(xfer->frbuffers,
                            info->dma_frlength_0, 0);
                } else {
                        /* default case */
                        nframes = info->dma_currframe;
                        usb_pc_load_mem(xfer->frbuffers + nframes,
                            xfer->frlengths[nframes], 0);
                }

                /* advance frame index */
                info->dma_currframe++;

                return;
        }
        /* go ahead */
        usb_bdma_pre_sync(xfer);

        /* start loading next USB transfer, if any */
        usb_command_wrapper(pq, NULL);

        /* finally start the hardware */
        usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 *      usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
        struct usb_xfer_root *info;

        info = USB_DMATAG_TO_XROOT(udpt);

        mtx_assert(info->xfer_mtx, MA_OWNED);

        /* copy error */
        info->dma_error = udpt->dma_error;

        /* enter workloop again */
        usb_command_wrapper(&info->dma_q,
            info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *      usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {

                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                } else {
                        usb_pc_cpu_flush(pc);
                }
                pc++;
        }
}

/*------------------------------------------------------------------------*
 *      usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {
                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                }
                pc++;
        }
}

#endif