3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/stdint.h>
28 #include <sys/stddef.h>
29 #include <sys/param.h>
30 #include <sys/queue.h>
31 #include <sys/types.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/condvar.h>
39 #include <sys/sysctl.h>
41 #include <sys/unistd.h>
42 #include <sys/callout.h>
43 #include <sys/malloc.h>
46 #include <dev/usb/usb.h>
47 #include <dev/usb/usbdi.h>
48 #include <dev/usb/usbdi_util.h>
50 #define USB_DEBUG_VAR usb_debug
52 #include <dev/usb/usb_core.h>
53 #include <dev/usb/usb_busdma.h>
54 #include <dev/usb/usb_process.h>
55 #include <dev/usb/usb_transfer.h>
56 #include <dev/usb/usb_device.h>
57 #include <dev/usb/usb_util.h>
58 #include <dev/usb/usb_debug.h>
60 #include <dev/usb/usb_controller.h>
61 #include <dev/usb/usb_bus.h>
/* Forward declarations of the file-local BUS-DMA helper functions. */
64 static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
65 static void usb_dma_tag_destroy(struct usb_dma_tag *);
66 static void usb_dma_lock_cb(void *, bus_dma_lock_op_t);
67 static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
68 static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
69 static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
73 /*------------------------------------------------------------------------*
74 * usbd_get_page - lookup DMA-able memory for the given offset
76 * NOTE: Only call this function when the "page_cache" structure has
77 * been properly initialized !
78 *------------------------------------------------------------------------*/
80 usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
81 struct usb_page_search *res)
83 struct usb_page *page;
88 /* Case 1 - something has been loaded into DMA */
92 /* Case 1a - Kernel Virtual Address */
/* buffer pointer is taken relative to the page cache's KVA buffer */
94 res->buffer = USB_ADD_BYTES(pc->buffer, offset);
/* bias the offset by where the mapping starts inside the first page */
96 offset += pc->page_offset_buf;
98 /* compute destination page */
100 page = pc->page_start;
102 if (pc->ismultiseg) {
/* multi-segment mapping: select page and offset within that page */
104 page += (offset / USB_PAGE_SIZE);
106 offset %= USB_PAGE_SIZE;
/* the returned chunk never crosses the end of the current page */
108 res->length = USB_PAGE_SIZE - offset;
109 res->physaddr = page->physaddr + offset;
/* single-segment case: physically contiguous, no page split needed */
112 res->physaddr = page->physaddr + offset;
116 /* Case 1b - Non Kernel Virtual Address */
/* NOTE(review): here the buffer comes from the page entry itself */
118 res->buffer = USB_ADD_BYTES(page->buffer, offset);
123 /* Case 2 - Plain PIO */
/* no DMA load active: hand back the plain buffer pointer */
125 res->buffer = USB_ADD_BYTES(pc->buffer, offset);
132 /*------------------------------------------------------------------------*
133 * usbd_copy_in - copy directly to DMA-able memory
134 *------------------------------------------------------------------------*/
136 usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
137 const void *ptr, usb_frlength_t len)
139 struct usb_page_search buf_res;
/* copy chunk-by-chunk; each usbd_get_page() lookup yields at most
 * one page worth of contiguous DMA-able memory */
143 usbd_get_page(cache, offset, &buf_res);
/* clamp the chunk to the number of bytes left to copy */
145 if (buf_res.length > len) {
146 buf_res.length = len;
148 bcopy(ptr, buf_res.buffer, buf_res.length);
/* advance source pointer, destination offset and remaining length */
150 offset += buf_res.length;
151 len -= buf_res.length;
152 ptr = USB_ADD_BYTES(ptr, buf_res.length);
156 /*------------------------------------------------------------------------*
157 * usbd_copy_in_user - copy directly to DMA-able memory from userland
162 *------------------------------------------------------------------------*/
165 usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
166 const void *ptr, usb_frlength_t len)
168 struct usb_page_search buf_res;
/* same chunked walk as usbd_copy_in(), but the source is a userland
 * pointer, hence copyin() instead of bcopy() */
173 usbd_get_page(cache, offset, &buf_res);
175 if (buf_res.length > len) {
176 buf_res.length = len;
/* copyin() can fail; the error is presumably propagated to the
 * caller - TODO confirm against the elided lines */
178 error = copyin(ptr, buf_res.buffer, buf_res.length);
182 offset += buf_res.length;
183 len -= buf_res.length;
184 ptr = USB_ADD_BYTES(ptr, buf_res.length);
186 return (0); /* success */
190 /*------------------------------------------------------------------------*
191 * usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
192 *------------------------------------------------------------------------*/
/* context passed through m_apply() to the per-chunk callback below */
194 struct usb_m_copy_in_arg {
195 struct usb_page_cache *cache;
196 usb_frlength_t dst_offset;
/* m_apply() callback: copy one mbuf data chunk and advance the
 * running destination offset */
200 usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
202 register struct usb_m_copy_in_arg *ua = arg;
204 usbd_copy_in(ua->cache, ua->dst_offset, src, count);
205 ua->dst_offset += count;
/* walk "src_len" bytes of the mbuf chain starting at "src_offset",
 * feeding each chunk to usbd_m_copy_in_cb() */
210 usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
211 struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
213 struct usb_m_copy_in_arg arg = {cache, dst_offset};
216 error = m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
220 /*------------------------------------------------------------------------*
221 * usb_uiomove - factored out code
222 *------------------------------------------------------------------------*/
225 usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
226 usb_frlength_t pc_offset, usb_frlength_t len)
228 struct usb_page_search res;
/* chunked transfer between the page cache and a uio, one page-bounded
 * piece per usbd_get_page() lookup */
233 usbd_get_page(pc, pc_offset, &res);
/* clamp the chunk to the remaining length */
235 if (res.length > len) {
239 * "uiomove()" can sleep so one needs to make a wrapper,
240 * exiting the mutex and checking things
242 error = uiomove(res.buffer, res.length, uio);
247 pc_offset += res.length;
254 /*------------------------------------------------------------------------*
255 * usbd_copy_out - copy directly from DMA-able memory
256 *------------------------------------------------------------------------*/
258 usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
259 void *ptr, usb_frlength_t len)
261 struct usb_page_search res;
/* mirror of usbd_copy_in(): chunked copy out of the page cache */
265 usbd_get_page(cache, offset, &res);
267 if (res.length > len) {
270 bcopy(res.buffer, ptr, res.length);
/* advance to the next page-bounded chunk */
272 offset += res.length;
274 ptr = USB_ADD_BYTES(ptr, res.length);
278 /*------------------------------------------------------------------------*
279 * usbd_copy_out_user - copy directly from DMA-able memory to userland
284 *------------------------------------------------------------------------*/
287 usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
288 void *ptr, usb_frlength_t len)
290 struct usb_page_search res;
/* like usbd_copy_out(), but the destination is a userland pointer,
 * hence copyout() instead of bcopy() */
295 usbd_get_page(cache, offset, &res);
297 if (res.length > len) {
/* copyout() can fail; the error is presumably returned to the
 * caller - TODO confirm against the elided lines */
300 error = copyout(res.buffer, ptr, res.length);
304 offset += res.length;
306 ptr = USB_ADD_BYTES(ptr, res.length);
308 return (0); /* success */
312 /*------------------------------------------------------------------------*
313 * usbd_frame_zero - zero DMA-able memory
314 *------------------------------------------------------------------------*/
316 usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
319 struct usb_page_search res;
/* zero chunk-by-chunk, each chunk bounded by a page boundary */
323 usbd_get_page(cache, offset, &res);
325 if (res.length > len) {
328 bzero(res.buffer, res.length);
330 offset += res.length;
337 /*------------------------------------------------------------------------*
338 * usb_dma_lock_cb - dummy callback
339 *------------------------------------------------------------------------*/
/* Intentionally empty lock function handed to bus_dma_tag_create(). */
341 usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
343 /* we use "mtx_owned()" instead of this function */
346 /*------------------------------------------------------------------------*
347 * usb_dma_tag_create - allocate a DMA tag
349 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
350 * allow multi-segment mappings. Else all mappings are single-segment.
351 *------------------------------------------------------------------------*/
353 usb_dma_tag_create(struct usb_dma_tag *udt,
354 usb_size_t size, usb_size_t align)
358 if (bus_dma_tag_create
359 ( /* parent */ udt->tag_parent->tag,
360 /* alignment */ align,
361 /* boundary */ (align == 1) ?
/* lowaddr mask is derived from the parent tag's addressable bits */
363 /* lowaddr */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
364 /* highaddr */ BUS_SPACE_MAXADDR,
366 /* filterarg */ NULL,
/* multi-segment tags get enough segments to cover "size" plus
 * slack for a non page-aligned start; others get exactly one */
368 /* nsegments */ (align == 1 && size > 1) ?
369 (2 + (size / USB_PAGE_SIZE)) : 1,
370 /* maxsegsz */ (align == 1 && size > USB_PAGE_SIZE) ?
371 USB_PAGE_SIZE : size,
/* keep the buffer's offset within its page across bounce buffering */
372 /* flags */ BUS_DMA_KEEP_PG_OFFSET,
373 /* lockfn */ &usb_dma_lock_cb,
381 /*------------------------------------------------------------------------*
382 * usb_dma_tag_free - free a DMA tag
383 *------------------------------------------------------------------------*/
/* Release the underlying bus_dma tag held by "udt". */
385 usb_dma_tag_destroy(struct usb_dma_tag *udt)
387 bus_dma_tag_destroy(udt->tag);
390 /*------------------------------------------------------------------------*
391 * usb_pc_alloc_mem_cb - BUS-DMA callback function
392 *------------------------------------------------------------------------*/
/* Thin wrapper: common callback with "isload" == 0 (allocation path). */
394 usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
397 usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
400 /*------------------------------------------------------------------------*
401 * usb_pc_load_mem_cb - BUS-DMA callback function
402 *------------------------------------------------------------------------*/
/* Thin wrapper: common callback with "isload" == 1 (load path). */
404 usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
407 usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
410 /*------------------------------------------------------------------------*
411 * usb_pc_common_mem_cb - BUS-DMA callback function
412 *------------------------------------------------------------------------*/
414 usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
415 int nseg, int error, uint8_t isload)
417 struct usb_dma_parent_tag *uptag;
418 struct usb_page_cache *pc;
424 uptag = pc->tag_parent;
427 * XXX There is sometimes recursive locking here.
428 * XXX We should try to find a better solution.
429 * XXX Until further the "owned" variable does
/* record the first segment: page-aligned physical address plus the
 * remainder that the buffer sits into its first page */
437 pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
438 rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
439 pc->page_offset_buf = rem;
440 pc->page_offset_end += rem;
/* sanity check that BUS_DMA_KEEP_PG_OFFSET was honoured: the
 * physical in-page offset must match the virtual one */
443 if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
445 * This check verifies that the physical address is correct:
447 DPRINTFN(0, "Page offset was not preserved\n");
/* subsequent segments: presumably filled in per page - TODO confirm
 * against the elided loop header */
456 pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
/* take the parent mutex only if not already held (see XXX above) */
460 owned = mtx_owned(uptag->mtx);
462 mtx_lock(uptag->mtx);
/* latch the error, notify the parent and wake any cv waiters */
464 uptag->dma_error = (error ? 1 : 0);
466 (uptag->func) (uptag);
468 cv_broadcast(uptag->cv);
471 mtx_unlock(uptag->mtx);
474 /*------------------------------------------------------------------------*
475 * usb_pc_alloc_mem - allocate DMA'able memory
480 *------------------------------------------------------------------------*/
482 usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
483 usb_size_t size, usb_size_t align)
485 struct usb_dma_parent_tag *uptag;
486 struct usb_dma_tag *utag;
491 uptag = pc->tag_parent;
495 * The alignment must be greater or equal to the
496 * "size" else the object can be split between two
497 * memory pages and we get a problem!
/* grow "align" (presumably by doubling - the loop body is elided)
 * until it is at least "size" */
499 while (align < size) {
507 * XXX BUS-DMA workaround - FIXME later:
509 * We assume that that the aligment at this point of
510 * the code is greater than or equal to the size and
511 * less than two times the size, so that if we double
512 * the size, the size will be greater than the
515 * The bus-dma system has a check for "alignment"
516 * being less than "size". If that check fails we end
517 * up using contigmalloc which is page based even for
518 * small allocations. Try to avoid that to save
519 * memory, hence we sometimes to a large number of
522 if (size <= (USB_PAGE_SIZE / 2)) {
527 /* get the correct DMA tag */
528 utag = usb_dma_tag_find(uptag, size, align);
532 /* allocate memory */
533 if (bus_dmamem_alloc(
534 utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
537 /* setup page cache */
540 pc->page_offset_buf = 0;
541 pc->page_offset_end = size;
/* align == 1 selects a multi-segment mapping (see usb_dma_tag_create) */
544 pc->ismultiseg = (align == 1);
546 mtx_lock(uptag->mtx);
548 /* load memory into DMA */
549 err = bus_dmamap_load(
550 utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
551 pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
/* deferred load: wait for usb_pc_common_mem_cb() to broadcast */
553 if (err == EINPROGRESS) {
554 cv_wait(uptag->cv, uptag->mtx);
557 mtx_unlock(uptag->mtx);
/* failure path: release the DMA memory again */
559 if (err || uptag->dma_error) {
560 bus_dmamem_free(utag->tag, ptr, map);
/* make sure the zeroed buffer reaches the device's view */
565 usb_pc_cpu_flush(pc);
570 /* reset most of the page cache */
572 pc->page_start = NULL;
573 pc->page_offset_buf = 0;
574 pc->page_offset_end = 0;
580 /*------------------------------------------------------------------------*
581 * usb_pc_free_mem - free DMA memory
583 * This function is NULL safe.
584 *------------------------------------------------------------------------*/
586 usb_pc_free_mem(struct usb_page_cache *pc)
/* only act when something was actually allocated */
588 if (pc && pc->buffer) {
590 bus_dmamap_unload(pc->tag, pc->map);
592 bus_dmamem_free(pc->tag, pc->buffer, pc->map);
598 /*------------------------------------------------------------------------*
599 * usb_pc_load_mem - load virtual memory into DMA
604 *------------------------------------------------------------------------*/
606 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
608 /* setup page cache */
609 pc->page_offset_buf = 0;
610 pc->page_offset_end = size;
/* caller must hold the parent tag's mutex */
613 mtx_assert(pc->tag_parent->mtx, MA_OWNED);
617 struct usb_dma_parent_tag *uptag;
620 uptag = pc->tag_parent;
623 * We have to unload the previous loaded DMA
624 * pages before trying to load a new one!
626 bus_dmamap_unload(pc->tag, pc->map);
629 * Try to load memory into DMA.
/* synchronous branch: block on the cv until the alloc-style
 * callback has run */
631 err = bus_dmamap_load(
632 pc->tag, pc->map, pc->buffer, size,
633 &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
634 if (err == EINPROGRESS) {
635 cv_wait(uptag->cv, uptag->mtx);
638 if (err || uptag->dma_error) {
644 * We have to unload the previous loaded DMA
645 * pages before trying to load a new one!
647 bus_dmamap_unload(pc->tag, pc->map);
650 * Try to load memory into DMA. The callback
651 * will be called in all cases:
/* asynchronous branch: usb_pc_load_mem_cb() completes the load */
654 pc->tag, pc->map, pc->buffer, size,
655 &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
661 * Call callback so that refcount is decremented
/* no DMA needed (presumably size == 0 - elided condition); still
 * invoke the parent callback so bookkeeping stays balanced */
664 pc->tag_parent->dma_error = 0;
665 (pc->tag_parent->func) (pc->tag_parent);
671 /*------------------------------------------------------------------------*
672 * usb_pc_cpu_invalidate - invalidate CPU cache
673 *------------------------------------------------------------------------*/
675 usb_pc_cpu_invalidate(struct usb_page_cache *pc)
/* an empty range means no DMA load has happened - nothing to sync */
677 if (pc->page_offset_end == pc->page_offset_buf) {
678 /* nothing has been loaded into this page cache! */
683 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
684 * same time, but in the future we should try to isolate the
685 * different cases to optimise the code. --HPS
687 bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
688 bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
691 /*------------------------------------------------------------------------*
692 * usb_pc_cpu_flush - flush CPU cache
693 *------------------------------------------------------------------------*/
695 usb_pc_cpu_flush(struct usb_page_cache *pc)
/* an empty range means no DMA load has happened - nothing to sync */
697 if (pc->page_offset_end == pc->page_offset_buf) {
698 /* nothing has been loaded into this page cache! */
701 bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
704 /*------------------------------------------------------------------------*
705 * usb_pc_dmamap_create - create a DMA map
710 *------------------------------------------------------------------------*/
712 usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
714 struct usb_xfer_root *info;
715 struct usb_dma_tag *utag;
718 info = USB_DMATAG_TO_XROOT(pc->tag_parent);
/* look up a multi-segment tag (align == 1) big enough for "size" */
724 utag = usb_dma_tag_find(pc->tag_parent, size, 1);
729 if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
733 return 0; /* success */
738 return 1; /* failure */
741 /*------------------------------------------------------------------------*
742 * usb_pc_dmamap_destroy
744 * This function is NULL safe.
745 *------------------------------------------------------------------------*/
747 usb_pc_dmamap_destroy(struct usb_page_cache *pc)
750 bus_dmamap_destroy(pc->tag, pc->map);
756 /*------------------------------------------------------------------------*
757 * usb_dma_tag_find - factored out code
758 *------------------------------------------------------------------------*/
760 usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
761 usb_size_t size, usb_size_t align)
763 struct usb_dma_tag *udt;
766 USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
767 USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));
/* linear scan of the parent's tag array, at most "utag_max" entries */
769 udt = udpt->utag_first;
770 nudt = udpt->utag_max;
/* align == 0 marks an unused slot: create a new tag there on demand */
774 if (udt->align == 0) {
775 usb_dma_tag_create(udt, size, align);
776 if (udt->tag == NULL) {
/* reuse an existing tag with exactly matching parameters */
783 if ((udt->align == align) && (udt->size == size)) {
791 /*------------------------------------------------------------------------*
792 * usb_dma_tag_setup - initialise USB DMA tags
793 *------------------------------------------------------------------------*/
795 usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
796 struct usb_dma_tag *udt, bus_dma_tag_t dmat,
797 struct mtx *mtx, usb_dma_callback_t *func,
798 uint8_t ndmabits, uint8_t nudt)
800 bzero(udpt, sizeof(*udpt));
802 /* sanity checking */
806 /* something is corrupt */
809 /* initialise condition variable */
810 cv_init(udpt->cv, "USB DMA CV")
812 /* store some information */
816 udpt->utag_first = udt;
817 udpt->utag_max = nudt;
818 udpt->dma_bits = ndmabits;
/* zero the child tag array slot(s) and back-link them to the parent;
 * presumably done for each of the "nudt" entries - elided loop */
821 bzero(udt, sizeof(*udt));
822 udt->tag_parent = udpt;
827 /*------------------------------------------------------------------------*
828 * usb_bus_tag_unsetup - factored out code
829 *------------------------------------------------------------------------*/
831 usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
833 struct usb_dma_tag *udt;
/* walk the parent's tag array, mirroring usb_dma_tag_find() */
836 udt = udpt->utag_first;
837 nudt = udpt->utag_max;
842 /* destroy the USB DMA tag */
843 usb_dma_tag_destroy(udt);
/* the cv was only initialised when tags exist (see usb_dma_tag_setup) */
849 if (udpt->utag_max) {
850 /* destroy the condition variable */
851 cv_destroy(udpt->cv);
855 /*------------------------------------------------------------------------*
858 * This function handles loading of virtual buffers into DMA and is
859 * only called when "dma_refcount" is zero.
860 *------------------------------------------------------------------------*/
862 usb_bdma_work_loop(struct usb_xfer_queue *pq)
864 struct usb_xfer_root *info;
865 struct usb_xfer *xfer;
866 usb_frcount_t nframes;
871 mtx_assert(info->xfer_mtx, MA_OWNED);
874 /* some error happened */
875 USB_BUS_LOCK(info->bus);
876 usbd_transfer_done(xfer, 0);
877 USB_BUS_UNLOCK(info->bus);
/* one-time BUS-DMA setup for this transfer */
880 if (!xfer->flags_int.bdma_setup) {
882 usb_frlength_t frlength_0;
885 xfer->flags_int.bdma_setup = 1;
887 /* reset BUS-DMA load state */
891 if (xfer->flags_int.isochronous_xfr) {
892 /* only one frame buffer */
894 frlength_0 = xfer->sumlen;
896 /* can be multiple frame buffers */
897 nframes = xfer->nframes;
898 frlength_0 = xfer->frlengths[0];
902 * Set DMA direction first. This is needed to
903 * select the correct cache invalidate and cache
906 isread = USB_GET_DATA_ISREAD(xfer);
907 pg = xfer->dma_page_ptr;
/* control transfers with a header: the setup packet direction is
 * fixed by the USB mode, not by the data stage direction */
909 if (xfer->flags_int.control_xfr &&
910 xfer->flags_int.control_hdr) {
912 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
913 /* The device controller writes to memory */
914 xfer->frbuffers[0].isread = 1;
916 /* The host controller reads from memory */
917 xfer->frbuffers[0].isread = 0;
921 xfer->frbuffers[0].isread = isread;
925 * Setup the "page_start" pointer which points to an array of
926 * USB pages where information about the physical address of a
927 * page will be stored. Also initialise the "isread" field of
928 * the USB page caches.
930 xfer->frbuffers[0].page_start = pg;
932 info->dma_nframes = nframes;
933 info->dma_currframe = 0;
934 info->dma_frlength_0 = frlength_0;
/* carve page-info slots out of dma_page_ptr, one run per frame */
936 pg += (frlength_0 / USB_PAGE_SIZE);
939 while (--nframes > 0) {
940 xfer->frbuffers[nframes].isread = isread;
941 xfer->frbuffers[nframes].page_start = pg;
943 pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
/* a previous load reported failure - abort the transfer */
948 if (info->dma_error) {
949 USB_BUS_LOCK(info->bus);
950 usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
951 USB_BUS_UNLOCK(info->bus);
/* load one frame per work-loop pass until all frames are loaded;
 * usb_bdma_done_event() re-enters this loop after each load */
954 if (info->dma_currframe != info->dma_nframes) {
956 if (info->dma_currframe == 0) {
958 usb_pc_load_mem(xfer->frbuffers,
959 info->dma_frlength_0, 0);
962 nframes = info->dma_currframe;
963 usb_pc_load_mem(xfer->frbuffers + nframes,
964 xfer->frlengths[nframes], 0);
967 /* advance frame index */
968 info->dma_currframe++;
/* all frames loaded: sync caches, queue next transfer, go */
973 usb_bdma_pre_sync(xfer);
975 /* start loading next USB transfer, if any */
976 usb_command_wrapper(pq, NULL);
978 /* finally start the hardware */
979 usbd_pipe_enter(xfer);
982 /*------------------------------------------------------------------------*
983 * usb_bdma_done_event
985 * This function is called when the BUS-DMA has loaded virtual memory
987 *------------------------------------------------------------------------*/
989 usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
991 struct usb_xfer_root *info;
993 info = USB_DMATAG_TO_XROOT(udpt)
995 mtx_assert(info->xfer_mtx, MA_OWNED);
/* propagate the load result so the work loop can act on it */
998 info->dma_error = udpt->dma_error;
1000 /* enter workloop again */
1001 usb_command_wrapper(&info->dma_q,
1005 /*------------------------------------------------------------------------*
1008 * This function handles DMA synchronisation that must be done before
1009 * an USB transfer is started.
1010 *------------------------------------------------------------------------*/
1012 usb_bdma_pre_sync(struct usb_xfer *xfer)
1014 struct usb_page_cache *pc;
1015 usb_frcount_t nframes;
1017 if (xfer->flags_int.isochronous_xfr) {
1018 /* only one frame buffer */
1021 /* can be multiple frame buffers */
1022 nframes = xfer->nframes;
/* per frame buffer: invalidate for device->memory (read) frames,
 * flush for memory->device (write) frames */
1025 pc = xfer->frbuffers;
1030 usb_pc_cpu_invalidate(pc);
1032 usb_pc_cpu_flush(pc);
1038 /*------------------------------------------------------------------------*
1039 * usb_bdma_post_sync
1041 * This function handles DMA synchronisation that must be done after
1042 * an USB transfer is complete.
1043 *------------------------------------------------------------------------*/
1045 usb_bdma_post_sync(struct usb_xfer *xfer)
1047 struct usb_page_cache *pc;
1048 usb_frcount_t nframes;
1050 if (xfer->flags_int.isochronous_xfr) {
1051 /* only one frame buffer */
1054 /* can be multiple frame buffers */
1055 nframes = xfer->nframes;
1058 pc = xfer->frbuffers;
1062 usb_pc_cpu_invalidate(pc);