3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/stdint.h>
28 #include <sys/stddef.h>
29 #include <sys/param.h>
30 #include <sys/queue.h>
31 #include <sys/types.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/linker_set.h>
36 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/condvar.h>
40 #include <sys/sysctl.h>
42 #include <sys/unistd.h>
43 #include <sys/callout.h>
44 #include <sys/malloc.h>
47 #include <dev/usb/usb.h>
48 #include <dev/usb/usbdi.h>
49 #include <dev/usb/usbdi_util.h>
51 #define USB_DEBUG_VAR usb_debug
53 #include <dev/usb/usb_core.h>
54 #include <dev/usb/usb_busdma.h>
55 #include <dev/usb/usb_process.h>
56 #include <dev/usb/usb_transfer.h>
57 #include <dev/usb/usb_device.h>
58 #include <dev/usb/usb_util.h>
59 #include <dev/usb/usb_debug.h>
61 #include <dev/usb/usb_controller.h>
62 #include <dev/usb/usb_bus.h>
/* Forward declarations of file-local (static) helpers. */
65 static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
66 static void usb_dma_tag_destroy(struct usb_dma_tag *);
67 static void usb_dma_lock_cb(void *, bus_dma_lock_op_t);
68 static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
69 static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
70 static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
74 /*------------------------------------------------------------------------*
75  * usbd_get_page - lookup DMA-able memory for the given offset
77  * NOTE: Only call this function when the "page_cache" structure has
78  * been properly initialized !
79  *------------------------------------------------------------------------*/
/*
 * Resolve "offset" inside page cache "pc" into a kernel buffer pointer,
 * physical address and contiguous length, returned through "res".
 * NOTE(review): this excerpt is missing interleaved lines (branch/brace
 * lines) — the case structure below is partial; verify against the
 * complete file.
 */
81 usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
82     struct usb_page_search *res)
84 	struct usb_page *page;
89 	/* Case 1 - something has been loaded into DMA */
93 	/* Case 1a - Kernel Virtual Address */
95 	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
	/* bias the offset by where the buffer starts within its first page */
97 	offset += pc->page_offset_buf;
99 	/* compute destination page */
101 	page = pc->page_start;
103 	if (pc->ismultiseg) {
	/* multi-segment mapping: pick the page and the intra-page offset */
105 	page += (offset / USB_PAGE_SIZE);
107 	offset %= USB_PAGE_SIZE;
	/* contiguous bytes left inside this USB page */
109 	res->length = USB_PAGE_SIZE - offset;
110 	res->physaddr = page->physaddr + offset;
	/* single-segment mapping: buffer is physically contiguous */
113 	res->physaddr = page->physaddr + offset;
117 	/* Case 1b - Non Kernel Virtual Address */
119 	res->buffer = USB_ADD_BYTES(page->buffer, offset);
124 	/* Case 2 - Plain PIO */
126 	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
133 /*------------------------------------------------------------------------*
134  * usbd_copy_in - copy directly to DMA-able memory
135  *------------------------------------------------------------------------*/
/*
 * Copy "len" bytes from "ptr" into the page cache starting at "offset".
 * Works in page-sized chunks: usbd_get_page() yields the largest
 * contiguous span, which is clamped to the remaining length.
 * NOTE(review): the surrounding loop line is not visible in this
 * excerpt — the body below iterates until "len" reaches zero.
 */
137 usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
138     const void *ptr, usb_frlength_t len)
140 	struct usb_page_search buf_res;
144 	usbd_get_page(cache, offset, &buf_res);
	/* clamp chunk to the caller's remaining length */
146 	if (buf_res.length > len) {
147 	buf_res.length = len;
149 	bcopy(ptr, buf_res.buffer, buf_res.length);
	/* advance to the next chunk */
151 	offset += buf_res.length;
152 	len -= buf_res.length;
153 	ptr = USB_ADD_BYTES(ptr, buf_res.length);
157 /*------------------------------------------------------------------------*
158  * usbd_copy_in_user - copy directly to DMA-able memory from userland
163  *------------------------------------------------------------------------*/
/*
 * Like usbd_copy_in(), but "ptr" is a userland address: uses copyin()
 * and propagates its error.  Returns 0 on success.
 */
166 usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
167     const void *ptr, usb_frlength_t len)
169 	struct usb_page_search buf_res;
174 	usbd_get_page(cache, offset, &buf_res);
	/* clamp chunk to the caller's remaining length */
176 	if (buf_res.length > len) {
177 	buf_res.length = len;
	/* copyin() may fault — error checking lines not visible here */
179 	error = copyin(ptr, buf_res.buffer, buf_res.length);
183 	offset += buf_res.length;
184 	len -= buf_res.length;
185 	ptr = USB_ADD_BYTES(ptr, buf_res.length);
187 	return (0);			/* success */
191 /*------------------------------------------------------------------------*
192  * usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
193  *------------------------------------------------------------------------*/
/* Cursor state threaded through m_apply() into the copy callback. */
195 struct usb_m_copy_in_arg {
196 	struct usb_page_cache *cache;
197 	usb_frlength_t dst_offset;
/* Per-fragment callback: copy one mbuf data segment and advance. */
201 usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
203 	register struct usb_m_copy_in_arg *ua = arg;
205 	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
206 	ua->dst_offset += count;
/*
 * Walk "src_len" bytes of mbuf chain "m" starting at "src_offset" and
 * copy them into the page cache at "dst_offset" via m_apply().
 */
211 usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
212     struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
214 	struct usb_m_copy_in_arg arg = {cache, dst_offset};
217 	error = m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
221 /*------------------------------------------------------------------------*
222  * usb_uiomove - factored out code
223  *------------------------------------------------------------------------*/
/*
 * Move "len" bytes between the page cache (at "pc_offset") and the
 * uio "uio" in page-sized chunks using uiomove().
 */
226 usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
227     usb_frlength_t pc_offset, usb_frlength_t len)
229 	struct usb_page_search res;
234 	usbd_get_page(pc, pc_offset, &res);
	/* clamp chunk to the caller's remaining length */
236 	if (res.length > len) {
240 	 * "uiomove()" can sleep so one needs to make a wrapper,
241 	 * exiting the mutex and checking things
243 	error = uiomove(res.buffer, res.length, uio);
248 	pc_offset += res.length;
255 /*------------------------------------------------------------------------*
256  * usbd_copy_out - copy directly from DMA-able memory
257  *------------------------------------------------------------------------*/
/*
 * Copy "len" bytes out of the page cache starting at "offset" into the
 * kernel buffer "ptr"; mirror image of usbd_copy_in().
 */
259 usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
260     void *ptr, usb_frlength_t len)
262 	struct usb_page_search res;
266 	usbd_get_page(cache, offset, &res);
	/* clamp chunk to the caller's remaining length */
268 	if (res.length > len) {
271 	bcopy(res.buffer, ptr, res.length);
273 	offset += res.length;
275 	ptr = USB_ADD_BYTES(ptr, res.length);
279 /*------------------------------------------------------------------------*
280  * usbd_copy_out_user - copy directly from DMA-able memory to userland
285  *------------------------------------------------------------------------*/
/*
 * Like usbd_copy_out(), but "ptr" is a userland address: uses copyout()
 * and propagates its error.  Returns 0 on success.
 */
288 usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
289     void *ptr, usb_frlength_t len)
291 	struct usb_page_search res;
296 	usbd_get_page(cache, offset, &res);
	/* clamp chunk to the caller's remaining length */
298 	if (res.length > len) {
301 	error = copyout(res.buffer, ptr, res.length);
305 	offset += res.length;
307 	ptr = USB_ADD_BYTES(ptr, res.length);
309 	return (0);			/* success */
313 /*------------------------------------------------------------------------*
314  * usbd_frame_zero - zero DMA-able memory
315  *------------------------------------------------------------------------*/
/*
 * Zero "len" bytes of the page cache starting at "offset", chunk by
 * chunk.  (The "len" parameter line is not visible in this excerpt.)
 */
317 usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
320 	struct usb_page_search res;
324 	usbd_get_page(cache, offset, &res);
	/* clamp chunk to the remaining length */
326 	if (res.length > len) {
329 	bzero(res.buffer, res.length);
331 	offset += res.length;
338 /*------------------------------------------------------------------------*
339  * usb_dma_lock_cb - dummy callback
340  *------------------------------------------------------------------------*/
/* Required by bus_dma_tag_create(); intentionally does nothing. */
342 usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
344 	/* we use "mtx_owned()" instead of this function */
347 /*------------------------------------------------------------------------*
348  * usb_dma_tag_create - allocate a DMA tag
350  * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
351  * allow multi-segment mappings. Else all mappings are single-segment.
352  *------------------------------------------------------------------------*/
354 usb_dma_tag_create(struct usb_dma_tag *udt,
355     usb_size_t size, usb_size_t align)
359 	if (bus_dma_tag_create
360 	    ( /* parent    */ udt->tag_parent->tag,
361 	    /* alignment */ align,
	    /* boundary depends on multi-seg mode (value line not visible) */
362 	    /* boundary  */ (align == 1) ?
	    /* lowaddr derived from the parent tag's addressable DMA bits */
364 	    /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
365 	    /* highaddr  */ BUS_SPACE_MAXADDR,
367 	    /* filterarg */ NULL,
	    /* multi-seg: enough segments to span "size" plus slack */
369 	    /* nsegments */ (align == 1) ?
370 	    (2 + (size / USB_PAGE_SIZE)) : 1,
371 	    /* maxsegsz  */ (align == 1) ?
372 	    USB_PAGE_SIZE : size,
	    /* keep the page offset so virtual/physical offsets agree */
373 	    /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
374 	    /* lockfn    */ &usb_dma_lock_cb,
382 /*------------------------------------------------------------------------*
383  * usb_dma_tag_free - free a DMA tag
384  *------------------------------------------------------------------------*/
/* Release the bus_dma tag held by "udt". */
386 usb_dma_tag_destroy(struct usb_dma_tag *udt)
388 	bus_dma_tag_destroy(udt->tag);
391 /*------------------------------------------------------------------------*
392  * usb_pc_alloc_mem_cb - BUS-DMA callback function
393  *------------------------------------------------------------------------*/
/* Allocation-path callback: delegates with isload = 0. */
395 usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
398 	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
401 /*------------------------------------------------------------------------*
402  * usb_pc_load_mem_cb - BUS-DMA callback function
403  *------------------------------------------------------------------------*/
/* Load-path callback: delegates with isload = 1. */
405 usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
408 	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
411 /*------------------------------------------------------------------------*
412  * usb_pc_common_mem_cb - BUS-DMA callback function
413  *------------------------------------------------------------------------*/
/*
 * Shared completion path for alloc/load: records the physical page
 * addresses of the mapping in the page cache, then signals waiters.
 * NOTE(review): many interior lines (segment loop, error branch) are
 * missing from this excerpt.
 */
415 usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
416     int nseg, int error, uint8_t isload)
418 	struct usb_dma_parent_tag *uptag;
419 	struct usb_page_cache *pc;
425 	uptag = pc->tag_parent;
428 	 * XXX There is sometimes recursive locking here.
429 	 * XXX We should try to find a better solution.
430 	 * XXX Until further the "owned" variable does
	/* split the first segment into page base and intra-page remainder */
438 	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
439 	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
440 	pc->page_offset_buf = rem;
441 	pc->page_offset_end += rem;
	/* sanity check: BUS_DMA_KEEP_PG_OFFSET must have been honoured */
444 	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
446 		 * This check verifies that the physical address is correct:
448 		DPRINTFN(0, "Page offset was not preserved!\n");
	/* record the base address of each subsequent segment */
457 	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	/* take the parent mutex unless the caller already holds it */
461 	owned = mtx_owned(uptag->mtx);
463 	mtx_lock(uptag->mtx);
465 	uptag->dma_error = (error ? 1 : 0);
466 	(uptag->func) (uptag);
469 	cv_broadcast(uptag->cv);
472 	mtx_unlock(uptag->mtx);
475 /*------------------------------------------------------------------------*
476  * usb_pc_alloc_mem - allocate DMA'able memory
481  *------------------------------------------------------------------------*/
/*
 * Allocate and DMA-load "size" bytes with the given "align", wiring the
 * result into page cache "pc" and page array "pg".  Blocks until the
 * BUS-DMA load completes.  NOTE(review): several interior lines
 * (return statements, error labels) are missing from this excerpt.
 */
483 usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
484     usb_size_t size, usb_size_t align)
486 	struct usb_dma_parent_tag *uptag;
487 	struct usb_dma_tag *utag;
492 	uptag = pc->tag_parent;
496 	 * The alignment must be greater or equal to the
497 	 * "size" else the object can be split between two
498 	 * memory pages and we get a problem!
	/* grow "align" until it covers "size" (loop body not visible) */
500 	while (align < size) {
508 	 * XXX BUS-DMA workaround - FIXME later:
510 	 * We assume that that the aligment at this point of
511 	 * the code is greater than or equal to the size and
512 	 * less than two times the size, so that if we double
513 	 * the size, the size will be greater than the
516 	 * The bus-dma system has a check for "alignment"
517 	 * being less than "size". If that check fails we end
518 	 * up using contigmalloc which is page based even for
519 	 * small allocations. Try to avoid that to save
520 	 * memory, hence we sometimes to a large number of
523 	if (size <= (USB_PAGE_SIZE / 2)) {
528 	/* get the correct DMA tag */
529 	utag = usb_dma_tag_find(uptag, size, align);
533 	/* allocate memory */
534 	if (bus_dmamem_alloc(
535 	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
538 	/* setup page cache */
541 	pc->page_offset_buf = 0;
542 	pc->page_offset_end = size;
	/* multi-segment only when alignment is 1 (see usb_dma_tag_create) */
545 	pc->ismultiseg = (align == 1);
547 	mtx_lock(uptag->mtx);
549 	/* load memory into DMA */
550 	err = bus_dmamap_load(
551 	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
552 	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	/* asynchronous load: wait for the callback to broadcast */
554 	if (err == EINPROGRESS) {
555 		cv_wait(uptag->cv, uptag->mtx);
558 	mtx_unlock(uptag->mtx);
	/* undo the allocation when the load failed */
560 	if (err || uptag->dma_error) {
561 		bus_dmamem_free(utag->tag, ptr, map);
	/* make the zeroed/initial contents visible to the device */
566 	usb_pc_cpu_flush(pc);
571 	/* reset most of the page cache */
573 	pc->page_start = NULL;
574 	pc->page_offset_buf = 0;
575 	pc->page_offset_end = 0;
581 /*------------------------------------------------------------------------*
582  * usb_pc_free_mem - free DMA memory
584  * This function is NULL safe.
585  *------------------------------------------------------------------------*/
/* Unload and free the memory behind page cache "pc", if any. */
587 usb_pc_free_mem(struct usb_page_cache *pc)
589 	if (pc && pc->buffer) {
591 		bus_dmamap_unload(pc->tag, pc->map);
593 		bus_dmamem_free(pc->tag, pc->buffer, pc->map);
599 /*------------------------------------------------------------------------*
600  * usb_pc_load_mem - load virtual memory into DMA
605  *------------------------------------------------------------------------*/
/*
 * (Re)load the page cache's buffer into DMA.  "sync" apparently selects
 * the blocking path (uses usb_pc_alloc_mem_cb + cv_wait) versus the
 * asynchronous path (usb_pc_load_mem_cb) — the branch lines themselves
 * are not visible in this excerpt; confirm against the complete file.
 */
607 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
609 	/* setup page cache */
610 	pc->page_offset_buf = 0;
611 	pc->page_offset_end = size;
	/* caller must hold the parent tag's mutex */
614 	mtx_assert(pc->tag_parent->mtx, MA_OWNED);
618 		struct usb_dma_parent_tag *uptag;
621 		uptag = pc->tag_parent;
624 		 * We have to unload the previous loaded DMA
625 		 * pages before trying to load a new one!
627 		bus_dmamap_unload(pc->tag, pc->map);
630 		 * Try to load memory into DMA.
632 		err = bus_dmamap_load(
633 		    pc->tag, pc->map, pc->buffer, size,
634 		    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
		/* wait for a deferred callback to finish */
635 		if (err == EINPROGRESS) {
636 			cv_wait(uptag->cv, uptag->mtx);
639 		if (err || uptag->dma_error) {
645 		 * We have to unload the previous loaded DMA
646 		 * pages before trying to load a new one!
648 		bus_dmamap_unload(pc->tag, pc->map);
651 		 * Try to load memory into DMA. The callback
652 		 * will be called in all cases:
655 		    pc->tag, pc->map, pc->buffer, size,
656 		    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
662 		 * Call callback so that refcount is decremented
		/* no DMA needed — report success directly to the parent */
665 		pc->tag_parent->dma_error = 0;
666 		(pc->tag_parent->func) (pc->tag_parent);
672 /*------------------------------------------------------------------------*
673  * usb_pc_cpu_invalidate - invalidate CPU cache
674  *------------------------------------------------------------------------*/
/* Sync the map for CPU reads after the device has written memory. */
676 usb_pc_cpu_invalidate(struct usb_page_cache *pc)
678 	if (pc->page_offset_end == pc->page_offset_buf) {
679 		/* nothing has been loaded into this page cache! */
	/* POSTREAD then PREREAD: complete the old read, arm the next one */
682 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
683 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
686 /*------------------------------------------------------------------------*
687  * usb_pc_cpu_flush - flush CPU cache
688  *------------------------------------------------------------------------*/
/* Sync the map before the device reads memory the CPU has written. */
690 usb_pc_cpu_flush(struct usb_page_cache *pc)
692 	if (pc->page_offset_end == pc->page_offset_buf) {
693 		/* nothing has been loaded into this page cache! */
696 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
699 /*------------------------------------------------------------------------*
700  * usb_pc_dmamap_create - create a DMA map
705  *------------------------------------------------------------------------*/
/*
 * Create a multi-segment DMA map big enough for "size" bytes and store
 * it in "pc".  Returns 0 on success, 1 on failure.
 */
707 usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
709 	struct usb_xfer_root *info;
710 	struct usb_dma_tag *utag;
713 	info = USB_DMATAG_TO_XROOT(pc->tag_parent);
	/* align = 1 selects the multi-segment tag variant */
719 	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
724 	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
728 	return 0;			/* success */
733 	return 1;			/* failure */
736 /*------------------------------------------------------------------------*
737  * usb_pc_dmamap_destroy
739  * This function is NULL safe.
740  *------------------------------------------------------------------------*/
/* Destroy the DMA map held by "pc", if present. */
742 usb_pc_dmamap_destroy(struct usb_page_cache *pc)
745 		bus_dmamap_destroy(pc->tag, pc->map);
751 /*------------------------------------------------------------------------*
752  * usb_dma_tag_find - factored out code
753  *------------------------------------------------------------------------*/
/*
 * Search the parent's tag array for an existing (size, align) match;
 * create a tag lazily in the first unused slot (align == 0 marks a
 * free slot).  The surrounding loop lines are not visible here.
 */
755 usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
756     usb_size_t size, usb_size_t align)
758 	struct usb_dma_tag *udt;
761 	USB_ASSERT(align > 0, ("Invalid parameter align = 0!\n"));
762 	USB_ASSERT(size > 0, ("Invalid parameter size = 0!\n"));
764 	udt = udpt->utag_first;
765 	nudt = udpt->utag_max;
	/* align == 0 means this slot has never been initialised */
769 	if (udt->align == 0) {
770 		usb_dma_tag_create(udt, size, align);
771 		if (udt->tag == NULL) {
	/* exact match on both alignment and size reuses the tag */
778 	if ((udt->align == align) && (udt->size == size)) {
786 /*------------------------------------------------------------------------*
787  * usb_dma_tag_setup - initialise USB DMA tags
788  *------------------------------------------------------------------------*/
/*
 * Initialise parent tag "udpt" and its array of "nudt" child tags
 * "udt".  "ndmabits" bounds the DMA address width; "func" is the
 * completion callback invoked from the BUS-DMA callbacks.
 */
790 usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
791     struct usb_dma_tag *udt, bus_dma_tag_t dmat,
792     struct mtx *mtx, usb_dma_callback_t *func,
793     uint8_t ndmabits, uint8_t nudt)
795 	bzero(udpt, sizeof(*udpt));
797 	/* sanity checking */
801 		/* something is corrupt */
804 	/* initialise condition variable */
805 	cv_init(udpt->cv, "USB DMA CV");
807 	/* store some information */
811 	udpt->utag_first = udt;
812 	udpt->utag_max = nudt;
813 	udpt->dma_bits = ndmabits;
	/* child tags start zeroed; align == 0 marks them unused */
816 	bzero(udt, sizeof(*udt));
817 	udt->tag_parent = udpt;
822 /*------------------------------------------------------------------------*
823  * usb_bus_tag_unsetup - factored out code
824  *------------------------------------------------------------------------*/
/*
 * Tear down all child DMA tags of "udpt" and, when any tags existed,
 * the condition variable as well.  Counterpart of usb_dma_tag_setup().
 */
826 usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
828 	struct usb_dma_tag *udt;
831 	udt = udpt->utag_first;
832 	nudt = udpt->utag_max;
837 		/* destroy the USB DMA tag */
838 		usb_dma_tag_destroy(udt);
844 	if (udpt->utag_max) {
845 		/* destroy the condition variable */
846 		cv_destroy(udpt->cv);
850 /*------------------------------------------------------------------------*
853  * This function handles loading of virtual buffers into DMA and is
854  * only called when "dma_refcount" is zero.
855  *------------------------------------------------------------------------*/
/*
 * BUS-DMA work loop: sets up the per-frame page arrays on first entry,
 * then loads one frame buffer per pass until all frames are loaded,
 * finally syncs caches and starts the transfer.  NOTE(review): several
 * control-flow lines (returns, else branches) are missing from this
 * excerpt.
 */
857 usb_bdma_work_loop(struct usb_xfer_queue *pq)
859 	struct usb_xfer_root *info;
860 	struct usb_xfer *xfer;
861 	usb_frcount_t nframes;
866 	mtx_assert(info->xfer_mtx, MA_OWNED);
869 		/* some error happened */
870 		USB_BUS_LOCK(info->bus);
871 		usbd_transfer_done(xfer, 0);
872 		USB_BUS_UNLOCK(info->bus);
	/* one-time BUS-DMA setup per transfer */
875 	if (!xfer->flags_int.bdma_setup) {
877 		usb_frlength_t frlength_0;
880 		xfer->flags_int.bdma_setup = 1;
882 		/* reset BUS-DMA load state */
886 		if (xfer->flags_int.isochronous_xfr) {
887 			/* only one frame buffer */
889 			frlength_0 = xfer->sumlen;
891 			/* can be multiple frame buffers */
892 			nframes = xfer->nframes;
893 			frlength_0 = xfer->frlengths[0];
897 		 * Set DMA direction first. This is needed to
898 		 * select the correct cache invalidate and cache
901 		isread = USB_GET_DATA_ISREAD(xfer);
902 		pg = xfer->dma_page_ptr;
		/* control header frame direction depends on host/device role */
904 		if (xfer->flags_int.control_xfr &&
905 		    xfer->flags_int.control_hdr) {
907 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
908 				/* The device controller writes to memory */
909 				xfer->frbuffers[0].isread = 1;
911 				/* The host controller reads from memory */
912 				xfer->frbuffers[0].isread = 0;
916 			xfer->frbuffers[0].isread = isread;
920 		 * Setup the "page_start" pointer which points to an array of
921 		 * USB pages where information about the physical address of a
922 		 * page will be stored. Also initialise the "isread" field of
923 		 * the USB page caches.
925 		xfer->frbuffers[0].page_start = pg;
927 		info->dma_nframes = nframes;
928 		info->dma_currframe = 0;
929 		info->dma_frlength_0 = frlength_0;
		/* carve out pages for frame 0 (rounding lines not visible) */
931 		pg += (frlength_0 / USB_PAGE_SIZE);
934 		while (--nframes > 0) {
935 			xfer->frbuffers[nframes].isread = isread;
936 			xfer->frbuffers[nframes].page_start = pg;
938 			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
	/* abort the transfer on a previous DMA load failure */
943 	if (info->dma_error) {
944 		USB_BUS_LOCK(info->bus);
945 		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
946 		USB_BUS_UNLOCK(info->bus);
	/* load the next frame buffer, one per work-loop pass */
949 	if (info->dma_currframe != info->dma_nframes) {
951 		if (info->dma_currframe == 0) {
953 			usb_pc_load_mem(xfer->frbuffers,
954 			    info->dma_frlength_0, 0);
957 			nframes = info->dma_currframe;
958 			usb_pc_load_mem(xfer->frbuffers + nframes,
959 			    xfer->frlengths[nframes], 0);
962 		/* advance frame index */
963 		info->dma_currframe++;
	/* all frames loaded: sync caches for the device */
968 	usb_bdma_pre_sync(xfer);
970 	/* start loading next USB transfer, if any */
971 	usb_command_wrapper(pq, NULL);
973 	/* finally start the hardware */
974 	usbd_pipe_enter(xfer);
977 /*------------------------------------------------------------------------*
978  * usb_bdma_done_event
980  * This function is called when the BUS-DMA has loaded virtual memory
982  *------------------------------------------------------------------------*/
/*
 * Completion hook for the parent DMA tag: latch the error flag and
 * re-enter the DMA work loop to process the next frame/transfer.
 */
984 usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
986 	struct usb_xfer_root *info;
988 	info = USB_DMATAG_TO_XROOT(udpt);
990 	mtx_assert(info->xfer_mtx, MA_OWNED);
	/* propagate any BUS-DMA load error to the transfer root */
993 	info->dma_error = udpt->dma_error;
995 	/* enter workloop again */
996 	usb_command_wrapper(&info->dma_q,
1000 /*------------------------------------------------------------------------*
1003  * This function handles DMA synchronisation that must be done before
1004  * an USB transfer is started.
1005  *------------------------------------------------------------------------*/
/*
 * Per-frame cache maintenance before hardware starts: invalidate for
 * frames the device will write ("isread"), flush for frames it will
 * read.  Loop header lines are not visible in this excerpt.
 */
1007 usb_bdma_pre_sync(struct usb_xfer *xfer)
1009 	struct usb_page_cache *pc;
1010 	usb_frcount_t nframes;
1012 	if (xfer->flags_int.isochronous_xfr) {
1013 		/* only one frame buffer */
1016 		/* can be multiple frame buffers */
1017 		nframes = xfer->nframes;
1020 	pc = xfer->frbuffers;
	/* device-write frames: invalidate; device-read frames: flush */
1025 	usb_pc_cpu_invalidate(pc);
1027 	usb_pc_cpu_flush(pc);
1033 /*------------------------------------------------------------------------*
1034 * usb_bdma_post_sync
1036 * This function handles DMA synchronisation that must be done after
1037 * an USB transfer is complete.
1038 *------------------------------------------------------------------------*/
1040 usb_bdma_post_sync(struct usb_xfer *xfer)
1042 struct usb_page_cache *pc;
1043 usb_frcount_t nframes;
1045 if (xfer->flags_int.isochronous_xfr) {
1046 /* only one frame buffer */
1049 /* can be multiple frame buffers */
1050 nframes = xfer->nframes;
1053 pc = xfer->frbuffers;
1057 usb_pc_cpu_invalidate(pc);