3 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #ifdef USB_GLOBAL_INCLUDE_FILE
30 #include USB_GLOBAL_INCLUDE_FILE
32 #include <sys/stdint.h>
33 #include <sys/stddef.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <sys/types.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
40 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/condvar.h>
44 #include <sys/sysctl.h>
46 #include <sys/unistd.h>
47 #include <sys/callout.h>
48 #include <sys/malloc.h>
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdi_util.h>
55 #define USB_DEBUG_VAR usb_debug
57 #include <dev/usb/usb_core.h>
58 #include <dev/usb/usb_busdma.h>
59 #include <dev/usb/usb_process.h>
60 #include <dev/usb/usb_transfer.h>
61 #include <dev/usb/usb_device.h>
62 #include <dev/usb/usb_util.h>
63 #include <dev/usb/usb_debug.h>
65 #include <dev/usb/usb_controller.h>
66 #include <dev/usb/usb_bus.h>
67 #endif /* USB_GLOBAL_INCLUDE_FILE */
/* File-scope prototypes for the BUS-DMA tag and page-cache helpers below. */
70 static void usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
71 static void usb_dma_tag_destroy(struct usb_dma_tag *);
72 static void usb_dma_lock_cb(void *, bus_dma_lock_op_t);
73 static void usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
74 static void usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
75 static void usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
/* NOTE(review): trailing parameter of the prototype above (the "isload"
 * uint8_t, per the definition further down) is missing from this excerpt. */
79 /*------------------------------------------------------------------------*
80  * usbd_get_page - lookup DMA-able memory for the given offset
82  * NOTE: Only call this function when the "page_cache" structure has
83  * been properly initialized !
84  *------------------------------------------------------------------------*/
/* NOTE(review): this excerpt is missing lines (return type, braces, else
 * branches); the code tokens below are kept verbatim from the fragment. */
86 usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
87     struct usb_page_search *res)
90 	struct usb_page *page;
94 	/* Case 1 - something has been loaded into DMA */
98 		/* Case 1a - Kernel Virtual Address */
100 		res->buffer = USB_ADD_BYTES(pc->buffer, offset);
/* bias the offset by where the mapping starts inside its first page */
102 		offset += pc->page_offset_buf;
104 		/* compute destination page */
106 		page = pc->page_start;
108 		if (pc->ismultiseg) {
/* multi-segment mapping: pages are USB_PAGE_SIZE apart in the page array */
110 			page += (offset / USB_PAGE_SIZE);
112 			offset %= USB_PAGE_SIZE;
/* length is capped at the remainder of the current page */
114 			res->length = USB_PAGE_SIZE - offset;
115 			res->physaddr = page->physaddr + offset;
/* single-segment mapping: physically contiguous, so no per-page length cap */
117 			res->length = (usb_size_t)-1;
118 			res->physaddr = page->physaddr + offset;
122 		/* Case 1b - Non Kernel Virtual Address */
124 		res->buffer = USB_ADD_BYTES(page->buffer, offset);
129 	/* Case 2 - Plain PIO */
131 	res->buffer = USB_ADD_BYTES(pc->buffer, offset);
132 	res->length = (usb_size_t)-1;
138 /*------------------------------------------------------------------------*
139  * usb_pc_buffer_is_aligned - verify alignment
141  * This function is used to check if a page cache buffer is properly
142  * aligned to reduce the use of bounce buffers in PIO mode.
143  *------------------------------------------------------------------------*/
145 usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
146     usb_frlength_t len, usb_frlength_t mask)
148 	struct usb_page_search buf_res;
/* walk the page cache chunk by chunk over [offset, offset+len) */
152 		usbd_get_page(pc, offset, &buf_res);
154 		if (buf_res.length > len)
155 			buf_res.length = len;
/* both the chunk's start address and its length must satisfy the mask */
156 		if (USB_P2U(buf_res.buffer) & mask)
158 		if (buf_res.length & mask)
161 		offset += buf_res.length;
162 		len -= buf_res.length;
167 /*------------------------------------------------------------------------*
168  * usbd_copy_in - copy directly to DMA-able memory
169  *------------------------------------------------------------------------*/
171 usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
172     const void *ptr, usb_frlength_t len)
174 	struct usb_page_search buf_res;
/* copy in page-cache-sized chunks, never past the caller's length */
178 		usbd_get_page(cache, offset, &buf_res);
180 		if (buf_res.length > len) {
181 			buf_res.length = len;
183 		memcpy(buf_res.buffer, ptr, buf_res.length);
/* advance destination offset, remaining length, and source pointer */
185 		offset += buf_res.length;
186 		len -= buf_res.length;
187 		ptr = USB_ADD_BYTES(ptr, buf_res.length);
191 /*------------------------------------------------------------------------*
192  * usbd_copy_in_user - copy directly to DMA-able memory from userland
197  *------------------------------------------------------------------------*/
200 usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
201     const void *ptr, usb_frlength_t len)
203 	struct usb_page_search buf_res;
/* same chunked walk as usbd_copy_in(), but via copyin() which can fault */
208 		usbd_get_page(cache, offset, &buf_res);
210 		if (buf_res.length > len) {
211 			buf_res.length = len;
/* copyin() returns non-zero on a userland fault; error path is
 * not visible in this excerpt */
213 		error = copyin(ptr, buf_res.buffer, buf_res.length);
217 		offset += buf_res.length;
218 		len -= buf_res.length;
219 		ptr = USB_ADD_BYTES(ptr, buf_res.length);
221 	return (0);			/* success */
225 /*------------------------------------------------------------------------*
226  * usbd_m_copy_in - copy a mbuf chain directly into DMA-able memory
227  *------------------------------------------------------------------------*/
/* callback argument: destination page cache plus a running write offset */
229 struct usb_m_copy_in_arg {
230 	struct usb_page_cache *cache;
231 	usb_frlength_t dst_offset;
/* m_apply() callback: copy one mbuf data run and advance the offset */
235 usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
237 	struct usb_m_copy_in_arg *ua = arg;
239 	usbd_copy_in(ua->cache, ua->dst_offset, src, count);
240 	ua->dst_offset += count;
245 usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
246     struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
248 	struct usb_m_copy_in_arg arg = {cache, dst_offset};
/* m_apply() walks the mbuf chain and invokes the callback per data run;
 * its return value is deliberately ignored here */
249 	(void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
253 /*------------------------------------------------------------------------*
254  * usb_uiomove - factored out code
255  *------------------------------------------------------------------------*/
258 usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
259     usb_frlength_t pc_offset, usb_frlength_t len)
261 	struct usb_page_search res;
/* move data between the page cache and a uio in chunked fashion */
266 		usbd_get_page(pc, pc_offset, &res);
268 		if (res.length > len) {
272 		 * "uiomove()" can sleep so one needs to make a wrapper,
273 		 * exiting the mutex and checking things
275 		error = uiomove(res.buffer, res.length, uio);
280 		pc_offset += res.length;
287 /*------------------------------------------------------------------------*
288  * usbd_copy_out - copy directly from DMA-able memory
289  *------------------------------------------------------------------------*/
291 usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
292     void *ptr, usb_frlength_t len)
294 	struct usb_page_search res;
/* mirror of usbd_copy_in(): chunked copy out of the page cache */
298 		usbd_get_page(cache, offset, &res);
300 		if (res.length > len) {
303 		memcpy(ptr, res.buffer, res.length);
305 		offset += res.length;
307 		ptr = USB_ADD_BYTES(ptr, res.length);
311 /*------------------------------------------------------------------------*
312  * usbd_copy_out_user - copy directly from DMA-able memory to userland
317  *------------------------------------------------------------------------*/
320 usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
321     void *ptr, usb_frlength_t len)
323 	struct usb_page_search res;
/* like usbd_copy_out(), but via copyout() which can fault on userland */
328 		usbd_get_page(cache, offset, &res);
330 		if (res.length > len) {
/* non-zero means a userland fault; error path not visible in this excerpt */
333 		error = copyout(res.buffer, ptr, res.length);
337 		offset += res.length;
339 		ptr = USB_ADD_BYTES(ptr, res.length);
341 	return (0);			/* success */
345 /*------------------------------------------------------------------------*
346  * usbd_frame_zero - zero DMA-able memory
347  *------------------------------------------------------------------------*/
349 usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
/* chunked memset(0) over [offset, offset+len) of the page cache */
352 	struct usb_page_search res;
356 		usbd_get_page(cache, offset, &res);
358 		if (res.length > len) {
361 		memset(res.buffer, 0, res.length);
363 		offset += res.length;
370 /*------------------------------------------------------------------------*
371  * usb_dma_lock_cb - dummy callback
372  *------------------------------------------------------------------------*/
/* Intentionally empty lock callback handed to bus_dma_tag_create(). */
374 usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
376 	/* we use "mtx_owned()" instead of this function */
379 /*------------------------------------------------------------------------*
380  * usb_dma_tag_create - allocate a DMA tag
382  * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
383  * allow multi-segment mappings. Else all mappings are single-segment.
384  *------------------------------------------------------------------------*/
386 usb_dma_tag_create(struct usb_dma_tag *udt,
387     usb_size_t size, usb_size_t align)
391 	if (bus_dma_tag_create
392 	    ( /* parent    */ udt->tag_parent->tag,
393 	    /* alignment */ align,
/* lowaddr: highest DMA-reachable address implied by the parent's dma_bits;
 * "2ULL << (bits - 1)" avoids overflowing a 64-bit shift for bits == 64 */
395 	    /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
396 	    /* highaddr  */ BUS_SPACE_MAXADDR,
398 	    /* filterarg */ NULL,
/* multi-segment only when align == 1; "+2" leaves slack for a
 * non-page-aligned start and end */
400 	    /* nsegments */ (align == 1 && size > 1) ?
401 	    (2 + (size / USB_PAGE_SIZE)) : 1,
402 	    /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
403 	    USB_PAGE_SIZE : size,
/* BUS_DMA_KEEP_PG_OFFSET: bounce pages must keep the same in-page offset */
404 	    /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
405 	    /* lockfn    */ &usb_dma_lock_cb,
413 /*------------------------------------------------------------------------*
414  * usb_dma_tag_free - free a DMA tag
415  *------------------------------------------------------------------------*/
417 usb_dma_tag_destroy(struct usb_dma_tag *udt)
419 	bus_dma_tag_destroy(udt->tag);
422 /*------------------------------------------------------------------------*
423  * usb_pc_alloc_mem_cb - BUS-DMA callback function
424  *------------------------------------------------------------------------*/
/* Thin wrapper: common handler with isload == 0 (allocation path). */
426 usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
429 	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
432 /*------------------------------------------------------------------------*
433  * usb_pc_load_mem_cb - BUS-DMA callback function
434  *------------------------------------------------------------------------*/
/* Thin wrapper: common handler with isload == 1 (load path). */
436 usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
439 	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
442 /*------------------------------------------------------------------------*
443  * usb_pc_common_mem_cb - BUS-DMA callback function
444  *------------------------------------------------------------------------*/
/* NOTE(review): many lines are missing from this excerpt (declarations,
 * braces, error/goto paths); code tokens below are kept verbatim. */
446 usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
447     int nseg, int error, uint8_t isload)
449 	struct usb_dma_parent_tag *uptag;
450 	struct usb_page_cache *pc;
457 	uptag = pc->tag_parent;
460 	 * XXX There is sometimes recursive locking here.
461 	 * XXX We should try to find a better solution.
462 	 * XXX Until further the "owned" variable does
/* record the first segment's page frame and its in-page offset */
472 	pg->physaddr = rounddown2(segs->ds_addr, USB_PAGE_SIZE);
473 	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
474 	pc->page_offset_buf = rem;
475 	pc->page_offset_end += rem;
/* sanity check: adjacent segments must keep a continuous in-page offset
 * (see the BUS_DMA_KEEP_PG_OFFSET flag passed at tag creation) */
480 	for (x = 0; x != nseg - 1; x++) {
481 		if (((segs[x].ds_addr + segs[x].ds_len) & (USB_PAGE_SIZE - 1)) ==
482 		    ((segs[x + 1].ds_addr & (USB_PAGE_SIZE - 1))))
485 			 * This check verifies there is no page offset
486 			 * hole between any of the segments.  See the
487 			 * BUS_DMA_KEEP_PG_OFFSET flag.
489 			DPRINTFN(0, "Page offset was not preserved\n");
/* fill the page array, one entry per USB_PAGE_SIZE step through segments */
495 	while (pc->ismultiseg) {
496 		off += USB_PAGE_SIZE;
497 		if (off >= (segs->ds_len + rem)) {
507 		pg->physaddr = rounddown2(segs->ds_addr + off, USB_PAGE_SIZE);
/* wake up / notify the waiter; lock only if not already owned to work
 * around the recursive-locking situation noted above */
511 	owned = mtx_owned(uptag->mtx);
513 		USB_MTX_LOCK(uptag->mtx);
515 	uptag->dma_error = (error ? 1 : 0);
517 		(uptag->func) (uptag);
519 		cv_broadcast(uptag->cv);
522 		USB_MTX_UNLOCK(uptag->mtx);
525 /*------------------------------------------------------------------------*
526  * usb_pc_alloc_mem - allocate DMA'able memory
531  *------------------------------------------------------------------------*/
/* NOTE(review): lines are missing from this excerpt (return type, braces,
 * error labels); code tokens below are kept verbatim. */
533 usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
534     usb_size_t size, usb_size_t align)
536 	struct usb_dma_parent_tag *uptag;
537 	struct usb_dma_tag *utag;
542 	uptag = pc->tag_parent;
546 	 * The alignment must be greater or equal to the
547 	 * "size" else the object can be split between two
548 	 * memory pages and we get a problem!
550 	while (align < size) {
558 		 * XXX BUS-DMA workaround - FIXME later:
560 		 * We assume that that the aligment at this point of
561 		 * the code is greater than or equal to the size and
562 		 * less than two times the size, so that if we double
563 		 * the size, the size will be greater than the
566 		 * The bus-dma system has a check for "alignment"
567 		 * being less than "size".  If that check fails we end
568 		 * up using contigmalloc which is page based even for
569 		 * small allocations.  Try to avoid that to save
570 		 * memory, hence we sometimes to a large number of
573 		if (size <= (USB_PAGE_SIZE / 2)) {
578 	/* get the correct DMA tag */
579 	utag = usb_dma_tag_find(uptag, size, align);
583 	/* allocate memory */
584 	if (bus_dmamem_alloc(
585 	    utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
588 	/* setup page cache */
591 	pc->page_offset_buf = 0;
592 	pc->page_offset_end = size;
/* align == 1 selects the multi-segment tag (see usb_dma_tag_create) */
595 	pc->ismultiseg = (align == 1);
597 	USB_MTX_LOCK(uptag->mtx);
599 	/* load memory into DMA */
600 	err = bus_dmamap_load(
601 	    utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
602 	    pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
/* EINPROGRESS means the callback runs later; wait for it on the CV */
604 	if (err == EINPROGRESS) {
605 		cv_wait(uptag->cv, uptag->mtx);
608 	USB_MTX_UNLOCK(uptag->mtx);
/* either the load call or the deferred callback may have failed */
610 	if (err || uptag->dma_error) {
611 		bus_dmamem_free(utag->tag, ptr, map);
/* hand back zeroed memory and make it visible to the device */
614 	memset(ptr, 0, size);
616 	usb_pc_cpu_flush(pc);
621 	/* reset most of the page cache */
623 	pc->page_start = NULL;
624 	pc->page_offset_buf = 0;
625 	pc->page_offset_end = 0;
631 /*------------------------------------------------------------------------*
632  * usb_pc_free_mem - free DMA memory
634  * This function is NULL safe.
635  *------------------------------------------------------------------------*/
637 usb_pc_free_mem(struct usb_page_cache *pc)
/* only tear down if something was actually allocated */
639 	if (pc && pc->buffer) {
641 		bus_dmamap_unload(pc->tag, pc->map);
643 		bus_dmamem_free(pc->tag, pc->buffer, pc->map);
649 /*------------------------------------------------------------------------*
650  * usb_pc_load_mem - load virtual memory into DMA
655  *------------------------------------------------------------------------*/
/* NOTE(review): lines are missing from this excerpt (return type, braces,
 * the sync/async branch structure); code tokens below are kept verbatim. */
657 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
659 	/* setup page cache */
660 	pc->page_offset_buf = 0;
661 	pc->page_offset_end = size;
664 	USB_MTX_ASSERT(pc->tag_parent->mtx, MA_OWNED);
/* synchronous path: block on the condition variable until loaded */
668 		struct usb_dma_parent_tag *uptag;
671 		uptag = pc->tag_parent;
674 		 * We have to unload the previous loaded DMA
675 		 * pages before trying to load a new one!
677 		bus_dmamap_unload(pc->tag, pc->map);
680 		 * Try to load memory into DMA.
682 		err = bus_dmamap_load(
683 		    pc->tag, pc->map, pc->buffer, size,
684 		    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
685 		if (err == EINPROGRESS) {
686 			cv_wait(uptag->cv, uptag->mtx);
689 		if (err || uptag->dma_error) {
/* asynchronous path: the load callback will fire later in all cases */
695 		 * We have to unload the previous loaded DMA
696 		 * pages before trying to load a new one!
698 		bus_dmamap_unload(pc->tag, pc->map);
701 		 * Try to load memory into DMA.  The callback
702 		 * will be called in all cases:
705 		    pc->tag, pc->map, pc->buffer, size,
706 		    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
712 		 * Call callback so that refcount is decremented
715 		pc->tag_parent->dma_error = 0;
716 		(pc->tag_parent->func) (pc->tag_parent);
722 /*------------------------------------------------------------------------*
723  * usb_pc_cpu_invalidate - invalidate CPU cache
724  *------------------------------------------------------------------------*/
726 usb_pc_cpu_invalidate(struct usb_page_cache *pc)
/* equal start/end offsets mean an empty (unloaded) page cache */
728 	if (pc->page_offset_end == pc->page_offset_buf) {
729 		/* nothing has been loaded into this page cache! */
734 	 * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
735 	 * same time, but in the future we should try to isolate the
736 	 * different cases to optimise the code. --HPS
738 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
739 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
742 /*------------------------------------------------------------------------*
743  * usb_pc_cpu_flush - flush CPU cache
744  *------------------------------------------------------------------------*/
746 usb_pc_cpu_flush(struct usb_page_cache *pc)
/* equal start/end offsets mean an empty (unloaded) page cache */
748 	if (pc->page_offset_end == pc->page_offset_buf) {
749 		/* nothing has been loaded into this page cache! */
/* make CPU writes visible to the device before a DMA write-out */
752 	bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
755 /*------------------------------------------------------------------------*
756  * usb_pc_dmamap_create - create a DMA map
761  *------------------------------------------------------------------------*/
763 usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
765 	struct usb_xfer_root *info;
766 	struct usb_dma_tag *utag;
769 	info = USB_DMATAG_TO_XROOT(pc->tag_parent);
/* align == 1: request the multi-segment tag for this size */
775 	utag = usb_dma_tag_find(pc->tag_parent, size, 1);
780 	if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
784 	return 0;			/* success */
789 	return 1;			/* failure */
792 /*------------------------------------------------------------------------*
793  * usb_pc_dmamap_destroy
795  * This function is NULL safe.
796  *------------------------------------------------------------------------*/
798 usb_pc_dmamap_destroy(struct usb_page_cache *pc)
801 		bus_dmamap_destroy(pc->tag, pc->map);
807 /*------------------------------------------------------------------------*
808  * usb_dma_tag_find - factored out code
809  *------------------------------------------------------------------------*/
811 usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
812     usb_size_t size, usb_size_t align)
814 	struct usb_dma_tag *udt;
817 	USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
818 	USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));
/* linear scan of the fixed-size tag array under the parent tag */
820 	udt = udpt->utag_first;
821 	nudt = udpt->utag_max;
/* align == 0 marks an unused slot: create the tag lazily on first use */
825 		if (udt->align == 0) {
826 			usb_dma_tag_create(udt, size, align);
827 			if (udt->tag == NULL) {
/* otherwise reuse an existing tag with matching geometry */
834 		if ((udt->align == align) && (udt->size == size)) {
842 /*------------------------------------------------------------------------*
843  * usb_dma_tag_setup - initialise USB DMA tags
844  *------------------------------------------------------------------------*/
846 usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
847     struct usb_dma_tag *udt, bus_dma_tag_t dmat,
848     struct mtx *mtx, usb_dma_callback_t *func,
849     uint8_t ndmabits, uint8_t nudt)
/* start from a clean parent-tag structure */
851 	memset(udpt, 0, sizeof(*udpt));
853 	/* sanity checking */
857 		/* something is corrupt */
860 	/* initialise condition variable */
861 	cv_init(udpt->cv, "USB DMA CV");
863 	/* store some information */
867 	udpt->utag_first = udt;
868 	udpt->utag_max = nudt;
869 	udpt->dma_bits = ndmabits;
/* clear the child tag array; slots are created lazily by
 * usb_dma_tag_find() */
872 		memset(udt, 0, sizeof(*udt));
873 		udt->tag_parent = udpt;
878 /*------------------------------------------------------------------------*
879  * usb_bus_tag_unsetup - factored out code
880  *------------------------------------------------------------------------*/
882 usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
884 	struct usb_dma_tag *udt;
887 	udt = udpt->utag_first;
888 	nudt = udpt->utag_max;
/* tear down every child tag that was lazily created */
893 			/* destroy the USB DMA tag */
894 			usb_dma_tag_destroy(udt);
/* utag_max != 0 implies usb_dma_tag_setup() ran and created the CV */
900 	if (udpt->utag_max) {
901 		/* destroy the condition variable */
902 		cv_destroy(udpt->cv);
906 /*------------------------------------------------------------------------*
909  * This function handles loading of virtual buffers into DMA and is
910  * only called when "dma_refcount" is zero.
911  *------------------------------------------------------------------------*/
/* NOTE(review): many lines are missing from this excerpt (return type,
 * braces, several assignments); code tokens below are kept verbatim. */
913 usb_bdma_work_loop(struct usb_xfer_queue *pq)
915 	struct usb_xfer_root *info;
916 	struct usb_xfer *xfer;
917 	usb_frcount_t nframes;
922 	USB_MTX_ASSERT(info->xfer_mtx, MA_OWNED);
/* abort path: complete the transfer with the pending error */
925 		/* some error happened */
926 		USB_BUS_LOCK(info->bus);
927 		usbd_transfer_done(xfer, 0);
928 		USB_BUS_UNLOCK(info->bus);
/* one-time BUS-DMA setup per transfer */
931 	if (!xfer->flags_int.bdma_setup) {
933 		usb_frlength_t frlength_0;
936 		xfer->flags_int.bdma_setup = 1;
938 		/* reset BUS-DMA load state */
942 		if (xfer->flags_int.isochronous_xfr) {
943 			/* only one frame buffer */
945 			frlength_0 = xfer->sumlen;
947 			/* can be multiple frame buffers */
948 			nframes = xfer->nframes;
949 			frlength_0 = xfer->frlengths[0];
953 		 * Set DMA direction first. This is needed to
954 		 * select the correct cache invalidate and cache
957 		isread = USB_GET_DATA_ISREAD(xfer);
958 		pg = xfer->dma_page_ptr;
/* control transfers: direction of the header frame depends on mode */
960 		if (xfer->flags_int.control_xfr &&
961 		    xfer->flags_int.control_hdr) {
963 			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
964 				/* The device controller writes to memory */
965 				xfer->frbuffers[0].isread = 1;
967 				/* The host controller reads from memory */
968 				xfer->frbuffers[0].isread = 0;
972 			xfer->frbuffers[0].isread = isread;
976 		 * Setup the "page_start" pointer which points to an array of
977 		 * USB pages where information about the physical address of a
978 		 * page will be stored. Also initialise the "isread" field of
979 		 * the USB page caches.
981 		xfer->frbuffers[0].page_start = pg;
983 		info->dma_nframes = nframes;
984 		info->dma_currframe = 0;
985 		info->dma_frlength_0 = frlength_0;
987 		pg += (frlength_0 / USB_PAGE_SIZE);
/* assign a page-array slice to every remaining frame buffer */
990 		while (--nframes > 0) {
991 			xfer->frbuffers[nframes].isread = isread;
992 			xfer->frbuffers[nframes].page_start = pg;
994 			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
/* a previous asynchronous load may have failed */
999 	if (info->dma_error) {
1000 		USB_BUS_LOCK(info->bus);
1001 		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
1002 		USB_BUS_UNLOCK(info->bus);
/* load one frame per work-loop iteration until all frames are mapped */
1005 	if (info->dma_currframe != info->dma_nframes) {
1007 		if (info->dma_currframe == 0) {
1009 			usb_pc_load_mem(xfer->frbuffers,
1010 			    info->dma_frlength_0, 0);
1013 			nframes = info->dma_currframe;
1014 			usb_pc_load_mem(xfer->frbuffers + nframes,
1015 			    xfer->frlengths[nframes], 0);
1018 		/* advance frame index */
1019 		info->dma_currframe++;
/* all frames mapped: sync caches, chain the next transfer, start HW */
1024 	usb_bdma_pre_sync(xfer);
1026 	/* start loading next USB transfer, if any */
1027 	usb_command_wrapper(pq, NULL);
1029 	/* finally start the hardware */
1030 	usbd_pipe_enter(xfer);
1033 /*------------------------------------------------------------------------*
1034  * usb_bdma_done_event
1036  * This function is called when the BUS-DMA has loaded virtual memory
1038  *------------------------------------------------------------------------*/
1040 usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
1042 	struct usb_xfer_root *info;
1044 	info = USB_DMATAG_TO_XROOT(udpt);
1046 	USB_MTX_ASSERT(info->xfer_mtx, MA_OWNED);
/* propagate any DMA load error so the work loop can abort the transfer */
1049 	info->dma_error = udpt->dma_error;
1051 	/* enter workloop again */
1052 	usb_command_wrapper(&info->dma_q,
1056 /*------------------------------------------------------------------------*
1059  * This function handles DMA synchronisation that must be done before
1060  * an USB transfer is started.
1061  *------------------------------------------------------------------------*/
1063 usb_bdma_pre_sync(struct usb_xfer *xfer)
1065 	struct usb_page_cache *pc;
1066 	usb_frcount_t nframes;
1068 	if (xfer->flags_int.isochronous_xfr) {
1069 		/* only one frame buffer */
1072 		/* can be multiple frame buffers */
1073 		nframes = xfer->nframes;
1076 	pc = xfer->frbuffers;
/* device-write frames get invalidated, host-write frames get flushed */
1081 			usb_pc_cpu_invalidate(pc);
1083 			usb_pc_cpu_flush(pc);
1089 /*------------------------------------------------------------------------*
1090  * usb_bdma_post_sync
1092  * This function handles DMA synchronisation that must be done after
1093  * an USB transfer is complete.
1094  *------------------------------------------------------------------------*/
1096 usb_bdma_post_sync(struct usb_xfer *xfer)
1098 	struct usb_page_cache *pc;
1099 	usb_frcount_t nframes;
1101 	if (xfer->flags_int.isochronous_xfr) {
1102 		/* only one frame buffer */
1105 		/* can be multiple frame buffers */
1106 		nframes = xfer->nframes;
1109 	pc = xfer->frbuffers;
/* after completion only device-written frames need CPU invalidation */
1113 			usb_pc_cpu_invalidate(pc);