/* $FreeBSD$ */
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>

#define USB_DEBUG_VAR usb_debug

#include <dev/usb/usb_core.h>
#include <dev/usb/usb_busdma.h>
#include <dev/usb/usb_process.h>
#include <dev/usb/usb_transfer.h>
#include <dev/usb/usb_device.h>
#include <dev/usb/usb_util.h>
#include <dev/usb/usb_debug.h>

#include <dev/usb/usb_controller.h>
#include <dev/usb/usb_bus.h>
#endif                  /* USB_GLOBAL_INCLUDE_FILE */

#if USB_HAVE_BUSDMA
static void     usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
static void     usb_dma_tag_destroy(struct usb_dma_tag *);
static void     usb_dma_lock_cb(void *, bus_dma_lock_op_t);
static void     usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
static void     usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
static void     usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
                    uint8_t);
#endif
/*------------------------------------------------------------------------*
 *  usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized!
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
        struct usb_page *page;

        if (pc->page_start) {
                /* Case 1 - something has been loaded into DMA */

                if (pc->buffer) {
                        /* Case 1a - Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
                }
                offset += pc->page_offset_buf;

                /* compute destination page */

                page = pc->page_start;

                if (pc->ismultiseg) {
                        page += (offset / USB_PAGE_SIZE);

                        offset %= USB_PAGE_SIZE;

                        res->length = USB_PAGE_SIZE - offset;
                        res->physaddr = page->physaddr + offset;
                } else {
                        res->length = (usb_size_t)-1;
                        res->physaddr = page->physaddr + offset;
                }
                if (!pc->buffer) {
                        /* Case 1b - Non Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(page->buffer, offset);
                }
                return;
        }
#endif
        /* Case 2 - Plain PIO */

        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
        res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
        res->physaddr = 0;
#endif
}
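
/*
 * Illustrative sketch only: the chunk-wise access pattern that the
 * copy helpers below are built on. "pc", "offset" and "todo" are
 * placeholder names supplied by the caller.
 *
 *	struct usb_page_search res;
 *
 *	while (todo != 0) {
 *		usbd_get_page(pc, offset, &res);
 *		if (res.length > todo)
 *			res.length = todo;
 *		... access "res.length" bytes at "res.buffer" ...
 *		offset += res.length;
 *		todo -= res.length;
 *	}
 */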

/*------------------------------------------------------------------------*
 *  usb_pc_buffer_is_aligned - verify alignment
 *
 * This function is used to check if a page cache buffer is properly
 * aligned to reduce the use of bounce buffers in PIO mode.
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len, usb_frlength_t mask)
{
        struct usb_page_search buf_res;

        while (len != 0) {
                usbd_get_page(pc, offset, &buf_res);

                if (buf_res.length > len)
                        buf_res.length = len;
                if (USB_P2U(buf_res.buffer) & mask)
                        return (0);
                if (buf_res.length & mask)
                        return (0);

                offset += buf_res.length;
                len -= buf_res.length;
        }
        return (1);
}
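
/*
 * Illustrative example: passing a "mask" of 3 requires every chunk of
 * the buffer to start on a 4-byte boundary and to have a length that
 * is a multiple of 4 bytes, because both the buffer pointer and the
 * chunk length are tested against the mask above.
 */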

/*------------------------------------------------------------------------*
 *  usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
        struct usb_page_search buf_res;

        while (len != 0) {
                usbd_get_page(cache, offset, &buf_res);

                if (buf_res.length > len) {
                        buf_res.length = len;
                }
                memcpy(buf_res.buffer, ptr, buf_res.length);

                offset += buf_res.length;
                len -= buf_res.length;
                ptr = USB_ADD_BYTES(ptr, buf_res.length);
        }
}
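
/*
 * Typical use, sketched for illustration only and not taken from any
 * caller in this file: copying a request structure "req" (a
 * placeholder name) into the first frame buffer of a transfer:
 *
 *	usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
 */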

/*------------------------------------------------------------------------*
 *  usbd_copy_in_user - copy directly to DMA-able memory from userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
        struct usb_page_search buf_res;
        int error;

        while (len != 0) {
                usbd_get_page(cache, offset, &buf_res);

                if (buf_res.length > len) {
                        buf_res.length = len;
                }
                error = copyin(ptr, buf_res.buffer, buf_res.length);
                if (error)
                        return (error);

                offset += buf_res.length;
                len -= buf_res.length;
                ptr = USB_ADD_BYTES(ptr, buf_res.length);
        }
        return (0);                     /* success */
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
 *------------------------------------------------------------------------*/
#if USB_HAVE_MBUF
struct usb_m_copy_in_arg {
        struct usb_page_cache *cache;
        usb_frlength_t dst_offset;
};

static int
usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
{
        struct usb_m_copy_in_arg *ua = arg;

        usbd_copy_in(ua->cache, ua->dst_offset, src, count);
        ua->dst_offset += count;
        return (0);
}

void
usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
    struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
{
        struct usb_m_copy_in_arg arg = {cache, dst_offset};
        (void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
}
#endif

/*------------------------------------------------------------------------*
 *  usb_uiomove - copy data between a page cache and a uio request
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
    usb_frlength_t pc_offset, usb_frlength_t len)
{
        struct usb_page_search res;
        int error = 0;

        while (len != 0) {
                usbd_get_page(pc, pc_offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                /*
                 * "uiomove()" can sleep, so the caller needs a wrapper
                 * that exits the mutex and re-checks state afterwards.
                 */
                error = uiomove(res.buffer, res.length, uio);

                if (error) {
                        break;
                }
                pc_offset += res.length;
                len -= res.length;
        }
        return (error);
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {
                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memcpy(ptr, res.buffer, res.length);

                offset += res.length;
                len -= res.length;
                ptr = USB_ADD_BYTES(ptr, res.length);
        }
}

/*------------------------------------------------------------------------*
 *  usbd_copy_out_user - copy directly from DMA-able memory to userland
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
#if USB_HAVE_USER_IO
int
usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
        struct usb_page_search res;
        int error;

        while (len != 0) {
                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                error = copyout(res.buffer, ptr, res.length);
                if (error)
                        return (error);

                offset += res.length;
                len -= res.length;
                ptr = USB_ADD_BYTES(ptr, res.length);
        }
        return (0);                     /* success */
}
#endif

/*------------------------------------------------------------------------*
 *  usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {
                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memset(res.buffer, 0, res.length);

                offset += res.length;
                len -= res.length;
        }
}

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *      usb_dma_lock_cb - dummy callback
 *------------------------------------------------------------------------*/
static void
usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
{
        /* we use "mtx_owned()" instead of this function */
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_create - allocate a DMA tag
 *
 * NOTE: If the "align" parameter has a value of 1 the DMA-tag will
 * allow multi-segment mappings. Else all mappings are single-segment.
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_create(struct usb_dma_tag *udt,
    usb_size_t size, usb_size_t align)
{
        bus_dma_tag_t tag;

        if (bus_dma_tag_create
            ( /* parent    */ udt->tag_parent->tag,
             /* alignment */ align,
             /* boundary  */ 0,
             /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
             /* highaddr  */ BUS_SPACE_MAXADDR,
             /* filter    */ NULL,
             /* filterarg */ NULL,
             /* maxsize   */ size,
             /* nsegments */ (align == 1 && size > 1) ?
            (2 + (size / USB_PAGE_SIZE)) : 1,
             /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
            USB_PAGE_SIZE : size,
             /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
             /* lockfn    */ &usb_dma_lock_cb,
             /* lockarg   */ NULL,
            &tag)) {
                tag = NULL;
        }
        udt->tag = tag;
}
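
/*
 * Worked example: with "dma_bits" set to 32, the "lowaddr" expression
 * above evaluates to (2ULL << 31) - 1 = 0xFFFFFFFF, which restricts
 * allocations made through this tag to the lowest 4GB of physical
 * address space.
 */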

/*------------------------------------------------------------------------*
 *      usb_dma_tag_destroy - free a DMA tag
 *------------------------------------------------------------------------*/
static void
usb_dma_tag_destroy(struct usb_dma_tag *udt)
{
        bus_dma_tag_destroy(udt->tag);
}

/*------------------------------------------------------------------------*
 *      usb_pc_alloc_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
        usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

/*------------------------------------------------------------------------*
 *      usb_pc_load_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
        usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}

/*------------------------------------------------------------------------*
 *      usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
        struct usb_dma_parent_tag *uptag;
        struct usb_page_cache *pc;
        struct usb_page *pg;
        usb_size_t rem;
        bus_size_t off;
        uint8_t owned;

        pc = arg;
        uptag = pc->tag_parent;

        /*
         * XXX There is sometimes recursive locking here.
         * XXX We should try to find a better solution.
         * XXX Until further notice the "owned" variable
         * XXX does the trick.
         */

        if (error) {
                goto done;
        }

        off = 0;
        pg = pc->page_start;
        pg->physaddr = rounddown2(segs->ds_addr, USB_PAGE_SIZE);
        rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
        pc->page_offset_buf = rem;
        pc->page_offset_end += rem;
#ifdef USB_DEBUG
        if (nseg > 1) {
                int x;

                for (x = 0; x != nseg - 1; x++) {
                        if (((segs[x].ds_addr + segs[x].ds_len) & (USB_PAGE_SIZE - 1)) ==
                            ((segs[x + 1].ds_addr & (USB_PAGE_SIZE - 1))))
                                continue;
                        /*
                         * This check verifies there is no page offset
                         * hole between any of the segments. See the
                         * BUS_DMA_KEEP_PG_OFFSET flag.
                         */
                        DPRINTFN(0, "Page offset was not preserved\n");
                        error = 1;
                        goto done;
                }
        }
#endif
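        /*
         * Record the page aligned physical address of every USB page
         * the buffer spans. The BUS_DMA_KEEP_PG_OFFSET flag used by
         * usb_dma_tag_create() keeps the page offset consistent
         * across segments (checked above under USB_DEBUG), so
         * stepping one USB_PAGE_SIZE at a time is sufficient.
         */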
        while (pc->ismultiseg) {
                off += USB_PAGE_SIZE;
                if (off >= (segs->ds_len + rem)) {
                        /* page crossing */
                        nseg--;
                        segs++;
                        off = 0;
                        rem = 0;
                        if (nseg == 0)
                                break;
                }
                pg++;
                pg->physaddr = rounddown2(segs->ds_addr + off, USB_PAGE_SIZE);
        }

done:
        owned = mtx_owned(uptag->mtx);
        if (!owned)
                USB_MTX_LOCK(uptag->mtx);

        uptag->dma_error = (error ? 1 : 0);
        if (isload) {
                (uptag->func) (uptag);
        } else {
                cv_broadcast(uptag->cv);
        }
        if (!owned)
                USB_MTX_UNLOCK(uptag->mtx);
}

/*------------------------------------------------------------------------*
 *      usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
        struct usb_dma_parent_tag *uptag;
        struct usb_dma_tag *utag;
        bus_dmamap_t map;
        void *ptr;
        int err;

        uptag = pc->tag_parent;

        if (align != 1) {
                /*
                 * The alignment must be greater than or equal to the
                 * "size", else the object can be split between two
                 * memory pages and we get a problem!
                 */
                while (align < size) {
                        align *= 2;
                        if (align == 0) {
                                goto error;
                        }
                }
#if 1
                /*
                 * XXX BUS-DMA workaround - FIXME later:
                 *
                 * We assume that the alignment at this point of the
                 * code is greater than or equal to the size and less
                 * than two times the size, so that if we double the
                 * size, the size will be greater than the alignment.
                 *
                 * The bus-dma system has a check for "alignment"
                 * being less than "size". If that check fails we end
                 * up using contigmalloc which is page based even for
                 * small allocations. Try to avoid that to save
                 * memory, hence we sometimes do a large number of
                 * small allocations!
                 */
                if (size <= (USB_PAGE_SIZE / 2)) {
                        size *= 2;
                }
#endif
        }
        /* get the correct DMA tag */
        utag = usb_dma_tag_find(uptag, size, align);
        if (utag == NULL) {
                goto error;
        }
        /* allocate memory */
        if (bus_dmamem_alloc(
            utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
                goto error;
        }
        /* setup page cache */
        pc->buffer = ptr;
        pc->page_start = pg;
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->map = map;
        pc->tag = utag->tag;
        pc->ismultiseg = (align == 1);

        USB_MTX_LOCK(uptag->mtx);

        /* load memory into DMA */
        err = bus_dmamap_load(
            utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
            pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));

        if (err == EINPROGRESS) {
                cv_wait(uptag->cv, uptag->mtx);
                err = 0;
        }
        USB_MTX_UNLOCK(uptag->mtx);

        if (err || uptag->dma_error) {
                bus_dmamem_free(utag->tag, ptr, map);
                goto error;
        }
        memset(ptr, 0, size);

        usb_pc_cpu_flush(pc);

        return (0);

error:
        /* reset most of the page cache */
        pc->buffer = NULL;
        pc->page_start = NULL;
        pc->page_offset_buf = 0;
        pc->page_offset_end = 0;
        pc->map = NULL;
        pc->tag = NULL;
        return (1);
}
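
/*
 * Illustrative sketch only, not taken from a real driver: allocating
 * a small DMA buffer, filling it and releasing it again. The names
 * "pc", "pg", "uptag" and "data" are placeholders for storage owned
 * by the caller, and "uptag->mtx" must not be held across the
 * allocation since this function locks it internally.
 *
 *	if (usb_pc_alloc_mem(pc, pg, 256, 1) != 0)
 *		return (ENOMEM);
 *	usbd_copy_in(pc, 0, data, 256);
 *	usb_pc_cpu_flush(pc);
 *	...
 *	usb_pc_free_mem(pc);
 */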

/*------------------------------------------------------------------------*
 *      usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
        if (pc && pc->buffer) {
                bus_dmamap_unload(pc->tag, pc->map);

                bus_dmamem_free(pc->tag, pc->buffer, pc->map);

                pc->buffer = NULL;
        }
}

/*------------------------------------------------------------------------*
 *      usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
        /* setup page cache */
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->ismultiseg = 1;

        USB_MTX_ASSERT(pc->tag_parent->mtx, MA_OWNED);

        if (size > 0) {
                if (sync) {
                        struct usb_dma_parent_tag *uptag;
                        int err;

                        uptag = pc->tag_parent;

                        /*
                         * We have to unload the previously loaded DMA
                         * pages before trying to load a new one!
                         */
                        bus_dmamap_unload(pc->tag, pc->map);

                        /*
                         * Try to load memory into DMA.
                         */
                        err = bus_dmamap_load(
                            pc->tag, pc->map, pc->buffer, size,
                            &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
                        if (err == EINPROGRESS) {
                                cv_wait(uptag->cv, uptag->mtx);
                                err = 0;
                        }
                        if (err || uptag->dma_error) {
                                return (1);
                        }
                } else {
                        /*
                         * We have to unload the previously loaded DMA
                         * pages before trying to load a new one!
                         */
                        bus_dmamap_unload(pc->tag, pc->map);

                        /*
                         * Try to load memory into DMA. The callback
                         * will be called in all cases:
                         */
                        if (bus_dmamap_load(
                            pc->tag, pc->map, pc->buffer, size,
                            &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
                        }
                }
        } else {
                if (!sync) {
                        /*
                         * Call callback so that refcount is decremented
                         * properly:
                         */
                        pc->tag_parent->dma_error = 0;
                        (pc->tag_parent->func) (pc->tag_parent);
                }
        }
        return (0);
}

/*------------------------------------------------------------------------*
 *      usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }

        /*
         * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
         * same time, but in the future we should try to isolate the
         * different cases to optimise the code. --HPS
         */
        bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
        bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
}

/*------------------------------------------------------------------------*
 *      usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }
        bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
}

/*------------------------------------------------------------------------*
 *      usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
        struct usb_xfer_root *info;
        struct usb_dma_tag *utag;

        /* get info */
        info = USB_DMATAG_TO_XROOT(pc->tag_parent);

        /* sanity check */
        if (info == NULL) {
                goto error;
        }
        utag = usb_dma_tag_find(pc->tag_parent, size, 1);
        if (utag == NULL) {
                goto error;
        }
        /* create DMA map */
        if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
                goto error;
        }
        pc->tag = utag->tag;
        return (0);                     /* success */

error:
        pc->map = NULL;
        pc->tag = NULL;
        return (1);                     /* failure */
}

/*------------------------------------------------------------------------*
 *      usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
        if (pc && pc->tag) {
                bus_dmamap_destroy(pc->tag, pc->map);
                pc->tag = NULL;
                pc->map = NULL;
        }
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_find - lookup an existing DMA tag or create a new one
 *------------------------------------------------------------------------*/
struct usb_dma_tag *
usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
    usb_size_t size, usb_size_t align)
{
        struct usb_dma_tag *udt;
        uint8_t nudt;

        USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
        USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));

        udt = udpt->utag_first;
        nudt = udpt->utag_max;

        while (nudt--) {
                if (udt->align == 0) {
                        usb_dma_tag_create(udt, size, align);
                        if (udt->tag == NULL) {
                                return (NULL);
                        }
                        udt->align = align;
                        udt->size = size;
                        return (udt);
                }
                if ((udt->align == align) && (udt->size == size)) {
                        return (udt);
                }
                udt++;
        }
        return (NULL);
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
        memset(udpt, 0, sizeof(*udpt));

        /* sanity checking */
        if ((nudt == 0) ||
            (ndmabits == 0) ||
            (mtx == NULL)) {
                /* something is corrupt */
                return;
        }
        /* initialise condition variable */
        cv_init(udpt->cv, "USB DMA CV");

        /* store some information */
        udpt->mtx = mtx;
        udpt->func = func;
        udpt->tag = dmat;
        udpt->utag_first = udt;
        udpt->utag_max = nudt;
        udpt->dma_bits = ndmabits;

        while (nudt--) {
                memset(udt, 0, sizeof(*udt));
                udt->tag_parent = udpt;
                udt++;
        }
}
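
/*
 * Note on the data layout (descriptive only, no new behaviour): the
 * parent tag keeps a pointer to an array of "nudt" usb_dma_tag slots.
 * The slots start out zeroed and are filled in lazily by
 * usb_dma_tag_find() the first time a given size/alignment
 * combination is requested, and are torn down again by
 * usb_dma_tag_unsetup() below.
 */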

/*------------------------------------------------------------------------*
 *      usb_dma_tag_unsetup - tear down USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
        struct usb_dma_tag *udt;
        uint8_t nudt;

        udt = udpt->utag_first;
        nudt = udpt->utag_max;

        while (nudt--) {
                if (udt->align) {
                        /* destroy the USB DMA tag */
                        usb_dma_tag_destroy(udt);
                        udt->align = 0;
                }
                udt++;
        }

        if (udpt->utag_max) {
                /* destroy the condition variable */
                cv_destroy(udpt->cv);
        }
}

/*------------------------------------------------------------------------*
 *      usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
        struct usb_xfer_root *info;
        struct usb_xfer *xfer;
        usb_frcount_t nframes;

        xfer = pq->curr;
        info = xfer->xroot;

        USB_MTX_ASSERT(info->xfer_mtx, MA_OWNED);

        if (xfer->error) {
                /* some error happened */
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, 0);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (!xfer->flags_int.bdma_setup) {
                struct usb_page *pg;
                usb_frlength_t frlength_0;
                uint8_t isread;

                xfer->flags_int.bdma_setup = 1;

                /* reset BUS-DMA load state */

                info->dma_error = 0;

                if (xfer->flags_int.isochronous_xfr) {
                        /* only one frame buffer */
                        nframes = 1;
                        frlength_0 = xfer->sumlen;
                } else {
                        /* can be multiple frame buffers */
                        nframes = xfer->nframes;
                        frlength_0 = xfer->frlengths[0];
                }

                /*
                 * Set DMA direction first. This is needed to
                 * select the correct cache invalidate and cache
                 * flush operations.
                 */
                isread = USB_GET_DATA_ISREAD(xfer);
                pg = xfer->dma_page_ptr;

                if (xfer->flags_int.control_xfr &&
                    xfer->flags_int.control_hdr) {
                        /* special case */
                        if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
                                /* The device controller writes to memory */
                                xfer->frbuffers[0].isread = 1;
                        } else {
                                /* The host controller reads from memory */
                                xfer->frbuffers[0].isread = 0;
                        }
                } else {
                        /* default case */
                        xfer->frbuffers[0].isread = isread;
                }

                /*
                 * Setup the "page_start" pointer which points to an array of
                 * USB pages where information about the physical address of a
                 * page will be stored. Also initialise the "isread" field of
                 * the USB page caches.
                 */
                xfer->frbuffers[0].page_start = pg;

                info->dma_nframes = nframes;
                info->dma_currframe = 0;
                info->dma_frlength_0 = frlength_0;

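                /*
                 * Each frame buffer is given "frlength / USB_PAGE_SIZE"
                 * usb_page entries plus two extra ones, covering the
                 * worst case where the buffer starts and ends at an
                 * unaligned page offset. This matches the "nsegments"
                 * limit computed in usb_dma_tag_create().
                 */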
                pg += (frlength_0 / USB_PAGE_SIZE);
                pg += 2;

                while (--nframes > 0) {
                        xfer->frbuffers[nframes].isread = isread;
                        xfer->frbuffers[nframes].page_start = pg;

                        pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
                        pg += 2;
                }
        }
        if (info->dma_error) {
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (info->dma_currframe != info->dma_nframes) {
                if (info->dma_currframe == 0) {
                        /* special case */
                        usb_pc_load_mem(xfer->frbuffers,
                            info->dma_frlength_0, 0);
                } else {
                        /* default case */
                        nframes = info->dma_currframe;
                        usb_pc_load_mem(xfer->frbuffers + nframes,
                            xfer->frlengths[nframes], 0);
                }

                /* advance frame index */
                info->dma_currframe++;

                return;
        }
        /* go ahead */
        usb_bdma_pre_sync(xfer);

        /* start loading next USB transfer, if any */
        usb_command_wrapper(pq, NULL);

        /* finally start the hardware */
        usbd_pipe_enter(xfer);
}
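
/*
 * Descriptive note: usb_pc_load_mem() is called above with "sync" set
 * to zero, so each frame buffer is loaded asynchronously and its
 * completion is reported through usb_bdma_done_event() below, which
 * re-enters this work loop. The loop therefore runs once per frame
 * buffer until "dma_currframe" reaches "dma_nframes", and only then
 * starts the hardware.
 */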

/*------------------------------------------------------------------------*
 *      usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
        struct usb_xfer_root *info;

        info = USB_DMATAG_TO_XROOT(udpt);

        USB_MTX_ASSERT(info->xfer_mtx, MA_OWNED);

        /* copy error */
        info->dma_error = udpt->dma_error;

        /* enter workloop again */
        usb_command_wrapper(&info->dma_q,
            info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *      usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {
                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                } else {
                        usb_pc_cpu_flush(pc);
                }
                pc++;
        }
}

/*------------------------------------------------------------------------*
 *      usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {
                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                }
                pc++;
        }
}

#endif