1 /* $FreeBSD$ */
2 /*-
3  * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 #ifdef USB_GLOBAL_INCLUDE_FILE
28 #include USB_GLOBAL_INCLUDE_FILE
29 #else
30 #include <sys/stdint.h>
31 #include <sys/stddef.h>
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/module.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/sx.h>
44 #include <sys/unistd.h>
45 #include <sys/callout.h>
46 #include <sys/malloc.h>
47 #include <sys/priv.h>
48
49 #include <dev/usb/usb.h>
50 #include <dev/usb/usbdi.h>
51 #include <dev/usb/usbdi_util.h>
52
53 #define USB_DEBUG_VAR usb_debug
54
55 #include <dev/usb/usb_core.h>
56 #include <dev/usb/usb_busdma.h>
57 #include <dev/usb/usb_process.h>
58 #include <dev/usb/usb_transfer.h>
59 #include <dev/usb/usb_device.h>
60 #include <dev/usb/usb_util.h>
61 #include <dev/usb/usb_debug.h>
62
63 #include <dev/usb/usb_controller.h>
64 #include <dev/usb/usb_bus.h>
65 #endif                  /* USB_GLOBAL_INCLUDE_FILE */
66
67 #if USB_HAVE_BUSDMA
68 static void     usb_dma_tag_create(struct usb_dma_tag *, usb_size_t, usb_size_t);
69 static void     usb_dma_tag_destroy(struct usb_dma_tag *);
70 static void     usb_dma_lock_cb(void *, bus_dma_lock_op_t);
71 static void     usb_pc_alloc_mem_cb(void *, bus_dma_segment_t *, int, int);
72 static void     usb_pc_load_mem_cb(void *, bus_dma_segment_t *, int, int);
73 static void     usb_pc_common_mem_cb(void *, bus_dma_segment_t *, int, int,
74                     uint8_t);
75 #endif
76
77 /*------------------------------------------------------------------------*
78  *  usbd_get_page - look up DMA-able memory for the given offset
79  *
80  * NOTE: Only call this function when the "page_cache" structure has
81  * been properly initialized!
82  *------------------------------------------------------------------------*/
83 void
84 usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
85     struct usb_page_search *res)
86 {
87 #if USB_HAVE_BUSDMA
88         struct usb_page *page;
89
90         if (pc->page_start) {
91
92                 /* Case 1 - something has been loaded into DMA */
93
94                 if (pc->buffer) {
95
96                         /* Case 1a - Kernel Virtual Address */
97
98                         res->buffer = USB_ADD_BYTES(pc->buffer, offset);
99                 }
100                 offset += pc->page_offset_buf;
101
102                 /* compute destination page */
103
104                 page = pc->page_start;
105
106                 if (pc->ismultiseg) {
107
108                         page += (offset / USB_PAGE_SIZE);
109
110                         offset %= USB_PAGE_SIZE;
111
112                         res->length = USB_PAGE_SIZE - offset;
113                         res->physaddr = page->physaddr + offset;
114                 } else {
115                         res->length = (usb_size_t)-1;
116                         res->physaddr = page->physaddr + offset;
117                 }
118                 if (!pc->buffer) {
119
120                         /* Case 1b - Non Kernel Virtual Address */
121
122                         res->buffer = USB_ADD_BYTES(page->buffer, offset);
123                 }
124                 return;
125         }
126 #endif
127         /* Case 2 - Plain PIO */
128
129         res->buffer = USB_ADD_BYTES(pc->buffer, offset);
130         res->length = (usb_size_t)-1;
131 #if USB_HAVE_BUSDMA
132         res->physaddr = 0;
133 #endif
134 }
135
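/*
 * Illustrative usage sketch (not a new API; this is the same pattern the
 * copy helpers below follow): walk a buffer one DMA-able chunk at a time,
 * clamping each chunk to the remaining length.
 *
 *	struct usb_page_search res;
 *
 *	while (len != 0) {
 *		usbd_get_page(pc, offset, &res);
 *		if (res.length > len)
 *			res.length = len;
 *		... use res.buffer and, when BUSDMA is used, res.physaddr ...
 *		offset += res.length;
 *		len -= res.length;
 *	}
 */
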
136 /*------------------------------------------------------------------------*
137  *  usb_pc_buffer_is_aligned - verify alignment
138  * 
139  * This function is used to check if a page cache buffer is properly
140  * aligned to reduce the use of bounce buffers in PIO mode.
141  *------------------------------------------------------------------------*/
142 uint8_t
143 usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
144     usb_frlength_t len, usb_frlength_t mask)
145 {
146         struct usb_page_search buf_res;
147
148         while (len != 0) {
149
150                 usbd_get_page(pc, offset, &buf_res);
151
152                 if (buf_res.length > len)
153                         buf_res.length = len;
154                 if (USB_P2U(buf_res.buffer) & mask)
155                         return (0);
156                 if (buf_res.length & mask)
157                         return (0);
158
159                 offset += buf_res.length;
160                 len -= buf_res.length;
161         }
162         return (1);
163 }
164
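/*
 * For example (illustrative only): a "mask" of 3 rejects any chunk whose
 * buffer address or length is not a multiple of 4 bytes, since both values
 * are tested with a bitwise AND against the mask.
 */
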
165 /*------------------------------------------------------------------------*
166  *  usbd_copy_in - copy directly to DMA-able memory
167  *------------------------------------------------------------------------*/
168 void
169 usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
170     const void *ptr, usb_frlength_t len)
171 {
172         struct usb_page_search buf_res;
173
174         while (len != 0) {
175
176                 usbd_get_page(cache, offset, &buf_res);
177
178                 if (buf_res.length > len) {
179                         buf_res.length = len;
180                 }
181                 memcpy(buf_res.buffer, ptr, buf_res.length);
182
183                 offset += buf_res.length;
184                 len -= buf_res.length;
185                 ptr = USB_ADD_BYTES(ptr, buf_res.length);
186         }
187 }
188
189 /*------------------------------------------------------------------------*
190  *  usbd_copy_in_user - copy directly to DMA-able memory from userland
191  *
192  * Return values:
193  *    0: Success
194  * Else: Failure
195  *------------------------------------------------------------------------*/
196 #if USB_HAVE_USER_IO
197 int
198 usbd_copy_in_user(struct usb_page_cache *cache, usb_frlength_t offset,
199     const void *ptr, usb_frlength_t len)
200 {
201         struct usb_page_search buf_res;
202         int error;
203
204         while (len != 0) {
205
206                 usbd_get_page(cache, offset, &buf_res);
207
208                 if (buf_res.length > len) {
209                         buf_res.length = len;
210                 }
211                 error = copyin(ptr, buf_res.buffer, buf_res.length);
212                 if (error)
213                         return (error);
214
215                 offset += buf_res.length;
216                 len -= buf_res.length;
217                 ptr = USB_ADD_BYTES(ptr, buf_res.length);
218         }
219         return (0);                     /* success */
220 }
221 #endif
222
223 /*------------------------------------------------------------------------*
224  *  usbd_m_copy_in - copy an mbuf chain directly into DMA-able memory
225  *------------------------------------------------------------------------*/
226 #if USB_HAVE_MBUF
227 struct usb_m_copy_in_arg {
228         struct usb_page_cache *cache;
229         usb_frlength_t dst_offset;
230 };
231
232 static int
233 usbd_m_copy_in_cb(void *arg, void *src, uint32_t count)
234 {
235         struct usb_m_copy_in_arg *ua = arg;
236
237         usbd_copy_in(ua->cache, ua->dst_offset, src, count);
238         ua->dst_offset += count;
239         return (0);
240 }
241
242 void
243 usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
244     struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len)
245 {
246         struct usb_m_copy_in_arg arg = {cache, dst_offset};
247         (void) m_apply(m, src_offset, src_len, &usbd_m_copy_in_cb, &arg);
248 }
249 #endif
250
251 /*------------------------------------------------------------------------*
252  *  usb_uiomove - factored out code
253  *------------------------------------------------------------------------*/
254 #if USB_HAVE_USER_IO
255 int
256 usb_uiomove(struct usb_page_cache *pc, struct uio *uio,
257     usb_frlength_t pc_offset, usb_frlength_t len)
258 {
259         struct usb_page_search res;
260         int error = 0;
261
262         while (len != 0) {
263
264                 usbd_get_page(pc, pc_offset, &res);
265
266                 if (res.length > len) {
267                         res.length = len;
268                 }
269                 /*
270                  * "uiomove()" can sleep, so one needs a wrapper that
271                  * exits the mutex and re-checks state.
272                  */
273                 error = uiomove(res.buffer, res.length, uio);
274
275                 if (error) {
276                         break;
277                 }
278                 pc_offset += res.length;
279                 len -= res.length;
280         }
281         return (error);
282 }
283 #endif
284
285 /*------------------------------------------------------------------------*
286  *  usbd_copy_out - copy directly from DMA-able memory
287  *------------------------------------------------------------------------*/
288 void
289 usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
290     void *ptr, usb_frlength_t len)
291 {
292         struct usb_page_search res;
293
294         while (len != 0) {
295
296                 usbd_get_page(cache, offset, &res);
297
298                 if (res.length > len) {
299                         res.length = len;
300                 }
301                 memcpy(ptr, res.buffer, res.length);
302
303                 offset += res.length;
304                 len -= res.length;
305                 ptr = USB_ADD_BYTES(ptr, res.length);
306         }
307 }
308
309 /*------------------------------------------------------------------------*
310  *  usbd_copy_out_user - copy directly from DMA-able memory to userland
311  *
312  * Return values:
313  *    0: Success
314  * Else: Failure
315  *------------------------------------------------------------------------*/
316 #if USB_HAVE_USER_IO
317 int
318 usbd_copy_out_user(struct usb_page_cache *cache, usb_frlength_t offset,
319     void *ptr, usb_frlength_t len)
320 {
321         struct usb_page_search res;
322         int error;
323
324         while (len != 0) {
325
326                 usbd_get_page(cache, offset, &res);
327
328                 if (res.length > len) {
329                         res.length = len;
330                 }
331                 error = copyout(res.buffer, ptr, res.length);
332                 if (error)
333                         return (error);
334
335                 offset += res.length;
336                 len -= res.length;
337                 ptr = USB_ADD_BYTES(ptr, res.length);
338         }
339         return (0);                     /* success */
340 }
341 #endif
342
343 /*------------------------------------------------------------------------*
344  *  usbd_frame_zero - zero DMA-able memory
345  *------------------------------------------------------------------------*/
346 void
347 usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
348     usb_frlength_t len)
349 {
350         struct usb_page_search res;
351
352         while (len != 0) {
353
354                 usbd_get_page(cache, offset, &res);
355
356                 if (res.length > len) {
357                         res.length = len;
358                 }
359                 memset(res.buffer, 0, res.length);
360
361                 offset += res.length;
362                 len -= res.length;
363         }
364 }
365
366 #if USB_HAVE_BUSDMA
367
368 /*------------------------------------------------------------------------*
369  *      usb_dma_lock_cb - dummy callback
370  *------------------------------------------------------------------------*/
371 static void
372 usb_dma_lock_cb(void *arg, bus_dma_lock_op_t op)
373 {
374         /* we use "mtx_owned()" instead of this function */
375 }
376
377 /*------------------------------------------------------------------------*
378  *      usb_dma_tag_create - allocate a DMA tag
379  *
380  * NOTE: If the "align" parameter has a value of 1, the DMA tag will
381  * allow multi-segment mappings. Otherwise all mappings are single-segment.
382  *------------------------------------------------------------------------*/
383 static void
384 usb_dma_tag_create(struct usb_dma_tag *udt,
385     usb_size_t size, usb_size_t align)
386 {
387         bus_dma_tag_t tag;
388
389         if (bus_dma_tag_create
390             ( /* parent    */ udt->tag_parent->tag,
391              /* alignment */ align,
392              /* boundary  */ 0,
393              /* lowaddr   */ (2ULL << (udt->tag_parent->dma_bits - 1)) - 1,
394              /* highaddr  */ BUS_SPACE_MAXADDR,
395              /* filter    */ NULL,
396              /* filterarg */ NULL,
397              /* maxsize   */ size,
398              /* nsegments */ (align == 1 && size > 1) ?
399             (2 + (size / USB_PAGE_SIZE)) : 1,
400              /* maxsegsz  */ (align == 1 && size > USB_PAGE_SIZE) ?
401             USB_PAGE_SIZE : size,
402              /* flags     */ BUS_DMA_KEEP_PG_OFFSET,
403              /* lockfn    */ &usb_dma_lock_cb,
404              /* lockarg   */ NULL,
405             &tag)) {
406                 tag = NULL;
407         }
408         udt->tag = tag;
409 }
410
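/*
 * Worked example (assuming USB_PAGE_SIZE is 4096): a multi-segment tag,
 * i.e. align == 1, created for size 16384 gets nsegments = 2 + 16384/4096
 * = 6 and maxsegsz = 4096, while any other alignment yields a single
 * segment with maxsegsz equal to the full size.
 */
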
411 /*------------------------------------------------------------------------*
412  *      usb_dma_tag_destroy - free a DMA tag
413  *------------------------------------------------------------------------*/
414 static void
415 usb_dma_tag_destroy(struct usb_dma_tag *udt)
416 {
417         bus_dma_tag_destroy(udt->tag);
418 }
419
420 /*------------------------------------------------------------------------*
421  *      usb_pc_alloc_mem_cb - BUS-DMA callback function
422  *------------------------------------------------------------------------*/
423 static void
424 usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
425     int nseg, int error)
426 {
427         usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
428 }
429
430 /*------------------------------------------------------------------------*
431  *      usb_pc_load_mem_cb - BUS-DMA callback function
432  *------------------------------------------------------------------------*/
433 static void
434 usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
435     int nseg, int error)
436 {
437         usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
438 }
439
440 /*------------------------------------------------------------------------*
441  *      usb_pc_common_mem_cb - BUS-DMA callback function
442  *------------------------------------------------------------------------*/
443 static void
444 usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
445     int nseg, int error, uint8_t isload)
446 {
447         struct usb_dma_parent_tag *uptag;
448         struct usb_page_cache *pc;
449         struct usb_page *pg;
450         usb_size_t rem;
451         bus_size_t off;
452         uint8_t owned;
453
454         pc = arg;
455         uptag = pc->tag_parent;
456
457         /*
458          * XXX There is sometimes recursive locking here.
459          * XXX We should try to find a better solution.
460          * XXX Until a better fix is found, the "owned"
461          * XXX variable does the trick.
462          */
463
464         if (error) {
465                 goto done;
466         }
467
468         off = 0;
469         pg = pc->page_start;
470         pg->physaddr = rounddown2(segs->ds_addr, USB_PAGE_SIZE);
471         rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
472         pc->page_offset_buf = rem;
473         pc->page_offset_end += rem;
474 #ifdef USB_DEBUG
475         if (nseg > 1) {
476                 int x;
477
478                 for (x = 0; x != nseg - 1; x++) {
479                         if (((segs[x].ds_addr + segs[x].ds_len) & (USB_PAGE_SIZE - 1)) ==
480                             ((segs[x + 1].ds_addr & (USB_PAGE_SIZE - 1))))
481                                 continue;
482                         /*
483                          * This check verifies there is no page offset
484                          * hole between any of the segments. See the
485                          * BUS_DMA_KEEP_PG_OFFSET flag.
486                          */
487                         DPRINTFN(0, "Page offset was not preserved\n");
488                         error = 1;
489                         goto done;
490                 }
491         }
492 #endif
493         while (pc->ismultiseg) {
494                 off += USB_PAGE_SIZE;
495                 if (off >= (segs->ds_len + rem)) {
496                         /* page crossing */
497                         nseg--;
498                         segs++;
499                         off = 0;
500                         rem = 0;
501                         if (nseg == 0)
502                                 break;
503                 }
504                 pg++;
505                 pg->physaddr = rounddown2(segs->ds_addr + off, USB_PAGE_SIZE);
506         }
507
508 done:
509         owned = mtx_owned(uptag->mtx);
510         if (!owned)
511                 mtx_lock(uptag->mtx);
512
513         uptag->dma_error = (error ? 1 : 0);
514         if (isload) {
515                 (uptag->func) (uptag);
516         } else {
517                 cv_broadcast(uptag->cv);
518         }
519         if (!owned)
520                 mtx_unlock(uptag->mtx);
521 }
522
523 /*------------------------------------------------------------------------*
524  *      usb_pc_alloc_mem - allocate DMA-able memory
525  *
526  * Returns:
527  *    0: Success
528  * Else: Failure
529  *------------------------------------------------------------------------*/
530 uint8_t
531 usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
532     usb_size_t size, usb_size_t align)
533 {
534         struct usb_dma_parent_tag *uptag;
535         struct usb_dma_tag *utag;
536         bus_dmamap_t map;
537         void *ptr;
538         int err;
539
540         uptag = pc->tag_parent;
541
542         if (align != 1) {
543                 /*
544                  * The alignment must be greater than or equal to
545                  * the "size", else the object can be split across
546                  * two memory pages and we get a problem!
547                  */
548                 while (align < size) {
549                         align *= 2;
550                         if (align == 0) {
551                                 goto error;
552                         }
553                 }
554 #if 1
555                 /*
556                  * XXX BUS-DMA workaround - FIXME later:
557                  *
558                  * We assume that the alignment at this point in
559                  * the code is greater than or equal to the size and
560                  * less than two times the size, so that if we double
561                  * the size, the size will be greater than the
562                  * alignment.
563                  *
564                  * The bus-dma system has a check for "alignment"
565                  * being less than "size". If that check fails we end
566                  * up using contigmalloc which is page based even for
567                  * small allocations. Try to avoid that to save
568                  * memory, hence we sometimes do a large number of
569                  * small allocations!
570                  */
571                 if (size <= (USB_PAGE_SIZE / 2)) {
572                         size *= 2;
573                 }
574 #endif
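                /*
                 * Worked example (assuming USB_PAGE_SIZE is 4096): a
                 * request for size 24 with align 16 first rounds the
                 * alignment up to 32, which is >= 24, and then doubles
                 * the size to 48, so that bus-dma sees size > alignment
                 * and does not fall back to its page-based allocator.
                 */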
575         }
576         /* get the correct DMA tag */
577         utag = usb_dma_tag_find(uptag, size, align);
578         if (utag == NULL) {
579                 goto error;
580         }
581         /* allocate memory */
582         if (bus_dmamem_alloc(
583             utag->tag, &ptr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT), &map)) {
584                 goto error;
585         }
586         /* setup page cache */
587         pc->buffer = ptr;
588         pc->page_start = pg;
589         pc->page_offset_buf = 0;
590         pc->page_offset_end = size;
591         pc->map = map;
592         pc->tag = utag->tag;
593         pc->ismultiseg = (align == 1);
594
595         mtx_lock(uptag->mtx);
596
597         /* load memory into DMA */
598         err = bus_dmamap_load(
599             utag->tag, map, ptr, size, &usb_pc_alloc_mem_cb,
600             pc, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
601
602         if (err == EINPROGRESS) {
603                 cv_wait(uptag->cv, uptag->mtx);
604                 err = 0;
605         }
606         mtx_unlock(uptag->mtx);
607
608         if (err || uptag->dma_error) {
609                 bus_dmamem_free(utag->tag, ptr, map);
610                 goto error;
611         }
612         memset(ptr, 0, size);
613
614         usb_pc_cpu_flush(pc);
615
616         return (0);
617
618 error:
619         /* reset most of the page cache */
620         pc->buffer = NULL;
621         pc->page_start = NULL;
622         pc->page_offset_buf = 0;
623         pc->page_offset_end = 0;
624         pc->map = NULL;
625         pc->tag = NULL;
626         return (1);
627 }
628
629 /*------------------------------------------------------------------------*
630  *      usb_pc_free_mem - free DMA memory
631  *
632  * This function is NULL safe.
633  *------------------------------------------------------------------------*/
634 void
635 usb_pc_free_mem(struct usb_page_cache *pc)
636 {
637         if (pc && pc->buffer) {
638
639                 bus_dmamap_unload(pc->tag, pc->map);
640
641                 bus_dmamem_free(pc->tag, pc->buffer, pc->map);
642
643                 pc->buffer = NULL;
644         }
645 }
646
647 /*------------------------------------------------------------------------*
648  *      usb_pc_load_mem - load virtual memory into DMA
649  *
650  * Return values:
651  * 0: Success
652  * Else: Error
653  *------------------------------------------------------------------------*/
654 uint8_t
655 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
656 {
657         /* setup page cache */
658         pc->page_offset_buf = 0;
659         pc->page_offset_end = size;
660         pc->ismultiseg = 1;
661
662         mtx_assert(pc->tag_parent->mtx, MA_OWNED);
663
664         if (size > 0) {
665                 if (sync) {
666                         struct usb_dma_parent_tag *uptag;
667                         int err;
668
669                         uptag = pc->tag_parent;
670
671                         /*
672                          * We have to unload the previously loaded DMA
673                          * pages before trying to load new ones!
674                          */
675                         bus_dmamap_unload(pc->tag, pc->map);
676
677                         /*
678                          * Try to load memory into DMA.
679                          */
680                         err = bus_dmamap_load(
681                             pc->tag, pc->map, pc->buffer, size,
682                             &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
683                         if (err == EINPROGRESS) {
684                                 cv_wait(uptag->cv, uptag->mtx);
685                                 err = 0;
686                         }
687                         if (err || uptag->dma_error) {
688                                 return (1);
689                         }
690                 } else {
691
692                         /*
693                          * We have to unload the previously loaded DMA
694                          * pages before trying to load new ones!
695                          */
696                         bus_dmamap_unload(pc->tag, pc->map);
697
698                         /*
699                          * Try to load memory into DMA. The callback
700                          * will be called in all cases:
701                          */
702                         if (bus_dmamap_load(
703                             pc->tag, pc->map, pc->buffer, size,
704                             &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
705                         }
706                 }
707         } else {
708                 if (!sync) {
709                         /*
710                          * Call callback so that refcount is decremented
711                          * properly:
712                          */
713                         pc->tag_parent->dma_error = 0;
714                         (pc->tag_parent->func) (pc->tag_parent);
715                 }
716         }
717         return (0);
718 }
719
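/*
 * Note on the two modes above: with "sync" set, the caller blocks on the
 * parent tag's condition variable until the bus-dma callback has run;
 * without it, usb_pc_load_mem_cb() eventually invokes the parent tag's
 * "func" callback (usb_bdma_done_event() in the transfer path), which
 * re-enters the DMA work loop.
 */
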
720 /*------------------------------------------------------------------------*
721  *      usb_pc_cpu_invalidate - invalidate CPU cache
722  *------------------------------------------------------------------------*/
723 void
724 usb_pc_cpu_invalidate(struct usb_page_cache *pc)
725 {
726         if (pc->page_offset_end == pc->page_offset_buf) {
727                 /* nothing has been loaded into this page cache! */
728                 return;
729         }
730
731         /*
732          * TODO: We currently do XXX_POSTREAD and XXX_PREREAD at the
733          * same time, but in the future we should try to isolate the
734          * different cases to optimise the code. --HPS
735          */
736         bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_POSTREAD);
737         bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREREAD);
738 }
739
740 /*------------------------------------------------------------------------*
741  *      usb_pc_cpu_flush - flush CPU cache
742  *------------------------------------------------------------------------*/
743 void
744 usb_pc_cpu_flush(struct usb_page_cache *pc)
745 {
746         if (pc->page_offset_end == pc->page_offset_buf) {
747                 /* nothing has been loaded into this page cache! */
748                 return;
749         }
750         bus_dmamap_sync(pc->tag, pc->map, BUS_DMASYNC_PREWRITE);
751 }
752
753 /*------------------------------------------------------------------------*
754  *      usb_pc_dmamap_create - create a DMA map
755  *
756  * Returns:
757  *    0: Success
758  * Else: Failure
759  *------------------------------------------------------------------------*/
760 uint8_t
761 usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
762 {
763         struct usb_xfer_root *info;
764         struct usb_dma_tag *utag;
765
766         /* get info */
767         info = USB_DMATAG_TO_XROOT(pc->tag_parent);
768
769         /* sanity check */
770         if (info == NULL) {
771                 goto error;
772         }
773         utag = usb_dma_tag_find(pc->tag_parent, size, 1);
774         if (utag == NULL) {
775                 goto error;
776         }
777         /* create DMA map */
778         if (bus_dmamap_create(utag->tag, 0, &pc->map)) {
779                 goto error;
780         }
781         pc->tag = utag->tag;
782         return 0;                       /* success */
783
784 error:
785         pc->map = NULL;
786         pc->tag = NULL;
787         return 1;                       /* failure */
788 }
789
790 /*------------------------------------------------------------------------*
791  *      usb_pc_dmamap_destroy
792  *
793  * This function is NULL safe.
794  *------------------------------------------------------------------------*/
795 void
796 usb_pc_dmamap_destroy(struct usb_page_cache *pc)
797 {
798         if (pc && pc->tag) {
799                 bus_dmamap_destroy(pc->tag, pc->map);
800                 pc->tag = NULL;
801                 pc->map = NULL;
802         }
803 }
804
805 /*------------------------------------------------------------------------*
806  *      usb_dma_tag_find - factored out code
807  *------------------------------------------------------------------------*/
808 struct usb_dma_tag *
809 usb_dma_tag_find(struct usb_dma_parent_tag *udpt,
810     usb_size_t size, usb_size_t align)
811 {
812         struct usb_dma_tag *udt;
813         uint8_t nudt;
814
815         USB_ASSERT(align > 0, ("Invalid parameter align = 0\n"));
816         USB_ASSERT(size > 0, ("Invalid parameter size = 0\n"));
817
818         udt = udpt->utag_first;
819         nudt = udpt->utag_max;
820
821         while (nudt--) {
822
823                 if (udt->align == 0) {
824                         usb_dma_tag_create(udt, size, align);
825                         if (udt->tag == NULL) {
826                                 return (NULL);
827                         }
828                         udt->align = align;
829                         udt->size = size;
830                         return (udt);
831                 }
832                 if ((udt->align == align) && (udt->size == size)) {
833                         return (udt);
834                 }
835                 udt++;
836         }
837         return (NULL);
838 }
839
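/*
 * Note: the parent tag keeps a small fixed array of "utag_max" child tags.
 * The first unused slot (align == 0) is created on demand with the
 * requested size/alignment and is reused by later lookups that request the
 * same size and alignment.
 */
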
840 /*------------------------------------------------------------------------*
841  *      usb_dma_tag_setup - initialise USB DMA tags
842  *------------------------------------------------------------------------*/
843 void
844 usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
845     struct usb_dma_tag *udt, bus_dma_tag_t dmat,
846     struct mtx *mtx, usb_dma_callback_t *func,
847     uint8_t ndmabits, uint8_t nudt)
848 {
849         memset(udpt, 0, sizeof(*udpt));
850
851         /* sanity checking */
852         if ((nudt == 0) ||
853             (ndmabits == 0) ||
854             (mtx == NULL)) {
855                 /* something is corrupt */
856                 return;
857         }
858         /* initialise condition variable */
859         cv_init(udpt->cv, "USB DMA CV");
860
861         /* store some information */
862         udpt->mtx = mtx;
863         udpt->func = func;
864         udpt->tag = dmat;
865         udpt->utag_first = udt;
866         udpt->utag_max = nudt;
867         udpt->dma_bits = ndmabits;
868
869         while (nudt--) {
870                 memset(udt, 0, sizeof(*udt));
871                 udt->tag_parent = udpt;
872                 udt++;
873         }
874 }
875
876 /*------------------------------------------------------------------------*
877  *      usb_dma_tag_unsetup - factored out code
878  *------------------------------------------------------------------------*/
879 void
880 usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
881 {
882         struct usb_dma_tag *udt;
883         uint8_t nudt;
884
885         udt = udpt->utag_first;
886         nudt = udpt->utag_max;
887
888         while (nudt--) {
889
890                 if (udt->align) {
891                         /* destroy the USB DMA tag */
892                         usb_dma_tag_destroy(udt);
893                         udt->align = 0;
894                 }
895                 udt++;
896         }
897
898         if (udpt->utag_max) {
899                 /* destroy the condition variable */
900                 cv_destroy(udpt->cv);
901         }
902 }
903
904 /*------------------------------------------------------------------------*
905  *      usb_bdma_work_loop
906  *
907  * This function handles loading of virtual buffers into DMA and is
908  * only called when "dma_refcount" is zero.
909  *------------------------------------------------------------------------*/
910 void
911 usb_bdma_work_loop(struct usb_xfer_queue *pq)
912 {
913         struct usb_xfer_root *info;
914         struct usb_xfer *xfer;
915         usb_frcount_t nframes;
916
917         xfer = pq->curr;
918         info = xfer->xroot;
919
920         mtx_assert(info->xfer_mtx, MA_OWNED);
921
922         if (xfer->error) {
923                 /* some error happened */
924                 USB_BUS_LOCK(info->bus);
925                 usbd_transfer_done(xfer, 0);
926                 USB_BUS_UNLOCK(info->bus);
927                 return;
928         }
929         if (!xfer->flags_int.bdma_setup) {
930                 struct usb_page *pg;
931                 usb_frlength_t frlength_0;
932                 uint8_t isread;
933
934                 xfer->flags_int.bdma_setup = 1;
935
936                 /* reset BUS-DMA load state */
937
938                 info->dma_error = 0;
939
940                 if (xfer->flags_int.isochronous_xfr) {
941                         /* only one frame buffer */
942                         nframes = 1;
943                         frlength_0 = xfer->sumlen;
944                 } else {
945                         /* can be multiple frame buffers */
946                         nframes = xfer->nframes;
947                         frlength_0 = xfer->frlengths[0];
948                 }
949
950                 /*
951                  * Set DMA direction first. This is needed to
952                  * select the correct cache invalidate and cache
953                  * flush operations.
954                  */
955                 isread = USB_GET_DATA_ISREAD(xfer);
956                 pg = xfer->dma_page_ptr;
957
958                 if (xfer->flags_int.control_xfr &&
959                     xfer->flags_int.control_hdr) {
960                         /* special case */
961                         if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
962                                 /* The device controller writes to memory */
963                                 xfer->frbuffers[0].isread = 1;
964                         } else {
965                                 /* The host controller reads from memory */
966                                 xfer->frbuffers[0].isread = 0;
967                         }
968                 } else {
969                         /* default case */
970                         xfer->frbuffers[0].isread = isread;
971                 }
972
973                 /*
974                  * Setup the "page_start" pointer which points to an array of
975                  * USB pages where information about the physical address of a
976                  * page will be stored. Also initialise the "isread" field of
977                  * the USB page caches.
978                  */
979                 xfer->frbuffers[0].page_start = pg;
980
981                 info->dma_nframes = nframes;
982                 info->dma_currframe = 0;
983                 info->dma_frlength_0 = frlength_0;
984
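                /*
                 * Reserve "usb_page" entries for the worst case: a buffer
                 * of length N that does not start on a page boundary can
                 * span up to (N / USB_PAGE_SIZE) + 2 pages, hence the
                 * "+ 2" below.
                 */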
985                 pg += (frlength_0 / USB_PAGE_SIZE);
986                 pg += 2;
987
988                 while (--nframes > 0) {
989                         xfer->frbuffers[nframes].isread = isread;
990                         xfer->frbuffers[nframes].page_start = pg;
991
992                         pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
993                         pg += 2;
994                 }
995
996         }
997         if (info->dma_error) {
998                 USB_BUS_LOCK(info->bus);
999                 usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
1000                 USB_BUS_UNLOCK(info->bus);
1001                 return;
1002         }
1003         if (info->dma_currframe != info->dma_nframes) {
1004
1005                 if (info->dma_currframe == 0) {
1006                         /* special case */
1007                         usb_pc_load_mem(xfer->frbuffers,
1008                             info->dma_frlength_0, 0);
1009                 } else {
1010                         /* default case */
1011                         nframes = info->dma_currframe;
1012                         usb_pc_load_mem(xfer->frbuffers + nframes,
1013                             xfer->frlengths[nframes], 0);
1014                 }
1015
1016                 /* advance frame index */
1017                 info->dma_currframe++;
1018
1019                 return;
1020         }
1021         /* go ahead */
1022         usb_bdma_pre_sync(xfer);
1023
1024         /* start loading next USB transfer, if any */
1025         usb_command_wrapper(pq, NULL);
1026
1027         /* finally start the hardware */
1028         usbd_pipe_enter(xfer);
1029 }
1030
1031 /*------------------------------------------------------------------------*
1032  *      usb_bdma_done_event
1033  *
1034  * This function is called when BUS-DMA has finished loading virtual
1035  * memory into DMA, if any.
1036  *------------------------------------------------------------------------*/
1037 void
1038 usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
1039 {
1040         struct usb_xfer_root *info;
1041
1042         info = USB_DMATAG_TO_XROOT(udpt);
1043
1044         mtx_assert(info->xfer_mtx, MA_OWNED);
1045
1046         /* copy error */
1047         info->dma_error = udpt->dma_error;
1048
1049         /* enter workloop again */
1050         usb_command_wrapper(&info->dma_q,
1051             info->dma_q.curr);
1052 }
1053
1054 /*------------------------------------------------------------------------*
1055  *      usb_bdma_pre_sync
1056  *
1057  * This function handles DMA synchronisation that must be done before
1058  * a USB transfer is started.
1059  *------------------------------------------------------------------------*/
1060 void
1061 usb_bdma_pre_sync(struct usb_xfer *xfer)
1062 {
1063         struct usb_page_cache *pc;
1064         usb_frcount_t nframes;
1065
1066         if (xfer->flags_int.isochronous_xfr) {
1067                 /* only one frame buffer */
1068                 nframes = 1;
1069         } else {
1070                 /* can be multiple frame buffers */
1071                 nframes = xfer->nframes;
1072         }
1073
1074         pc = xfer->frbuffers;
1075
1076         while (nframes--) {
1077
1078                 if (pc->isread) {
1079                         usb_pc_cpu_invalidate(pc);
1080                 } else {
1081                         usb_pc_cpu_flush(pc);
1082                 }
1083                 pc++;
1084         }
1085 }
1086
1087 /*------------------------------------------------------------------------*
1088  *      usb_bdma_post_sync
1089  *
1090  * This function handles DMA synchronisation that must be done after
1091  * a USB transfer is complete.
1092  *------------------------------------------------------------------------*/
1093 void
1094 usb_bdma_post_sync(struct usb_xfer *xfer)
1095 {
1096         struct usb_page_cache *pc;
1097         usb_frcount_t nframes;
1098
1099         if (xfer->flags_int.isochronous_xfr) {
1100                 /* only one frame buffer */
1101                 nframes = 1;
1102         } else {
1103                 /* can be multiple frame buffers */
1104                 nframes = xfer->nframes;
1105         }
1106
1107         pc = xfer->frbuffers;
1108
1109         while (nframes--) {
1110                 if (pc->isread) {
1111                         usb_pc_cpu_invalidate(pc);
1112                 }
1113                 pc++;
1114         }
1115 }
1116
1117 #endif