/* $FreeBSD$ */
/*-
 * Copyright (c) 2013 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <bsd_global.h>

#if USB_HAVE_BUSDMA
static void     usb_pc_common_mem_cb(struct usb_page_cache *pc,
                    void *vaddr, uint32_t length);
#endif

/*------------------------------------------------------------------------*
 *  usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized!
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
        struct usb_page *page;

        if (pc->page_start) {

                /* Case 1 - something has been loaded into DMA */

                if (pc->buffer) {

                        /* Case 1a - Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
                }
                offset += pc->page_offset_buf;

                /* compute destination page */

                page = pc->page_start;

                if (pc->ismultiseg) {

                        page += (offset / USB_PAGE_SIZE);

                        offset %= USB_PAGE_SIZE;

                        res->length = USB_PAGE_SIZE - offset;
                        res->physaddr = page->physaddr + offset;
                } else {
                        res->length = (usb_size_t)-1;
                        res->physaddr = page->physaddr + offset;
                }
                if (!pc->buffer) {

                        /* Case 1b - Non Kernel Virtual Address */

                        res->buffer = USB_ADD_BYTES(page->buffer, offset);
                }
                return;
        }
#endif
        /* Case 2 - Plain PIO */

        res->buffer = USB_ADD_BYTES(pc->buffer, offset);
        res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
        res->physaddr = 0;
#endif
}
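
/*
 * Usage sketch (added for illustration, not part of the original
 * file): a caller that needs the physical segments backing a page
 * cache can walk it with usbd_get_page().  The names "pc" and
 * "total_len" are hypothetical; usbd_get_page() and struct
 * usb_page_search are the ones defined above.
 *
 *      struct usb_page_search seg;
 *      usb_frlength_t off = 0;
 *
 *      while (off < total_len) {
 *              usbd_get_page(pc, off, &seg);
 *              if (seg.length > (total_len - off))
 *                      seg.length = total_len - off;
 *              (seg.physaddr and seg.length now describe one contiguous
 *               piece of DMA-able memory, seg.buffer its virtual address)
 *              off += seg.length;
 *      }
 */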

/*------------------------------------------------------------------------*
 *  usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
        struct usb_page_search buf_res;

        while (len != 0) {

                usbd_get_page(cache, offset, &buf_res);

                if (buf_res.length > len) {
                        buf_res.length = len;
                }
                memcpy(buf_res.buffer, ptr, buf_res.length);

                offset += buf_res.length;
                len -= buf_res.length;
                ptr = USB_ADD_BYTES(ptr, buf_res.length);
        }
}

/*------------------------------------------------------------------------*
 *  usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memcpy(ptr, res.buffer, res.length);

                offset += res.length;
                len -= res.length;
                ptr = USB_ADD_BYTES(ptr, res.length);
        }
}

/*------------------------------------------------------------------------*
 *  usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
        struct usb_page_search res;

        while (len != 0) {

                usbd_get_page(cache, offset, &res);

                if (res.length > len) {
                        res.length = len;
                }
                memset(res.buffer, 0, res.length);

                offset += res.length;
                len -= res.length;
        }
}
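
/*
 * Usage sketch (added for illustration, not part of the original
 * file): staging a control request with the copy helpers above.  The
 * request/data variables and frame layout are hypothetical; for a
 * control transfer frame 0 typically holds the SETUP packet and
 * frame 1 the data stage.
 *
 *      usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
 *      usbd_frame_zero(xfer->frbuffers + 1, 0, xfer->frlengths[1]);
 *      (run the transfer)
 *      usbd_copy_out(xfer->frbuffers + 1, 0, data, actlen);
 */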

#if USB_HAVE_BUSDMA

/*------------------------------------------------------------------------*
 *      usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(struct usb_page_cache *pc,
    void *vaddr, uint32_t length)
{
        struct usb_page *pg;
        usb_size_t rem;
        bus_size_t off;
        bus_addr_t phys = (uintptr_t)vaddr;     /* XXX */
        uint32_t nseg;

        if (length == 0)
                nseg = 1;
        else
                nseg = ((length + USB_PAGE_SIZE - 1) / USB_PAGE_SIZE);

        pg = pc->page_start;
        pg->physaddr = phys & ~(USB_PAGE_SIZE - 1);
        rem = phys & (USB_PAGE_SIZE - 1);
        pc->page_offset_buf = rem;
        pc->page_offset_end += rem;
        length += rem;

        for (off = USB_PAGE_SIZE; off < length; off += USB_PAGE_SIZE) {
                pg++;
                pg->physaddr = (phys + off) & ~(USB_PAGE_SIZE - 1);
        }
}
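
/*
 * Worked example (added for illustration, assuming USB_PAGE_SIZE is
 * 4096): with vaddr == 0x12345678 and length == 0x2000 the callback
 * stores physaddr 0x12345000 in the first usb_page, sets
 * page_offset_buf to 0x678, extends the effective length to 0x2678
 * and records 0x12346000 and 0x12347000 in the following pages, one
 * entry for every page boundary the buffer crosses.
 */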

/*------------------------------------------------------------------------*
 *      usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
        void *ptr;
        uint32_t rem;

        /* allocate zeroed memory */

        if (align != 1) {
                ptr = usb_malloc(size + align);
                if (ptr == NULL)
                        goto error;

                rem = (-((uintptr_t)ptr)) & (align - 1);
        } else {
                ptr = usb_malloc(size);
                if (ptr == NULL)
                        goto error;
                rem = 0;
        }

        /* setup page cache */
        pc->buffer = ((uint8_t *)ptr) + rem;
        pc->page_start = pg;
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->map = NULL;
        pc->tag = ptr;
        pc->ismultiseg = (align == 1);

        /* compute physical address of the aligned buffer start */
        usb_pc_common_mem_cb(pc, pc->buffer, size);

        usb_pc_cpu_flush(pc);
        return (0);

error:
        /* reset most of the page cache */
        pc->buffer = NULL;
        pc->page_start = NULL;
        pc->page_offset_buf = 0;
        pc->page_offset_end = 0;
        pc->map = NULL;
        pc->tag = NULL;
        return (1);
}
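
/*
 * Usage sketch (added for illustration, not part of the original
 * file): allocating a small, 64-byte aligned DMA buffer and freeing
 * it again.  The variable names are hypothetical; the page array is
 * sized for one possible page crossing.
 *
 *      struct usb_page_cache pc;
 *      struct usb_page pg[2];
 *
 *      if (usb_pc_alloc_mem(&pc, pg, 512, 64)) {
 *              (allocation failed)
 *      }
 *      usbd_copy_in(&pc, 0, data, 512);
 *      ...
 *      usb_pc_free_mem(&pc);
 */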

/*------------------------------------------------------------------------*
 *      usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
        if (pc != NULL && pc->buffer != NULL) {
                usb_free(pc->tag);
                pc->buffer = NULL;
        }
}

/*------------------------------------------------------------------------*
 *      usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 * 0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
        /* setup page cache */
        pc->page_offset_buf = 0;
        pc->page_offset_end = size;
        pc->ismultiseg = 1;

        mtx_assert(pc->tag_parent->mtx, MA_OWNED);

        if (size > 0) {
                /* compute physical address */
                usb_pc_common_mem_cb(pc, pc->buffer, size);
        }
        if (sync == 0) {
                /*
                 * Call callback so that refcount is decremented
                 * properly:
                 */
                pc->tag_parent->dma_error = 0;
                (pc->tag_parent->func) (pc->tag_parent);
        }
        return (0);
}
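
/*
 * Usage sketch (added for illustration, not part of the original
 * file): re-loading a buffer that is already attached to a page
 * cache.  The caller must hold the parent tag mutex and have set up
 * "buffer", "page_start" and "tag_parent"; with sync != 0 the load is
 * purely synchronous, with sync == 0 the parent tag callback runs as
 * in usb_bdma_work_loop() below.
 *
 *      mtx_lock(pc->tag_parent->mtx);
 *      usb_pc_load_mem(pc, len, 1);
 *      mtx_unlock(pc->tag_parent->mtx);
 */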

/*------------------------------------------------------------------------*
 *      usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }
        /* NOP */
}

/*------------------------------------------------------------------------*
 *      usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
        if (pc->page_offset_end == pc->page_offset_buf) {
                /* nothing has been loaded into this page cache! */
                return;
        }
        /* NOP */
}

/*------------------------------------------------------------------------*
 *      usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
        return (0);     /* NOP, success */
}

/*------------------------------------------------------------------------*
 *      usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
        /* NOP */
}

/*------------------------------------------------------------------------*
 *      usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
        memset(udpt, 0, sizeof(*udpt));

        /* sanity checking */
        if ((nudt == 0) ||
            (ndmabits == 0) ||
            (mtx == NULL)) {
                /* something is corrupt */
                return;
        }
        /* initialise condition variable */
        cv_init(udpt->cv, "USB DMA CV");

        /* store some information */
        udpt->mtx = mtx;
        udpt->func = func;
        udpt->tag = dmat;
        udpt->utag_first = udt;
        udpt->utag_max = nudt;
        udpt->dma_bits = ndmabits;

        while (nudt--) {
                memset(udt, 0, sizeof(*udt));
                udt->tag_parent = udpt;
                udt++;
        }
}
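
/*
 * Usage sketch (added for illustration, not part of the original
 * file): initialising a parent tag that covers 32 DMA address bits
 * and eight child tags, then tearing it down again.  "dmat",
 * "bus_mtx" and "xfer_done_cb" are hypothetical; the transfer code
 * uses usb_bdma_done_event() as the callback when the parent tag is
 * embedded in a struct usb_xfer_root.
 *
 *      struct usb_dma_parent_tag parent;
 *      struct usb_dma_tag tags[8];
 *
 *      usb_dma_tag_setup(&parent, tags, dmat, &bus_mtx,
 *          &xfer_done_cb, 32, 8);
 *      ...
 *      usb_dma_tag_unsetup(&parent);
 */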

/*------------------------------------------------------------------------*
 *      usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
        struct usb_dma_tag *udt;
        uint8_t nudt;

        udt = udpt->utag_first;
        nudt = udpt->utag_max;

        while (nudt--) {
                udt->align = 0;
                udt++;
        }

        if (udpt->utag_max) {
                /* destroy the condition variable */
                cv_destroy(udpt->cv);
        }
}

/*------------------------------------------------------------------------*
 *      usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
        struct usb_xfer_root *info;
        struct usb_xfer *xfer;
        usb_frcount_t nframes;

        xfer = pq->curr;
        info = xfer->xroot;

        mtx_assert(info->xfer_mtx, MA_OWNED);

        if (xfer->error) {
                /* some error happened */
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, 0);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (!xfer->flags_int.bdma_setup) {
                struct usb_page *pg;
                usb_frlength_t frlength_0;
                uint8_t isread;

                xfer->flags_int.bdma_setup = 1;

                /* reset BUS-DMA load state */

                info->dma_error = 0;

                if (xfer->flags_int.isochronous_xfr) {
                        /* only one frame buffer */
                        nframes = 1;
                        frlength_0 = xfer->sumlen;
                } else {
                        /* can be multiple frame buffers */
                        nframes = xfer->nframes;
                        frlength_0 = xfer->frlengths[0];
                }

                /*
                 * Set DMA direction first. This is needed to
                 * select the correct cache invalidate and cache
                 * flush operations.
                 */
                isread = USB_GET_DATA_ISREAD(xfer);
                pg = xfer->dma_page_ptr;

                if (xfer->flags_int.control_xfr &&
                    xfer->flags_int.control_hdr) {
                        /* special case */
                        if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
                                /* The device controller writes to memory */
                                xfer->frbuffers[0].isread = 1;
                        } else {
                                /* The host controller reads from memory */
                                xfer->frbuffers[0].isread = 0;
                        }
                } else {
                        /* default case */
                        xfer->frbuffers[0].isread = isread;
                }

                /*
                 * Setup the "page_start" pointer which points to an array of
                 * USB pages where information about the physical address of a
                 * page will be stored. Also initialise the "isread" field of
                 * the USB page caches.
                 */
                xfer->frbuffers[0].page_start = pg;

                info->dma_nframes = nframes;
                info->dma_currframe = 0;
                info->dma_frlength_0 = frlength_0;

                pg += (frlength_0 / USB_PAGE_SIZE);
                pg += 2;

                while (--nframes > 0) {
                        xfer->frbuffers[nframes].isread = isread;
                        xfer->frbuffers[nframes].page_start = pg;

                        pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
                        pg += 2;
                }

        }
        if (info->dma_error) {
                USB_BUS_LOCK(info->bus);
                usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
                USB_BUS_UNLOCK(info->bus);
                return;
        }
        if (info->dma_currframe != info->dma_nframes) {

                if (info->dma_currframe == 0) {
                        /* special case */
                        usb_pc_load_mem(xfer->frbuffers,
                            info->dma_frlength_0, 0);
                } else {
                        /* default case */
                        nframes = info->dma_currframe;
                        usb_pc_load_mem(xfer->frbuffers + nframes,
                            xfer->frlengths[nframes], 0);
                }

                /* advance frame index */
                info->dma_currframe++;

                return;
        }
        /* go ahead */
        usb_bdma_pre_sync(xfer);

        /* start loading next USB transfer, if any */
        usb_command_wrapper(pq, NULL);

        /* finally start the hardware */
        usbd_pipe_enter(xfer);
}

/*------------------------------------------------------------------------*
 *      usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
        struct usb_xfer_root *info;

        info = USB_DMATAG_TO_XROOT(udpt);

        mtx_assert(info->xfer_mtx, MA_OWNED);

        /* copy error */
        info->dma_error = udpt->dma_error;

        /* enter workloop again */
        usb_command_wrapper(&info->dma_q,
            info->dma_q.curr);
}

/*------------------------------------------------------------------------*
 *      usb_bdma_pre_sync
 *
 * This function handles DMA synchronisation that must be done before
 * a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {

                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                } else {
                        usb_pc_cpu_flush(pc);
                }
                pc++;
        }
}

/*------------------------------------------------------------------------*
 *      usb_bdma_post_sync
 *
 * This function handles DMA synchronisation that must be done after
 * a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
        struct usb_page_cache *pc;
        usb_frcount_t nframes;

        if (xfer->flags_int.isochronous_xfr) {
                /* only one frame buffer */
                nframes = 1;
        } else {
                /* can be multiple frame buffers */
                nframes = xfer->nframes;
        }

        pc = xfer->frbuffers;

        while (nframes--) {
                if (pc->isread) {
                        usb_pc_cpu_invalidate(pc);
                }
                pc++;
        }
}
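
/*
 * Ordering sketch (added for illustration, not part of the original
 * file): where the two sync hooks sit in a transfer's life cycle.
 * Written data is copied in and flushed before the hardware starts;
 * read buffers are invalidated again once the hardware is done,
 * before the data is copied out.
 *
 *      usbd_copy_in(xfer->frbuffers, 0, buf, len);
 *      usb_bdma_pre_sync(xfer);        (flush or invalidate per direction)
 *      (hardware executes the transfer)
 *      usb_bdma_post_sync(xfer);       (invalidate "read" buffers)
 *      usbd_copy_out(xfer->frbuffers, 0, buf, len);
 */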
#endif