3 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #ifdef USB_GLOBAL_INCLUDE_FILE
28 #include USB_GLOBAL_INCLUDE_FILE
30 #include <sys/stdint.h>
31 #include <sys/stddef.h>
32 #include <sys/param.h>
33 #include <sys/queue.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
38 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
44 #include <sys/unistd.h>
45 #include <sys/callout.h>
46 #include <sys/malloc.h>
50 #include <dev/usb/usb.h>
51 #include <dev/usb/usbdi.h>
52 #include <dev/usb/usbdi_util.h>
54 #define USB_DEBUG_VAR usb_debug
56 #include <dev/usb/usb_core.h>
57 #include <dev/usb/usb_busdma.h>
58 #include <dev/usb/usb_process.h>
59 #include <dev/usb/usb_transfer.h>
60 #include <dev/usb/usb_device.h>
61 #include <dev/usb/usb_debug.h>
62 #include <dev/usb/usb_util.h>
64 #include <dev/usb/usb_controller.h>
65 #include <dev/usb/usb_bus.h>
66 #include <dev/usb/usb_pf.h>
67 #endif /* USB_GLOBAL_INCLUDE_FILE */
/*
 * Standard packet size filter table entry.
 *
 * NOTE(review): lines appear to have been dropped from this chunk
 * during extraction. Code below (L269 "std_size.range.min" and
 * L274 "std_size.fixed[3]") shows this struct also carries a
 * "range" wrapper and a "fixed[4]" member -- confirm against the
 * full source before editing.
 */
69 struct usb_std_packet_size {
71 uint16_t min; /* inclusive */
72 uint16_t max; /* inclusive */
78 static usb_callback_t usb_request_callback;
/*
 * Default configuration for the two transfers allocated on the
 * default (control) endpoint: index 0 is the generic control
 * transfer, index 1 is the clear-stall transfer (host mode only).
 *
 * NOTE(review): the per-entry index designators and braces appear
 * to be missing from this extracted chunk -- verify against the
 * full source.
 */
80 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
82 /* This transfer is used for generic control endpoint transfers */
86 .endpoint = 0x00, /* Control endpoint */
87 .direction = UE_DIR_ANY,
88 .bufsize = USB_EP0_BUFSIZE, /* bytes */
89 .flags = {.proxy_buffer = 1,},
90 .callback = &usb_request_callback,
91 .usb_mode = USB_MODE_DUAL, /* both modes */
94 /* This transfer is used for generic clear stall only */
98 .endpoint = 0x00, /* Control pipe */
99 .direction = UE_DIR_ANY,
100 .bufsize = sizeof(struct usb_device_request),
101 .callback = &usb_do_clear_stall_callback,
102 .timeout = 1000, /* 1 second */
103 .interval = 50, /* 50ms */
104 .usb_mode = USB_MODE_HOST,
/*
 * Quirk variant of "usb_control_ep_cfg": identical layout except the
 * control transfer uses a fixed 64KB buffer (for devices that need
 * larger EP0 transfers) and no proxy buffer flag.
 *
 * NOTE(review): per-entry index designators/braces appear truncated
 * in this chunk -- verify against the full source.
 */
108 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
110 /* This transfer is used for generic control endpoint transfers */
114 .endpoint = 0x00, /* Control endpoint */
115 .direction = UE_DIR_ANY,
116 .bufsize = 65535, /* bytes */
117 .callback = &usb_request_callback,
118 .usb_mode = USB_MODE_DUAL, /* both modes */
121 /* This transfer is used for generic clear stall only */
125 .endpoint = 0x00, /* Control pipe */
126 .direction = UE_DIR_ANY,
127 .bufsize = sizeof(struct usb_device_request),
128 .callback = &usb_do_clear_stall_callback,
129 .timeout = 1000, /* 1 second */
130 .interval = 50, /* 50ms */
131 .usb_mode = USB_MODE_HOST,
135 /* function prototypes */
137 static void usbd_update_max_frame_size(struct usb_xfer *);
138 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
139 static void usbd_control_transfer_init(struct usb_xfer *);
140 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
141 static void usb_callback_proc(struct usb_proc_msg *);
142 static void usbd_callback_ss_done_defer(struct usb_xfer *);
143 static void usbd_callback_wrapper(struct usb_xfer_queue *);
144 static void usbd_transfer_start_cb(void *);
145 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
146 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
147 uint8_t type, enum usb_dev_speed speed);
149 /*------------------------------------------------------------------------*
150 * usb_request_callback
151 *------------------------------------------------------------------------*/
153 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
155 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
156 usb_handle_request_callback(xfer, error);
158 usbd_do_request_callback(xfer, error);
161 /*------------------------------------------------------------------------*
162 * usbd_update_max_frame_size
164 * This function updates the maximum frame size, hence high speed USB
165 * can transfer multiple consecutive packets.
166 *------------------------------------------------------------------------*/
168 usbd_update_max_frame_size(struct usb_xfer *xfer)
170 /* compute maximum frame size */
171 /* this computation should not overflow 16-bit */
172 /* max = 15 * 1024 */
174 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
177 /*------------------------------------------------------------------------*
180 * The following function is called when we need to
181 * synchronize with DMA hardware.
184 * 0: no DMA delay required
185 * Else: milliseconds of DMA delay
186 *------------------------------------------------------------------------*/
188 usbd_get_dma_delay(struct usb_device *udev)
190 struct usb_bus_methods *mtod;
193 mtod = udev->bus->methods;
196 if (mtod->get_dma_delay) {
197 (mtod->get_dma_delay) (udev, &temp);
199 * Round up and convert to milliseconds. Note that we use
200 * 1024 milliseconds per second. to save a division.
208 /*------------------------------------------------------------------------*
209 * usbd_transfer_setup_sub_malloc
211 * This function will allocate one or more DMA'able memory chunks
212 * according to "size", "align" and "count" arguments. "ppc" is
213 * pointed to a linear array of USB page caches afterwards.
215 * If the "align" argument is equal to "1" a non-contiguous allocation
216 * can happen. Else if the "align" argument is greater than "1", the
217 * allocation will always be contiguous in memory.
222 *------------------------------------------------------------------------*/
/*
 * Allocate (or, on the sizing pass when parm->buf == NULL, merely
 * reserve space for) "count" DMA'able chunks of "size" bytes aligned
 * to "align"; *ppc receives a linear array of page caches.
 * Returns 0 on success, 1 on failure.
 *
 * NOTE(review): this chunk has dropped lines (declarations of
 * n_dma_pc/n_dma_pg/n_obj/x/y/z/r/buf, several braces and else
 * branches) -- treat the text below as a partial listing only.
 */
225 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
226 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
229 struct usb_page_cache *pc;
240 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
242 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
245 return (0); /* nothing to allocate */
248 * Make sure that the size is aligned properly.
/* round "size" up to a multiple of "align" (two's-complement trick) */
250 size = -((-size) & (-align));
253 * Try multi-allocation chunks to reduce the number of DMA
254 * allocations, hence DMA allocations are slow.
257 /* special case - non-cached multi page DMA memory */
259 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
261 } else if (size >= USB_PAGE_SIZE) {
266 /* compute number of objects per page */
267 #ifdef USB_DMA_SINGLE_ALLOC
270 n_obj = (USB_PAGE_SIZE / size);
273 * Compute number of DMA chunks, rounded up
276 n_dma_pc = ((count + n_obj - 1) / n_obj);
281 * DMA memory is allocated once, but mapped twice. That's why
282 * there is one list for auto-free and another list for
283 * non-auto-free which only holds the mapping and not the
/* sizing pass: only advance the reservation pointers */
286 if (parm->buf == NULL) {
287 /* reserve memory (auto-free) */
288 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
289 parm->dma_page_cache_ptr += n_dma_pc;
291 /* reserve memory (no-auto-free) */
292 parm->dma_page_ptr += count * n_dma_pg;
293 parm->xfer_page_cache_ptr += count;
296 for (x = 0; x != n_dma_pc; x++) {
297 /* need to initialize the page cache */
298 parm->dma_page_cache_ptr[x].tag_parent =
299 &parm->curr_xfer->xroot->dma_parent_tag;
301 for (x = 0; x != count; x++) {
302 /* need to initialize the page cache */
303 parm->xfer_page_cache_ptr[x].tag_parent =
304 &parm->curr_xfer->xroot->dma_parent_tag;
309 *ppc = parm->xfer_page_cache_ptr;
311 *ppc = parm->dma_page_cache_ptr;
313 r = count; /* set remainder count */
314 z = n_obj * size; /* set allocation size */
315 pc = parm->xfer_page_cache_ptr;
316 pg = parm->dma_page_ptr;
320 * Avoid mapping memory twice if only a single object
321 * should be allocated per page cache:
323 for (x = 0; x != n_dma_pc; x++) {
324 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
326 return (1); /* failure */
328 /* Make room for one DMA page cache and "n_dma_pg" pages */
329 parm->dma_page_cache_ptr++;
333 for (x = 0; x != n_dma_pc; x++) {
336 /* compute last remainder */
340 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
342 return (1); /* failure */
344 /* Set beginning of current buffer */
345 buf = parm->dma_page_cache_ptr->buffer;
346 /* Make room for one DMA page cache and "n_dma_pg" pages */
347 parm->dma_page_cache_ptr++;
/* carve the chunk into "n_obj" sub-buffers, one per page cache */
350 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
352 /* Load sub-chunk into DMA */
353 if (usb_pc_dmamap_create(pc, size)) {
354 return (1); /* failure */
356 pc->buffer = USB_ADD_BYTES(buf, y * size);
/* usb_pc_load_mem() requires the parent tag mutex */
359 mtx_lock(pc->tag_parent->mtx);
360 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
361 mtx_unlock(pc->tag_parent->mtx);
362 return (1); /* failure */
364 mtx_unlock(pc->tag_parent->mtx);
369 parm->xfer_page_cache_ptr = pc;
370 parm->dma_page_ptr = pg;
375 /*------------------------------------------------------------------------*
376 * usbd_transfer_setup_sub - transfer setup subroutine
378 * This function must be called from the "xfer_setup" callback of the
379 * USB Host or Device controller driver when setting up an USB
380 * transfer. This function will setup correct packet sizes, buffer
381 * sizes, flags and more, that are stored in the "usb_xfer"
383 *------------------------------------------------------------------------*/
/*
 * Per-transfer setup subroutine, called from the controller driver's
 * "xfer_setup" method. Derives packet/frame sizes, interval, buffer
 * sizes and frame arrays for "parm->curr_xfer" from the endpoint
 * descriptors and "parm->curr_setup"; reports problems via parm->err.
 *
 * NOTE(review): many lines (case labels, braces, else branches,
 * declarations of type/maxp_old/mult/temp/x, gotos and the "done:"
 * label) are missing from this extracted chunk -- partial listing.
 */
385 usbd_transfer_setup_sub(struct usb_setup_params *parm)
391 struct usb_xfer *xfer = parm->curr_xfer;
392 const struct usb_config *setup = parm->curr_setup;
393 struct usb_endpoint_ss_comp_descriptor *ecomp;
394 struct usb_endpoint_descriptor *edesc;
395 struct usb_std_packet_size std_size;
396 usb_frcount_t n_frlengths;
397 usb_frcount_t n_frbuffers;
404 * Sanity check. The following parameters must be initialized before
405 * calling this function.
407 if ((parm->hc_max_packet_size == 0) ||
408 (parm->hc_max_packet_count == 0) ||
409 (parm->hc_max_frame_size == 0)) {
410 parm->err = USB_ERR_INVAL;
413 edesc = xfer->endpoint->edesc;
414 ecomp = xfer->endpoint->ecomp;
416 type = (edesc->bmAttributes & UE_XFERTYPE);
/* copy caller-supplied configuration into the xfer */
418 xfer->flags = setup->flags;
419 xfer->nframes = setup->frames;
420 xfer->timeout = setup->timeout;
421 xfer->callback = setup->callback;
422 xfer->interval = setup->interval;
423 xfer->endpointno = edesc->bEndpointAddress;
424 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
425 xfer->max_packet_count = 1;
426 /* make a shadow copy: */
427 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
429 parm->bufsize = setup->bufsize;
/* speed-dependent decoding of wMaxPacketSize / burst parameters */
431 switch (parm->speed) {
/* high speed: bits 12:11 of wMaxPacketSize give extra
 * transaction opportunities per microframe */
436 xfer->max_packet_count +=
437 (xfer->max_packet_size >> 11) & 3;
439 /* check for invalid max packet count */
440 if (xfer->max_packet_count > 3)
441 xfer->max_packet_count = 3;
446 xfer->max_packet_size &= 0x7FF;
448 case USB_SPEED_SUPER:
449 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
/* add burst count from the SuperSpeed companion descriptor,
 * when present (ecomp may be NULL -- guard presumably dropped
 * from this chunk; verify against full source) */
452 xfer->max_packet_count += ecomp->bMaxBurst;
454 if ((xfer->max_packet_count == 0) ||
455 (xfer->max_packet_count > 16))
456 xfer->max_packet_count = 16;
460 xfer->max_packet_count = 1;
466 mult = UE_GET_SS_ISO_MULT(
467 ecomp->bmAttributes) + 1;
471 xfer->max_packet_count *= mult;
477 xfer->max_packet_size &= 0x7FF;
482 /* range check "max_packet_count" */
484 if (xfer->max_packet_count > parm->hc_max_packet_count) {
485 xfer->max_packet_count = parm->hc_max_packet_count;
488 /* store max packet size value before filtering */
490 maxp_old = xfer->max_packet_size;
492 /* filter "wMaxPacketSize" according to HC capabilities */
494 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
495 (xfer->max_packet_size == 0)) {
496 xfer->max_packet_size = parm->hc_max_packet_size;
498 /* filter "wMaxPacketSize" according to standard sizes */
500 usbd_get_std_packet_size(&std_size, type, parm->speed);
502 if (std_size.range.min || std_size.range.max) {
504 if (xfer->max_packet_size < std_size.range.min) {
505 xfer->max_packet_size = std_size.range.min;
507 if (xfer->max_packet_size > std_size.range.max) {
508 xfer->max_packet_size = std_size.range.max;
/* snap down to the largest allowed fixed size */
512 if (xfer->max_packet_size >= std_size.fixed[3]) {
513 xfer->max_packet_size = std_size.fixed[3];
514 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
515 xfer->max_packet_size = std_size.fixed[2];
516 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
517 xfer->max_packet_size = std_size.fixed[1];
519 /* only one possibility left */
520 xfer->max_packet_size = std_size.fixed[0];
525 * Check if the max packet size was outside its allowed range
526 * and clamped to a valid value:
528 if (maxp_old != xfer->max_packet_size)
529 xfer->flags_int.maxp_was_clamped = 1;
531 /* compute "max_frame_size" */
533 usbd_update_max_frame_size(xfer);
535 /* check interrupt interval and transfer pre-delay */
537 if (type == UE_ISOCHRONOUS) {
539 uint16_t frame_limit;
541 xfer->interval = 0; /* not used, must be zero */
542 xfer->flags_int.isochronous_xfr = 1; /* set flag */
544 if (xfer->timeout == 0) {
546 * set a default timeout in
547 * case something goes wrong!
549 xfer->timeout = 1000 / 4;
551 switch (parm->speed) {
554 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
558 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
559 xfer->fps_shift = edesc->bInterval;
560 if (xfer->fps_shift > 0)
562 if (xfer->fps_shift > 3)
564 if (xfer->flags.pre_scale_frames != 0)
565 xfer->nframes <<= (3 - xfer->fps_shift);
569 if (xfer->nframes > frame_limit) {
571 * this is not going to work
574 parm->err = USB_ERR_INVAL;
577 if (xfer->nframes == 0) {
579 * this is not a valid value
581 parm->err = USB_ERR_ZERO_NFRAMES;
587 * If a value is specified use that else check the
588 * endpoint descriptor!
590 if (type == UE_INTERRUPT) {
594 if (xfer->interval == 0) {
596 xfer->interval = edesc->bInterval;
598 switch (parm->speed) {
/* high/super speed: bInterval is an exponent (2^(n-1)
 * microframes); convert to milliseconds */
604 if (xfer->interval < 4)
606 else if (xfer->interval > 16)
607 xfer->interval = (1 << (16 - 4));
610 (1 << (xfer->interval - 4));
615 if (xfer->interval == 0) {
617 * One millisecond is the smallest
618 * interval we support:
626 while ((temp != 0) && (temp < xfer->interval)) {
631 switch (parm->speed) {
636 xfer->fps_shift += 3;
643 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
644 * to be equal to zero when setting up USB transfers, hence
645 * this leads to alot of extra code in the USB kernel.
648 if ((xfer->max_frame_size == 0) ||
649 (xfer->max_packet_size == 0)) {
653 if ((parm->bufsize <= MIN_PKT) &&
654 (type != UE_CONTROL) &&
658 xfer->max_packet_size = MIN_PKT;
659 xfer->max_packet_count = 1;
660 parm->bufsize = 0; /* automatic setup length */
661 usbd_update_max_frame_size(xfer);
664 parm->err = USB_ERR_ZERO_MAXP;
673 * check if we should setup a default
677 if (parm->bufsize == 0) {
679 parm->bufsize = xfer->max_frame_size;
681 if (type == UE_ISOCHRONOUS) {
682 parm->bufsize *= xfer->nframes;
686 * check if we are about to setup a proxy
690 if (xfer->flags.proxy_buffer) {
692 /* round bufsize up */
694 parm->bufsize += (xfer->max_frame_size - 1);
696 if (parm->bufsize < xfer->max_frame_size) {
697 /* length wrapped around */
698 parm->err = USB_ERR_INVAL;
701 /* subtract remainder */
703 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
705 /* add length of USB device request structure, if any */
707 if (type == UE_CONTROL) {
708 parm->bufsize += REQ_SIZE; /* SETUP message */
711 xfer->max_data_length = parm->bufsize;
713 /* Setup "n_frlengths" and "n_frbuffers" */
715 if (type == UE_ISOCHRONOUS) {
716 n_frlengths = xfer->nframes;
720 if (type == UE_CONTROL) {
721 xfer->flags_int.control_xfr = 1;
722 if (xfer->nframes == 0) {
723 if (parm->bufsize <= REQ_SIZE) {
725 * there will never be any data
734 if (xfer->nframes == 0) {
739 n_frlengths = xfer->nframes;
740 n_frbuffers = xfer->nframes;
744 * check if we have room for the
745 * USB device request structure:
748 if (type == UE_CONTROL) {
750 if (xfer->max_data_length < REQ_SIZE) {
751 /* length wrapped around or too small bufsize */
752 parm->err = USB_ERR_INVAL;
755 xfer->max_data_length -= REQ_SIZE;
758 * Setup "frlengths" and shadow "frlengths" for keeping the
759 * initial frame lengths when a USB transfer is complete. This
760 * information is useful when computing isochronous offsets.
762 xfer->frlengths = parm->xfer_length_ptr;
763 parm->xfer_length_ptr += 2 * n_frlengths;
765 /* setup "frbuffers" */
766 xfer->frbuffers = parm->xfer_page_cache_ptr;
767 parm->xfer_page_cache_ptr += n_frbuffers;
769 /* initialize max frame count */
770 xfer->max_frame_count = xfer->nframes;
773 * check if we need to setup
777 if (!xfer->flags.ext_buffer) {
779 struct usb_page_search page_info;
780 struct usb_page_cache *pc;
782 if (usbd_transfer_setup_sub_malloc(parm,
783 &pc, parm->bufsize, 1, 1)) {
784 parm->err = USB_ERR_NOMEM;
785 } else if (parm->buf != NULL) {
787 usbd_get_page(pc, 0, &page_info);
789 xfer->local_buffer = page_info.buffer;
791 usbd_xfer_set_frame_offset(xfer, 0, 0);
793 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
794 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
/* align parm->size[0] up to USB_HOST_ALIGN */
799 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
801 if (parm->buf != NULL) {
803 USB_ADD_BYTES(parm->buf, parm->size[0]);
805 usbd_xfer_set_frame_offset(xfer, 0, 0);
807 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
808 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
811 parm->size[0] += parm->bufsize;
813 /* align data again */
814 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
818 * Compute maximum buffer size
821 if (parm->bufsize_max < parm->bufsize) {
822 parm->bufsize_max = parm->bufsize;
825 if (xfer->flags_int.bdma_enable) {
827 * Setup "dma_page_ptr".
829 * Proof for formula below:
831 * Assume there are three USB frames having length "a", "b" and
832 * "c". These USB frames will at maximum need "z"
833 * "usb_page" structures. "z" is given by:
835 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
836 * ((c / USB_PAGE_SIZE) + 2);
838 * Constraining "a", "b" and "c" like this:
840 * (a + b + c) <= parm->bufsize
844 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
846 * Here is the general formula:
848 xfer->dma_page_ptr = parm->dma_page_ptr;
849 parm->dma_page_ptr += (2 * n_frbuffers);
850 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
854 /* correct maximum data length */
855 xfer->max_data_length = 0;
857 /* subtract USB frame remainder from "hc_max_frame_size" */
859 xfer->max_hc_frame_size =
860 (parm->hc_max_frame_size -
861 (parm->hc_max_frame_size % xfer->max_frame_size));
863 if (xfer->max_hc_frame_size == 0) {
864 parm->err = USB_ERR_INVAL;
868 /* initialize frame buffers */
871 for (x = 0; x != n_frbuffers; x++) {
872 xfer->frbuffers[x].tag_parent =
873 &xfer->xroot->dma_parent_tag;
875 if (xfer->flags_int.bdma_enable &&
876 (parm->bufsize_max > 0)) {
878 if (usb_pc_dmamap_create(
880 parm->bufsize_max)) {
881 parm->err = USB_ERR_NOMEM;
/* error path: install non-zero dummies so later code never
 * divides by zero */
891 * Set some dummy values so that we avoid division by zero:
893 xfer->max_hc_frame_size = 1;
894 xfer->max_frame_size = 1;
895 xfer->max_packet_size = 1;
896 xfer->max_data_length = 0;
898 xfer->max_frame_count = 0;
903 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
907 uint8_t type = setup_start[n_setup].type;
908 if (type == UE_BULK || type == UE_BULK_INTR ||
915 /*------------------------------------------------------------------------*
916 * usbd_transfer_setup - setup an array of USB transfers
918 * NOTE: You must always call "usbd_transfer_unsetup" after calling
919 * "usbd_transfer_setup" if success was returned.
921 * The idea is that the USB device driver should pre-allocate all its
922 * transfers by one call to this function.
927 *------------------------------------------------------------------------*/
/*
 * Allocate and set up an array of USB transfers for a device.
 * Runs the controller's "xfer_setup" method twice: a first (sizing)
 * pass with buf == NULL to compute offsets into one big allocation,
 * then a second pass over the allocated block. Returns a usb_error_t;
 * on success the caller must later call usbd_transfer_unsetup().
 *
 * NOTE(review): this chunk has dropped many lines (the "uint32_t n",
 * "void *buf", "do_unlock" declarations, the "do { ... } while(buf)"
 * two-pass loop framing, several if/else braces and the final
 * "return (error)") -- partial listing only.
 */
929 usbd_transfer_setup(struct usb_device *udev,
930 const uint8_t *ifaces, struct usb_xfer **ppxfer,
931 const struct usb_config *setup_start, uint16_t n_setup,
932 void *priv_sc, struct mtx *xfer_mtx)
934 const struct usb_config *setup_end = setup_start + n_setup;
935 const struct usb_config *setup;
936 struct usb_setup_params *parm;
937 struct usb_endpoint *ep;
938 struct usb_xfer_root *info;
939 struct usb_xfer *xfer;
941 usb_error_t error = 0;
946 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
947 "usbd_transfer_setup can sleep!");
949 /* do some checking first */
952 DPRINTFN(6, "setup array has zero length!\n");
953 return (USB_ERR_INVAL);
956 DPRINTFN(6, "ifaces array is NULL!\n");
957 return (USB_ERR_INVAL);
959 if (xfer_mtx == NULL) {
960 DPRINTFN(6, "using global lock\n");
964 /* more sanity checks */
966 for (setup = setup_start, n = 0;
967 setup != setup_end; setup++, n++) {
968 if (setup->bufsize == (usb_frlength_t)-1) {
969 error = USB_ERR_BAD_BUFSIZE;
970 DPRINTF("invalid bufsize\n");
972 if (setup->callback == NULL) {
973 error = USB_ERR_NO_CALLBACK;
974 DPRINTF("no callback\n");
982 /* Protect scratch area */
983 do_unlock = usbd_ctrl_lock(udev);
988 parm = &udev->scratch.xfer_setup[0].parm;
989 memset(parm, 0, sizeof(*parm));
992 parm->speed = usbd_get_speed(udev);
993 parm->hc_max_packet_count = 1;
995 if (parm->speed >= USB_SPEED_MAX) {
996 parm->err = USB_ERR_INVAL;
999 /* setup all transfers */
1005 * Initialize the "usb_xfer_root" structure,
1006 * which is common for all our USB transfers.
/* second pass only: "buf" is the real allocation */
1008 info = USB_ADD_BYTES(buf, 0);
1010 info->memory_base = buf;
1011 info->memory_size = parm->size[0];
1014 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
1015 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
1017 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
1018 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
1020 cv_init(&info->cv_drain, "WDRAIN");
1022 info->xfer_mtx = xfer_mtx;
1024 usb_dma_tag_setup(&info->dma_parent_tag,
1025 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
1026 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits,
1030 info->bus = udev->bus;
1033 TAILQ_INIT(&info->done_q.head);
1034 info->done_q.command = &usbd_callback_wrapper;
1036 TAILQ_INIT(&info->dma_q.head);
1037 info->dma_q.command = &usb_bdma_work_loop;
1039 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
1040 info->done_m[0].xroot = info;
1041 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
1042 info->done_m[1].xroot = info;
1045 * In device side mode control endpoint
1046 * requests need to run from a separate
1047 * context, else there is a chance of
/* select which USB process will run completion callbacks */
1050 if (setup_start == usb_control_ep_cfg ||
1051 setup_start == usb_control_ep_quirk_cfg)
1053 USB_BUS_CONTROL_XFER_PROC(udev->bus);
1054 else if (xfer_mtx == &Giant)
1056 USB_BUS_GIANT_PROC(udev->bus);
1057 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1059 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1062 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1068 parm->size[0] += sizeof(info[0]);
1070 for (setup = setup_start, n = 0;
1071 setup != setup_end; setup++, n++) {
1073 /* skip USB transfers without callbacks: */
1074 if (setup->callback == NULL) {
1077 /* see if there is a matching endpoint */
1078 ep = usbd_get_endpoint(udev,
1079 ifaces[setup->if_index], setup);
1082 * Check that the USB PIPE is valid and that
1083 * the endpoint mode is proper.
1085 * Make sure we don't allocate a streams
1086 * transfer when such a combination is not
1089 if ((ep == NULL) || (ep->methods == NULL) ||
1090 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1091 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1092 (setup->stream_id != 0 &&
1093 (setup->stream_id >= USB_MAX_EP_STREAMS ||
1094 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1095 if (setup->flags.no_pipe_ok)
1097 if ((setup->usb_mode != USB_MODE_DUAL) &&
1098 (setup->usb_mode != udev->flags.usb_mode))
1100 parm->err = USB_ERR_NO_PIPE;
1104 /* align data properly */
1105 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1107 /* store current setup pointer */
1108 parm->curr_setup = setup;
1112 * Common initialization of the
1113 * "usb_xfer" structure.
1115 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1116 xfer->address = udev->address;
1117 xfer->priv_sc = priv_sc;
1120 usb_callout_init_mtx(&xfer->timeout_handle,
1121 &udev->bus->bus_mtx, 0);
1124 * Setup a dummy xfer, hence we are
1125 * writing to the "usb_xfer"
1126 * structure pointed to by "xfer"
1127 * before we have allocated any
/* sizing pass: write into a scratch dummy xfer instead */
1130 xfer = &udev->scratch.xfer_setup[0].dummy;
1131 memset(xfer, 0, sizeof(*xfer));
1135 /* set transfer endpoint pointer */
1136 xfer->endpoint = ep;
1138 /* set transfer stream ID */
1139 xfer->stream_id = setup->stream_id;
1141 parm->size[0] += sizeof(xfer[0]);
1142 parm->methods = xfer->endpoint->methods;
1143 parm->curr_xfer = xfer;
1146 * Call the Host or Device controller transfer
1149 (udev->bus->methods->xfer_setup) (parm);
1151 /* check for error */
1157 * Increment the endpoint refcount. This
1158 * basically prevents setting a new
1159 * configuration and alternate setting
1160 * when USB transfers are in use on
1161 * the given interface. Search the USB
1162 * code for "endpoint->refcount_alloc" if you
1163 * want more information.
1165 USB_BUS_LOCK(info->bus);
1166 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1167 parm->err = USB_ERR_INVAL;
1169 xfer->endpoint->refcount_alloc++;
1171 if (xfer->endpoint->refcount_alloc == 0)
1172 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1173 USB_BUS_UNLOCK(info->bus);
1176 * Whenever we set ppxfer[] then we
1177 * also need to increment the
1180 info->setup_refcount++;
1183 * Transfer is successfully setup and
1189 /* check for error */
1194 if (buf != NULL || parm->err != 0)
1197 /* if no transfers, nothing to do */
1201 /* align data properly */
1202 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1204 /* store offset temporarily */
1205 parm->size[1] = parm->size[0];
1208 * The number of DMA tags required depends on
1209 * the number of endpoints. The current estimate
1210 * for maximum number of DMA tags per endpoint
1212 * 1) for loading memory
1213 * 2) for allocating memory
1214 * 3) for fixing memory [UHCI]
1216 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1219 * DMA tags for QH, TD, Data and more.
1221 parm->dma_tag_max += 8;
1223 parm->dma_tag_p += parm->dma_tag_max;
1225 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1228 /* align data properly */
1229 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1231 /* store offset temporarily */
1232 parm->size[3] = parm->size[0];
1234 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1237 /* align data properly */
1238 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1240 /* store offset temporarily */
1241 parm->size[4] = parm->size[0];
1243 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1246 /* store end offset temporarily */
1247 parm->size[5] = parm->size[0];
1249 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1252 /* store end offset temporarily */
1254 parm->size[2] = parm->size[0];
1256 /* align data properly */
1257 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1259 parm->size[6] = parm->size[0];
1261 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1264 /* align data properly */
1265 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1267 /* allocate zeroed memory */
1268 buf = malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1271 parm->err = USB_ERR_NOMEM;
1272 DPRINTFN(0, "cannot allocate memory block for "
1273 "configuration (%d bytes)\n",
/* convert the sizing-pass offsets into real pointers */
1277 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1278 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1279 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1280 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1281 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1286 if (info->setup_refcount == 0) {
1288 * "usbd_transfer_unsetup_sub" will unlock
1289 * the bus mutex before returning !
1291 USB_BUS_LOCK(info->bus);
1293 /* something went wrong */
1294 usbd_transfer_unsetup_sub(info, 0);
1298 /* check if any errors happened */
1300 usbd_transfer_unsetup(ppxfer, n_setup);
1305 usbd_ctrl_unlock(udev);
1310 /*------------------------------------------------------------------------*
1311 * usbd_transfer_unsetup_sub - factored out code
1312 *------------------------------------------------------------------------*/
/*
 * Tear down a transfer root: optionally wait out the controller's DMA
 * delay, drain done messages, free DMA memory/maps/tags and finally
 * the backing allocation itself. Must be entered with the bus mutex
 * held; it is released before the memory is freed.
 *
 * NOTE(review): loop-increment lines ("pc++") and several braces are
 * missing from this extracted chunk -- partial listing.
 */
1314 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1317 struct usb_page_cache *pc;
1320 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1322 /* wait for any outstanding DMA operations */
1326 temp = usbd_get_dma_delay(info->udev);
1328 usb_pause_mtx(&info->bus->bus_mtx,
1329 USB_MS_TO_TICKS(temp));
1333 /* make sure that our done messages are not queued anywhere */
1334 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1336 USB_BUS_UNLOCK(info->bus);
1339 /* free DMA'able memory, if any */
1340 pc = info->dma_page_cache_start;
1341 while (pc != info->dma_page_cache_end) {
1342 usb_pc_free_mem(pc);
1346 /* free DMA maps in all "xfer->frbuffers" */
1347 pc = info->xfer_page_cache_start;
1348 while (pc != info->xfer_page_cache_end) {
1349 usb_pc_dmamap_destroy(pc);
1353 /* free all DMA tags */
1354 usb_dma_tag_unsetup(&info->dma_parent_tag);
1357 cv_destroy(&info->cv_drain);
1360 * free the "memory_base" last, hence the "info" structure is
1361 * contained within the "memory_base"!
1363 free(info->memory_base, M_USB);
1366 /*------------------------------------------------------------------------*
1367 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1369 * NOTE: All USB transfers in progress will get called back passing
1370 * the error code "USB_ERR_CANCELLED" before this function
1372 *------------------------------------------------------------------------*/
/*
 * Unsetup/free an array of USB transfers previously created by
 * usbd_transfer_setup(). Cancels in-progress transfers (callbacks
 * see USB_ERR_CANCELLED), drains them, drops endpoint refcounts and
 * frees the shared transfer-root allocation once its refcount hits 0.
 *
 * NOTE(review): the enclosing "while (n_setup--)" loop, NULL checks
 * and several braces are missing from this extracted chunk.
 */
1374 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1376 struct usb_xfer *xfer;
1377 struct usb_xfer_root *info;
1378 uint8_t needs_delay = 0;
1380 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1381 "usbd_transfer_unsetup can sleep!");
1384 xfer = pxfer[n_setup];
1391 USB_XFER_LOCK(xfer);
1392 USB_BUS_LOCK(info->bus);
1395 * HINT: when you start/stop a transfer, it might be a
1396 * good idea to directly use the "pxfer[]" structure:
1398 * usbd_transfer_start(sc->pxfer[0]);
1399 * usbd_transfer_stop(sc->pxfer[0]);
1401 * That way, if your code has many parts that will not
1402 * stop running under the same lock, in other words
1403 * "xfer_mtx", the usbd_transfer_start and
1404 * usbd_transfer_stop functions will simply return
1405 * when they detect a NULL pointer argument.
1407 * To avoid any races we clear the "pxfer[]" pointer
1408 * while holding the private mutex of the driver:
1410 pxfer[n_setup] = NULL;
1412 USB_BUS_UNLOCK(info->bus);
1413 USB_XFER_UNLOCK(xfer);
1415 usbd_transfer_drain(xfer);
/* a busdma-enabled transfer requires the DMA settle delay later */
1418 if (xfer->flags_int.bdma_enable)
1422 * NOTE: default endpoint does not have an
1423 * interface, even if endpoint->iface_index == 0
1425 USB_BUS_LOCK(info->bus);
1426 xfer->endpoint->refcount_alloc--;
1427 USB_BUS_UNLOCK(info->bus);
1429 usb_callout_drain(&xfer->timeout_handle);
1431 USB_BUS_LOCK(info->bus);
1433 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1434 "reference count\n"));
1436 info->setup_refcount--;
1438 if (info->setup_refcount == 0) {
1439 usbd_transfer_unsetup_sub(info,
1442 USB_BUS_UNLOCK(info->bus);
1447 /*------------------------------------------------------------------------*
1448 * usbd_control_transfer_init - factored out code
1450 * In USB Device Mode we have to wait for the SETUP packet which
1451 * containst the "struct usb_device_request" structure, before we can
1452 * transfer any data. In USB Host Mode we already have the SETUP
1453 * packet at the moment the USB transfer is started. This leads us to
1454 * having to setup the USB transfer at two different places in
1455 * time. This function just contains factored out control transfer
1456 * initialisation code, so that we don't duplicate the code.
1457 *------------------------------------------------------------------------*/
1459 usbd_control_transfer_init(struct usb_xfer *xfer)
1461 struct usb_device_request req;
1463 /* copy out the USB request header */
1465 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1467 /* setup remainder */
/* "control_rem" holds the number of DATA-stage bytes left, from wLength. */
1469 xfer->flags_int.control_rem = UGETW(req.wLength);
1471 /* copy direction to endpoint variable */
/* Clear both direction bits, then set the one implied by bmRequestType. */
1473 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1475 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1478 /*------------------------------------------------------------------------*
1479 * usbd_control_transfer_did_data
1481 * This function returns non-zero if a control endpoint has
1482 * transferred the first DATA packet after the SETUP packet.
1483 * Else it returns zero.
1484 *------------------------------------------------------------------------*/
1486 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1488 struct usb_device_request req;
1490 /* SETUP packet is not yet sent */
1491 if (xfer->flags_int.control_hdr != 0)
1494 /* copy out the USB request header */
1495 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1497 /* compare remainder to the initial value */
/* If any DATA bytes were consumed, the remainder differs from wLength. */
1498 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1501 /*------------------------------------------------------------------------*
1502 * usbd_setup_ctrl_transfer
1504 * This function handles initialisation of control transfers. Control
1505 * transfers are special in that regard that they can both transmit
1511 *------------------------------------------------------------------------*/
1513 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1517 /* Check for control endpoint stall */
1518 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1519 /* the control transfer is no longer active */
1520 xfer->flags_int.control_stall = 1;
1521 xfer->flags_int.control_act = 0;
1523 /* don't stall control transfer by default */
1524 xfer->flags_int.control_stall = 0;
1527 /* Check for invalid number of frames */
/* A control transfer has at most SETUP + DATA + (implicit) STATUS. */
1528 if (xfer->nframes > 2) {
1530 * If you need to split a control transfer, you
1531 * have to do one part at a time. Only with
1532 * non-control transfers you can do multiple
1535 DPRINTFN(0, "Too many frames: %u\n",
1536 (unsigned int)xfer->nframes);
1541 * Check if there is a control
1542 * transfer in progress:
1544 if (xfer->flags_int.control_act) {
1546 if (xfer->flags_int.control_hdr) {
1548 /* clear send header flag */
1550 xfer->flags_int.control_hdr = 0;
1552 /* setup control transfer */
1553 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1554 usbd_control_transfer_init(xfer);
1557 /* get data length */
1563 /* the size of the SETUP structure is hardcoded ! */
1565 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1566 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1567 xfer->frlengths[0], sizeof(struct
1568 usb_device_request));
1571 /* check USB mode */
1572 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1574 /* check number of frames */
1575 if (xfer->nframes != 1) {
1577 * We need to receive the setup
1578 * message first so that we know the
1581 DPRINTF("Misconfigured transfer\n");
1585 * Set a dummy "control_rem" value. This
1586 * variable will be overwritten later by a
1587 * call to "usbd_control_transfer_init()" !
1589 xfer->flags_int.control_rem = 0xFFFF;
1592 /* setup "endpoint" and "control_rem" */
1594 usbd_control_transfer_init(xfer);
1597 /* set transfer-header flag */
1599 xfer->flags_int.control_hdr = 1;
1601 /* get data length */
/* Total data length excludes the SETUP packet itself. */
1603 len = (xfer->sumlen - sizeof(struct usb_device_request));
1606 /* update did data flag */
1608 xfer->flags_int.control_did_data =
1609 usbd_control_transfer_did_data(xfer);
1611 /* check if there is a length mismatch */
1613 if (len > xfer->flags_int.control_rem) {
1614 DPRINTFN(0, "Length (%d) greater than "
1615 "remaining length (%d)\n", len,
1616 xfer->flags_int.control_rem);
1618 /* check if we are doing a short transfer */
1621 if (xfer->flags.force_short_xfer) {
1622 xfer->flags_int.control_rem = 0;
1624 if ((len != xfer->max_data_length) &&
1625 (len != xfer->flags_int.control_rem) &&
1626 (xfer->nframes != 1)) {
1627 DPRINTFN(0, "Short control transfer without "
1628 "force_short_xfer set\n");
/* Account for the DATA bytes queued in this submission. */
1631 xfer->flags_int.control_rem -= len;
1634 /* the status part is executed when "control_act" is 0 */
1636 if ((xfer->flags_int.control_rem > 0) ||
1637 (xfer->flags.manual_status)) {
1638 /* don't execute the STATUS stage yet */
1639 xfer->flags_int.control_act = 1;
1642 if ((!xfer->flags_int.control_hdr) &&
1643 (xfer->nframes == 1)) {
1645 * This is not a valid operation!
1647 DPRINTFN(0, "Invalid parameter "
1652 /* time to execute the STATUS stage */
1653 xfer->flags_int.control_act = 0;
1655 return (0); /* success */
1658 return (1); /* failure */
1661 /*------------------------------------------------------------------------*
1662 * usbd_transfer_submit - start USB hardware for the given transfer
1664 * This function should only be called from the USB callback.
1665 *------------------------------------------------------------------------*/
1667 usbd_transfer_submit(struct usb_xfer *xfer)
1669 struct usb_xfer_root *info;
1670 struct usb_bus *bus;
1676 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1677 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1681 if (USB_DEBUG_VAR > 0) {
1684 usb_dump_endpoint(xfer->endpoint);
1686 USB_BUS_UNLOCK(bus);
/* Caller must hold the private transfer lock but NOT the bus lock. */
1690 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1691 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1693 /* Only open the USB transfer once! */
1694 if (!xfer->flags_int.open) {
1695 xfer->flags_int.open = 1;
1700 (xfer->endpoint->methods->open) (xfer);
1701 USB_BUS_UNLOCK(bus);
1703 /* set "transferring" flag */
1704 xfer->flags_int.transferring = 1;
1707 /* increment power reference */
1708 usbd_transfer_power_ref(xfer, 1);
1711 * Check if the transfer is waiting on a queue, most
1712 * frequently the "done_q":
1714 if (xfer->wait_queue) {
1716 usbd_transfer_dequeue(xfer);
1717 USB_BUS_UNLOCK(bus);
1719 /* clear "did_dma_delay" flag */
1720 xfer->flags_int.did_dma_delay = 0;
1722 /* clear "did_close" flag */
1723 xfer->flags_int.did_close = 0;
1726 /* clear "bdma_setup" flag */
1727 xfer->flags_int.bdma_setup = 0;
1729 /* by default we cannot cancel any USB transfer immediately */
1730 xfer->flags_int.can_cancel_immed = 0;
1732 /* clear lengths and frame counts by default */
1737 /* clear any previous errors */
1740 /* Check if the device is still alive */
1741 if (info->udev->state < USB_STATE_POWERED) {
1744 * Must return cancelled error code else
1745 * device drivers can hang.
1747 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1748 USB_BUS_UNLOCK(bus);
1753 if (xfer->nframes == 0) {
1754 if (xfer->flags.stall_pipe) {
1756 * Special case - want to stall without transferring
1759 DPRINTF("xfer=%p nframes=0: stall "
1760 "or clear stall!\n", xfer);
1762 xfer->flags_int.can_cancel_immed = 1;
1763 /* start the transfer */
1764 usb_command_wrapper(&xfer->endpoint->
1765 endpoint_q[xfer->stream_id], xfer);
1766 USB_BUS_UNLOCK(bus);
/* Zero frames without a stall request is an invalid submission. */
1770 usbd_transfer_done(xfer, USB_ERR_INVAL);
1771 USB_BUS_UNLOCK(bus);
1774 /* compute some variables */
1776 for (x = 0; x != xfer->nframes; x++) {
1777 /* make a copy of the frlenghts[] */
/* Saved copy lives at frlengths[max_frame_count + x]; see usbd_xfer_old_frame_length(). */
1778 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1779 /* compute total transfer length */
1780 xfer->sumlen += xfer->frlengths[x];
1781 if (xfer->sumlen < xfer->frlengths[x]) {
1782 /* length wrapped around */
1784 usbd_transfer_done(xfer, USB_ERR_INVAL);
1785 USB_BUS_UNLOCK(bus);
1790 /* clear some internal flags */
1792 xfer->flags_int.short_xfer_ok = 0;
1793 xfer->flags_int.short_frames_ok = 0;
1795 /* check if this is a control transfer */
1797 if (xfer->flags_int.control_xfr) {
1799 if (usbd_setup_ctrl_transfer(xfer)) {
1801 usbd_transfer_done(xfer, USB_ERR_STALLED);
1802 USB_BUS_UNLOCK(bus);
1807 * Setup filtered version of some transfer flags,
1808 * in case of data read direction
1810 if (USB_GET_DATA_ISREAD(xfer)) {
1812 if (xfer->flags.short_frames_ok) {
1813 xfer->flags_int.short_xfer_ok = 1;
1814 xfer->flags_int.short_frames_ok = 1;
1815 } else if (xfer->flags.short_xfer_ok) {
1816 xfer->flags_int.short_xfer_ok = 1;
1818 /* check for control transfer */
1819 if (xfer->flags_int.control_xfr) {
1821 * 1) Control transfers do not support
1822 * reception of multiple short USB
1823 * frames in host mode and device side
1824 * mode, with exception of:
1826 * 2) Due to sometimes buggy device
1827 * side firmware we need to do a
1828 * STATUS stage in case of short
1829 * control transfers in USB host mode.
1830 * The STATUS stage then becomes the
1831 * "alt_next" to the DATA stage.
1833 xfer->flags_int.short_frames_ok = 1;
1838 * Check if BUS-DMA support is enabled and try to load virtual
1839 * buffers into DMA, if any:
1842 if (xfer->flags_int.bdma_enable) {
1843 /* insert the USB transfer last in the BUS-DMA queue */
1844 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1849 * Enter the USB transfer into the Host Controller or
1850 * Device Controller schedule:
1852 usbd_pipe_enter(xfer);
1855 /*------------------------------------------------------------------------*
1856 * usbd_pipe_enter - factored out code
1857 *------------------------------------------------------------------------*/
1859 usbd_pipe_enter(struct usb_xfer *xfer)
1861 struct usb_endpoint *ep;
1863 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1865 USB_BUS_LOCK(xfer->xroot->bus);
1867 ep = xfer->endpoint;
1871 /* the transfer can now be cancelled */
1872 xfer->flags_int.can_cancel_immed = 1;
1874 /* enter the transfer */
/* Hand the transfer to the controller method for scheduling. */
1875 (ep->methods->enter) (xfer);
1877 /* check for transfer error */
1879 /* some error has happened */
/* Error code 0 here: usbd_transfer_done() keeps the already-set error. */
1880 usbd_transfer_done(xfer, 0);
1881 USB_BUS_UNLOCK(xfer->xroot->bus);
1885 /* start the transfer */
1886 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1887 USB_BUS_UNLOCK(xfer->xroot->bus);
1890 /*------------------------------------------------------------------------*
1891 * usbd_transfer_start - start an USB transfer
1893 * NOTE: Calling this function more than one time will only
1894 * result in a single transfer start, until the USB transfer
1896 *------------------------------------------------------------------------*/
1898 usbd_transfer_start(struct usb_xfer *xfer)
1901 /* transfer is gone */
1904 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1906 /* mark the USB transfer started */
1908 if (!xfer->flags_int.started) {
1909 /* lock the BUS lock to avoid races updating flags_int */
1910 USB_BUS_LOCK(xfer->xroot->bus);
1911 xfer->flags_int.started = 1;
1912 USB_BUS_UNLOCK(xfer->xroot->bus);
1914 /* check if the USB transfer callback is already transferring */
1916 if (xfer->flags_int.transferring) {
1919 USB_BUS_LOCK(xfer->xroot->bus);
1920 /* call the USB transfer callback */
/* Defer the callback to the done queue/process; see usbd_callback_ss_done_defer(). */
1921 usbd_callback_ss_done_defer(xfer);
1922 USB_BUS_UNLOCK(xfer->xroot->bus);
1925 /*------------------------------------------------------------------------*
1926 * usbd_transfer_stop - stop an USB transfer
1928 * NOTE: Calling this function more than one time will only
1929 * result in a single transfer stop.
1930 * NOTE: When this function returns it is not safe to free nor
1931 * reuse any DMA buffers. See "usbd_transfer_drain()".
1932 *------------------------------------------------------------------------*/
1934 usbd_transfer_stop(struct usb_xfer *xfer)
1936 struct usb_endpoint *ep;
1939 /* transfer is gone */
1942 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1944 /* check if the USB transfer was ever opened */
1946 if (!xfer->flags_int.open) {
1947 if (xfer->flags_int.started) {
1948 /* nothing to do except clearing the "started" flag */
1949 /* lock the BUS lock to avoid races updating flags_int */
1950 USB_BUS_LOCK(xfer->xroot->bus);
1951 xfer->flags_int.started = 0;
1952 USB_BUS_UNLOCK(xfer->xroot->bus);
1956 /* try to stop the current USB transfer */
1958 USB_BUS_LOCK(xfer->xroot->bus);
1959 /* override any previous error */
1960 xfer->error = USB_ERR_CANCELLED;
1963 * Clear "open" and "started" when both private and USB lock
1964 * is locked so that we don't get a race updating "flags_int"
1966 xfer->flags_int.open = 0;
1967 xfer->flags_int.started = 0;
1970 * Check if we can cancel the USB transfer immediately.
1972 if (xfer->flags_int.transferring) {
1973 if (xfer->flags_int.can_cancel_immed &&
1974 (!xfer->flags_int.did_close)) {
1977 * The following will lead to an USB_ERR_CANCELLED
1978 * error code being passed to the USB callback.
1980 (xfer->endpoint->methods->close) (xfer);
1981 /* only close once */
1982 xfer->flags_int.did_close = 1;
1984 /* need to wait for the next done callback */
1989 /* close here and now */
1990 (xfer->endpoint->methods->close) (xfer);
1993 * Any additional DMA delay is done by
1994 * "usbd_transfer_unsetup()".
1998 * Special case. Check if we need to restart a blocked
2001 ep = xfer->endpoint;
2004 * If the current USB transfer is completing we need
2005 * to start the next one:
2007 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
/* Kick the endpoint queue so the next queued transfer runs. */
2008 usb_command_wrapper(
2009 &ep->endpoint_q[xfer->stream_id], NULL);
2013 USB_BUS_UNLOCK(xfer->xroot->bus);
2016 /*------------------------------------------------------------------------*
2017 * usbd_transfer_pending
2019 * This function will check if an USB transfer is pending which is a
2020 * little bit complicated!
2023 * 1: Pending: The USB transfer will receive a callback in the future.
2024 *------------------------------------------------------------------------*/
2026 usbd_transfer_pending(struct usb_xfer *xfer)
2028 struct usb_xfer_root *info;
2029 struct usb_xfer_queue *pq;
2032 /* transfer is gone */
2035 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2037 if (xfer->flags_int.transferring) {
2041 USB_BUS_LOCK(xfer->xroot->bus);
2042 if (xfer->wait_queue) {
2043 /* we are waiting on a queue somewhere */
2044 USB_BUS_UNLOCK(xfer->xroot->bus);
2050 if (pq->curr == xfer) {
2051 /* we are currently scheduled for callback */
2052 USB_BUS_UNLOCK(xfer->xroot->bus);
2055 /* we are not pending */
2056 USB_BUS_UNLOCK(xfer->xroot->bus);
2060 /*------------------------------------------------------------------------*
2061 * usbd_transfer_drain
2063 * This function will stop the USB transfer and wait for any
2064 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2065 * are loaded into DMA can safely be freed or reused after that this
2066 * function has returned.
2067 *------------------------------------------------------------------------*/
2069 usbd_transfer_drain(struct usb_xfer *xfer)
/* May sleep waiting on "cv_drain"; not callable from a USB callback. */
2071 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2072 "usbd_transfer_drain can sleep!");
2075 /* transfer is gone */
2078 if (xfer->xroot->xfer_mtx != &Giant) {
2079 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2081 USB_XFER_LOCK(xfer);
2083 usbd_transfer_stop(xfer);
2085 while (usbd_transfer_pending(xfer) ||
2086 xfer->flags_int.doing_callback) {
2089 * It is allowed that the callback can drop its
2090 * transfer mutex. In that case checking only
2091 * "usbd_transfer_pending()" is not enough to tell if
2092 * the USB transfer is fully drained. We also need to
2093 * check the internal "doing_callback" flag.
2095 xfer->flags_int.draining = 1;
2098 * Wait until the current outstanding USB
2099 * transfer is complete !
2101 cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2103 USB_XFER_UNLOCK(xfer);
/* Return the page-cache buffer descriptor for the given frame index. */
2106 struct usb_page_cache *
2107 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2109 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2111 return (&xfer->frbuffers[frindex]);
/* Return the virtual-address buffer pointer of the given frame. */
2115 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2117 struct usb_page_search page_info;
2119 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2121 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2122 return (page_info.buffer);
2125 /*------------------------------------------------------------------------*
2126 * usbd_xfer_get_fps_shift
2128 * The following function is only useful for isochronous transfers. It
2129 * returns how many times the frame execution rate has been shifted
2135 *------------------------------------------------------------------------*/
2137 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2139 return (xfer->fps_shift);
/* Return the current length of the given frame. */
2143 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2145 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2147 return (xfer->frlengths[frindex]);
2150 /*------------------------------------------------------------------------*
2151 * usbd_xfer_set_frame_data
2153 * This function sets the pointer of the buffer that should
2154 * loaded directly into DMA for the given USB frame. Passing "ptr"
2155 * equal to NULL while the corresponding "frlength" is greater
2156 * than zero gives undefined results!
2157 *------------------------------------------------------------------------*/
2159 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2160 void *ptr, usb_frlength_t len)
2162 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2164 /* set virtual address to load and length */
2165 xfer->frbuffers[frindex].buffer = ptr;
2166 usbd_xfer_set_frame_len(xfer, frindex, len);
/* Return the buffer pointer and/or length of the given frame via out-params. */
2170 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2171 void **ptr, int *len)
2173 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2176 *ptr = xfer->frbuffers[frindex].buffer;
2178 *len = xfer->frlengths[frindex];
2181 /*------------------------------------------------------------------------*
2182 * usbd_xfer_old_frame_length
2184 * This function returns the framelength of the given frame at the
2185 * time the transfer was submitted. This function can be used to
2186 * compute the starting data pointer of the next isochronous frame
2187 * when an isochronous transfer has completed.
2188 *------------------------------------------------------------------------*/
2190 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2192 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
/* The submit-time copy is stored at offset "max_frame_count"; see usbd_transfer_submit(). */
2194 return (xfer->frlengths[frindex + xfer->max_frame_count]);
/* Copy out transfer status fields; NULL out-pointers are skipped. */
2198 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2202 *actlen = xfer->actlen;
2204 *sumlen = xfer->sumlen;
2205 if (aframes != NULL)
2206 *aframes = xfer->aframes;
2207 if (nframes != NULL)
2208 *nframes = xfer->nframes;
2211 /*------------------------------------------------------------------------*
2212 * usbd_xfer_set_frame_offset
2214 * This function sets the frame data buffer offset relative to the beginning
2215 * of the USB DMA buffer allocated for this USB transfer.
2216 *------------------------------------------------------------------------*/
2218 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2219 usb_frcount_t frindex)
2221 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2222 "when the USB buffer is external\n"));
2223 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2225 /* set virtual address to load */
2226 xfer->frbuffers[frindex].buffer =
2227 USB_ADD_BYTES(xfer->local_buffer, offset);
/* Set the transfer interval "i"; body elided in this extract — TODO confirm against full source. */
2231 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
/* Set the transfer timeout "t"; body elided in this extract — TODO confirm against full source. */
2237 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
/* Set the number of frames "n"; body elided in this extract — TODO confirm against full source. */
2243 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
/* Return the maximum number of frames this transfer supports. */
2249 usbd_xfer_max_frames(struct usb_xfer *xfer)
2251 return (xfer->max_frame_count);
/* Return the maximum total data length of the transfer. */
2255 usbd_xfer_max_len(struct usb_xfer *xfer)
2257 return (xfer->max_data_length);
/* Return the maximum length of a single frame. */
2261 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2263 return (xfer->max_frame_size);
/* Set the length of the given frame. */
2267 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2270 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2272 xfer->frlengths[frindex] = len;
2275 /*------------------------------------------------------------------------*
2276 * usb_callback_proc - factored out code
2278 * This function performs USB callbacks.
2279 *------------------------------------------------------------------------*/
2281 usb_callback_proc(struct usb_proc_msg *_pm)
2283 struct usb_done_msg *pm = (void *)_pm;
2284 struct usb_xfer_root *info = pm->xroot;
2286 /* Change locking order */
/* Drop the bus lock first so "xfer_mtx" can be taken before it. */
2287 USB_BUS_UNLOCK(info->bus);
2290 * We exploit the fact that the mutex is the same for all
2291 * callbacks that will be called from this thread:
2293 mtx_lock(info->xfer_mtx);
2294 USB_BUS_LOCK(info->bus);
2296 /* Continue where we lost track */
2297 usb_command_wrapper(&info->done_q,
2300 mtx_unlock(info->xfer_mtx);
2303 /*------------------------------------------------------------------------*
2304 * usbd_callback_ss_done_defer
2306 * This function will defer the start, stop and done callback to the
2308 *------------------------------------------------------------------------*/
2310 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2312 struct usb_xfer_root *info = xfer->xroot;
2313 struct usb_xfer_queue *pq = &info->done_q;
2315 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
/* Queue the transfer on the done queue unless it is already current. */
2317 if (pq->curr != xfer) {
2318 usbd_transfer_enqueue(pq, xfer);
2320 if (!pq->recurse_1) {
2323 * We have to postpone the callback due to the fact we
2324 * will have a Lock Order Reversal, LOR, if we try to
/* Signal the done-process to run the callback later. */
2327 (void) usb_proc_msignal(info->done_p,
2328 &info->done_m[0], &info->done_m[1]);
2330 /* clear second recurse flag */
2337 /*------------------------------------------------------------------------*
2338 * usbd_callback_wrapper
2340 * This is a wrapper for USB callbacks. This wrapper does some
2341 * auto-magic things like figuring out if we can call the callback
2342 * directly from the current context or if we need to wakeup the
2343 * interrupt process.
2344 *------------------------------------------------------------------------*/
2346 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2348 struct usb_xfer *xfer = pq->curr;
2349 struct usb_xfer_root *info = xfer->xroot;
2351 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
/* If we don't own the driver's lock (and the scheduler is running), defer. */
2352 if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
2353 SCHEDULER_STOPPED() == 0) {
2355 * Cases that end up here:
2357 * 5) HW interrupt done callback or other source.
2358 * 6) HW completed transfer during callback
2360 DPRINTFN(3, "case 5 and 6\n");
2363 * We have to postpone the callback due to the fact we
2364 * will have a Lock Order Reversal, LOR, if we try to
2367 * Postponing the callback also ensures that other USB
2368 * transfer queues get a chance.
2370 (void) usb_proc_msignal(info->done_p,
2371 &info->done_m[0], &info->done_m[1]);
2375 * Cases that end up here:
2377 * 1) We are starting a transfer
2378 * 2) We are prematurely calling back a transfer
2379 * 3) We are stopping a transfer
2380 * 4) We are doing an ordinary callback
2382 DPRINTFN(3, "case 1-4\n");
2383 /* get next USB transfer in the queue */
2384 info->done_q.curr = NULL;
2386 /* set flag in case of drain */
2387 xfer->flags_int.doing_callback = 1;
2389 USB_BUS_UNLOCK(info->bus);
2390 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2392 /* set correct USB state for callback */
2393 if (!xfer->flags_int.transferring) {
2394 xfer->usb_state = USB_ST_SETUP;
2395 if (!xfer->flags_int.started) {
2396 /* we got stopped before we even got started */
2397 USB_BUS_LOCK(info->bus);
2402 if (usbd_callback_wrapper_sub(xfer)) {
2403 /* the callback has been deferred */
2404 USB_BUS_LOCK(info->bus);
2408 /* decrement power reference */
2409 usbd_transfer_power_ref(xfer, -1);
2411 xfer->flags_int.transferring = 0;
2414 xfer->usb_state = USB_ST_ERROR;
2416 /* set transferred state */
2417 xfer->usb_state = USB_ST_TRANSFERRED;
2419 /* sync DMA memory, if any */
2420 if (xfer->flags_int.bdma_enable &&
2421 (!xfer->flags_int.bdma_no_post_sync)) {
2422 usb_bdma_post_sync(xfer);
2429 if (xfer->usb_state != USB_ST_SETUP) {
2430 USB_BUS_LOCK(info->bus);
2431 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2432 USB_BUS_UNLOCK(info->bus);
2435 /* call processing routine */
/* Invoke the driver's callback without the bus lock held. */
2436 (xfer->callback) (xfer, xfer->error);
2438 /* pickup the USB mutex again */
2439 USB_BUS_LOCK(info->bus);
2442 * Check if we got started after that we got cancelled, but
2443 * before we managed to do the callback.
2445 if ((!xfer->flags_int.open) &&
2446 (xfer->flags_int.started) &&
2447 (xfer->usb_state == USB_ST_ERROR)) {
2448 /* clear flag in case of drain */
2449 xfer->flags_int.doing_callback = 0;
2450 /* try to loop, but not recursivly */
2451 usb_command_wrapper(&info->done_q, xfer);
2456 /* clear flag in case of drain */
2457 xfer->flags_int.doing_callback = 0;
2460 * Check if we are draining.
2462 if (xfer->flags_int.draining &&
2463 (!xfer->flags_int.transferring)) {
2464 /* "usbd_transfer_drain()" is waiting for end of transfer */
2465 xfer->flags_int.draining = 0;
/* Wake up the thread sleeping in usbd_transfer_drain(). */
2466 cv_broadcast(&info->cv_drain);
2469 /* do the next callback, if any */
2470 usb_command_wrapper(&info->done_q,
2474 /*------------------------------------------------------------------------*
2475 * usb_dma_delay_done_cb
2477 * This function is called when the DMA delay has been exectuded, and
2478 * will make sure that the callback is called to complete the USB
2479 * transfer. This code path is ususally only used when there is an USB
2480 * error like USB_ERR_CANCELLED.
2481 *------------------------------------------------------------------------*/
2483 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2485 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2487 DPRINTFN(3, "Completed %p\n", xfer);
2489 /* queue callback for execution, again */
/* Error code 0: preserve whatever error is already set on the transfer. */
2490 usbd_transfer_done(xfer, 0);
2493 /*------------------------------------------------------------------------*
2494 * usbd_transfer_dequeue
2496 * - This function is used to remove an USB transfer from a USB
2499 * - This function can be called multiple times in a row.
2500 *------------------------------------------------------------------------*/
2502 usbd_transfer_dequeue(struct usb_xfer *xfer)
2504 struct usb_xfer_queue *pq;
2506 pq = xfer->wait_queue;
2508 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
/* Clearing "wait_queue" makes repeated dequeue calls harmless. */
2509 xfer->wait_queue = NULL;
2513 /*------------------------------------------------------------------------*
2514 * usbd_transfer_enqueue
2516 * - This function is used to insert an USB transfer into a USB *
2519 * - This function can be called multiple times in a row.
2520 *------------------------------------------------------------------------*/
2522 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2525 * Insert the USB transfer into the queue, if it is not
2526 * already on a USB transfer queue:
2528 if (xfer->wait_queue == NULL) {
2529 xfer->wait_queue = pq;
2530 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2534 /*------------------------------------------------------------------------*
2535 * usbd_transfer_done
2537 * - This function is used to remove an USB transfer from the busdma,
2538 * pipe or interrupt queue.
2540 * - This function is used to queue the USB transfer on the done
2543 * - This function is used to stop any USB transfer timeouts.
2544 *------------------------------------------------------------------------*/
2546 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2548 struct usb_xfer_root *info = xfer->xroot;
2550 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2552 DPRINTF("err=%s\n", usbd_errstr(error));
2555 * If we are not transferring then just return.
2556 * This can happen during transfer cancel.
2558 if (!xfer->flags_int.transferring) {
2559 DPRINTF("not transferring\n");
2560 /* end of control transfer, if any */
2561 xfer->flags_int.control_act = 0;
2564 /* only set transfer error, if not already set */
/* First error wins; a later error code never overwrites an earlier one. */
2565 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2566 xfer->error = error;
2568 /* stop any callouts */
2569 usb_callout_stop(&xfer->timeout_handle);
2572 * If we are waiting on a queue, just remove the USB transfer
2573 * from the queue, if any. We should have the required locks
2574 * locked to do the remove when this function is called.
2576 usbd_transfer_dequeue(xfer);
2579 if (mtx_owned(info->xfer_mtx)) {
2580 struct usb_xfer_queue *pq;
2583 * If the private USB lock is not locked, then we assume
2584 * that the BUS-DMA load stage has been passed:
2588 if (pq->curr == xfer) {
2589 /* start the next BUS-DMA load, if any */
2590 usb_command_wrapper(pq, NULL);
2594 /* keep some statistics */
/* Per-device stats are bucketed by the endpoint's transfer type. */
2595 if (xfer->error == USB_ERR_CANCELLED) {
2596 info->udev->stats_cancelled.uds_requests
2597 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2598 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2599 info->udev->stats_err.uds_requests
2600 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2602 info->udev->stats_ok.uds_requests
2603 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2606 /* call the USB transfer callback */
2607 usbd_callback_ss_done_defer(xfer);
2610 /*------------------------------------------------------------------------*
2611 * usbd_transfer_start_cb
2613 * This function is called to start the USB transfer when
2614 * "xfer->interval" is greater than zero, and and the endpoint type is
2616 *------------------------------------------------------------------------*/
2618 usbd_transfer_start_cb(void *arg)
2620 struct usb_xfer *xfer = arg;
2621 struct usb_endpoint *ep = xfer->endpoint;
2623 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2628 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2631 /* the transfer can now be cancelled */
2632 xfer->flags_int.can_cancel_immed = 1;
2634 /* start USB transfer, if no error */
2635 if (xfer->error == 0)
2636 (ep->methods->start) (xfer);
2638 /* check for transfer error */
2640 /* some error has happened */
/* Error code 0 here: usbd_transfer_done() keeps the already-set error. */
2641 usbd_transfer_done(xfer, 0);
2645 /*------------------------------------------------------------------------*
2646 * usbd_xfer_set_stall
2648 * This function is used to set the stall flag outside the
2649 * callback. This function is NULL safe.
2650 *------------------------------------------------------------------------*/
2652 usbd_xfer_set_stall(struct usb_xfer *xfer)
2658 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2660 /* avoid any races by locking the USB mutex */
2661 USB_BUS_LOCK(xfer->xroot->bus);
2662 xfer->flags.stall_pipe = 1;
2663 USB_BUS_UNLOCK(xfer->xroot->bus);
/* Return non-zero if the transfer's endpoint is currently stalled. */
2667 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2669 return (xfer->endpoint->is_stalled);
2672 /*------------------------------------------------------------------------*
2673 * usbd_transfer_clear_stall
2675 * This function is used to clear the stall flag outside the
2676 * callback. This function is NULL safe.
2677 *------------------------------------------------------------------------*/
2679 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2685 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2687 /* avoid any races by locking the USB mutex */
2688 USB_BUS_LOCK(xfer->xroot->bus);
2690 xfer->flags.stall_pipe = 0;
2692 USB_BUS_UNLOCK(xfer->xroot->bus);
2695 /*------------------------------------------------------------------------*
2698 * This function is used to add an USB transfer to the pipe transfer list.
2699 *------------------------------------------------------------------------*/
2701 usbd_pipe_start(struct usb_xfer_queue *pq)
2703 struct usb_endpoint *ep;
2704 struct usb_xfer *xfer;
2708 ep = xfer->endpoint;
/* must be called with the bus lock held */
2710 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2713 * If the endpoint is already stalled we do nothing !
2715 if (ep->is_stalled) {
2719 * Check if we are supposed to stall the endpoint:
2721 if (xfer->flags.stall_pipe) {
2722 struct usb_device *udev;
2723 struct usb_xfer_root *info;
2725 /* clear stall command */
2726 xfer->flags.stall_pipe = 0;
2728 /* get pointer to USB device */
2733 * Only stall BULK and INTERRUPT endpoints.
2735 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2736 if ((type == UE_BULK) ||
2737 (type == UE_INTERRUPT)) {
/*
 * Device mode: the controller method sets the stall
 * directly. Host mode: hand the job to the clear-stall
 * control transfer machinery via the bus process.
 */
2742 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2743 (udev->bus->methods->set_stall) (
2744 udev, ep, &did_stall);
2745 } else if (udev->ctrl_xfer[1]) {
2746 info = udev->ctrl_xfer[1]->xroot;
2748 USB_BUS_CS_PROC(info->bus),
2749 &udev->cs_msg[0], &udev->cs_msg[1]);
2751 /* should not happen */
2752 DPRINTFN(0, "No stall handler\n");
2755 * Check if we should stall. Some USB hardware
2756 * handles set- and clear-stall in hardware.
2760 * The transfer will be continued when
2761 * the clear-stall control endpoint
2762 * message is received.
2767 } else if (type == UE_ISOCHRONOUS) {
2770 * Make sure any FIFO overflow or other FIFO
2771 * error conditions go away by resetting the
2772 * endpoint FIFO through the clear stall
2775 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2776 (udev->bus->methods->clear_stall) (udev, ep);
2780 /* Set or clear stall complete - special case */
2781 if (xfer->nframes == 0) {
2782 /* we are complete */
2784 usbd_transfer_done(xfer, 0);
2790 * 1) Start the first transfer queued.
2792 * 2) Re-start the current USB transfer.
2795 * Check if there should be any
2796 * pre transfer start delay:
/*
 * A positive "interval" on BULK/CONTROL transfers means a
 * deferred start through usbd_transfer_timeout_ms(); the
 * actual start then happens in usbd_transfer_start_cb.
 */
2798 if (xfer->interval > 0) {
2799 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2800 if ((type == UE_BULK) ||
2801 (type == UE_CONTROL)) {
2802 usbd_transfer_timeout_ms(xfer,
2803 &usbd_transfer_start_cb,
/* tap the transfer for USB packet filter (usbdump) consumers */
2811 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2813 /* the transfer can now be cancelled */
2814 xfer->flags_int.can_cancel_immed = 1;
2816 /* start USB transfer, if no error */
2817 if (xfer->error == 0)
2818 (ep->methods->start) (xfer);
2820 /* check for transfer error */
2822 /* some error has happened */
2823 usbd_transfer_done(xfer, 0);
2827 /*------------------------------------------------------------------------*
2828 * usbd_transfer_timeout_ms
2830 * This function is used to setup a timeout on the given USB
2831 * transfer. If the timeout has been deferred the callback given by
2832 * "cb" will get called after "ms" milliseconds.
2833 *------------------------------------------------------------------------*/
2835 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2836 void (*cb) (void *arg), usb_timeout_t ms)
/* the callout is protected by the bus lock */
2838 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
/*
 * NOTE(review): USB_CALLOUT_ZERO_TICKS is added to the converted
 * tick count — presumably to guarantee the callout never fires
 * earlier than "ms" milliseconds; confirm against the macro's
 * definition.
 */
2841 usb_callout_reset(&xfer->timeout_handle,
2842 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2845 /*------------------------------------------------------------------------*
2846 * usbd_callback_wrapper_sub
2848 * - This function will update variables in an USB transfer after
2849 * that the USB transfer is complete.
2851 * - This function is used to start the next USB transfer on the
2852 * ep transfer queue, if any.
2854 * NOTE: In some special cases the USB transfer will not be removed from
2855 * the pipe queue, but remain first. To enforce USB transfer removal call
2856 * this function passing the error code "USB_ERR_CANCELLED".
2860 * Else: The callback has been deferred.
2861 *------------------------------------------------------------------------*/
2863 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2865 struct usb_endpoint *ep;
2866 struct usb_bus *bus;
2869 bus = xfer->xroot->bus;
/*
 * If the transfer was never opened and never closed, close it now
 * and defer the callback (return 1) until the close completes.
 */
2871 if ((!xfer->flags_int.open) &&
2872 (!xfer->flags_int.did_close)) {
2875 (xfer->endpoint->methods->close) (xfer);
2876 USB_BUS_UNLOCK(bus);
2877 /* only close once */
2878 xfer->flags_int.did_close = 1;
2879 return (1); /* wait for new callback */
2882 * If we have a non-hardware induced error we
2883 * need to do the DMA delay!
2885 if (xfer->error != 0 && !xfer->flags_int.did_dma_delay &&
2886 (xfer->error == USB_ERR_CANCELLED ||
2887 xfer->error == USB_ERR_TIMEOUT ||
2888 bus->methods->start_dma_delay != NULL)) {
2892 /* only delay once */
2893 xfer->flags_int.did_dma_delay = 1;
2895 /* we can not cancel this delay */
2896 xfer->flags_int.can_cancel_immed = 0;
2898 temp = usbd_get_dma_delay(xfer->xroot->udev);
2900 DPRINTFN(3, "DMA delay, %u ms, "
2901 "on %p\n", temp, xfer);
2906 * Some hardware solutions have dedicated
2907 * events when it is safe to free DMA'ed
2908 * memory. For the other hardware platforms we
2909 * use a static delay.
2911 if (bus->methods->start_dma_delay != NULL) {
2912 (bus->methods->start_dma_delay) (xfer);
2914 usbd_transfer_timeout_ms(xfer,
2915 (void (*)(void *))&usb_dma_delay_done_cb,
2918 USB_BUS_UNLOCK(bus);
2919 return (1); /* wait for new callback */
2922 /* check actual number of frames */
/* panic only on a no-error completion; otherwise clamp and continue */
2923 if (xfer->aframes > xfer->nframes) {
2924 if (xfer->error == 0) {
2925 panic("%s: actual number of frames, %d, is "
2926 "greater than initial number of frames, %d\n",
2927 __FUNCTION__, xfer->aframes, xfer->nframes);
2929 /* just set some valid value */
2930 xfer->aframes = xfer->nframes;
2933 /* compute actual length */
2936 for (x = 0; x != xfer->aframes; x++) {
2937 xfer->actlen += xfer->frlengths[x];
2941 * Frames that were not transferred get zero actual length in
2942 * case the USB device driver does not check the actual number
2943 * of frames transferred, "xfer->aframes":
2945 for (; x < xfer->nframes; x++) {
2946 usbd_xfer_set_frame_len(xfer, x, 0);
2949 /* check actual length */
2950 if (xfer->actlen > xfer->sumlen) {
2951 if (xfer->error == 0) {
2952 panic("%s: actual length, %d, is greater than "
2953 "initial length, %d\n",
2954 __FUNCTION__, xfer->actlen, xfer->sumlen);
2956 /* just set some valid value */
2957 xfer->actlen = xfer->sumlen;
2960 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2961 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2962 xfer->aframes, xfer->nframes);
2965 /* end of control transfer, if any */
2966 xfer->flags_int.control_act = 0;
2968 #if USB_HAVE_TT_SUPPORT
/* on these completion codes, try to reset the transaction translator */
2969 switch (xfer->error) {
2970 case USB_ERR_NORMAL_COMPLETION:
2971 case USB_ERR_SHORT_XFER:
2972 case USB_ERR_STALLED:
2973 case USB_ERR_CANCELLED:
2977 /* try to reset the TT, if any */
2979 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2980 USB_BUS_UNLOCK(bus);
2984 /* check if we should block the execution queue */
2985 if ((xfer->error != USB_ERR_CANCELLED) &&
2986 (xfer->flags.pipe_bof)) {
2987 DPRINTFN(2, "xfer=%p: Block On Failure "
2988 "on endpoint=%p\n", xfer, xfer->endpoint);
2992 /* check for short transfers */
2993 if (xfer->actlen < xfer->sumlen) {
2995 /* end of control transfer, if any */
2996 xfer->flags_int.control_act = 0;
2998 if (!xfer->flags_int.short_xfer_ok) {
2999 xfer->error = USB_ERR_SHORT_XFER;
3000 if (xfer->flags.pipe_bof) {
3001 DPRINTFN(2, "xfer=%p: Block On Failure on "
3002 "Short Transfer on endpoint %p.\n",
3003 xfer, xfer->endpoint);
3009 * Check if we are in the middle of a
3012 if (xfer->flags_int.control_act) {
3013 DPRINTFN(5, "xfer=%p: Control transfer "
3014 "active on endpoint=%p\n", xfer, xfer->endpoint);
3020 ep = xfer->endpoint;
3023 * If the current USB transfer is completing we need to start the
3027 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
3028 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
3030 if (ep->endpoint_q[xfer->stream_id].curr != NULL ||
3031 TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL) {
3032 /* there is another USB transfer waiting */
3034 /* this is the last USB transfer */
3035 /* clear isochronous sync flag */
3036 xfer->endpoint->is_synced = 0;
3039 USB_BUS_UNLOCK(bus);
3044 /*------------------------------------------------------------------------*
3045 * usb_command_wrapper
3047 * This function is used to execute commands non-recursivly on an USB
3049 *------------------------------------------------------------------------*/
3051 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
3055 * If the transfer is not already processing,
3058 if (pq->curr != xfer) {
3059 usbd_transfer_enqueue(pq, xfer);
3060 if (pq->curr != NULL) {
3061 /* something is already processing */
3062 DPRINTFN(6, "busy %p\n", pq->curr);
3067 /* Get next element in queue */
/*
 * The recurse_1/recurse_2 flags turn nested invocations into
 * iterations of the loop below, avoiding unbounded kernel stack
 * growth when a callback re-enters this function.
 */
3071 if (!pq->recurse_1) {
3073 /* clear third recurse flag */
3077 /* set two first recurse flags */
3081 if (pq->curr == NULL) {
3082 xfer = TAILQ_FIRST(&pq->head);
3084 TAILQ_REMOVE(&pq->head, xfer,
3086 xfer->wait_queue = NULL;
3092 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3094 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3097 * Set third recurse flag to indicate
3098 * recursion happened:
3102 } while (!pq->recurse_2);
3104 /* clear first recurse flag */
3108 /* clear second recurse flag */
3113 /*------------------------------------------------------------------------*
3114 * usbd_ctrl_transfer_setup
3116 * This function is used to setup the default USB control endpoint
3118 *------------------------------------------------------------------------*/
3120 usbd_ctrl_transfer_setup(struct usb_device *udev)
3122 struct usb_xfer *xfer;
3124 uint8_t iface_index;
3126 /* check for root HUB */
/* the root HUB has no default control endpoint transfer to manage */
3127 if (udev->parent_hub == NULL)
3131 xfer = udev->ctrl_xfer[0];
3133 USB_XFER_LOCK(xfer);
/*
 * Decide whether the existing control transfer can be reused:
 * address unchanged and the cached wMaxPacketSize still matches
 * the device descriptor.
 */
3135 ((xfer->address == udev->address) &&
3136 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3137 udev->ddesc.bMaxPacketSize));
3138 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3141 * NOTE: checking "xfer->address" and
3142 * starting the USB transfer must be
3145 usbd_transfer_start(xfer);
3148 USB_XFER_UNLOCK(xfer);
3155 * All parameters are exactly the same like before.
3161 * Update wMaxPacketSize for the default control endpoint:
3163 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3164 udev->ddesc.bMaxPacketSize;
3167 * Unsetup any existing USB transfer:
3169 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3172 * Reset clear stall error counter.
3174 udev->clear_stall_errors = 0;
3177 * Try to setup a new USB transfer for the
3178 * default control endpoint:
/* some buses need a quirked control endpoint configuration */
3181 if (usbd_transfer_setup(udev, &iface_index,
3182 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3183 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3184 &udev->device_mtx)) {
3185 DPRINTFN(0, "could not setup default "
3192 /*------------------------------------------------------------------------*
3193 * usbd_clear_stall_locked - factored out code
3195 * NOTE: the intention of this function is not to reset the hardware
3197 *------------------------------------------------------------------------*/
3199 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
/* the bus lock must already be held by the caller */
3201 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3203 /* check that we have a valid case */
/* only host mode, non-root devices with a clear_stall bus method */
3204 if (udev->flags.usb_mode == USB_MODE_HOST &&
3205 udev->parent_hub != NULL &&
3206 udev->bus->methods->clear_stall != NULL &&
3207 ep->methods != NULL) {
3208 (udev->bus->methods->clear_stall) (udev, ep);
3212 /*------------------------------------------------------------------------*
3213 * usbd_clear_data_toggle - factored out code
3215 * NOTE: the intention of this function is not to reset the hardware
3216 * data toggle on the USB device side.
3217 *------------------------------------------------------------------------*/
3219 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3221 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
/* reset the host-side toggle under the bus lock */
3223 USB_BUS_LOCK(udev->bus);
3224 ep->toggle_next = 0;
3225 /* some hardware needs a callback to clear the data toggle */
3226 usbd_clear_stall_locked(udev, ep);
3227 USB_BUS_UNLOCK(udev->bus);
3230 /*------------------------------------------------------------------------*
3231 * usbd_clear_stall_callback - factored out clear stall callback
3234 * xfer1: Clear Stall Control Transfer
3235 * xfer2: Stalled USB Transfer
3237 * This function is NULL safe.
3243 * Clear stall config example:
3245 * static const struct usb_config my_clearstall = {
3246 * .type = UE_CONTROL,
3248 * .direction = UE_DIR_ANY,
3249 * .interval = 50, //50 milliseconds
3250 * .bufsize = sizeof(struct usb_device_request),
3251 * .timeout = 1000, //1.000 seconds
3252 * .callback = &my_clear_stall_callback, // **
3253 * .usb_mode = USB_MODE_HOST,
3256 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3257 * passing the correct parameters.
3258 *------------------------------------------------------------------------*/
3260 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3261 struct usb_xfer *xfer2)
3263 struct usb_device_request req;
3265 if (xfer2 == NULL) {
3266 /* looks like we are tearing down */
3267 DPRINTF("NULL input parameter\n");
3270 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3271 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
/* standard three-state USB callback dispatch */
3273 switch (USB_GET_STATE(xfer1)) {
3277 * pre-clear the data toggle to DATA0 ("umass.c" and
3278 * "ata-usb.c" depends on this)
3281 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3283 /* setup a clear-stall packet */
/* CLEAR_FEATURE(ENDPOINT_HALT) addressed at the stalled endpoint */
3285 req.bmRequestType = UT_WRITE_ENDPOINT;
3286 req.bRequest = UR_CLEAR_FEATURE;
3287 USETW(req.wValue, UF_ENDPOINT_HALT);
3288 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3290 USETW(req.wLength, 0);
3293 * "usbd_transfer_setup_sub()" will ensure that
3294 * we have sufficient room in the buffer for
3295 * the request structure!
3298 /* copy in the transfer */
3300 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3303 xfer1->frlengths[0] = sizeof(req);
3306 usbd_transfer_submit(xfer1);
3309 case USB_ST_TRANSFERRED:
3312 default: /* Error */
3313 if (xfer1->error == USB_ERR_CANCELLED) {
3318 return (1); /* Clear Stall Finished */
3321 /*------------------------------------------------------------------------*
3322 * usbd_transfer_poll
3324 * The following function gets called from the USB keyboard driver and
3325 * UMASS when the system has paniced.
3327 * NOTE: It is currently not possible to resume normal operation on
3328 * the USB controller which has been polled, due to clearing of the
3329 * "up_dsleep" and "up_msleep" flags.
3330 *------------------------------------------------------------------------*/
3332 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3334 struct usb_xfer *xfer;
3335 struct usb_xfer_root *xroot;
3336 struct usb_device *udev;
3337 struct usb_proc_msg *pm;
3342 for (n = 0; n != max; n++) {
3343 /* Extra checks to avoid panic */
/* every pointer on the path to the poll method is validated */
3346 continue; /* no USB transfer */
3347 xroot = xfer->xroot;
3349 continue; /* no USB root */
3352 continue; /* no USB device */
3353 if (udev->bus == NULL)
3354 continue; /* no BUS structure */
3355 if (udev->bus->methods == NULL)
3356 continue; /* no BUS methods */
3357 if (udev->bus->methods->xfer_poll == NULL)
3358 continue; /* no poll method */
3360 /* make sure that the BUS mutex is not locked */
/*
 * Drop any mutexes this CPU holds so the poll method can take
 * them; skipped when the scheduler is stopped (panic context).
 */
3362 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3363 mtx_unlock(&xroot->udev->bus->bus_mtx);
3367 /* make sure that the transfer mutex is not locked */
3369 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3370 mtx_unlock(xroot->xfer_mtx);
3374 /* Make sure cv_signal() and cv_broadcast() is not called */
3375 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3376 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3377 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3378 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0;
3379 USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0;
3381 /* poll USB hardware */
3382 (udev->bus->methods->xfer_poll) (udev->bus);
3384 USB_BUS_LOCK(xroot->bus);
3386 /* check for clear stall */
3387 if (udev->ctrl_xfer[1] != NULL) {
3389 /* poll clear stall start */
/* run the deferred process messages synchronously */
3390 pm = &udev->cs_msg[0].hdr;
3391 (pm->pm_callback) (pm);
3392 /* poll clear stall done thread */
3393 pm = &udev->ctrl_xfer[1]->
3394 xroot->done_m[0].hdr;
3395 (pm->pm_callback) (pm);
3398 /* poll done thread */
3399 pm = &xroot->done_m[0].hdr;
3400 (pm->pm_callback) (pm);
3402 USB_BUS_UNLOCK(xroot->bus);
3404 /* restore transfer mutex */
3406 mtx_lock(xroot->xfer_mtx);
3408 /* restore BUS mutex */
3410 mtx_lock(&xroot->udev->bus->bus_mtx);
/*
 * Fill out "ptr" with the standard packet size limits for the given
 * endpoint type and bus speed. For INTERRUPT/ISOCHRONOUS endpoints a
 * range maximum is reported; for BULK/CONTROL a set of fixed sizes.
 */
3415 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3416 uint8_t type, enum usb_dev_speed speed)
/* per-speed maximum wMaxPacketSize for interrupt endpoints */
3418 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3419 [USB_SPEED_LOW] = 8,
3420 [USB_SPEED_FULL] = 64,
3421 [USB_SPEED_HIGH] = 1024,
3422 [USB_SPEED_VARIABLE] = 1024,
3423 [USB_SPEED_SUPER] = 1024,
/* per-speed maximum wMaxPacketSize for isochronous endpoints */
3426 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3427 [USB_SPEED_LOW] = 0, /* invalid */
3428 [USB_SPEED_FULL] = 1023,
3429 [USB_SPEED_HIGH] = 1024,
3430 [USB_SPEED_VARIABLE] = 3584,
3431 [USB_SPEED_SUPER] = 1024,
/* per-speed minimum fixed size for control endpoints */
3434 static const uint16_t control_min[USB_SPEED_MAX] = {
3435 [USB_SPEED_LOW] = 8,
3436 [USB_SPEED_FULL] = 8,
3437 [USB_SPEED_HIGH] = 64,
3438 [USB_SPEED_VARIABLE] = 512,
3439 [USB_SPEED_SUPER] = 512,
/* per-speed minimum fixed size for bulk endpoints */
3442 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3443 [USB_SPEED_LOW] = 8,
3444 [USB_SPEED_FULL] = 8,
3445 [USB_SPEED_HIGH] = 512,
3446 [USB_SPEED_VARIABLE] = 512,
3447 [USB_SPEED_SUPER] = 1024,
3452 memset(ptr, 0, sizeof(*ptr));
3456 ptr->range.max = intr_range_max[speed];
3458 case UE_ISOCHRONOUS:
3459 ptr->range.max = isoc_range_max[speed];
3462 if (type == UE_BULK)
3463 temp = bulk_min[speed];
3464 else /* UE_CONTROL */
3465 temp = control_min[speed];
3467 /* default is fixed */
3468 ptr->fixed[0] = temp;
3469 ptr->fixed[1] = temp;
3470 ptr->fixed[2] = temp;
3471 ptr->fixed[3] = temp;
3473 if (speed == USB_SPEED_FULL) {
3474 /* multiple sizes */
3479 if ((speed == USB_SPEED_VARIABLE) &&
3480 (type == UE_BULK)) {
3481 /* multiple sizes */
3482 ptr->fixed[2] = 1024;
3483 ptr->fixed[3] = 1536;
/* Return the driver softc associated with "xfer". */
3490 usbd_xfer_softc(struct usb_xfer *xfer)
3492 return (xfer->priv_sc);
/* Return the per-transfer private pointer set by usbd_xfer_set_priv(). */
3496 usbd_xfer_get_priv(struct usb_xfer *xfer)
3498 return (xfer->priv_fifo);
/* Store a per-transfer private pointer, readable via usbd_xfer_get_priv(). */
3502 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3504 xfer->priv_fifo = ptr;
/* Return the current USB state of "xfer" (see USB_GET_STATE users). */
3508 usbd_xfer_state(struct usb_xfer *xfer)
3510 return (xfer->usb_state);
/*
 * Set one of the public transfer flags on "xfer".
 * Unknown flag values are ignored.
 */
3514 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3517 case USB_FORCE_SHORT_XFER:
3518 xfer->flags.force_short_xfer = 1;
3520 case USB_SHORT_XFER_OK:
3521 xfer->flags.short_xfer_ok = 1;
3523 case USB_MULTI_SHORT_OK:
3524 xfer->flags.short_frames_ok = 1;
3526 case USB_MANUAL_STATUS:
3527 xfer->flags.manual_status = 1;
/*
 * Clear one of the public transfer flags on "xfer".
 * Mirror image of usbd_xfer_set_flag(); unknown flags are ignored.
 */
3533 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3536 case USB_FORCE_SHORT_XFER:
3537 xfer->flags.force_short_xfer = 0;
3539 case USB_SHORT_XFER_OK:
3540 xfer->flags.short_xfer_ok = 0;
3542 case USB_MULTI_SHORT_OK:
3543 xfer->flags.short_frames_ok = 0;
3545 case USB_MANUAL_STATUS:
3546 xfer->flags.manual_status = 0;
3552 * The following function returns in milliseconds when the isochronous
3553 * transfer was completed by the hardware. The returned value wraps
3554 * around 65536 milliseconds.
3557 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3559 return (xfer->isoc_time_complete);
3563 * The following function returns non-zero if the max packet size
3564 * field was clamped to a valid value. Else it returns zero.
3567 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3569 return (xfer->flags_int.maxp_was_clamped);