2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 tablet USB tablet mouse
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
40 #include <sys/types.h>
41 #include <sys/queue.h>
51 #include <machine/vmm_snapshot.h>
53 #include <dev/usb/usbdi.h>
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usb_freebsd.h>
66 static int xhci_debug = 0;
67 #define DPRINTF(params) if (xhci_debug) PRINTLN params
68 #define WPRINTF(params) PRINTLN params
71 #define XHCI_NAME "xhci"
72 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */
74 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */
77 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping
78 * to 4k to avoid going over the guest physical memory barrier.
80 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */
82 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */
84 #define XHCI_CAPLEN (4*8) /* offset of op register space */
85 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */
86 #define XHCI_PORTREGS_START 0x400
87 #define XHCI_DOORBELL_MAX 256
89 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */
91 /* caplength and hci-version registers */
92 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF)
93 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16)
94 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF)
96 /* hcsparams1 register */
97 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF)
98 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8)
99 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24)
101 /* hcsparams2 register */
102 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F)
103 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4)
104 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21)
105 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27)
107 /* hcsparams3 register */
108 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF)
109 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16)
111 /* hccparams1 register */
112 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01)
113 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1)
114 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2)
115 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3)
116 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4)
117 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5)
118 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6)
119 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7)
120 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8)
121 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9)
122 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10)
123 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11)
124 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12)
125 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16)
127 /* hccparams2 register */
128 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01)
129 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1)
130 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2)
131 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3)
132 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4)
133 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5)
135 /* other registers */
136 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03)
137 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F)
140 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */
141 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */
142 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */
144 /* port register set */
145 #define XHCI_PORTREGS_BASE 0x400 /* base offset */
146 #define XHCI_PORTREGS_PORT0 0x3F0
147 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */
149 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL)
150 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL)
152 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \
153 (((b) & (m)) << (s)))
154 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \
155 (((b) & ((m) << (s)))))
157 #define SNAP_DEV_NAME_LEN 128
159 struct pci_xhci_trb_ring {
160 uint64_t ringaddr; /* current dequeue guest address */
161 uint32_t ccs; /* consumer cycle state */
164 /* device endpoint transfer/stream rings */
165 struct pci_xhci_dev_ep {
167 struct xhci_trb *_epu_tr;
168 struct xhci_stream_ctx *_epu_sctx;
170 #define ep_tr _ep_trbsctx._epu_tr
171 #define ep_sctx _ep_trbsctx._epu_sctx
174 * Caches the value of MaxPStreams from the endpoint context
175 * when an endpoint is initialized and is used to validate the
176 * use of ep_ringaddr vs ep_sctx_trbs[] as well as the length
179 uint32_t ep_MaxPStreams;
181 struct pci_xhci_trb_ring _epu_trb;
182 struct pci_xhci_trb_ring *_epu_sctx_trbs;
184 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr
185 #define ep_ccs _ep_trb_rings._epu_trb.ccs
186 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs
188 struct usb_data_xfer *ep_xfer; /* transfer chain */
191 /* device context base address array: maps slot->device context */
193 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */
196 /* port status registers */
197 struct pci_xhci_portregs {
198 uint32_t portsc; /* port status and control */
199 uint32_t portpmsc; /* port pwr mgmt status & control */
200 uint32_t portli; /* port link info */
201 uint32_t porthlpmc; /* port hardware LPM control */
203 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10)
205 /* xHC operational registers */
206 struct pci_xhci_opregs {
207 uint32_t usbcmd; /* usb command */
208 uint32_t usbsts; /* usb status */
209 uint32_t pgsz; /* page size */
210 uint32_t dnctrl; /* device notification control */
211 uint64_t crcr; /* command ring control */
212 uint64_t dcbaap; /* device ctx base addr array ptr */
213 uint32_t config; /* configure */
215 /* guest mapped addresses: */
216 struct xhci_trb *cr_p; /* crcr dequeue */
217 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */
220 /* xHC runtime registers */
221 struct pci_xhci_rtsregs {
222 uint32_t mfindex; /* microframe index */
223 struct { /* interrupter register set */
224 uint32_t iman; /* interrupter management */
225 uint32_t imod; /* interrupter moderation */
226 uint32_t erstsz; /* event ring segment table size */
228 uint64_t erstba; /* event ring seg-tbl base addr */
229 uint64_t erdp; /* event ring dequeue ptr */
232 /* guest mapped addresses */
233 struct xhci_event_ring_seg *erstba_p;
234 struct xhci_trb *erst_p; /* event ring segment tbl */
235 int er_deq_seg; /* event ring dequeue segment */
236 int er_enq_idx; /* event ring enqueue index - xHCI */
237 int er_enq_seg; /* event ring enqueue segment */
238 uint32_t er_events_cnt; /* number of events in ER */
239 uint32_t event_pcs; /* producer cycle state flag */
243 struct pci_xhci_softc;
247 * USB device emulation container.
248 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each
249 * emulated device instance.
251 struct pci_xhci_dev_emu {
252 struct pci_xhci_softc *xsc;
255 struct xhci_dev_ctx *dev_ctx;
256 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS];
259 struct usb_devemu *dev_ue; /* USB emulated dev */
260 void *dev_sc; /* device's softc */
265 struct pci_xhci_softc {
266 struct pci_devinst *xsc_pi;
270 uint32_t caplength; /* caplen & hciversion */
271 uint32_t hcsparams1; /* structural parameters 1 */
272 uint32_t hcsparams2; /* structural parameters 2 */
273 uint32_t hcsparams3; /* structural parameters 3 */
274 uint32_t hccparams1; /* capability parameters 1 */
275 uint32_t dboff; /* doorbell offset */
276 uint32_t rtsoff; /* runtime register space offset */
277 uint32_t hccparams2; /* capability parameters 2 */
279 uint32_t regsend; /* end of configuration registers */
281 struct pci_xhci_opregs opregs;
282 struct pci_xhci_rtsregs rtsregs;
284 struct pci_xhci_portregs *portregs;
285 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */
286 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */
293 /* portregs and devices arrays are set up to start from idx=1 */
294 #define XHCI_PORTREG_PTR(x,n) &(x)->portregs[(n)]
295 #define XHCI_DEVINST_PTR(x,n) (x)->devices[(n)]
296 #define XHCI_SLOTDEV_PTR(x,n) (x)->slots[(n)]
298 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH)
300 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \
301 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1)))
302 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \
303 (a), XHCI_GADDR_SIZE(a))
305 static int xhci_in_use;
307 /* map USB errors to XHCI */
308 static const int xhci_usb_errors[USB_ERR_MAX] = {
309 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS,
310 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE,
311 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
312 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID,
313 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE,
314 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED,
315 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER,
316 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER,
317 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER,
318 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL,
319 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE,
320 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE,
321 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE,
322 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED,
323 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED,
324 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE,
325 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON,
326 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE,
327 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB,
328 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
329 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED,
330 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT,
331 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL,
332 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED,
333 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF,
334 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB,
335 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED,
336 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED,
337 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED,
339 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \
340 XHCI_TRB_ERROR_INVALID)
342 static int pci_xhci_insert_event(struct pci_xhci_softc *sc,
343 struct xhci_trb *evtrb, int do_intr);
344 static void pci_xhci_dump_trb(struct xhci_trb *trb);
345 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc);
346 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot);
347 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm);
348 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc,
349 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
350 struct xhci_endp_ctx *ep_ctx, uint32_t streamid,
351 uint64_t ringaddr, int ccs);
354 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode,
357 evtrb->qwTrb0 = port << 24;
358 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode);
359 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype);
363 /* controller reset */
365 pci_xhci_reset(struct pci_xhci_softc *sc)
369 sc->rtsregs.er_enq_idx = 0;
370 sc->rtsregs.er_events_cnt = 0;
371 sc->rtsregs.event_pcs = 1;
373 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
374 pci_xhci_reset_slot(sc, i);
379 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd)
384 if (cmd & XHCI_CMD_RS) {
385 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0;
387 sc->opregs.usbcmd |= XHCI_CMD_RS;
388 sc->opregs.usbsts &= ~XHCI_STS_HCH;
389 sc->opregs.usbsts |= XHCI_STS_PCD;
391 /* Queue port change event on controller run from stop */
393 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
394 struct pci_xhci_dev_emu *dev;
395 struct pci_xhci_portregs *port;
396 struct xhci_trb evtrb;
398 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL)
401 port = XHCI_PORTREG_PTR(sc, i);
402 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS;
403 port->portsc &= ~XHCI_PS_PLS_MASK;
406 * XHCI 4.19.3 USB2 RxDetect->Polling,
409 if (dev->dev_ue->ue_usbver == 2)
411 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL);
414 XHCI_PS_PLS_SET(UPS_PORT_LS_U0);
416 pci_xhci_set_evtrb(&evtrb, i,
417 XHCI_TRB_ERROR_SUCCESS,
418 XHCI_TRB_EVENT_PORT_STS_CHANGE);
420 if (pci_xhci_insert_event(sc, &evtrb, 0) !=
421 XHCI_TRB_ERROR_SUCCESS)
425 sc->opregs.usbcmd &= ~XHCI_CMD_RS;
426 sc->opregs.usbsts |= XHCI_STS_HCH;
427 sc->opregs.usbsts &= ~XHCI_STS_PCD;
430 /* start execution of schedule; stop when set to 0 */
431 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS;
433 if (cmd & XHCI_CMD_HCRST) {
434 /* reset controller */
436 cmd &= ~XHCI_CMD_HCRST;
439 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS);
442 pci_xhci_assert_interrupt(sc);
448 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset,
451 struct xhci_trb evtrb;
452 struct pci_xhci_portregs *p;
454 uint32_t oldpls, newpls;
456 if (sc->portregs == NULL)
459 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ;
460 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ;
462 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx",
463 offset, port, value));
467 if (port > XHCI_MAX_DEVS) {
468 DPRINTF(("pci_xhci: portregs_write port %d > ndevices",
473 if (XHCI_DEVINST_PTR(sc, port) == NULL) {
474 DPRINTF(("pci_xhci: portregs_write to unattached port %d",
478 p = XHCI_PORTREG_PTR(sc, port);
481 /* port reset or warm reset */
482 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) {
483 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR);
487 if ((p->portsc & XHCI_PS_PP) == 0) {
488 WPRINTF(("pci_xhci: portregs_write to unpowered "
493 /* Port status and control register */
494 oldpls = XHCI_PS_PLS_GET(p->portsc);
495 newpls = XHCI_PS_PLS_GET(value);
497 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK |
498 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK;
500 if (XHCI_DEVINST_PTR(sc, port))
501 p->portsc |= XHCI_PS_CCS;
503 p->portsc |= (value &
507 XHCI_PS_PLS_MASK | /* link state */
509 XHCI_PS_PIC_MASK | /* port indicator */
510 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR));
512 /* clear control bits */
513 p->portsc &= ~(value &
523 /* port disable request; for USB3, don't care */
524 if (value & XHCI_PS_PED)
525 DPRINTF(("Disable port %d request", port));
527 if (!(value & XHCI_PS_LWS))
530 DPRINTF(("Port new PLS: %d", newpls));
534 if (oldpls != newpls) {
535 p->portsc &= ~XHCI_PS_PLS_MASK;
536 p->portsc |= XHCI_PS_PLS_SET(newpls) |
539 if (oldpls != 0 && newpls == 0) {
540 pci_xhci_set_evtrb(&evtrb, port,
541 XHCI_TRB_ERROR_SUCCESS,
542 XHCI_TRB_EVENT_PORT_STS_CHANGE);
544 pci_xhci_insert_event(sc, &evtrb, 1);
550 DPRINTF(("Unhandled change port %d PLS %u",
556 /* Port power management status and control register */
560 /* Port link information register */
561 DPRINTF(("pci_xhci attempted write to PORTLI, port %d",
566 * Port hardware LPM control register.
567 * For USB3, this register is reserved.
569 p->porthlpmc = value;
574 static struct xhci_dev_ctx *
575 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot)
577 uint64_t devctx_addr;
578 struct xhci_dev_ctx *devctx;
580 assert(slot > 0 && slot <= XHCI_MAX_DEVS);
581 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL);
582 assert(sc->opregs.dcbaa_p != NULL);
584 devctx_addr = sc->opregs.dcbaa_p->dcba[slot];
586 if (devctx_addr == 0) {
587 DPRINTF(("get_dev_ctx devctx_addr == 0"));
591 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx",
593 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL);
598 static struct xhci_trb *
599 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb,
602 struct xhci_trb *next;
604 assert(curtrb != NULL);
606 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) {
608 *guestaddr = curtrb->qwTrb0 & ~0xFUL;
610 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL);
613 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL;
622 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc)
625 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY;
626 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND;
627 sc->opregs.usbsts |= XHCI_STS_EINT;
629 /* only trigger interrupt if permitted */
630 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) &&
631 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) {
632 if (pci_msi_enabled(sc->xsc_pi))
633 pci_generate_msi(sc->xsc_pi, 0);
635 pci_lintr_assert(sc->xsc_pi);
640 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc)
643 if (!pci_msi_enabled(sc->xsc_pi))
644 pci_lintr_assert(sc->xsc_pi);
648 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid)
650 struct xhci_dev_ctx *dev_ctx;
651 struct pci_xhci_dev_ep *devep;
652 struct xhci_endp_ctx *ep_ctx;
653 uint32_t i, pstreams;
655 dev_ctx = dev->dev_ctx;
656 ep_ctx = &dev_ctx->ctx_ep[epid];
657 devep = &dev->eps[epid];
658 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0);
660 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams));
661 assert(devep->ep_sctx_trbs == NULL);
663 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 &
664 XHCI_EPCTX_2_TR_DQ_PTR_MASK);
665 devep->ep_sctx_trbs = calloc(pstreams,
666 sizeof(struct pci_xhci_trb_ring));
667 for (i = 0; i < pstreams; i++) {
668 devep->ep_sctx_trbs[i].ringaddr =
669 devep->ep_sctx[i].qwSctx0 &
670 XHCI_SCTX_0_TR_DQ_PTR_MASK;
671 devep->ep_sctx_trbs[i].ccs =
672 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0);
675 DPRINTF(("init_ep %d with no pstreams", epid));
676 devep->ep_ringaddr = ep_ctx->qwEpCtx2 &
677 XHCI_EPCTX_2_TR_DQ_PTR_MASK;
678 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2);
679 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr);
680 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs));
682 devep->ep_MaxPStreams = pstreams;
684 if (devep->ep_xfer == NULL) {
685 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer));
686 USB_DATA_XFER_INIT(devep->ep_xfer);
691 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid)
693 struct xhci_dev_ctx *dev_ctx;
694 struct pci_xhci_dev_ep *devep;
695 struct xhci_endp_ctx *ep_ctx;
697 DPRINTF(("pci_xhci disable_ep %d", epid));
699 dev_ctx = dev->dev_ctx;
700 ep_ctx = &dev_ctx->ctx_ep[epid];
701 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED;
703 devep = &dev->eps[epid];
704 if (devep->ep_MaxPStreams > 0)
705 free(devep->ep_sctx_trbs);
707 if (devep->ep_xfer != NULL) {
708 free(devep->ep_xfer);
709 devep->ep_xfer = NULL;
712 memset(devep, 0, sizeof(struct pci_xhci_dev_ep));
716 /* reset device at slot and data structures related to it */
718 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot)
720 struct pci_xhci_dev_emu *dev;
722 dev = XHCI_SLOTDEV_PTR(sc, slot);
725 DPRINTF(("xhci reset unassigned slot (%d)?", slot));
727 dev->dev_slotstate = XHCI_ST_DISABLED;
730 /* TODO: reset ring buffer pointers */
734 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb,
737 struct pci_xhci_rtsregs *rts;
741 struct xhci_trb *evtrbptr;
743 err = XHCI_TRB_ERROR_SUCCESS;
747 erdp = rts->intrreg.erdp & ~0xF;
748 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) /
749 sizeof(struct xhci_trb);
751 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]",
752 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3));
753 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u",
754 erdp_idx, rts->er_deq_seg, rts->er_enq_idx,
755 rts->er_enq_seg, rts->event_pcs));
756 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)",
757 erdp, rts->erstba_p->qwEvrsTablePtr,
758 rts->erstba_p->dwEvrsTableSize, do_intr));
760 evtrbptr = &rts->erst_p[rts->er_enq_idx];
762 /* TODO: multi-segment table */
763 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) {
764 DPRINTF(("pci_xhci[%d] cannot insert event; ring full",
766 err = XHCI_TRB_ERROR_EV_RING_FULL;
770 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) {
771 struct xhci_trb errev;
773 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) {
775 DPRINTF(("pci_xhci[%d] insert evt err: ring full",
779 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET(
780 XHCI_TRB_ERROR_EV_RING_FULL);
781 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET(
782 XHCI_TRB_EVENT_HOST_CTRL) |
784 rts->er_events_cnt++;
785 memcpy(&rts->erst_p[rts->er_enq_idx], &errev,
786 sizeof(struct xhci_trb));
787 rts->er_enq_idx = (rts->er_enq_idx + 1) %
788 rts->erstba_p->dwEvrsTableSize;
789 err = XHCI_TRB_ERROR_EV_RING_FULL;
795 rts->er_events_cnt++;
798 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT;
799 evtrb->dwTrb3 |= rts->event_pcs;
801 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb));
802 rts->er_enq_idx = (rts->er_enq_idx + 1) %
803 rts->erstba_p->dwEvrsTableSize;
805 if (rts->er_enq_idx == 0)
810 pci_xhci_assert_interrupt(sc);
816 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot)
818 struct pci_xhci_dev_emu *dev;
822 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
823 if (sc->portregs != NULL)
824 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
825 dev = XHCI_SLOTDEV_PTR(sc, i);
826 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) {
828 dev->dev_slotstate = XHCI_ST_ENABLED;
829 cmderr = XHCI_TRB_ERROR_SUCCESS;
830 dev->hci.hci_address = i;
835 DPRINTF(("pci_xhci enable slot (error=%d) slot %u",
836 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot));
842 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot)
844 struct pci_xhci_dev_emu *dev;
847 DPRINTF(("pci_xhci disable slot %u", slot));
849 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
850 if (sc->portregs == NULL)
853 if (slot > XHCI_MAX_SLOTS) {
854 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
858 dev = XHCI_SLOTDEV_PTR(sc, slot);
860 if (dev->dev_slotstate == XHCI_ST_DISABLED) {
861 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
863 dev->dev_slotstate = XHCI_ST_DISABLED;
864 cmderr = XHCI_TRB_ERROR_SUCCESS;
865 /* TODO: reset events and endpoints */
868 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
875 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot)
877 struct pci_xhci_dev_emu *dev;
878 struct xhci_dev_ctx *dev_ctx;
879 struct xhci_endp_ctx *ep_ctx;
883 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
884 if (sc->portregs == NULL)
887 DPRINTF(("pci_xhci reset device slot %u", slot));
889 dev = XHCI_SLOTDEV_PTR(sc, slot);
890 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED)
891 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
893 dev->dev_slotstate = XHCI_ST_DEFAULT;
895 dev->hci.hci_address = 0;
896 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
899 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
900 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT,
903 /* number of contexts */
904 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
905 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
907 /* reset all eps other than ep-0 */
908 for (i = 2; i <= 31; i++) {
909 ep_ctx = &dev_ctx->ctx_ep[i];
910 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0,
911 XHCI_ST_EPCTX_DISABLED, 0x7, 0);
914 cmderr = XHCI_TRB_ERROR_SUCCESS;
917 pci_xhci_reset_slot(sc, slot);
924 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot,
925 struct xhci_trb *trb)
927 struct pci_xhci_dev_emu *dev;
928 struct xhci_input_dev_ctx *input_ctx;
929 struct xhci_slot_ctx *islot_ctx;
930 struct xhci_dev_ctx *dev_ctx;
931 struct xhci_endp_ctx *ep0_ctx;
934 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
935 islot_ctx = &input_ctx->ctx_slot;
936 ep0_ctx = &input_ctx->ctx_ep[1];
938 cmderr = XHCI_TRB_ERROR_SUCCESS;
940 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,",
941 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
942 DPRINTF((" slot %08x %08x %08x %08x",
943 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
944 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
945 DPRINTF((" ep0 %08x %08x %016lx %08x",
946 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
949 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */
950 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
951 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) {
952 DPRINTF(("pci_xhci: address device, input ctl invalid"));
953 cmderr = XHCI_TRB_ERROR_TRB;
957 /* assign address to slot */
958 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
960 DPRINTF(("pci_xhci: address device, dev ctx"));
961 DPRINTF((" slot %08x %08x %08x %08x",
962 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
963 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
965 dev = XHCI_SLOTDEV_PTR(sc, slot);
968 dev->hci.hci_address = slot;
969 dev->dev_ctx = dev_ctx;
971 if (dev->dev_ue->ue_reset == NULL ||
972 dev->dev_ue->ue_reset(dev->dev_sc) < 0) {
973 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
977 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx));
979 dev_ctx->ctx_slot.dwSctx3 =
980 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) |
981 XHCI_SCTX_3_DEV_ADDR_SET(slot);
983 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx));
984 ep0_ctx = &dev_ctx->ctx_ep[1];
985 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) |
986 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING);
988 pci_xhci_init_ep(dev, 1);
990 dev->dev_slotstate = XHCI_ST_ADDRESSED;
992 DPRINTF(("pci_xhci: address device, output ctx"));
993 DPRINTF((" slot %08x %08x %08x %08x",
994 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
995 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
996 DPRINTF((" ep0 %08x %08x %016lx %08x",
997 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1005 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot,
1006 struct xhci_trb *trb)
1008 struct xhci_input_dev_ctx *input_ctx;
1009 struct pci_xhci_dev_emu *dev;
1010 struct xhci_dev_ctx *dev_ctx;
1011 struct xhci_endp_ctx *ep_ctx, *iep_ctx;
1015 cmderr = XHCI_TRB_ERROR_SUCCESS;
1017 DPRINTF(("pci_xhci config_ep slot %u", slot));
1019 dev = XHCI_SLOTDEV_PTR(sc, slot);
1020 assert(dev != NULL);
1022 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) {
1023 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u",
1025 if (dev->dev_ue->ue_stop != NULL)
1026 dev->dev_ue->ue_stop(dev->dev_sc);
1028 dev->dev_slotstate = XHCI_ST_ADDRESSED;
1030 dev->hci.hci_address = 0;
1031 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1033 /* number of contexts */
1034 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
1035 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
1038 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1039 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED,
1042 /* disable endpoints */
1043 for (i = 2; i < 32; i++)
1044 pci_xhci_disable_ep(dev, i);
1046 cmderr = XHCI_TRB_ERROR_SUCCESS;
1051 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) {
1052 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed",
1053 dev->dev_slotstate));
1054 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
1058 /* In addressed/configured state;
1059 * for each drop endpoint ctx flag:
1060 * ep->state = DISABLED
1061 * for each add endpoint ctx flag:
1063 * ep->state = RUNNING
1064 * for each drop+add endpoint flag:
1065 * reset ep resources
1067 * ep->state = RUNNING
1068 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled)
1069 * slot->state = configured
1072 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1073 dev_ctx = dev->dev_ctx;
1074 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x",
1075 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1,
1076 input_ctx->ctx_input.dwInCtx7));
1078 for (i = 2; i <= 31; i++) {
1079 ep_ctx = &dev_ctx->ctx_ep[i];
1081 if (input_ctx->ctx_input.dwInCtx0 &
1082 XHCI_INCTX_0_DROP_MASK(i)) {
1083 DPRINTF((" config ep - dropping ep %d", i));
1084 pci_xhci_disable_ep(dev, i);
1087 if (input_ctx->ctx_input.dwInCtx1 &
1088 XHCI_INCTX_1_ADD_MASK(i)) {
1089 iep_ctx = &input_ctx->ctx_ep[i];
1091 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x",
1092 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1,
1093 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4));
1095 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx));
1097 pci_xhci_init_ep(dev, i);
1100 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1101 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1105 /* slot state to configured */
1106 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1107 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27);
1108 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY(
1109 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27);
1110 dev->dev_slotstate = XHCI_ST_CONFIGURED;
1112 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x "
1114 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1115 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1122 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot,
1123 struct xhci_trb *trb)
1125 struct pci_xhci_dev_emu *dev;
1126 struct pci_xhci_dev_ep *devep;
1127 struct xhci_dev_ctx *dev_ctx;
1128 struct xhci_endp_ctx *ep_ctx;
1129 uint32_t cmderr, epid;
1132 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1134 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot));
1136 cmderr = XHCI_TRB_ERROR_SUCCESS;
1138 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1140 dev = XHCI_SLOTDEV_PTR(sc, slot);
1141 assert(dev != NULL);
1143 if (type == XHCI_TRB_TYPE_STOP_EP &&
1144 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) {
1145 /* XXX suspend endpoint for 10ms */
1148 if (epid < 1 || epid > 31) {
1149 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid));
1150 cmderr = XHCI_TRB_ERROR_TRB;
1154 devep = &dev->eps[epid];
1155 if (devep->ep_xfer != NULL)
1156 USB_DATA_XFER_RESET(devep->ep_xfer);
1158 dev_ctx = dev->dev_ctx;
1159 assert(dev_ctx != NULL);
1161 ep_ctx = &dev_ctx->ctx_ep[epid];
1163 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
1165 if (devep->ep_MaxPStreams == 0)
1166 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs;
1168 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x",
1169 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
1172 if (type == XHCI_TRB_TYPE_RESET_EP &&
1173 (dev->dev_ue->ue_reset == NULL ||
1174 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) {
1175 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
1185 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep,
1186 struct pci_xhci_dev_ep *devep, uint32_t streamid)
1188 struct xhci_stream_ctx *sctx;
1190 if (devep->ep_MaxPStreams == 0)
1191 return (XHCI_TRB_ERROR_TRB);
1193 if (devep->ep_MaxPStreams > XHCI_STREAMS_MAX)
1194 return (XHCI_TRB_ERROR_INVALID_SID);
1196 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) {
1197 DPRINTF(("pci_xhci: find_stream; LSA bit not set"));
1198 return (XHCI_TRB_ERROR_INVALID_SID);
1201 /* only support primary stream */
1202 if (streamid > devep->ep_MaxPStreams)
1203 return (XHCI_TRB_ERROR_STREAM_TYPE);
1205 sctx = (struct xhci_stream_ctx *)XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) +
1207 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0))
1208 return (XHCI_TRB_ERROR_STREAM_TYPE);
1210 return (XHCI_TRB_ERROR_SUCCESS);
/*
 * Handle the Set TR Dequeue Pointer command: update the transfer-ring
 * dequeue pointer (and cycle state) for an endpoint — either in the
 * per-stream context or directly in the endpoint context — and leave
 * the endpoint in the Stopped state.  Returns an xHCI command error.
 */
1215 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot,
1216 struct xhci_trb *trb)
1218 struct pci_xhci_dev_emu *dev;
1219 struct pci_xhci_dev_ep *devep;
1220 struct xhci_dev_ctx *dev_ctx;
1221 struct xhci_endp_ctx *ep_ctx;
1222 uint32_t cmderr, epid;
1225 cmderr = XHCI_TRB_ERROR_SUCCESS;
1227 dev = XHCI_SLOTDEV_PTR(sc, slot);
1228 assert(dev != NULL);
1230 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u",
1231 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7),
1232 (uint32_t)(trb->qwTrb0 & 0x1)));
1233 DPRINTF((" stream-id %u, slot %u, epid %u, C %u",
1234 (trb->dwTrb2 >> 16) & 0xFFFF,
1235 XHCI_TRB_3_SLOT_GET(trb->dwTrb3),
1236 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1));
/* endpoint IDs 1..31 are valid (0 is the slot context) */
1238 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1239 if (epid < 1 || epid > 31) {
1240 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid));
1241 cmderr = XHCI_TRB_ERROR_TRB;
1245 dev_ctx = dev->dev_ctx;
1246 assert(dev_ctx != NULL);
1248 ep_ctx = &dev_ctx->ctx_ep[epid];
1249 devep = &dev->eps[epid];
/* the command is only legal while the endpoint is Stopped or in Error */
1251 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) {
1252 case XHCI_ST_EPCTX_STOPPED:
1253 case XHCI_ST_EPCTX_ERROR:
1256 DPRINTF(("pci_xhci cmd set_tr invalid state %x",
1257 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)));
1258 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE;
1262 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2);
1263 if (devep->ep_MaxPStreams > 0) {
/* stream endpoint: new dequeue pointer goes into the stream context */
1264 cmderr = pci_xhci_find_stream(sc, ep_ctx, devep, streamid);
1265 if (cmderr == XHCI_TRB_ERROR_SUCCESS) {
1266 assert(devep->ep_sctx != NULL);
1268 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0;
1269 devep->ep_sctx_trbs[streamid].ringaddr =
1271 devep->ep_sctx_trbs[streamid].ccs =
1272 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0);
/* non-stream endpoint: a non-zero stream ID is a caller error */
1275 if (streamid != 0) {
1276 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0",
/* update ring pointer / cycle state in the endpoint context itself */
1279 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL;
1280 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL;
1281 devep->ep_ccs = trb->qwTrb0 & 0x1;
1282 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr);
1284 DPRINTF(("pci_xhci set_tr first TRB:"));
1285 pci_xhci_dump_trb(devep->ep_tr);
/* endpoint is left Stopped after a Set TR Dequeue Pointer */
1287 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
/*
 * Handle the Evaluate Context command: copy selected fields from the
 * guest-supplied input context into the device's output context.
 * Only the slot context (max exit latency, interrupter target) and
 * the EP0 control context (max packet size) are honored.
 */
1294 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot,
1295 struct xhci_trb *trb)
1297 struct xhci_input_dev_ctx *input_ctx;
1298 struct xhci_slot_ctx *islot_ctx;
1299 struct xhci_dev_ctx *dev_ctx;
1300 struct xhci_endp_ctx *ep0_ctx;
/* map the guest's input context (TRB pointer, 16-byte aligned) */
1303 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1304 islot_ctx = &input_ctx->ctx_slot;
1305 ep0_ctx = &input_ctx->ctx_ep[1];
1307 cmderr = XHCI_TRB_ERROR_SUCCESS;
1308 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,",
1309 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
1310 DPRINTF((" slot %08x %08x %08x %08x",
1311 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
1312 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
1313 DPRINTF((" ep0 %08x %08x %016lx %08x",
1314 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1315 ep0_ctx->dwEpCtx4));
1317 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */
1318 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
1319 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) {
1320 DPRINTF(("pci_xhci: eval ctx, input ctl invalid"));
1321 cmderr = XHCI_TRB_ERROR_TRB;
1325 /* assign address to slot; in this emulation, slot_id = address */
1326 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1328 DPRINTF(("pci_xhci: eval ctx, dev ctx"));
1329 DPRINTF((" slot %08x %08x %08x %08x",
1330 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1331 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1333 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */
1334 /* set max exit latency */
1335 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY(
1336 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1,
1339 /* set interrupter target */
1340 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY(
1341 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2,
1344 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */
1345 /* set max packet size */
1346 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY(
1347 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1,
/* re-point ep0_ctx at the output context for the debug dump below */
1350 ep0_ctx = &dev_ctx->ctx_ep[1];
1353 DPRINTF(("pci_xhci: eval ctx, output ctx"));
1354 DPRINTF((" slot %08x %08x %08x %08x",
1355 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1356 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1357 DPRINTF((" ep0 %08x %08x %016lx %08x",
1358 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1359 ep0_ctx->dwEpCtx4));
/*
 * Consume the guest's command ring: process each TRB whose cycle bit
 * matches the consumer cycle state, dispatch it to the matching
 * command handler, and (for everything but Link TRBs) queue a
 * Command Completion event carrying the command's error code.
 */
1366 pci_xhci_complete_commands(struct pci_xhci_softc *sc)
1368 struct xhci_trb evtrb;
1369 struct xhci_trb *trb;
1371 uint32_t ccs; /* cycle state (XHCI 4.9.2) */
/* flag the Command Ring as Running while we consume TRBs */
1378 sc->opregs.crcr |= XHCI_CRCR_LO_CRR;
1380 trb = sc->opregs.cr_p;
1381 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS;
1382 crcr = sc->opregs.crcr & ~0xF;
1385 sc->opregs.cr_p = trb;
1387 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
/* stop when the producer hasn't written this TRB yet (cycle mismatch) */
1389 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) !=
1390 (ccs & XHCI_TRB_3_CYCLE_BIT))
1393 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x"
1394 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u",
1395 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3,
1396 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs));
1398 cmderr = XHCI_TRB_ERROR_SUCCESS;
/* pre-build the completion event; dwTrb2/slot are filled in below */
1400 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) |
1401 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE);
1405 case XHCI_TRB_TYPE_LINK: /* 0x06 */
/* Toggle Cycle bit flips the consumer cycle state */
1406 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
1407 ccs ^= XHCI_CRCR_LO_RCS;
1410 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */
1411 cmderr = pci_xhci_cmd_enable_slot(sc, &slot);
1414 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */
1415 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1416 cmderr = pci_xhci_cmd_disable_slot(sc, slot);
1419 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */
1420 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1421 cmderr = pci_xhci_cmd_address_device(sc, slot, trb);
1424 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */
1425 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1426 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb);
1429 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */
1430 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1431 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb);
1434 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */
/* NOTE(review): 'slot' is logged here before it is assigned on the
 * next line, so this DPRINTF prints a stale value. */
1435 DPRINTF(("Reset Endpoint on slot %d", slot));
1436 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1437 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1440 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */
/* NOTE(review): same stale-'slot' log as RESET_EP above.  Stop
 * Endpoint intentionally shares the reset_ep handler, which
 * distinguishes the two commands by TRB type. */
1441 DPRINTF(("Stop Endpoint on slot %d", slot));
1442 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1443 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1446 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */
1447 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1448 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb);
1451 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */
1452 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1453 cmderr = pci_xhci_cmd_reset_device(sc, slot);
1456 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */
1460 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */
1463 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */
1466 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */
1469 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */
1472 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */
1476 DPRINTF(("pci_xhci: unsupported cmd %x", type));
/* Link TRBs produce no completion event; everything else does */
1480 if (type != XHCI_TRB_TYPE_LINK) {
1482 * insert command completion event and assert intr
1484 evtrb.qwTrb0 = crcr;
1485 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr);
1486 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot);
1487 DPRINTF(("pci_xhci: command 0x%x result: 0x%x",
1489 pci_xhci_insert_event(sc, &evtrb, 1);
1492 trb = pci_xhci_trb_next(sc, trb, &crcr);
/* write back the dequeue pointer / cycle state and clear Running */
1495 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs;
1496 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR;
/*
 * Debug helper: print a TRB's type (by name, when in range) and its
 * three raw fields.
 */
1501 pci_xhci_dump_trb(struct xhci_trb *trb)
1503 static const char *trbtypes[] = {
1531 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1532 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x",
1534 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID",
1535 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3));
/*
 * Walk the completed blocks of a USB data transfer, write the cycle
 * bit back into each guest TRB, advance the endpoint ring, and insert
 * Transfer events for TRBs that request one (IOC, or short packet
 * with ISP).  Sets *do_intr when an interrupt should be raised.
 */
1539 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer,
1540 uint32_t slot, uint32_t epid, int *do_intr)
1542 struct pci_xhci_dev_emu *dev;
1543 struct pci_xhci_dev_ep *devep;
1544 struct xhci_dev_ctx *dev_ctx;
1545 struct xhci_endp_ctx *ep_ctx;
1546 struct xhci_trb *trb;
1547 struct xhci_trb evtrb;
1552 dev = XHCI_SLOTDEV_PTR(sc, slot);
1553 devep = &dev->eps[epid];
1554 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1556 assert(dev_ctx != NULL);
1558 ep_ctx = &dev_ctx->ctx_ep[epid];
1560 err = XHCI_TRB_ERROR_SUCCESS;
1564 /* go through list of TRBs and insert event(s) */
1565 for (i = xfer->head; xfer->ndata > 0; ) {
1566 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data;
1567 trb = XHCI_GADDR(sc, evtrb.qwTrb0);
1568 trbflags = trb->dwTrb3;
1570 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x "
1572 i, xfer->data[i].processed, xfer->data[i].blen,
1573 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0,
1575 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0));
/* stop at the first block the device model hasn't processed */
1577 if (!xfer->data[i].processed) {
/* accumulate Event Data Transfer Length (reported in EVENT_DATA TRBs) */
1583 edtla += xfer->data[i].bdone;
/* hand the TRB back to the guest by writing its cycle bit */
1585 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs);
1587 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx,
1588 xfer->data[i].streamid, xfer->data[i].trbnext,
1591 /* Only interrupt if IOC or short packet */
1592 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) &&
1593 !((err == XHCI_TRB_ERROR_SHORT_PKT) &&
1594 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) {
1596 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/* build the Transfer event for this TRB */
1600 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) |
1601 XHCI_TRB_2_REM_SET(xfer->data[i].blen);
1603 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) |
1604 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid);
/* EVENT_DATA TRBs report the accumulated length and ED flag instead */
1606 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) {
1607 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla));
1608 evtrb.qwTrb0 = trb->qwTrb0;
1609 evtrb.dwTrb2 = (edtla & 0xFFFFF) |
1610 XHCI_TRB_2_ERROR_SET(err);
1611 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT;
1617 err = pci_xhci_insert_event(sc, &evtrb, 0);
1618 if (err != XHCI_TRB_ERROR_SUCCESS) {
1622 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/*
 * Record a new transfer-ring position (ring address + consumer cycle
 * state) for an endpoint: into the per-stream context when streams
 * are in use, otherwise into the endpoint context and cached fields.
 */
1629 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc,
1630 struct pci_xhci_dev_emu *dev __unused, struct pci_xhci_dev_ep *devep,
1631 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs)
1634 if (devep->ep_MaxPStreams != 0) {
/* stream endpoint: dequeue pointer lives in the stream context */
1635 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) |
1638 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL;
1639 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1;
1640 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1);
1642 DPRINTF(("xhci update ep-ring stream %d, addr %lx",
1643 streamid, devep->ep_sctx[streamid].qwSctx0));
/* non-stream endpoint: update cached ring state and the ep context */
1645 devep->ep_ringaddr = ringaddr & ~0xFUL;
1646 devep->ep_ccs = ccs & 0x1;
1647 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL);
1648 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1);
1650 DPRINTF(("xhci update ep-ring, addr %lx",
1651 (devep->ep_ringaddr | devep->ep_ccs)));
 * Outstanding transfer still in progress (device NAK'd earlier) so retry
 * the transfer again to see if it succeeds.
/*
 * Re-submit a pending transfer to the device model; on completion,
 * write results back to the guest ring and raise an interrupt if
 * needed.  The endpoint is marked Running for the attempt.
 */
1660 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc,
1661 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1662 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid)
1664 struct usb_data_xfer *xfer;
1668 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1669 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1674 xfer = devep->ep_xfer;
1675 USB_DATA_XFER_LOCK(xfer);
1677 /* outstanding requests queued up */
1678 if (dev->dev_ue->ue_data != NULL) {
/* odd epids are IN endpoints, even are OUT; epid/2 is the HW ep # */
1679 err = dev->dev_ue->ue_data(dev->dev_sc, xfer,
1680 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2);
/* CANCELLED + NAK at the head block means the device is still NAKing */
1681 if (err == USB_ERR_CANCELLED) {
1682 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) ==
1684 err = XHCI_TRB_ERROR_SUCCESS;
1686 err = pci_xhci_xfer_complete(sc, xfer, slot, epid,
1688 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) {
1689 pci_xhci_assert_interrupt(sc);
1693 /* XXX should not do it if error? */
1694 USB_DATA_XFER_RESET(xfer);
1698 USB_DATA_XFER_UNLOCK(xfer);
/*
 * Consume a Transfer Descriptor from an endpoint's transfer ring
 * starting at 'trb'/'addr': append each TRB as a usb_data_xfer block,
 * stop the batch at the end of a chain (or on IOC), then hand the
 * request to the device model and complete it back to the guest.
 */
1706 pci_xhci_handle_transfer(struct pci_xhci_softc *sc,
1707 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1708 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot,
1709 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid)
1711 struct xhci_trb *setup_trb;
1712 struct usb_data_xfer *xfer;
1713 struct usb_data_xfer_block *xfer_block;
/* the endpoint runs while we process its TDs */
1719 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0,
1720 XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1722 xfer = devep->ep_xfer;
1723 USB_DATA_XFER_LOCK(xfer);
1725 DPRINTF(("pci_xhci handle_transfer slot %u", slot));
1728 err = XHCI_TRB_ERROR_INVALID;
1734 pci_xhci_dump_trb(trb);
1736 trbflags = trb->dwTrb3;
/* stop when the producer hasn't written this TRB (cycle mismatch);
 * Link TRBs carry the old cycle and are exempt from the check */
1738 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK &&
1739 (trbflags & XHCI_TRB_3_CYCLE_BIT) !=
1740 (ccs & XHCI_TRB_3_CYCLE_BIT)) {
1741 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x",
1742 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs));
1748 switch (XHCI_TRB_3_TYPE_GET(trbflags)) {
1749 case XHCI_TRB_TYPE_LINK:
1750 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
1753 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1755 xfer_block->processed = 1;
1758 case XHCI_TRB_TYPE_SETUP_STAGE:
/* setup stage must use Immediate Data with exactly 8 bytes */
1759 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 ||
1760 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) {
1761 DPRINTF(("pci_xhci: invalid setup trb"));
1762 err = XHCI_TRB_ERROR_TRB;
/* NOTE(review): malloc() result is used without a NULL check */
1769 xfer->ureq = malloc(
1770 sizeof(struct usb_device_request));
1771 memcpy(xfer->ureq, &val,
1772 sizeof(struct usb_device_request));
1774 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1776 xfer_block->processed = 1;
1779 case XHCI_TRB_TYPE_NORMAL:
1780 case XHCI_TRB_TYPE_ISOCH:
/* normal/isoch TRBs are not legal inside a control transfer */
1781 if (setup_trb != NULL) {
1782 DPRINTF(("pci_xhci: trb not supposed to be in "
1784 err = XHCI_TRB_ERROR_TRB;
1789 case XHCI_TRB_TYPE_DATA_STAGE:
/* IDT data lives in the TRB itself; otherwise map guest memory */
1790 xfer_block = usb_data_xfer_append(xfer,
1791 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ?
1792 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)),
1793 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs);
1796 case XHCI_TRB_TYPE_STATUS_STAGE:
1797 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1801 case XHCI_TRB_TYPE_NOOP:
1802 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1804 xfer_block->processed = 1;
1807 case XHCI_TRB_TYPE_EVENT_DATA:
1808 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
/* event-data on non-control endpoints with IOC needs no device work */
1810 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) {
1811 xfer_block->processed = 1;
1816 DPRINTF(("pci_xhci: handle xfer unexpected trb type "
1818 XHCI_TRB_3_TYPE_GET(trbflags)));
1819 err = XHCI_TRB_ERROR_TRB;
1823 trb = pci_xhci_trb_next(sc, trb, &addr);
1825 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb));
/* remember where the guest ring resumes after this block */
1828 xfer_block->trbnext = addr;
1829 xfer_block->streamid = streamid;
/* end of TD: not a control transfer, chain ended, and not a link */
1832 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) &&
1833 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) {
1837 /* handle current batch that requires interrupt on complete */
1838 if (trbflags & XHCI_TRB_3_IOC_BIT) {
1839 DPRINTF(("pci_xhci: trb IOC bit set"));
1846 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata));
1848 if (xfer->ndata <= 0)
/* control transfer: deliver the request to the device model */
1854 if (dev->dev_ue->ue_request != NULL)
1855 usberr = dev->dev_ue->ue_request(dev->dev_sc, xfer);
1857 usberr = USB_ERR_NOT_STARTED;
1858 err = USB_TO_XHCI_ERR(usberr);
1859 if (err == XHCI_TRB_ERROR_SUCCESS ||
1860 err == XHCI_TRB_ERROR_STALL ||
1861 err == XHCI_TRB_ERROR_SHORT_PKT) {
1862 err = pci_xhci_xfer_complete(sc, xfer, slot, epid,
1864 if (err != XHCI_TRB_ERROR_SUCCESS)
1869 /* handle data transfer */
1870 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1871 err = XHCI_TRB_ERROR_SUCCESS;
1875 if (err == XHCI_TRB_ERROR_EV_RING_FULL)
1876 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__));
1879 USB_DATA_XFER_UNLOCK(xfer);
1882 pci_xhci_assert_interrupt(sc);
1885 USB_DATA_XFER_RESET(xfer);
1886 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs",
1892 USB_DATA_XFER_RESET(xfer);
/*
 * Handle a device doorbell ring: validate the slot/endpoint, retry
 * any pending transfer, locate the next TRB (stream-aware), and pass
 * it to pci_xhci_handle_transfer().
 */
1898 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot,
1899 uint32_t epid, uint32_t streamid)
1901 struct pci_xhci_dev_emu *dev;
1902 struct pci_xhci_dev_ep *devep;
1903 struct xhci_dev_ctx *dev_ctx;
1904 struct xhci_endp_ctx *ep_ctx;
1905 struct pci_xhci_trb_ring *sctx_tr;
1906 struct xhci_trb *trb;
1911 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u",
1912 slot, epid, streamid));
1914 if (slot == 0 || slot > XHCI_MAX_SLOTS) {
1915 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot));
1919 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) {
1920 DPRINTF(("pci_xhci: invalid endpoint %u", epid));
1924 dev = XHCI_SLOTDEV_PTR(sc, slot);
1925 devep = &dev->eps[epid];
1926 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1930 ep_ctx = &dev_ctx->ctx_ep[epid];
1934 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x",
1935 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
/* no ring configured for this endpoint */
1938 if (ep_ctx->qwEpCtx2 == 0)
1941 /* handle pending transfers */
1942 if (devep->ep_xfer->ndata > 0) {
1943 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1947 /* get next trb work item */
1948 if (devep->ep_MaxPStreams != 0) {
1950 * Stream IDs of 0, 65535 (any stream), and 65534
1951 * (prime) are invalid.
1953 if (streamid == 0 || streamid == 65534 || streamid == 65535) {
1954 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1958 error = pci_xhci_find_stream(sc, ep_ctx, devep, streamid);
1959 if (error != XHCI_TRB_ERROR_SUCCESS) {
1960 DPRINTF(("pci_xhci: invalid stream %u: %d",
/* ring position comes from the per-stream bookkeeping */
1964 sctx_tr = &devep->ep_sctx_trbs[streamid];
1965 ringaddr = sctx_tr->ringaddr;
1967 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL);
1968 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x",
1969 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1970 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* non-stream endpoint: stream ID must be zero */
1972 if (streamid != 0) {
1973 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1976 ringaddr = devep->ep_ringaddr;
1977 ccs = devep->ep_ccs;
1979 DPRINTF(("doorbell, ccs %lx, trb ccs %x",
1980 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1981 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* TRB type 0 is reserved — nothing queued yet */
1984 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) {
1985 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?",
1986 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid));
1990 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid,
1991 ringaddr, ccs, streamid);
/*
 * Doorbell register write: doorbell 0 kicks the command ring, any
 * other doorbell rings a device endpoint (target/stream taken from
 * the written value).  Ignored while the controller is halted.
 */
1995 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset,
/* convert byte offset into a doorbell index */
1999 offset = (offset - sc->dboff) / sizeof(uint32_t);
2001 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx",
2004 if (XHCI_HALTED(sc)) {
2005 DPRINTF(("pci_xhci: controller halted"));
2010 pci_xhci_complete_commands(sc);
2011 else if (sc->portregs != NULL)
2012 pci_xhci_device_doorbell(sc, offset,
2013 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value));
/*
 * Runtime register write handler: MFINDEX is read-only; the rest are
 * interrupter registers (IMAN/IMOD/ERSTSZ/ERSTBA/ERDP).  ERSTBA maps
 * the event-ring segment table; ERDP acknowledges consumed events.
 */
2017 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset,
2020 struct pci_xhci_rtsregs *rts;
2022 offset -= sc->rtsoff;
2025 DPRINTF(("pci_xhci attempted write to MFINDEX"));
2029 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx",
2032 offset -= 0x20; /* start of intrreg */
/* IMAN: IP is write-1-to-clear; IE is taken from the written value */
2038 if (value & XHCI_IMAN_INTR_PEND)
2039 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2040 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) |
2041 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND);
2043 if (!(value & XHCI_IMAN_INTR_ENA))
2044 pci_xhci_deassert_interrupt(sc);
2049 rts->intrreg.imod = value;
2053 rts->intrreg.erstsz = value & 0xFFFF;
2057 /* ERSTBA low bits */
2058 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) |
2063 /* ERSTBA high bits */
2064 rts->intrreg.erstba = (value << 32) |
2065 MASK_64_LO(sc->rtsregs.intrreg.erstba);
/* map the segment table and the first event-ring segment; reset
 * the producer position */
2067 rts->erstba_p = XHCI_GADDR(sc,
2068 sc->rtsregs.intrreg.erstba & ~0x3FUL);
2070 rts->erst_p = XHCI_GADDR(sc,
2071 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL);
2073 rts->er_enq_idx = 0;
2074 rts->er_events_cnt = 0;
2076 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u",
2078 rts->erstba_p->qwEvrsTablePtr,
2079 rts->erstba_p->dwEvrsTableSize));
/* ERDP low: preserve busy flag; EHB is write-1-to-clear along with IP */
2085 MASK_64_HI(sc->rtsregs.intrreg.erdp) |
2086 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) |
2088 if (value & XHCI_ERDP_LO_BUSY) {
2089 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY;
2090 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2093 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value);
2098 /* ERDP high bits */
2099 rts->intrreg.erdp = (value << 32) |
2100 MASK_64_LO(sc->rtsregs.intrreg.erdp);
/* recompute how many produced events remain unconsumed */
2102 if (rts->er_events_cnt > 0) {
2106 erdp = rts->intrreg.erdp & ~0xF;
2107 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) /
2108 sizeof(struct xhci_trb);
2110 if (erdp_i <= rts->er_enq_idx)
2111 rts->er_events_cnt = rts->er_enq_idx - erdp_i;
2113 rts->er_events_cnt =
2114 rts->erstba_p->dwEvrsTableSize -
2115 (erdp_i - rts->er_enq_idx);
2117 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u",
2118 erdp, rts->er_events_cnt));
2124 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx",
/*
 * Read a port register: decode the port number and register offset
 * from the byte offset; out-of-range ports return a default value.
 */
2131 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2133 struct pci_xhci_portregs *portregs;
2137 if (sc->portregs == NULL)
/* each port occupies 0x10 bytes starting at 0x3F0 */
2140 port = (offset - 0x3F0) / 0x10;
2142 if (port > XHCI_MAX_DEVS) {
2143 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS",
2146 /* return default value for unused port */
2147 return (XHCI_PS_SPEED_SET(3));
2150 offset = (offset - 0x3F0) % 0x10;
2152 portregs = XHCI_PORTREG_PTR(sc, port);
2153 p = &portregs->portsc;
2154 p += offset / sizeof(uint32_t);
2156 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x",
/*
 * Operational register write handler (USBCMD/USBSTS/DNCTRL/CRCR/
 * DCBAAP/CONFIG); offsets >= 0x400 fall through to the port registers.
 */
2163 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset,
2166 offset -= XHCI_CAPLEN;
2169 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx",
/* only the architected USBCMD bits (mask 0x3F0F) are writable */
2174 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F);
2178 /* clear bits on write */
2179 sc->opregs.usbsts &= ~(value &
2180 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS|
2181 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR));
2189 sc->opregs.dnctrl = value & 0xFFFF;
/* while the command ring is running only CS/CA may be written */
2193 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) {
2194 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2195 sc->opregs.crcr |= value &
2196 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2198 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) |
2199 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS));
2204 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) {
2205 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) |
/* re-map the command ring dequeue pointer */
2208 sc->opregs.cr_p = XHCI_GADDR(sc,
2209 sc->opregs.crcr & ~0xF);
2212 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) {
2213 /* Stop operation of Command Ring */
2216 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) {
2222 case XHCI_DCBAAP_LO:
2223 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) |
2224 (value & 0xFFFFFFC0);
2227 case XHCI_DCBAAP_HI:
2228 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) |
/* map the device context base address array (64-byte aligned) */
2230 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL);
2232 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)",
2233 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p));
2237 sc->opregs.config = value & 0x03FF;
2241 if (offset >= 0x400)
2242 pci_xhci_portregs_write(sc, offset, value);
/*
 * BAR0 write entry point: dispatch to the capability / operational /
 * doorbell / runtime register handlers by offset, under sc->mtx.
 */
2250 pci_xhci_write(struct vmctx *ctx __unused, int vcpu __unused,
2251 struct pci_devinst *pi, int baridx, uint64_t offset, int size __unused,
2254 struct pci_xhci_softc *sc;
2258 assert(baridx == 0);
2260 pthread_mutex_lock(&sc->mtx);
2261 if (offset < XHCI_CAPLEN) /* read only registers */
2262 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset));
2263 else if (offset < sc->dboff)
2264 pci_xhci_hostop_write(sc, offset, value);
2265 else if (offset < sc->rtsoff)
2266 pci_xhci_dbregs_write(sc, offset, value);
2267 else if (offset < sc->regsend)
2268 pci_xhci_rtsregs_write(sc, offset, value);
2270 WPRINTF(("pci_xhci: write invalid offset %ld", offset));
2272 pthread_mutex_unlock(&sc->mtx);
/*
 * Capability register read handler: return the static controller
 * capability values set up at init time.
 */
2276 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset)
2281 case XHCI_CAPLENGTH: /* 0x00 */
2282 value = sc->caplength;
2285 case XHCI_HCSPARAMS1: /* 0x04 */
2286 value = sc->hcsparams1;
2289 case XHCI_HCSPARAMS2: /* 0x08 */
2290 value = sc->hcsparams2;
2293 case XHCI_HCSPARAMS3: /* 0x0C */
2294 value = sc->hcsparams3;
2297 case XHCI_HCSPARAMS0: /* 0x10 */
2298 value = sc->hccparams1;
2301 case XHCI_DBOFF: /* 0x14 */
2305 case XHCI_RTSOFF: /* 0x18 */
2309 case XHCI_HCCPRAMS2: /* 0x1C */
2310 value = sc->hccparams2;
2318 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx",
/*
 * Operational register read handler; offsets >= 0x400 fall through
 * to the port registers.
 */
2325 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset)
2329 offset = (offset - XHCI_CAPLEN);
2332 case XHCI_USBCMD: /* 0x00 */
2333 value = sc->opregs.usbcmd;
2336 case XHCI_USBSTS: /* 0x04 */
2337 value = sc->opregs.usbsts;
2340 case XHCI_PAGESIZE: /* 0x08 */
2341 value = sc->opregs.pgsz;
2344 case XHCI_DNCTRL: /* 0x14 */
2345 value = sc->opregs.dnctrl;
/* CRCR reads expose only the CRR bit; the pointer reads as zero */
2348 case XHCI_CRCR_LO: /* 0x18 */
2349 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR;
2352 case XHCI_CRCR_HI: /* 0x1C */
2356 case XHCI_DCBAAP_LO: /* 0x30 */
2357 value = sc->opregs.dcbaap & 0xFFFFFFFF;
2360 case XHCI_DCBAAP_HI: /* 0x34 */
2361 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF;
2364 case XHCI_CONFIG: /* 0x38 */
2365 value = sc->opregs.config;
2369 if (offset >= 0x400)
2370 value = pci_xhci_portregs_read(sc, offset);
2378 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx",
/* Doorbell register read handler: doorbells are write-only. */
2385 pci_xhci_dbregs_read(struct pci_xhci_softc *sc __unused,
2386 uint64_t offset __unused)
2388 /* read doorbell always returns 0 */
/*
 * Runtime register read handler: MFINDEX, or one of the interrupter
 * registers (selected by indexing into the intrreg structure).
 */
2393 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2397 offset -= sc->rtsoff;
2400 if (offset == XHCI_MFINDEX) {
2401 value = sc->rtsregs.mfindex;
2402 } else if (offset >= 0x20) {
2409 assert(offset < sizeof(sc->rtsregs.intrreg));
/* treat the interrupter register set as an array of 32-bit words */
2411 p = &sc->rtsregs.intrreg.iman;
2412 p += item / sizeof(uint32_t);
2416 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x",
/*
 * Extended capability read handler: two Supported Protocol
 * capabilities, one for USB 2 ports and one for USB 3 ports.
 */
2423 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset)
2427 offset -= sc->regsend;
2432 /* rev major | rev minor | next-cap | cap-id */
2433 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS;
2436 /* name string = "USB" */
2440 /* psic | proto-defined | compat # | compat offset */
2441 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start;
2446 /* rev major | rev minor | next-cap | cap-id */
2447 value = (0x03 << 24) | XHCI_ID_PROTOCOLS;
2450 /* name string = "USB" */
2454 /* psic | proto-defined | compat # | compat offset */
2455 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start;
2460 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset));
2464 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x",
/*
 * BAR0 read entry point: dispatch by offset to the capability /
 * operational / doorbell / runtime / extended-capability handlers,
 * under sc->mtx.
 */
2472 pci_xhci_read(struct vmctx *ctx __unused, int vcpu __unused,
2473 struct pci_devinst *pi, int baridx, uint64_t offset, int size)
2475 struct pci_xhci_softc *sc;
2480 assert(baridx == 0);
2482 pthread_mutex_lock(&sc->mtx);
2483 if (offset < XHCI_CAPLEN)
2484 value = pci_xhci_hostcap_read(sc, offset);
2485 else if (offset < sc->dboff)
2486 value = pci_xhci_hostop_read(sc, offset);
2487 else if (offset < sc->rtsoff)
2488 value = pci_xhci_dbregs_read(sc, offset);
2489 else if (offset < sc->regsend)
2490 value = pci_xhci_rtsregs_read(sc, offset);
2491 else if (offset < (sc->regsend + 4*32))
2492 value = pci_xhci_xecp_read(sc, offset);
2495 WPRINTF(("pci_xhci: read invalid offset %ld", offset));
2498 pthread_mutex_unlock(&sc->mtx);
/* narrow accesses only return the low 32 bits */
2508 value &= 0xFFFFFFFF;
/*
 * Reset a root-hub port: enable it at the attached device's speed,
 * set the (warm) reset-change flag, and raise a Port Status Change
 * event the first time the reset-change bit transitions.
 */
2516 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm)
2518 struct pci_xhci_portregs *port;
2519 struct pci_xhci_dev_emu *dev;
2520 struct xhci_trb evtrb;
2523 assert(portn <= XHCI_MAX_DEVS);
2525 DPRINTF(("xhci reset port %d", portn));
2527 port = XHCI_PORTREG_PTR(sc, portn);
2528 dev = XHCI_DEVINST_PTR(sc, portn);
/* clear link state / reset bits, then enable at the device's speed */
2530 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC);
2531 port->portsc |= XHCI_PS_PED |
2532 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
/* warm reset change is only meaningful for USB 3 devices */
2534 if (warm && dev->dev_ue->ue_usbver == 3) {
2535 port->portsc |= XHCI_PS_WRC;
/* only raise an event on the 0 -> 1 transition of PRC */
2538 if ((port->portsc & XHCI_PS_PRC) == 0) {
2539 port->portsc |= XHCI_PS_PRC;
2541 pci_xhci_set_evtrb(&evtrb, portn,
2542 XHCI_TRB_ERROR_SUCCESS,
2543 XHCI_TRB_EVENT_PORT_STS_CHANGE);
2544 error = pci_xhci_insert_event(sc, &evtrb, 1);
2545 if (error != XHCI_TRB_ERROR_SUCCESS)
2546 DPRINTF(("xhci reset port insert event "
/*
 * Initialize a root-hub port's PORTSC at startup: connected +
 * powered when a device is attached (link state depending on the
 * device's USB version), otherwise disconnected (RxDetect) + powered.
 */
2553 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn)
2555 struct pci_xhci_portregs *port;
2556 struct pci_xhci_dev_emu *dev;
2558 port = XHCI_PORTREG_PTR(sc, portn);
2559 dev = XHCI_DEVINST_PTR(sc, portn);
2561 port->portsc = XHCI_PS_CCS | /* connected */
2562 XHCI_PS_PP; /* port power */
/* USB 2 ports start in Polling; USB 3 ports come up enabled in U0 */
2564 if (dev->dev_ue->ue_usbver == 2) {
2565 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) |
2566 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2568 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) |
2569 XHCI_PS_PED | /* enabled */
2570 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2573 DPRINTF(("Init port %d 0x%x", portn, port->portsc));
2575 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP;
2576 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc));
/*
 * Device-model callback: a device endpoint has data ready.  Convert
 * the HW endpoint/direction into an xHCI epid, resume the port from
 * U3 if suspended, and ring the endpoint's doorbell.
 */
2581 pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
2583 struct pci_xhci_dev_emu *dev;
2584 struct xhci_dev_ctx *dev_ctx;
2585 struct xhci_trb evtrb;
2586 struct pci_xhci_softc *sc;
2587 struct pci_xhci_portregs *p;
2588 struct xhci_endp_ctx *ep_ctx;
/* bit 7 of epctx encodes the IN direction */
2593 dir_in = epctx & 0x80;
2594 epid = epctx & ~0x80;
2596 /* HW endpoint contexts are 0-15; convert to epid based on dir */
2597 epid = (epid * 2) + (dir_in ? 1 : 0);
2599 assert(epid >= 1 && epid <= 31);
2604 /* check if device is ready; OS has to initialise it */
2605 if (sc->rtsregs.erstba_p == NULL ||
2606 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 ||
2607 dev->dev_ctx == NULL)
2610 p = XHCI_PORTREG_PTR(sc, hci->hci_port);
2612 /* raise event if link U3 (suspended) state */
2613 if (XHCI_PS_PLS_GET(p->portsc) == 3) {
2614 p->portsc &= ~XHCI_PS_PLS_MASK;
2615 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME);
2616 if ((p->portsc & XHCI_PS_PLC) != 0)
2619 p->portsc |= XHCI_PS_PLC;
2621 pci_xhci_set_evtrb(&evtrb, hci->hci_port,
2622 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE);
2623 error = pci_xhci_insert_event(sc, &evtrb, 0);
2624 if (error != XHCI_TRB_ERROR_SUCCESS)
2628 dev_ctx = dev->dev_ctx;
2629 ep_ctx = &dev_ctx->ctx_ep[epid];
/* ignore interrupts for endpoints the guest has not enabled */
2630 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) {
2631 DPRINTF(("xhci device interrupt on disabled endpoint %d",
2636 DPRINTF(("xhci device interrupt on endpoint %d", epid));
2638 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0);
/* Device-model event callback: currently only logs the port. */
2645 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid __unused,
2646 void *param __unused)
2648 DPRINTF(("xhci device event port %d", hci->hci_port));
2653 * Each controller contains a "slot" node which contains a list of
2654 * child nodes each of which is a device. Each slot node's name
2655 * corresponds to a specific controller slot. These nodes
2656 * contain a "device" variable identifying the device model of the
2657 * USB device. For example:
/*
 * Translate the legacy comma-separated option string into the nvlist
 * config tree: each "device[=<config>]" entry becomes a numbered
 * child of the "slot" node with a "device" value.
 */
2666 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts)
2669 nvlist_t *slots_nvl, *slot_nvl;
2670 char *cp, *opt, *str, *tofree;
2676 slots_nvl = create_relative_config_node(nvl, "slot");
2678 tofree = str = strdup(opts);
2679 while ((opt = strsep(&str, ",")) != NULL) {
2680 /* device[=<config>] */
2681 cp = strchr(opt, '=');
2687 snprintf(node_name, sizeof(node_name), "%d", slot);
2689 slot_nvl = create_relative_config_node(slots_nvl, node_name);
2690 set_config_value_node(slot_nvl, "device", opt);
2693 * NB: Given that we split on commas above, the legacy
2694 * format only supports a single option.
2696 if (cp != NULL && *cp != '\0')
2697 pci_parse_legacy_config(slot_nvl, cp);
/*
 * Build the emulated device tree from the "slot" config nodes:
 * allocate the devices/slots/portregs arrays, create a device model
 * instance for each configured slot, and assign it a USB 2 or USB 3
 * root-hub port according to the model's USB version.
 */
2704 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl)
2706 struct pci_xhci_dev_emu *dev;
2707 struct usb_devemu *ue;
2708 const nvlist_t *slots_nvl, *slot_nvl;
2709 const char *name, *device;
2711 void *devsc, *cookie;
2713 int type, usb3_port, usb2_port, i, ndevices;
2715 usb3_port = sc->usb3_port_start;
2716 usb2_port = sc->usb2_port_start;
2718 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *));
2719 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *));
2721 /* port and slot numbering start from 1 */
2727 slots_nvl = find_relative_config_node(nvl, "slot");
2728 if (slots_nvl == NULL)
2732 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) {
/* stop early when either port pool is exhausted */
2733 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) ||
2734 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) {
2735 WPRINTF(("pci_xhci max number of USB 2 or 3 "
2736 "devices reached, max %d", XHCI_MAX_DEVS/2));
2740 if (type != NV_TYPE_NVLIST) {
2742 "pci_xhci: config variable '%s' under slot node",
/* node name must be a valid slot number in 1..XHCI_MAX_SLOTS */
2747 slot = strtol(name, &cp, 0);
2748 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) {
2749 EPRINTLN("pci_xhci: invalid slot '%s'", name);
2753 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) {
2754 EPRINTLN("pci_xhci: duplicate slot '%s'", name);
2758 slot_nvl = nvlist_get_nvlist(slots_nvl, name);
2759 device = get_config_value_node(slot_nvl, "device");
2760 if (device == NULL) {
2762 "pci_xhci: missing \"device\" value for slot '%s'",
2767 ue = usb_emu_finddev(device);
2769 EPRINTLN("pci_xhci: unknown device model \"%s\"",
2774 DPRINTF(("pci_xhci adding device %s", device));
2776 dev = calloc(1, sizeof(struct pci_xhci_dev_emu));
2778 dev->hci.hci_sc = dev;
2779 dev->hci.hci_intr = pci_xhci_dev_intr;
2780 dev->hci.hci_event = pci_xhci_dev_event;
/* pick the next free port from the matching USB-version pool */
2782 if (ue->ue_usbver == 2) {
2783 if (usb2_port == sc->usb2_port_start +
2784 XHCI_MAX_DEVS / 2) {
2785 WPRINTF(("pci_xhci max number of USB 2 devices "
2786 "reached, max %d", XHCI_MAX_DEVS / 2));
2789 dev->hci.hci_port = usb2_port;
2792 if (usb3_port == sc->usb3_port_start +
2793 XHCI_MAX_DEVS / 2) {
2794 WPRINTF(("pci_xhci max number of USB 3 devices "
2795 "reached, max %d", XHCI_MAX_DEVS / 2));
2798 dev->hci.hci_port = usb3_port;
2801 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev;
2803 dev->hci.hci_address = 0;
2804 devsc = ue->ue_init(&dev->hci, nvl);
2805 if (devsc == NULL) {
2810 dev->dev_sc = devsc;
2812 XHCI_SLOTDEV_PTR(sc, slot) = dev;
2817 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs));
2821 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2822 pci_xhci_init_port(sc, i);
2825 WPRINTF(("pci_xhci no USB devices configured"));
/* error path: release every created device and the arrays */
2830 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2831 free(XHCI_DEVINST_PTR(sc, i));
/* NOTE(review): frees base+1 — presumably sc->devices/sc->slots were
 * decremented after calloc so 1-based indexing works (see the
 * "numbering start from 1" comment above); confirm against the
 * missing lines before changing. */
2834 free(sc->devices + 1);
2835 free(sc->slots + 1);
/*
 * PCI device init entry point: allocate the softc, parse the configured
 * USB devices, populate the emulated xHCI capability/operational register
 * values, program the PCI config space identity, and allocate BAR 0.
 * NOTE(review): partial extraction -- the already-defined check, error
 * handling after parse, and the tail of the function (BAR setup epilogue,
 * return) are among the missing lines.
 */
2841 pci_xhci_init(struct vmctx *ctx __unused, struct pci_devinst *pi, nvlist_t *nvl)
2843 struct pci_xhci_softc *sc;
/* Only a single xHCI controller instance is supported per VM. */
2847 WPRINTF(("pci_xhci controller already defined"));
2852 sc = calloc(1, sizeof(struct pci_xhci_softc));
/* USB3 root ports come first (1..N/2), USB2 ports occupy the upper half. */
2856 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1;
2857 sc->usb3_port_start = 1;
2859 /* discover devices */
2860 error = pci_xhci_parse_devices(sc, nvl);
/* Capability registers: advertise xHCI 1.0 with our port/slot limits. */
2866 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) |
2867 XHCI_SET_HCIVERSION(0x0100);
2868 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) |
2869 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */
2870 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS);
2871 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) |
2872 XHCI_SET_HCSP2_IST(0x04);
2873 sc->hcsparams3 = 0; /* no latency */
2874 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */
2875 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */
2876 XHCI_SET_HCCP1_SPC(1) | /* short packet */
2877 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX);
2878 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) |
2879 XHCI_SET_HCCP2_U3C(1);
/* Doorbell array sits right after the per-port operational registers. */
2880 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START +
2881 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs));
2883 /* dboff must be 32-bit aligned */
2884 if (sc->dboff & 0x3)
2885 sc->dboff = (sc->dboff + 0x3) & ~0x3;
2887 /* rtsoff must be 32-bytes aligned */
/* One 32-byte doorbell stride per slot plus the 0th (command) doorbell. */
2888 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32);
2889 if (sc->rtsoff & 0x1F)
2890 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F;
2892 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff,
/* Controller starts halted (HCH) with a 4K page size, per xHCI reset state. */
2895 sc->opregs.usbsts = XHCI_STS_HCH;
2896 sc->opregs.pgsz = XHCI_PAGESIZE_4K;
2900 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */
2903 * Set extended capabilities pointer to be after regsend;
2904 * value of xecp field is 32-bit offset.
2906 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4);
/* Present as an Intel 7-Series (Panther Point) xHCI controller. */
2908 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31);
2909 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2910 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS);
2911 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB);
2912 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI);
2913 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0);
2915 pci_emul_add_msicap(pi, 1);
2917 /* regsend + xecp registers */
2918 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32);
2919 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32));
2922 pci_lintr_request(pi);
/* Serializes MMIO register access against device-model callbacks. */
2924 pthread_mutex_init(&sc->mtx, NULL);
2934 #ifdef BHYVE_SNAPSHOT
/*
 * For the snapshot code below: record, per slot, which device-table index
 * the slot's device lives at (maps[slot] = port index, 0 if unbound).
 * Only the loop skeleton is visible in this extraction; the pointer
 * comparison and the maps[] assignment are on missing lines -- TODO confirm.
 */
2936 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[])
2939 struct pci_xhci_dev_emu *dev, *slot;
/*
 * NOTE(review): the caller declares maps[XHCI_MAX_SLOTS + 1] and indexes
 * 1..XHCI_MAX_SLOTS, but this memset clears only XHCI_MAX_SLOTS entries
 * (indices 0..XHCI_MAX_SLOTS-1), leaving maps[XHCI_MAX_SLOTS] possibly
 * uninitialized when that slot has no device -- confirm upstream intent.
 */
2941 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS)
2943 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
2944 for (j = 1; j <= XHCI_MAX_DEVS; j++) {
2945 slot = XHCI_SLOTDEV_PTR(sc, i);
2946 dev = XHCI_DEVINST_PTR(sc, j);
/*
 * Save or restore one endpoint's transfer state (the usb_data_xfer ring
 * plus the pending USB device request).  SNAPSHOT_* macros are
 * bidirectional: the same sequence runs for both save and restore, and
 * meta->op selects the direction.  Ordering of the macro calls is the
 * wire format and must not change.
 * NOTE(review): partial extraction -- the early-exit when xfer is NULL and
 * parts of the ureq handling are on missing lines.
 */
2955 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc __unused,
2956 struct pci_xhci_dev_emu *dev, int idx, struct vm_snapshot_meta *meta)
2960 struct usb_data_xfer *xfer;
2961 struct usb_data_xfer_block *xfer_block;
2963 /* some sanity checks */
2964 if (meta->op == VM_SNAPSHOT_SAVE)
2965 xfer = dev->eps[idx].ep_xfer;
/*
 * The pointer value itself is snapshotted; on restore it presumably only
 * serves as a NULL/non-NULL flag since host addresses are not stable
 * across save/restore -- TODO confirm against the missing lines.
 */
2967 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done);
/* On restore, recreate the endpoint so ep_xfer exists before filling it. */
2973 if (meta->op == VM_SNAPSHOT_RESTORE) {
2974 pci_xhci_init_ep(dev, idx);
2975 xfer = dev->eps[idx].ep_xfer;
2978 /* save / restore proper */
2979 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) {
2980 xfer_block = &xfer->data[k];
/* buf is a guest physical address; translated to host mapping on restore. */
2982 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf,
2983 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret,
2985 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done);
2986 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done);
2987 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done);
2988 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done);
2989 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done);
2990 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done);
2991 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done);
2994 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done);
2996 /* xfer->ureq is not allocated at restore time */
2997 if (meta->op == VM_SNAPSHOT_RESTORE)
/* NOTE(review): malloc result is not checked before the BUF macro uses it. */
2998 xfer->ureq = malloc(sizeof(struct usb_device_request));
3000 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq,
3001 sizeof(struct usb_device_request),
3005 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done);
3006 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done);
3007 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done);
/*
 * Save/restore the whole controller: capability and operational registers,
 * runtime (interrupter + event ring) state, per-device identity checks,
 * port registers, and the slot->device bindings.  The SNAPSHOT_* macro
 * sequence is the wire format -- save and restore execute the same lines,
 * so reordering any of them breaks old snapshots.
 * NOTE(review): partial extraction -- loop-exit conditions, NULL-device
 * skips, and the final return are on missing lines.
 */
3014 pci_xhci_snapshot(struct vm_snapshot_meta *meta)
3019 struct pci_devinst *pi;
3020 struct pci_xhci_softc *sc;
3021 struct pci_xhci_portregs *port;
3022 struct pci_xhci_dev_emu *dev;
3023 char dname[SNAP_DEV_NAME_LEN];
3024 int maps[XHCI_MAX_SLOTS + 1];
3026 pi = meta->dev_data;
/* Capability registers (read-only to the guest, but part of the format). */
3029 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done);
3030 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done);
3031 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done);
3032 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done);
3033 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done);
3034 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done);
3035 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done);
3036 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done);
3037 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done);
/* Operational registers. */
3040 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done);
3041 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done);
3042 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done);
3043 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done);
3044 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done);
3045 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done);
3046 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done);
/* Command ring pointer: guest physical, re-translated on restore. */
3049 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p,
3050 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done);
3052 /* opregs.dcbaa_p */
3053 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p,
3054 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done);
/* Runtime registers (single interrupter -- see pci_xhci_init). */
3057 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done);
3059 /* rtsregs.intrreg */
3060 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done);
3061 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done);
3062 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done);
3063 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done);
3064 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done);
3065 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done);
3067 /* rtsregs.erstba_p */
3068 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p,
3069 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done);
3071 /* rtsregs.erst_p */
3072 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p,
3073 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done);
3075 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done);
3076 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done);
3077 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done);
3078 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done);
3079 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done);
3081 /* sanity checking */
/* Verify the restored snapshot's device layout matches this VM's config. */
3082 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3083 dev = XHCI_DEVINST_PTR(sc, i);
3087 if (meta->op == VM_SNAPSHOT_SAVE)
3089 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done);
3091 /* check if the restored device (when restoring) is sane */
3092 if (restore_idx != i) {
3093 fprintf(stderr, "%s: idx not matching: actual: %d, "
3094 "expected: %d\r\n", __func__, restore_idx, i);
/* Device-model names must match between snapshot and current config. */
3099 if (meta->op == VM_SNAPSHOT_SAVE) {
3100 memset(dname, 0, sizeof(dname));
3101 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1);
3104 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done);
3106 if (meta->op == VM_SNAPSHOT_RESTORE) {
3107 dname[sizeof(dname) - 1] = '\0';
3108 if (strcmp(dev->dev_ue->ue_emu, dname)) {
3109 fprintf(stderr, "%s: device names mismatch: "
3110 "actual: %s, expected: %s\r\n",
3111 __func__, dname, dev->dev_ue->ue_emu);
/* Per-port register state. */
3120 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3121 port = XHCI_PORTREG_PTR(sc, i);
3122 dev = XHCI_DEVINST_PTR(sc, i);
3127 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done);
3128 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done);
3129 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done);
3130 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done);
/* Slot bindings: save the slot->device map, rebuild sc->slots on restore. */
3134 if (meta->op == VM_SNAPSHOT_SAVE)
3135 pci_xhci_map_devs_slots(sc, maps);
3137 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
3138 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done);
3140 if (meta->op == VM_SNAPSHOT_SAVE) {
3141 dev = XHCI_SLOTDEV_PTR(sc, i);
3142 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
3144 dev = XHCI_DEVINST_PTR(sc, maps[i]);
3148 XHCI_SLOTDEV_PTR(sc, i) = dev;
/* Per-device context (guest memory) and endpoint transfer state. */
3158 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx,
3159 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done);
3161 if (dev->dev_ctx != NULL) {
/* Endpoint index 0 is skipped; DCI numbering starts at 1 -- TODO confirm. */
3162 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) {
3163 ret = pci_xhci_snapshot_ep(sc, dev, j, meta);
3169 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done);
3171 /* devices[i]->dev_sc */
/* NOTE(review): ue_snapshot's return value is discarded, so errors from
 * the device model's own snapshot go unreported -- confirm intended. */
3172 dev->dev_ue->ue_snapshot(dev->dev_sc, meta);
3174 /* devices[i]->hci */
3175 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done);
3176 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done);
3179 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done);
3180 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done);
/*
 * bhyve PCI device-emulation descriptor for this controller; PCI_EMUL_SET()
 * adds it to the linker set scanned when bhyve matches "-s <n>,xhci,...".
 * NOTE(review): the .pe_emu name field, the #endif, and the closing brace
 * are among the lines missing from this extraction.
 */
3187 static const struct pci_devemu pci_de_xhci = {
3189 .pe_init = pci_xhci_init,
3190 .pe_legacy_config = pci_xhci_legacy_config,
3191 .pe_barwrite = pci_xhci_write,
3192 .pe_barread = pci_xhci_read,
3193 #ifdef BHYVE_SNAPSHOT
3194 .pe_snapshot = pci_xhci_snapshot,
3197 PCI_EMUL_SET(pci_de_xhci);