2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 tablet — emulated USB tablet mouse device (device name used on the bhyve command line)
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
40 #include <sys/types.h>
41 #include <sys/queue.h>
51 #include <machine/vmm_snapshot.h>
53 #include <dev/usb/usbdi.h>
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usb_freebsd.h>
/*
 * NOTE(review): this listing is a line-numbered extract with lines elided;
 * the leading integers on each line are artifacts of the extraction, not code.
 */
/* Debug logging: DPRINTF is gated on the runtime xhci_debug flag, WPRINTF always prints. */
66 static int xhci_debug = 0;
67 #define DPRINTF(params) if (xhci_debug) PRINTLN params
68 #define WPRINTF(params) PRINTLN params
/* Controller identity and capacity limits for this emulation. */
71 #define XHCI_NAME "xhci"
72 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */
74 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */
77 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping
78 * to 4k to avoid going over the guest physical memory barrier.
80 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */
82 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */
/* MMIO layout constants: capability registers end at XHCI_CAPLEN, port registers start at 0x400. */
84 #define XHCI_CAPLEN (4*8) /* offset of op register space */
85 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */
86 #define XHCI_PORTREGS_START 0x400
87 #define XHCI_DOORBELL_MAX 256
89 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */
91 /* caplength and hci-version registers */
92 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF)
93 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16)
94 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF)
96 /* hcsparams1 register */
97 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF)
98 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8)
99 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24)
101 /* hcsparams2 register */
102 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F)
103 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4)
104 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21)
105 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27)
107 /* hcsparams3 register */
108 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF)
109 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16)
111 /* hccparams1 register */
112 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01)
113 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1)
114 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2)
115 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3)
116 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4)
117 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5)
118 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6)
119 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7)
120 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8)
121 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9)
122 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10)
123 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11)
124 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12)
125 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16)
127 /* hccparams2 register */
128 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01)
129 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1)
130 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2)
131 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3)
132 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4)
133 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5)
135 /* other registers */
136 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03)
137 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F)
/* PORTSC field masks: link state, speed, and port-indicator bit ranges. */
140 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */
141 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */
142 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */
144 /* port register set */
145 #define XHCI_PORTREGS_BASE 0x400 /* base offset */
146 #define XHCI_PORTREGS_PORT0 0x3F0
147 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */
/* Split a 64-bit value into its high/low 32-bit halves (for hi/lo register writes). */
149 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL)
150 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL)
/* FIELD_REPLACE: insert value b into field (mask m, shift s) of a.
 * FIELD_COPY: copy the already-shifted field from b into a. */
152 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \
153 (((b) & (m)) << (s)))
154 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \
155 (((b) & ((m) << (s)))))
157 #define SNAP_DEV_NAME_LEN 128
/*
 * A transfer ring cursor: guest physical dequeue address plus the
 * consumer cycle state (CCS) bit used to detect valid TRBs.
 */
159 struct pci_xhci_trb_ring {
160 uint64_t ringaddr; /* current dequeue guest address */
161 uint32_t ccs; /* consumer cycle state */
164 /* device endpoint transfer/stream rings */
165 struct pci_xhci_dev_ep {
/* NOTE(review): the union/struct wrapper lines around these members were
 * elided from this extract; ep_tr/ep_sctx below alias these via macros. */
167 struct xhci_trb *_epu_tr;
168 struct xhci_stream_ctx *_epu_sctx;
170 #define ep_tr _ep_trbsctx._epu_tr
171 #define ep_sctx _ep_trbsctx._epu_sctx
174 * Caches the value of MaxPStreams from the endpoint context
175 * when an endpoint is initialized and is used to validate the
176 * use of ep_ringaddr vs ep_sctx_trbs[] as well as the length
179 uint32_t ep_MaxPStreams;
/* Either a single transfer ring (no streams) or an array of per-stream rings. */
181 struct pci_xhci_trb_ring _epu_trb;
182 struct pci_xhci_trb_ring *_epu_sctx_trbs;
184 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr
185 #define ep_ccs _ep_trb_rings._epu_trb.ccs
186 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs
188 struct usb_data_xfer *ep_xfer; /* transfer chain */
191 /* device context base address array: maps slot->device context */
193 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */
196 /* port status registers */
/* One register set per root-hub port; mirrors the xHCI PORTSC/PORTPMSC/PORTLI/PORTHLPMC layout. */
197 struct pci_xhci_portregs {
198 uint32_t portsc; /* port status and control */
199 uint32_t portpmsc; /* port pwr mgmt status & control */
200 uint32_t portli; /* port link info */
201 uint32_t porthlpmc; /* port hardware LPM control */
/* Encode a port speed value into the PORTSC speed field (bits 10-13). */
203 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10)
205 /* xHC operational registers */
206 struct pci_xhci_opregs {
207 uint32_t usbcmd; /* usb command */
208 uint32_t usbsts; /* usb status */
209 uint32_t pgsz; /* page size */
210 uint32_t dnctrl; /* device notification control */
211 uint64_t crcr; /* command ring control */
212 uint64_t dcbaap; /* device ctx base addr array ptr */
213 uint32_t config; /* configure */
215 /* guest mapped addresses: */
/* Host-side pointers obtained by translating the guest physical crcr/dcbaap values. */
216 struct xhci_trb *cr_p; /* crcr dequeue */
217 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */
220 /* xHC runtime registers */
221 struct pci_xhci_rtsregs {
222 uint32_t mfindex; /* microframe index */
223 struct { /* interrupter register set */
224 uint32_t iman; /* interrupter management */
225 uint32_t imod; /* interrupter moderation */
226 uint32_t erstsz; /* event ring segment table size */
228 uint64_t erstba; /* event ring seg-tbl base addr */
229 uint64_t erdp; /* event ring dequeue ptr */
232 /* guest mapped addresses */
233 struct xhci_event_ring_seg *erstba_p;
234 struct xhci_trb *erst_p; /* event ring segment tbl */
/* Event-ring cursors: only a single-segment ring is tracked in practice
 * (see the "TODO: multi-segment table" note in pci_xhci_insert_event). */
235 int er_deq_seg; /* event ring dequeue segment */
236 int er_enq_idx; /* event ring enqueue index - xHCI */
237 int er_enq_seg; /* event ring enqueue segment */
238 uint32_t er_events_cnt; /* number of events in ER */
239 uint32_t event_pcs; /* producer cycle state flag */
243 struct pci_xhci_softc;
247 * USB device emulation container.
248 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each
249 * emulated device instance.
251 struct pci_xhci_dev_emu {
252 struct pci_xhci_softc *xsc; /* back-pointer to owning controller */
/* Guest-visible device context plus per-endpoint emulation state. */
255 struct xhci_dev_ctx *dev_ctx;
256 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS];
259 struct usb_devemu *dev_ue; /* USB emulated dev */
260 void *dev_sc; /* device's softc */
/* Per-controller emulation state: capability register images, op/runtime
 * register state, and the port/slot -> emulated-device maps. */
265 struct pci_xhci_softc {
266 struct pci_devinst *xsc_pi;
/* Read-only capability register values reported to the guest. */
270 uint32_t caplength; /* caplen & hciversion */
271 uint32_t hcsparams1; /* structural parameters 1 */
272 uint32_t hcsparams2; /* structural parameters 2 */
273 uint32_t hcsparams3; /* structural parameters 3 */
274 uint32_t hccparams1; /* capability parameters 1 */
275 uint32_t dboff; /* doorbell offset */
276 uint32_t rtsoff; /* runtime register space offset */
277 uint32_t hccparams2; /* capability parameters 2 */
279 uint32_t regsend; /* end of configuration registers */
281 struct pci_xhci_opregs opregs;
282 struct pci_xhci_rtsregs rtsregs;
284 struct pci_xhci_portregs *portregs;
285 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */
286 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */
293 /* port and slot numbering start from 1 */
294 #define XHCI_PORTREG_PTR(x,n) &((x)->portregs[(n) - 1])
295 #define XHCI_DEVINST_PTR(x,n) ((x)->devices[(n) - 1])
296 #define XHCI_SLOTDEV_PTR(x,n) ((x)->slots[(n) - 1])
298 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH)
/* Map a guest physical address; size is clamped so the mapping never
 * crosses an XHCI_PADDR_SZ (4k) boundary. */
300 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \
301 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1)))
302 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \
303 (a), XHCI_GADDR_SIZE(a))
305 static int xhci_in_use;
307 /* map USB errors to XHCI */
/* Translation table from usb_devemu error codes to xHCI TRB completion
 * codes; indexed by USB_ERR_* (bounds-checked by USB_TO_XHCI_ERR below). */
308 static const int xhci_usb_errors[USB_ERR_MAX] = {
309 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS,
310 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE,
311 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
312 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID,
313 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE,
314 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED,
315 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER,
316 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER,
317 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER,
318 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL,
319 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE,
320 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE,
321 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE,
322 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED,
323 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED,
324 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE,
325 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON,
326 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE,
327 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB,
328 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
329 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED,
330 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT,
331 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL,
332 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED,
333 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF,
334 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB,
335 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED,
336 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED,
337 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED,
/* Out-of-range error codes map to XHCI_TRB_ERROR_INVALID. */
339 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \
340 XHCI_TRB_ERROR_INVALID)
/* Forward declarations for helpers defined later in this file. */
342 static int pci_xhci_insert_event(struct pci_xhci_softc *sc,
343 struct xhci_trb *evtrb, int do_intr);
344 static void pci_xhci_dump_trb(struct xhci_trb *trb);
345 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc);
346 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot);
347 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm);
348 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc,
349 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
350 struct xhci_endp_ctx *ep_ctx, uint32_t streamid,
351 uint64_t ringaddr, int ccs);
/*
 * Fill in an event TRB: port number goes into bits 24+ of qwTrb0,
 * the completion code into dwTrb2 and the event type into dwTrb3.
 * NOTE(review): the return-type line and the evtype parameter line were
 * elided from this extract.
 */
354 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode,
357 evtrb->qwTrb0 = port << 24;
358 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode);
359 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype);
363 /* controller reset */
/* Reset event-ring enqueue state (index 0, empty, producer cycle = 1)
 * and reset every device slot. */
365 pci_xhci_reset(struct pci_xhci_softc *sc)
369 sc->rtsregs.er_enq_idx = 0;
370 sc->rtsregs.er_events_cnt = 0;
371 sc->rtsregs.event_pcs = 1;
373 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
374 pci_xhci_reset_slot(sc, i);
/*
 * Handle a guest write to the USBCMD operational register.
 * Run/Stop transition to "run" clears HCH, sets PCD, and for each attached
 * device marks its port connected (CSC|CCS), sets the link state per
 * USB version, and queues a port-status-change event.  HCRST resets the
 * whole controller; save/restore state bits (CSS/CRS) are ignored.
 */
379 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd)
384 if (cmd & XHCI_CMD_RS) {
/* Interrupt only on a stop -> run transition. */
385 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0;
387 sc->opregs.usbcmd |= XHCI_CMD_RS;
388 sc->opregs.usbsts &= ~XHCI_STS_HCH;
389 sc->opregs.usbsts |= XHCI_STS_PCD;
391 /* Queue port change event on controller run from stop */
393 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
394 struct pci_xhci_dev_emu *dev;
395 struct pci_xhci_portregs *port;
396 struct xhci_trb evtrb;
398 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL)
401 port = XHCI_PORTREG_PTR(sc, i);
402 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS;
403 port->portsc &= ~XHCI_PS_PLS_MASK;
406 * XHCI 4.19.3 USB2 RxDetect->Polling,
/* USB2 ports enter Polling; USB3 ports go straight to U0. */
409 if (dev->dev_ue->ue_usbver == 2)
411 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL);
414 XHCI_PS_PLS_SET(UPS_PORT_LS_U0);
416 pci_xhci_set_evtrb(&evtrb, i,
417 XHCI_TRB_ERROR_SUCCESS,
418 XHCI_TRB_EVENT_PORT_STS_CHANGE);
420 if (pci_xhci_insert_event(sc, &evtrb, 0) !=
421 XHCI_TRB_ERROR_SUCCESS)
/* Run/Stop cleared: halt the controller. */
425 sc->opregs.usbcmd &= ~XHCI_CMD_RS;
426 sc->opregs.usbsts |= XHCI_STS_HCH;
427 sc->opregs.usbsts &= ~XHCI_STS_PCD;
430 /* start execution of schedule; stop when set to 0 */
431 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS;
433 if (cmd & XHCI_CMD_HCRST) {
434 /* reset controller */
436 cmd &= ~XHCI_CMD_HCRST;
/* Controller save/restore state is not supported; drop the bits. */
439 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS);
442 pci_xhci_assert_interrupt(sc);
/*
 * Handle a guest write to the per-port register space (offset >= 0x400).
 * Decodes the port number and register offset, then dispatches on the
 * register within the set (PORTSC / PORTPMSC / PORTLI / PORTHLPMC).
 * NOTE(review): the switch statement and several case labels were elided
 * from this extract; the dispatch structure is implied by the comments.
 */
448 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset,
451 struct xhci_trb evtrb;
452 struct pci_xhci_portregs *p;
454 uint32_t oldpls, newpls;
456 if (sc->portregs == NULL)
/* Decode port index and register offset within that port's set. */
459 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ;
460 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ;
462 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx",
463 offset, port, value));
/* Ignore writes to out-of-range or unattached ports. */
467 if (port > XHCI_MAX_DEVS) {
468 DPRINTF(("pci_xhci: portregs_write port %d > ndevices",
473 if (XHCI_DEVINST_PTR(sc, port) == NULL) {
474 DPRINTF(("pci_xhci: portregs_write to unattached port %d",
478 p = XHCI_PORTREG_PTR(sc, port);
481 /* port reset or warm reset */
482 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) {
483 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR);
/* Writes to an unpowered port are discarded. */
487 if ((p->portsc & XHCI_PS_PP) == 0) {
488 WPRINTF(("pci_xhci: portregs_write to unpowered "
493 /* Port status and control register */
494 oldpls = XHCI_PS_PLS_GET(p->portsc);
495 newpls = XHCI_PS_PLS_GET(value);
/* Keep only the sticky fields, then merge in writable bits from the guest. */
497 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK |
498 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK;
500 if (XHCI_DEVINST_PTR(sc, port))
501 p->portsc |= XHCI_PS_CCS;
503 p->portsc |= (value &
507 XHCI_PS_PLS_MASK | /* link state */
509 XHCI_PS_PIC_MASK | /* port indicator */
510 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR));
512 /* clear control bits */
/* Write-1-to-clear status bits. */
513 p->portsc &= ~(value &
523 /* port disable request; for USB3, don't care */
524 if (value & XHCI_PS_PED)
525 DPRINTF(("Disable port %d request", port));
/* Link state is only written when LWS (link write strobe) is set. */
527 if (!(value & XHCI_PS_LWS))
530 DPRINTF(("Port new PLS: %d", newpls));
534 if (oldpls != newpls) {
535 p->portsc &= ~XHCI_PS_PLS_MASK;
536 p->portsc |= XHCI_PS_PLS_SET(newpls) |
/* Transition to U0 raises a port-status-change event with interrupt. */
539 if (oldpls != 0 && newpls == 0) {
540 pci_xhci_set_evtrb(&evtrb, port,
541 XHCI_TRB_ERROR_SUCCESS,
542 XHCI_TRB_EVENT_PORT_STS_CHANGE);
544 pci_xhci_insert_event(sc, &evtrb, 1);
550 DPRINTF(("Unhandled change port %d PLS %u",
556 /* Port power management status and control register */
560 /* Port link information register */
561 DPRINTF(("pci_xhci attempted write to PORTLI, port %d",
566 * Port hardware LPM control register.
567 * For USB3, this register is reserved.
569 p->porthlpmc = value;
572 DPRINTF(("pci_xhci: unaligned portreg write offset %#lx",
/*
 * Look up the guest device context for an enabled slot: read the guest
 * physical address from the DCBAA entry for the slot and map it into
 * host memory.  Returns NULL (implied by the elided lines) when the
 * DCBAA entry is zero.
 */
578 static struct xhci_dev_ctx *
579 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot)
581 uint64_t devctx_addr;
582 struct xhci_dev_ctx *devctx;
/* Caller contract: slot in range, slot device assigned, DCBAA mapped. */
584 assert(slot > 0 && slot <= XHCI_MAX_DEVS);
585 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL);
586 assert(sc->opregs.dcbaa_p != NULL);
588 devctx_addr = sc->opregs.dcbaa_p->dcba[slot];
590 if (devctx_addr == 0) {
591 DPRINTF(("get_dev_ctx devctx_addr == 0"));
595 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx",
/* Device context pointers are 64-byte aligned; mask the low bits. */
597 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL);
/*
 * Advance to the next TRB on a ring.  A link TRB redirects to the
 * (16-byte aligned) address in its qwTrb0; otherwise step linearly to
 * the next TRB.  guestaddr, when non-NULL, receives the new guest
 * physical address.
 * NOTE(review): the non-link branch and the return were elided from
 * this extract.
 */
602 static struct xhci_trb *
603 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb,
606 struct xhci_trb *next;
608 assert(curtrb != NULL);
610 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) {
612 *guestaddr = curtrb->qwTrb0 & ~0xFUL;
614 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL);
617 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL;
/*
 * Raise the controller interrupt: mark the event ring busy (ERDP busy
 * bit), set the interrupt-pending bit in IMAN and EINT in USBSTS, then
 * deliver via MSI if enabled, otherwise via the legacy INTx line —
 * but only when INTE (USBCMD) and the interrupter enable (IMAN) allow it.
 */
626 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc)
629 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY;
630 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND;
631 sc->opregs.usbsts |= XHCI_STS_EINT;
633 /* only trigger interrupt if permitted */
634 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) &&
635 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) {
636 if (pci_msi_enabled(sc->xsc_pi))
637 pci_generate_msi(sc->xsc_pi, 0);
639 pci_lintr_assert(sc->xsc_pi);
/*
 * Deassert the legacy interrupt line when MSI is not in use.
 * NOTE(review): the call here is pci_lintr_assert() even though the
 * function is named "deassert" — possibly a transcription artifact of
 * this extract (an elided/garbled pci_lintr_deassert call); confirm
 * against the authoritative source before relying on it.
 */
644 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc)
647 if (!pci_msi_enabled(sc->xsc_pi))
648 pci_lintr_assert(sc->xsc_pi)
/*
 * Initialize emulation state for endpoint `epid` of `dev` from its
 * guest endpoint context.  With MaxPStreams > 0, maps the stream
 * context array and builds a per-stream ring-cursor array; otherwise
 * caches the single transfer ring's dequeue pointer and cycle state.
 * Also lazily allocates the endpoint's usb_data_xfer chain.
 */
652 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid)
654 struct xhci_dev_ctx *dev_ctx;
655 struct pci_xhci_dev_ep *devep;
656 struct xhci_endp_ctx *ep_ctx;
657 uint32_t i, pstreams;
659 dev_ctx = dev->dev_ctx;
660 ep_ctx = &dev_ctx->ctx_ep[epid];
661 devep = &dev->eps[epid];
662 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0);
664 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams));
665 assert(devep->ep_sctx_trbs == NULL);
/* Streams: map the guest stream context array and snapshot each
 * stream's dequeue pointer / cycle state. */
667 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 &
668 XHCI_EPCTX_2_TR_DQ_PTR_MASK);
669 devep->ep_sctx_trbs = calloc(pstreams,
670 sizeof(struct pci_xhci_trb_ring));
671 for (i = 0; i < pstreams; i++) {
672 devep->ep_sctx_trbs[i].ringaddr =
673 devep->ep_sctx[i].qwSctx0 &
674 XHCI_SCTX_0_TR_DQ_PTR_MASK;
675 devep->ep_sctx_trbs[i].ccs =
676 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0);
/* No streams: single transfer ring straight from the endpoint context. */
679 DPRINTF(("init_ep %d with no pstreams", epid));
680 devep->ep_ringaddr = ep_ctx->qwEpCtx2 &
681 XHCI_EPCTX_2_TR_DQ_PTR_MASK;
682 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2);
683 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr);
684 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs));
686 devep->ep_MaxPStreams = pstreams;
688 if (devep->ep_xfer == NULL) {
689 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer));
690 USB_DATA_XFER_INIT(devep->ep_xfer);
/*
 * Disable endpoint `epid`: mark its guest endpoint context DISABLED,
 * free any per-stream ring array and the transfer chain, and zero the
 * endpoint emulation state.
 */
695 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid)
697 struct xhci_dev_ctx *dev_ctx;
698 struct pci_xhci_dev_ep *devep;
699 struct xhci_endp_ctx *ep_ctx;
701 DPRINTF(("pci_xhci disable_ep %d", epid));
703 dev_ctx = dev->dev_ctx;
704 ep_ctx = &dev_ctx->ctx_ep[epid];
/* Endpoint state lives in the low 3 bits of dwEpCtx0. */
705 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED;
707 devep = &dev->eps[epid];
708 if (devep->ep_MaxPStreams > 0)
709 free(devep->ep_sctx_trbs);
711 if (devep->ep_xfer != NULL) {
712 free(devep->ep_xfer);
713 devep->ep_xfer = NULL;
/* Clears ep_sctx_trbs too, satisfying init_ep's NULL assertion on re-enable. */
716 memset(devep, 0, sizeof(struct pci_xhci_dev_ep));
720 /* reset device at slot and data structures related to it */
722 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot)
724 struct pci_xhci_dev_emu *dev;
726 dev = XHCI_SLOTDEV_PTR(sc, slot);
/* Unassigned slots just get a debug note; assigned slots go to DISABLED. */
729 DPRINTF(("xhci reset unassigned slot (%d)?", slot));
731 dev->dev_slotstate = XHCI_ST_DISABLED;
734 /* TODO: reset ring buffer pointers */
/*
 * Enqueue an event TRB on the (single-segment) event ring.
 * Returns XHCI_TRB_ERROR_SUCCESS, or XHCI_TRB_ERROR_EV_RING_FULL when
 * the ring is full — in which case the second-to-last slot is used for
 * a HOST_CTRL "ring full" error event.  When do_intr is set (and on the
 * ring-full path) the controller interrupt is asserted.
 */
738 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb,
741 struct pci_xhci_rtsregs *rts;
745 struct xhci_trb *evtrbptr;
747 err = XHCI_TRB_ERROR_SUCCESS;
/* ERDP low bits carry flags; mask them off to get the dequeue address. */
751 erdp = rts->intrreg.erdp & ~0xF;
752 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) /
753 sizeof(struct xhci_trb);
755 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]",
756 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3));
757 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u",
758 erdp_idx, rts->er_deq_seg, rts->er_enq_idx,
759 rts->er_enq_seg, rts->event_pcs));
760 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)",
761 erdp, rts->erstba_p->qwEvrsTablePtr,
762 rts->erstba_p->dwEvrsTableSize, do_intr));
764 evtrbptr = &rts->erst_p[rts->er_enq_idx];
766 /* TODO: multi-segment table */
767 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) {
768 DPRINTF(("pci_xhci[%d] cannot insert event; ring full",
770 err = XHCI_TRB_ERROR_EV_RING_FULL;
/* One slot left: insert a "ring full" error event instead of evtrb. */
774 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) {
775 struct xhci_trb errev;
/* Cycle bit matching producer cycle means the slot is still unconsumed. */
777 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) {
779 DPRINTF(("pci_xhci[%d] insert evt err: ring full",
783 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET(
784 XHCI_TRB_ERROR_EV_RING_FULL);
785 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET(
786 XHCI_TRB_EVENT_HOST_CTRL) |
788 rts->er_events_cnt++;
789 memcpy(&rts->erst_p[rts->er_enq_idx], &errev,
790 sizeof(struct xhci_trb));
791 rts->er_enq_idx = (rts->er_enq_idx + 1) %
792 rts->erstba_p->dwEvrsTableSize;
793 err = XHCI_TRB_ERROR_EV_RING_FULL;
/* Normal path: stamp the producer cycle bit and copy the TRB in. */
799 rts->er_events_cnt++;
802 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT;
803 evtrb->dwTrb3 |= rts->event_pcs;
805 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb));
806 rts->er_enq_idx = (rts->er_enq_idx + 1) %
807 rts->erstba_p->dwEvrsTableSize;
/* Wrap-around toggles the producer cycle state (elided toggle line). */
809 if (rts->er_enq_idx == 0)
814 pci_xhci_assert_interrupt(sc);
/*
 * Enable Slot command: find the first disabled slot, mark it ENABLED,
 * record its hci address, and return it via *slot.  Returns (elided)
 * cmderr — XHCI_TRB_ERROR_NO_SLOTS when none are free.
 */
820 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot)
822 struct pci_xhci_dev_emu *dev;
826 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
827 if (sc->portregs != NULL)
828 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
829 dev = XHCI_SLOTDEV_PTR(sc, i);
830 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) {
832 dev->dev_slotstate = XHCI_ST_ENABLED;
833 cmderr = XHCI_TRB_ERROR_SUCCESS;
834 dev->hci.hci_address = i;
839 DPRINTF(("pci_xhci enable slot (error=%d) slot %u",
840 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot));
/*
 * Disable Slot command: validate the slot number and current state,
 * then move the slot to DISABLED.  Returns SLOT_NOT_ON for bad slot
 * numbers, already-disabled slots, or unassigned slots.
 */
846 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot)
848 struct pci_xhci_dev_emu *dev;
851 DPRINTF(("pci_xhci disable slot %u", slot));
853 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
854 if (sc->portregs == NULL)
857 if (slot > XHCI_MAX_SLOTS) {
858 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
862 dev = XHCI_SLOTDEV_PTR(sc, slot);
864 if (dev->dev_slotstate == XHCI_ST_DISABLED) {
865 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
867 dev->dev_slotstate = XHCI_ST_DISABLED;
868 cmderr = XHCI_TRB_ERROR_SUCCESS;
869 /* TODO: reset events and endpoints */
/* (elided else branch) no device assigned to this slot */
872 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
/*
 * Reset Device command: return the slot to the DEFAULT state — clear
 * the device address, mark the slot context DEFAULT with one active
 * context entry, and disable every endpoint except EP0.
 */
879 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot)
881 struct pci_xhci_dev_emu *dev;
882 struct xhci_dev_ctx *dev_ctx;
883 struct xhci_endp_ctx *ep_ctx;
887 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
888 if (sc->portregs == NULL)
891 DPRINTF(("pci_xhci reset device slot %u", slot));
893 dev = XHCI_SLOTDEV_PTR(sc, slot);
894 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED)
895 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
897 dev->dev_slotstate = XHCI_ST_DEFAULT;
899 dev->hci.hci_address = 0;
900 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
/* Slot state field of dwSctx3 -> DEFAULT. */
903 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
904 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT,
907 /* number of contexts */
908 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
909 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
911 /* reset all eps other than ep-0 */
912 for (i = 2; i <= 31; i++) {
913 ep_ctx = &dev_ctx->ctx_ep[i];
914 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0,
915 XHCI_ST_EPCTX_DISABLED, 0x7, 0);
918 cmderr = XHCI_TRB_ERROR_SUCCESS;
921 pci_xhci_reset_slot(sc, slot);
/*
 * Address Device command: validate the input context (drop flags must
 * be 0, add flags must include slot + EP0), reset the emulated device,
 * copy the input slot and EP0 contexts into the output device context,
 * set the slot to ADDRESSED with the slot number as device address,
 * and initialize EP0's transfer ring.
 */
928 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot,
929 struct xhci_trb *trb)
931 struct pci_xhci_dev_emu *dev;
932 struct xhci_input_dev_ctx *input_ctx;
933 struct xhci_slot_ctx *islot_ctx;
934 struct xhci_dev_ctx *dev_ctx;
935 struct xhci_endp_ctx *ep0_ctx;
/* trb->qwTrb0 holds the guest physical input-context pointer (16-byte aligned). */
938 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
939 islot_ctx = &input_ctx->ctx_slot;
940 ep0_ctx = &input_ctx->ctx_ep[1];
942 cmderr = XHCI_TRB_ERROR_SUCCESS;
944 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,",
945 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
946 DPRINTF((" slot %08x %08x %08x %08x",
947 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
948 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
949 DPRINTF((" ep0 %08x %08x %016lx %08x",
950 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
953 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */
954 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
955 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) {
956 DPRINTF(("pci_xhci: address device, input ctl invalid"));
957 cmderr = XHCI_TRB_ERROR_TRB;
961 /* assign address to slot */
962 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
964 DPRINTF(("pci_xhci: address device, dev ctx"));
965 DPRINTF((" slot %08x %08x %08x %08x",
966 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
967 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
969 dev = XHCI_SLOTDEV_PTR(sc, slot);
972 dev->hci.hci_address = slot;
973 dev->dev_ctx = dev_ctx;
/* Give the device emulation a chance to reset; failure aborts the command. */
975 if (dev->dev_ue->ue_reset == NULL ||
976 dev->dev_ue->ue_reset(dev->dev_sc) < 0) {
977 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
981 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx));
/* The slot number doubles as the USB device address in this emulation. */
983 dev_ctx->ctx_slot.dwSctx3 =
984 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) |
985 XHCI_SCTX_3_DEV_ADDR_SET(slot);
987 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx));
988 ep0_ctx = &dev_ctx->ctx_ep[1];
989 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) |
990 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING);
992 pci_xhci_init_ep(dev, 1);
994 dev->dev_slotstate = XHCI_ST_ADDRESSED;
996 DPRINTF(("pci_xhci: address device, output ctx"));
997 DPRINTF((" slot %08x %08x %08x %08x",
998 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
999 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1000 DPRINTF((" ep0 %08x %08x %016lx %08x",
1001 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1002 ep0_ctx->dwEpCtx4));
/*
 * Configure Endpoint command.  With the deconfigure bit set, stops the
 * device emulation, returns the slot to ADDRESSED and disables all
 * non-EP0 endpoints.  Otherwise walks endpoints 2..31: dropped
 * endpoints are disabled, added endpoints are copied from the input
 * context, initialized, and marked RUNNING; finally the slot context
 * goes to CONFIGURED with the context-entry count copied from input.
 */
1009 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot,
1010 struct xhci_trb *trb)
1012 struct xhci_input_dev_ctx *input_ctx;
1013 struct pci_xhci_dev_emu *dev;
1014 struct xhci_dev_ctx *dev_ctx;
1015 struct xhci_endp_ctx *ep_ctx, *iep_ctx;
1019 cmderr = XHCI_TRB_ERROR_SUCCESS;
1021 DPRINTF(("pci_xhci config_ep slot %u", slot));
1023 dev = XHCI_SLOTDEV_PTR(sc, slot);
1024 assert(dev != NULL);
1026 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) {
1027 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u",
1029 if (dev->dev_ue->ue_stop != NULL)
1030 dev->dev_ue->ue_stop(dev->dev_sc);
1032 dev->dev_slotstate = XHCI_ST_ADDRESSED;
1034 dev->hci.hci_address = 0;
1035 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1037 /* number of contexts */
1038 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
1039 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
/* Slot state back to ADDRESSED. */
1042 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1043 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED,
1046 /* disable endpoints */
1047 for (i = 2; i < 32; i++)
1048 pci_xhci_disable_ep(dev, i);
1050 cmderr = XHCI_TRB_ERROR_SUCCESS;
/* Configure requires the slot to already be at least ADDRESSED. */
1055 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) {
1056 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed",
1057 dev->dev_slotstate));
1058 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
1062 /* In addressed/configured state;
1063 * for each drop endpoint ctx flag:
1064 * ep->state = DISABLED
1065 * for each add endpoint ctx flag:
1067 * ep->state = RUNNING
1068 * for each drop+add endpoint flag:
1069 * reset ep resources
1071 * ep->state = RUNNING
1072 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled)
1073 * slot->state = configured
1076 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1077 dev_ctx = dev->dev_ctx;
1078 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x",
1079 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1,
1080 input_ctx->ctx_input.dwInCtx7));
1082 for (i = 2; i <= 31; i++) {
1083 ep_ctx = &dev_ctx->ctx_ep[i];
1085 if (input_ctx->ctx_input.dwInCtx0 &
1086 XHCI_INCTX_0_DROP_MASK(i)) {
1087 DPRINTF((" config ep - dropping ep %d", i));
1088 pci_xhci_disable_ep(dev, i);
1091 if (input_ctx->ctx_input.dwInCtx1 &
1092 XHCI_INCTX_1_ADD_MASK(i)) {
1093 iep_ctx = &input_ctx->ctx_ep[i];
1095 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x",
1096 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1,
1097 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4));
1099 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx));
1101 pci_xhci_init_ep(dev, i);
/* Newly added endpoint is immediately RUNNING. */
1104 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1105 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1109 /* slot state to configured */
1110 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1111 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27);
1112 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY(
1113 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27);
1114 dev->dev_slotstate = XHCI_ST_CONFIGURED;
1116 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x "
1118 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1119 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
/*
 * Reset Endpoint / Stop Endpoint command handler (dispatched on the
 * TRB type).  Resets the endpoint's transfer chain, marks the guest
 * endpoint context STOPPED, and — for a true Reset Endpoint — also
 * resets the emulated device.
 */
1126 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot,
1127 struct xhci_trb *trb)
1129 struct pci_xhci_dev_emu *dev;
1130 struct pci_xhci_dev_ep *devep;
1131 struct xhci_dev_ctx *dev_ctx;
1132 struct xhci_endp_ctx *ep_ctx;
1133 uint32_t cmderr, epid;
1136 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1138 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot));
1140 cmderr = XHCI_TRB_ERROR_SUCCESS;
1142 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1144 dev = XHCI_SLOTDEV_PTR(sc, slot);
1145 assert(dev != NULL);
1147 if (type == XHCI_TRB_TYPE_STOP_EP &&
1148 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) {
1149 /* XXX suspend endpoint for 10ms */
/* Endpoint IDs are 1..31 (EP0 control = 1). */
1152 if (epid < 1 || epid > 31) {
1153 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid));
1154 cmderr = XHCI_TRB_ERROR_TRB;
1158 devep = &dev->eps[epid];
1159 if (devep->ep_xfer != NULL)
1160 USB_DATA_XFER_RESET(devep->ep_xfer);
1162 dev_ctx = dev->dev_ctx;
1163 assert(dev_ctx != NULL);
1165 ep_ctx = &dev_ctx->ctx_ep[epid];
1167 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
/* Without streams, write the cached dequeue pointer + cycle bit back
 * into the guest endpoint context. */
1169 if (devep->ep_MaxPStreams == 0)
1170 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs;
1172 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x",
1173 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
1176 if (type == XHCI_TRB_TYPE_RESET_EP &&
1177 (dev->dev_ue->ue_reset == NULL ||
1178 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) {
1179 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
/*
 * Validate a stream ID against an endpoint's stream configuration.
 * Requires: endpoint has streams (MaxPStreams > 0), within the
 * emulation's XHCI_STREAMS_MAX limit, LSA (linear stream array) set,
 * streamid within range, and the guest stream context has a non-zero
 * SCT.  Returns an XHCI_TRB_ERROR_* code; SUCCESS when valid.
 */
1189 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep,
1190 struct pci_xhci_dev_ep *devep, uint32_t streamid)
1192 struct xhci_stream_ctx *sctx;
1194 if (devep->ep_MaxPStreams == 0)
1195 return (XHCI_TRB_ERROR_TRB);
1197 if (devep->ep_MaxPStreams > XHCI_STREAMS_MAX)
1198 return (XHCI_TRB_ERROR_INVALID_SID);
1200 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) {
1201 DPRINTF(("pci_xhci: find_stream; LSA bit not set"));
1202 return (XHCI_TRB_ERROR_INVALID_SID);
1205 /* only support primary stream */
1206 if (streamid > devep->ep_MaxPStreams)
1207 return (XHCI_TRB_ERROR_STREAM_TYPE);
/* Index into the guest stream context array (elided "+ streamid" line). */
1209 sctx = (struct xhci_stream_ctx *)XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) +
1211 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0))
1212 return (XHCI_TRB_ERROR_STREAM_TYPE);
1214 return (XHCI_TRB_ERROR_SUCCESS);
/*
 * Set TR Dequeue Pointer command: update an endpoint's (or stream's)
 * transfer-ring dequeue pointer and cycle state from the command TRB.
 * Only valid when the endpoint is STOPPED or in ERROR state; leaves
 * the endpoint STOPPED afterwards.
 */
1219 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot,
1220 struct xhci_trb *trb)
1222 struct pci_xhci_dev_emu *dev;
1223 struct pci_xhci_dev_ep *devep;
1224 struct xhci_dev_ctx *dev_ctx;
1225 struct xhci_endp_ctx *ep_ctx;
1226 uint32_t cmderr, epid;
1229 cmderr = XHCI_TRB_ERROR_SUCCESS;
1231 dev = XHCI_SLOTDEV_PTR(sc, slot);
1232 assert(dev != NULL);
1234 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u",
1235 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7),
1236 (uint32_t)(trb->qwTrb0 & 0x1)));
1237 DPRINTF((" stream-id %u, slot %u, epid %u, C %u",
1238 (trb->dwTrb2 >> 16) & 0xFFFF,
1239 XHCI_TRB_3_SLOT_GET(trb->dwTrb3),
1240 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1));
1242 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1243 if (epid < 1 || epid > 31) {
1244 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid));
1245 cmderr = XHCI_TRB_ERROR_TRB;
1249 dev_ctx = dev->dev_ctx;
1250 assert(dev_ctx != NULL);
1252 ep_ctx = &dev_ctx->ctx_ep[epid];
1253 devep = &dev->eps[epid];
/* Only STOPPED/ERROR endpoints may have their dequeue pointer set. */
1255 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) {
1256 case XHCI_ST_EPCTX_STOPPED:
1257 case XHCI_ST_EPCTX_ERROR:
1260 DPRINTF(("pci_xhci cmd set_tr invalid state %x",
1261 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)));
1262 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE;
1266 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2);
1267 if (devep->ep_MaxPStreams > 0) {
/* Stream endpoint: validate streamid, then update that stream's cursor. */
1268 cmderr = pci_xhci_find_stream(sc, ep_ctx, devep, streamid);
1269 if (cmderr == XHCI_TRB_ERROR_SUCCESS) {
1270 assert(devep->ep_sctx != NULL);
1272 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0;
1273 devep->ep_sctx_trbs[streamid].ringaddr =
1275 devep->ep_sctx_trbs[streamid].ccs =
1276 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0);
/* Non-stream endpoint: streamid must be 0; update the single ring. */
1279 if (streamid != 0) {
1280 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0",
1283 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL;
1284 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL;
1285 devep->ep_ccs = trb->qwTrb0 & 0x1;
1286 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr);
1288 DPRINTF(("pci_xhci set_tr first TRB:"));
1289 pci_xhci_dump_trb(devep->ep_tr);
1291 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
/*
 * Handle the Evaluate Context command: copy selected fields (max exit
 * latency, interrupter target, EP0 max packet size) from the guest's
 * input context into the slot's output device context.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1298 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot,
1299 struct xhci_trb *trb)
1301 struct xhci_input_dev_ctx *input_ctx;
1302 struct xhci_slot_ctx *islot_ctx;
1303 struct xhci_dev_ctx *dev_ctx;
1304 struct xhci_endp_ctx *ep0_ctx;
/* Map the guest-physical input context pointed to by the TRB. */
1307 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1308 islot_ctx = &input_ctx->ctx_slot;
1309 ep0_ctx = &input_ctx->ctx_ep[1];
1311 cmderr = XHCI_TRB_ERROR_SUCCESS;
1312 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,",
1313 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
1314 DPRINTF((" slot %08x %08x %08x %08x",
1315 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
1316 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
1317 DPRINTF((" ep0 %08x %08x %016lx %08x",
1318 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1319 ep0_ctx->dwEpCtx4));
1321 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */
1322 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
1323 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) {
1324 DPRINTF(("pci_xhci: eval ctx, input ctl invalid"));
1325 cmderr = XHCI_TRB_ERROR_TRB;
1329 /* assign address to slot; in this emulation, slot_id = address */
1330 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1332 DPRINTF(("pci_xhci: eval ctx, dev ctx"));
1333 DPRINTF((" slot %08x %08x %08x %08x",
1334 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1335 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1337 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */
1338 /* set max exit latency */
1339 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY(
1340 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1,
1343 /* set interrupter target */
1344 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY(
1345 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2,
1348 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */
1349 /* set max packet size */
1350 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY(
1351 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1,
/* Re-point ep0_ctx at the OUTPUT context for the dump below. */
1354 ep0_ctx = &dev_ctx->ctx_ep[1];
1357 DPRINTF(("pci_xhci: eval ctx, output ctx"));
1358 DPRINTF((" slot %08x %08x %08x %08x",
1359 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1360 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1361 DPRINTF((" ep0 %08x %08x %016lx %08x",
1362 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1363 ep0_ctx->dwEpCtx4));
/*
 * Walk the command ring, executing each command TRB whose cycle bit
 * matches the consumer cycle state, and post a Command Completion
 * event for every command (Link TRBs excepted).  CRR is set while the
 * ring is being processed and cleared when done.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1370 pci_xhci_complete_commands(struct pci_xhci_softc *sc)
1372 struct xhci_trb evtrb;
1373 struct xhci_trb *trb;
1375 uint32_t ccs; /* cycle state (XHCI 4.9.2) */
/* Mark Command Ring Running while we consume TRBs. */
1382 sc->opregs.crcr |= XHCI_CRCR_LO_CRR;
1384 trb = sc->opregs.cr_p;
1385 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS;
1386 crcr = sc->opregs.crcr & ~0xF;
1389 sc->opregs.cr_p = trb;
1391 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
/* Stop when the producer hasn't flipped the cycle bit yet. */
1393 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) !=
1394 (ccs & XHCI_TRB_3_CYCLE_BIT))
1397 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x"
1398 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u",
1399 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3,
1400 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs));
1402 cmderr = XHCI_TRB_ERROR_SUCCESS;
/* Pre-build the completion event; cmderr/slot are filled in below. */
1404 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) |
1405 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE);
1409 case XHCI_TRB_TYPE_LINK: /* 0x06 */
/* Toggle-Cycle bit flips our consumer cycle state. */
1410 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
1411 ccs ^= XHCI_CRCR_LO_RCS;
1414 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */
1415 cmderr = pci_xhci_cmd_enable_slot(sc, &slot);
1418 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */
1419 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1420 cmderr = pci_xhci_cmd_disable_slot(sc, slot);
1423 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */
1424 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1425 cmderr = pci_xhci_cmd_address_device(sc, slot, trb);
1428 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */
1429 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1430 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb);
1433 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */
1434 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1435 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb);
1438 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */
/*
 * NOTE(review): this DPRINTF logs 'slot' before it is parsed
 * from the TRB on the next line, so it prints the previous
 * command's slot value (same issue in the STOP_EP case below).
 */
1439 DPRINTF(("Reset Endpoint on slot %d", slot));
1440 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1441 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1444 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */
1445 DPRINTF(("Stop Endpoint on slot %d", slot));
1446 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
/* Stop Endpoint shares the Reset Endpoint handler (same cleanup). */
1447 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1450 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */
1451 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1452 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb);
1455 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */
1456 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1457 cmderr = pci_xhci_cmd_reset_device(sc, slot);
1460 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */
1464 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */
1467 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */
1470 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */
1473 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */
1476 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */
1480 DPRINTF(("pci_xhci: unsupported cmd %x", type));
1484 if (type != XHCI_TRB_TYPE_LINK) {
1486 * insert command completion event and assert intr
1488 evtrb.qwTrb0 = crcr;
1489 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr);
1490 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot);
1491 DPRINTF(("pci_xhci: command 0x%x result: 0x%x",
1493 pci_xhci_insert_event(sc, &evtrb, 1);
1496 trb = pci_xhci_trb_next(sc, trb, &crcr);
/* Persist dequeue pointer + cycle state; clear Command Ring Running. */
1499 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs;
1500 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR;
/*
 * Debug helper: print a TRB's type (by name, when the type is within
 * the known table) and its raw fields.
 * NOTE(review): interior lines (the table contents) are elided here.
 */
1505 pci_xhci_dump_trb(struct xhci_trb *trb)
1507 static const char *trbtypes[] = {
1535 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1536 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x",
1538 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID",
1539 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3));
/*
 * Walk the completed blocks of a USB data transfer, advance the
 * endpoint ring dequeue state for each, and insert Transfer events on
 * the event ring (only for IOC, ISP-on-short-packet, or Event Data
 * TRBs).  Sets *do_intr when an interrupt should be raised.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1543 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer,
1544 uint32_t slot, uint32_t epid, int *do_intr)
1546 struct pci_xhci_dev_emu *dev;
1547 struct pci_xhci_dev_ep *devep;
1548 struct xhci_dev_ctx *dev_ctx;
1549 struct xhci_endp_ctx *ep_ctx;
1550 struct xhci_trb *trb;
1551 struct xhci_trb evtrb;
1556 dev = XHCI_SLOTDEV_PTR(sc, slot);
1557 devep = &dev->eps[epid];
1558 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1560 assert(dev_ctx != NULL);
1562 ep_ctx = &dev_ctx->ctx_ep[epid];
1564 err = XHCI_TRB_ERROR_SUCCESS;
1568 /* go through list of TRBs and insert event(s) */
1569 for (i = xfer->head; xfer->ndata > 0; ) {
/* hci_data holds the guest-physical address of the originating TRB. */
1570 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data;
1571 trb = XHCI_GADDR(sc, evtrb.qwTrb0);
1572 trbflags = trb->dwTrb3;
1574 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x "
1576 i, xfer->data[i].processed, xfer->data[i].blen,
1577 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0,
1579 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0));
/* Stop at the first block the device has not processed yet. */
1581 if (!xfer->data[i].processed) {
/* Accumulate Event Data Transfer Length Accumulator (EDTLA). */
1587 edtla += xfer->data[i].bdone;
/* Hand the TRB back to the guest with the consumer cycle state. */
1589 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs);
1591 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx,
1592 xfer->data[i].streamid, xfer->data[i].trbnext,
1595 /* Only interrupt if IOC or short packet */
1596 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) &&
1597 !((err == XHCI_TRB_ERROR_SHORT_PKT) &&
1598 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) {
1600 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/* Build the Transfer event: completion code + residual length. */
1604 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) |
1605 XHCI_TRB_2_REM_SET(xfer->data[i].blen);
1607 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) |
1608 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid);
/* Event Data TRBs report the accumulated EDTLA instead. */
1610 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) {
1611 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla));
1612 evtrb.qwTrb0 = trb->qwTrb0;
1613 evtrb.dwTrb2 = (edtla & 0xFFFFF) |
1614 XHCI_TRB_2_ERROR_SET(err);
1615 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT;
1621 err = pci_xhci_insert_event(sc, &evtrb, 0);
1622 if (err != XHCI_TRB_ERROR_SUCCESS) {
1626 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/*
 * Record a new transfer-ring dequeue pointer and cycle state for an
 * endpoint: into the stream context when the endpoint uses streams,
 * otherwise directly into the endpoint context / cached ring state.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1633 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc,
1634 struct pci_xhci_dev_emu *dev __unused, struct pci_xhci_dev_ep *devep,
1635 struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs)
1638 if (devep->ep_MaxPStreams != 0) {
/* Streams: dequeue pointer lives in the per-stream context. */
1639 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) |
1642 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL;
1643 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1;
/* Only the cycle-state bit of qwEpCtx2 changes for streams. */
1644 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1);
1646 DPRINTF(("xhci update ep-ring stream %d, addr %lx",
1647 streamid, devep->ep_sctx[streamid].qwSctx0));
/* No streams: update cached ring state and the endpoint context. */
1649 devep->ep_ringaddr = ringaddr & ~0xFUL;
1650 devep->ep_ccs = ccs & 0x1;
1651 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL);
1652 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1);
1654 DPRINTF(("xhci update ep-ring, addr %lx",
1655 (devep->ep_ringaddr | devep->ep_ccs)));
 * Outstanding transfer still in progress (device NAK'd earlier) so retry
 * the transfer again to see if it succeeds.
/*
 * Re-drive a pending transfer through the device model's ue_data
 * callback; on completion, post the transfer events and raise an
 * interrupt if required.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1664 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc,
1665 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1666 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid)
1668 struct usb_data_xfer *xfer;
/* Endpoint goes back to Running while the transfer is retried. */
1672 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1673 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1678 xfer = devep->ep_xfer;
1679 USB_DATA_XFER_LOCK(xfer);
1681 /* outstanding requests queued up */
1682 if (dev->dev_ue->ue_data != NULL) {
/* Odd endpoint IDs are IN, even are OUT; epid/2 is the HW ep number. */
1683 err = dev->dev_ue->ue_data(dev->dev_sc, xfer,
1684 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2);
1685 if (err == USB_ERR_CANCELLED) {
1686 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) ==
1688 err = XHCI_TRB_ERROR_SUCCESS;
1690 err = pci_xhci_xfer_complete(sc, xfer, slot, epid,
1692 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) {
1693 pci_xhci_assert_interrupt(sc);
1697 /* XXX should not do it if error? */
1698 USB_DATA_XFER_RESET(xfer);
1702 USB_DATA_XFER_UNLOCK(xfer);
/*
 * Consume transfer TRBs from an endpoint ring starting at 'addr' with
 * cycle state 'ccs', batching them into a usb_data_xfer, then submit
 * control requests (ue_request) or data transfers (try_usb_xfer) to
 * the device model and post completion events.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1710 pci_xhci_handle_transfer(struct pci_xhci_softc *sc,
1711 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1712 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot,
1713 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid)
1715 struct xhci_trb *setup_trb;
1716 struct usb_data_xfer *xfer;
1717 struct usb_data_xfer_block *xfer_block;
/* The doorbell ring puts the endpoint in the Running state. */
1723 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0,
1724 XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1726 xfer = devep->ep_xfer;
1727 USB_DATA_XFER_LOCK(xfer);
1729 DPRINTF(("pci_xhci handle_transfer slot %u", slot));
1732 err = XHCI_TRB_ERROR_INVALID;
1738 pci_xhci_dump_trb(trb);
1740 trbflags = trb->dwTrb3;
/* Stop when the cycle bit no longer matches (end of valid TRBs). */
1742 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK &&
1743 (trbflags & XHCI_TRB_3_CYCLE_BIT) !=
1744 (ccs & XHCI_TRB_3_CYCLE_BIT)) {
1745 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x",
1746 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs));
1752 switch (XHCI_TRB_3_TYPE_GET(trbflags)) {
1753 case XHCI_TRB_TYPE_LINK:
1754 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
/* Link TRBs contribute an empty, already-processed block. */
1757 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1759 xfer_block->processed = 1;
1762 case XHCI_TRB_TYPE_SETUP_STAGE:
/* Setup stage must carry immediate data of exactly 8 bytes. */
1763 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 ||
1764 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) {
1765 DPRINTF(("pci_xhci: invalid setup trb"));
1766 err = XHCI_TRB_ERROR_TRB;
/* Copy the 8-byte device request out of the TRB. */
1773 xfer->ureq = malloc(
1774 sizeof(struct usb_device_request));
1775 memcpy(xfer->ureq, &val,
1776 sizeof(struct usb_device_request));
1778 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1780 xfer_block->processed = 1;
1783 case XHCI_TRB_TYPE_NORMAL:
1784 case XHCI_TRB_TYPE_ISOCH:
/* Normal/Isoch TRBs are illegal inside a control transfer. */
1785 if (setup_trb != NULL) {
1786 DPRINTF(("pci_xhci: trb not supposed to be in "
1788 err = XHCI_TRB_ERROR_TRB;
1793 case XHCI_TRB_TYPE_DATA_STAGE:
/* IDT set: data is inline in qwTrb0; otherwise it is a gpa. */
1794 xfer_block = usb_data_xfer_append(xfer,
1795 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ?
1796 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)),
1797 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs);
1800 case XHCI_TRB_TYPE_STATUS_STAGE:
1801 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1805 case XHCI_TRB_TYPE_NOOP:
1806 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1808 xfer_block->processed = 1;
1811 case XHCI_TRB_TYPE_EVENT_DATA:
1812 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1814 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) {
1815 xfer_block->processed = 1;
1820 DPRINTF(("pci_xhci: handle xfer unexpected trb type "
1822 XHCI_TRB_3_TYPE_GET(trbflags)));
1823 err = XHCI_TRB_ERROR_TRB;
1827 trb = pci_xhci_trb_next(sc, trb, &addr);
1829 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb));
1832 xfer_block->trbnext = addr;
1833 xfer_block->streamid = streamid;
/* End of batch: not a control transfer, no chain, not a link TRB. */
1836 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) &&
1837 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) {
1841 /* handle current batch that requires interrupt on complete */
1842 if (trbflags & XHCI_TRB_3_IOC_BIT) {
1843 DPRINTF(("pci_xhci: trb IOC bit set"));
1850 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata));
1852 if (xfer->ndata <= 0)
/* Control transfer: hand the request to the device model. */
1858 if (dev->dev_ue->ue_request != NULL)
1859 usberr = dev->dev_ue->ue_request(dev->dev_sc, xfer);
1861 usberr = USB_ERR_NOT_STARTED;
1862 err = USB_TO_XHCI_ERR(usberr);
1863 if (err == XHCI_TRB_ERROR_SUCCESS ||
1864 err == XHCI_TRB_ERROR_STALL ||
1865 err == XHCI_TRB_ERROR_SHORT_PKT) {
1866 err = pci_xhci_xfer_complete(sc, xfer, slot, epid,
1868 if (err != XHCI_TRB_ERROR_SUCCESS)
1873 /* handle data transfer */
1874 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1875 err = XHCI_TRB_ERROR_SUCCESS;
1879 if (err == XHCI_TRB_ERROR_EV_RING_FULL)
1880 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__));
1883 USB_DATA_XFER_UNLOCK(xfer);
1886 pci_xhci_assert_interrupt(sc);
1889 USB_DATA_XFER_RESET(xfer);
1890 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs",
1896 USB_DATA_XFER_RESET(xfer);
/*
 * Handle a device doorbell ring: validate slot/endpoint/stream, flush
 * any transfer still pending on the endpoint, locate the next TRB on
 * the (stream or endpoint) ring, and dispatch it to handle_transfer.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1902 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot,
1903 uint32_t epid, uint32_t streamid)
1905 struct pci_xhci_dev_emu *dev;
1906 struct pci_xhci_dev_ep *devep;
1907 struct xhci_dev_ctx *dev_ctx;
1908 struct xhci_endp_ctx *ep_ctx;
1909 struct pci_xhci_trb_ring *sctx_tr;
1910 struct xhci_trb *trb;
1915 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u",
1916 slot, epid, streamid));
1918 if (slot == 0 || slot > XHCI_MAX_SLOTS) {
1919 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot));
1923 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) {
1924 DPRINTF(("pci_xhci: invalid endpoint %u", epid));
1928 dev = XHCI_SLOTDEV_PTR(sc, slot);
1929 devep = &dev->eps[epid];
1930 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1934 ep_ctx = &dev_ctx->ctx_ep[epid];
1938 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x",
1939 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
/* No transfer ring configured for this endpoint. */
1942 if (ep_ctx->qwEpCtx2 == 0)
1945 /* handle pending transfers */
1946 if (devep->ep_xfer->ndata > 0) {
1947 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1951 /* get next trb work item */
1952 if (devep->ep_MaxPStreams != 0) {
1954 * Stream IDs of 0, 65535 (any stream), and 65534
1955 * (prime) are invalid.
1957 if (streamid == 0 || streamid == 65534 || streamid == 65535) {
1958 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1962 error = pci_xhci_find_stream(sc, ep_ctx, devep, streamid);
1963 if (error != XHCI_TRB_ERROR_SUCCESS) {
1964 DPRINTF(("pci_xhci: invalid stream %u: %d",
/* Ring state for this stream: dequeue pointer + cycle state. */
1968 sctx_tr = &devep->ep_sctx_trbs[streamid];
1969 ringaddr = sctx_tr->ringaddr;
1971 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL);
1972 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x",
1973 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1974 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* Non-streams endpoint: stream ID must be zero. */
1976 if (streamid != 0) {
1977 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1980 ringaddr = devep->ep_ringaddr;
1981 ccs = devep->ep_ccs;
1983 DPRINTF(("doorbell, ccs %lx, trb ccs %x",
1984 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1985 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* TRB type 0 is reserved: ring was never written by the guest. */
1988 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) {
1989 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?",
1990 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid));
1994 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid,
1995 ringaddr, ccs, streamid);
/*
 * Doorbell register write: doorbell 0 kicks command-ring processing,
 * doorbells 1..N ring a device slot (target/stream decoded from the
 * written value).  Ignored while the controller is halted.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
1999 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset,
/* Convert byte offset into a doorbell index (one u32 per doorbell). */
2003 offset = (offset - sc->dboff) / sizeof(uint32_t);
2005 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx",
2008 if (XHCI_HALTED(sc)) {
2009 DPRINTF(("pci_xhci: controller halted"));
2014 pci_xhci_complete_commands(sc);
2015 else if (sc->portregs != NULL)
2016 pci_xhci_device_doorbell(sc, offset,
2017 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value));
/*
 * Runtime register write: dispatch on interrupter register offset
 * (IMAN/IMOD/ERSTSZ/ERSTBA/ERDP).  MFINDEX is read-only.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2021 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset,
2024 struct pci_xhci_rtsregs *rts;
2026 offset -= sc->rtsoff;
2029 DPRINTF(("pci_xhci attempted write to MFINDEX"));
2033 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx",
2036 offset -= 0x20; /* start of intrreg */
/* IMAN: IP is write-1-to-clear; IE is taken from the written value. */
2042 if (value & XHCI_IMAN_INTR_PEND)
2043 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2044 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) |
2045 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND);
2047 if (!(value & XHCI_IMAN_INTR_ENA))
2048 pci_xhci_deassert_interrupt(sc);
2053 rts->intrreg.imod = value;
2057 rts->intrreg.erstsz = value & 0xFFFF;
2061 /* ERSTBA low bits */
2062 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) |
2067 /* ERSTBA high bits */
2068 rts->intrreg.erstba = (value << 32) |
2069 MASK_64_LO(sc->rtsregs.intrreg.erstba);
/* Re-map the event ring segment table and its first segment. */
2071 rts->erstba_p = XHCI_GADDR(sc,
2072 sc->rtsregs.intrreg.erstba & ~0x3FUL);
2074 rts->erst_p = XHCI_GADDR(sc,
2075 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL);
/* New table means the enqueue state starts over. */
2077 rts->er_enq_idx = 0;
2078 rts->er_events_cnt = 0;
2080 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u",
2082 rts->erstba_p->qwEvrsTablePtr,
2083 rts->erstba_p->dwEvrsTableSize));
/* ERDP low: preserve BUSY, clear it (and IMAN.IP) on write-1. */
2089 MASK_64_HI(sc->rtsregs.intrreg.erdp) |
2090 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) |
2092 if (value & XHCI_ERDP_LO_BUSY) {
2093 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY;
2094 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2097 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value);
2102 /* ERDP high bits */
2103 rts->intrreg.erdp = (value << 32) |
2104 MASK_64_LO(sc->rtsregs.intrreg.erdp);
/* Recompute outstanding event count from the new dequeue pointer. */
2106 if (rts->er_events_cnt > 0) {
2110 erdp = rts->intrreg.erdp & ~0xF;
2111 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) /
2112 sizeof(struct xhci_trb);
2114 if (erdp_i <= rts->er_enq_idx)
2115 rts->er_events_cnt = rts->er_enq_idx - erdp_i;
2117 rts->er_events_cnt =
2118 rts->erstba_p->dwEvrsTableSize -
2119 (erdp_i - rts->er_enq_idx);
2121 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u",
2122 erdp, rts->er_events_cnt));
2128 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx",
/*
 * Read one of the four per-port registers (PORTSC/PORTPMSC/PORTLI/
 * PORTHLPMC).  Out-of-range ports return a default value.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2135 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2137 struct pci_xhci_portregs *portregs;
2141 if (sc->portregs == NULL)
/* Split byte offset into a 1-based port number + register offset. */
2144 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ;
2145 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ;
/*
 * NOTE(review): the check uses '>' while the message says '>=';
 * ports are 1-based here so '>' looks intended — confirm.
 */
2147 if (port > XHCI_MAX_DEVS) {
2148 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS",
2151 /* return default value for unused port */
2152 return (XHCI_PS_SPEED_SET(3));
2155 portregs = XHCI_PORTREG_PTR(sc, port);
2158 reg = portregs->portsc;
2161 reg = portregs->portpmsc;
2164 reg = portregs->portli;
2167 reg = portregs->porthlpmc;
2170 DPRINTF(("pci_xhci: unaligned portregs read offset %#lx",
2176 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x",
2177 offset, port, reg));
/*
 * Operational register write: USBCMD/USBSTS/DNCTRL/CRCR/DCBAAP/CONFIG,
 * with offsets >= 0x400 forwarded to the port register handler.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2183 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset,
2186 offset -= XHCI_CAPLEN;
2189 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx",
/* Only the defined USBCMD bits are writable. */
2194 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F);
2198 /* clear bits on write */
2199 sc->opregs.usbsts &= ~(value &
2200 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS|
2201 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR));
2209 sc->opregs.dnctrl = value & 0xFFFF;
/* CRCR low: while the ring runs only CS/CA may change. */
2213 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) {
2214 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2215 sc->opregs.crcr |= value &
2216 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2218 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) |
2219 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS));
/* CRCR high: latch pointer and remap the command ring when idle. */
2224 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) {
2225 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) |
2228 sc->opregs.cr_p = XHCI_GADDR(sc,
2229 sc->opregs.crcr & ~0xF);
2232 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) {
2233 /* Stop operation of Command Ring */
2236 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) {
2242 case XHCI_DCBAAP_LO:
2243 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) |
2244 (value & 0xFFFFFFC0);
2247 case XHCI_DCBAAP_HI:
2248 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) |
/* Full DCBAAP now valid: map the device context base array. */
2250 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL);
2252 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)",
2253 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p));
2257 sc->opregs.config = value & 0x03FF;
2261 if (offset >= 0x400)
2262 pci_xhci_portregs_write(sc, offset, value);
/*
 * BAR0 write entry point: route the access to capability (read-only),
 * operational, doorbell, or runtime register handlers based on offset.
 * Serialized by the softc mutex.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2270 pci_xhci_write(struct pci_devinst *pi, int baridx, uint64_t offset,
2271 int size __unused, uint64_t value)
2273 struct pci_xhci_softc *sc;
2277 assert(baridx == 0);
2279 pthread_mutex_lock(&sc->mtx);
2280 if (offset < XHCI_CAPLEN) /* read only registers */
2281 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset));
2282 else if (offset < sc->dboff)
2283 pci_xhci_hostop_write(sc, offset, value);
2284 else if (offset < sc->rtsoff)
2285 pci_xhci_dbregs_write(sc, offset, value);
2286 else if (offset < sc->regsend)
2287 pci_xhci_rtsregs_write(sc, offset, value);
2289 WPRINTF(("pci_xhci: write invalid offset %ld", offset));
2291 pthread_mutex_unlock(&sc->mtx);
/*
 * Capability register read: return the static controller capability
 * values initialized at device creation.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2295 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset)
2300 case XHCI_CAPLENGTH: /* 0x00 */
2301 value = sc->caplength;
2304 case XHCI_HCSPARAMS1: /* 0x04 */
2305 value = sc->hcsparams1;
2308 case XHCI_HCSPARAMS2: /* 0x08 */
2309 value = sc->hcsparams2;
2312 case XHCI_HCSPARAMS3: /* 0x0C */
2313 value = sc->hcsparams3;
/* NOTE(review): offset 0x10 is HCCPARAMS1 per xHCI; the macro name
 * XHCI_HCSPARAMS0 here appears to be a header quirk — confirm. */
2316 case XHCI_HCSPARAMS0: /* 0x10 */
2317 value = sc->hccparams1;
2320 case XHCI_DBOFF: /* 0x14 */
2324 case XHCI_RTSOFF: /* 0x18 */
2328 case XHCI_HCCPRAMS2: /* 0x1C */
2329 value = sc->hccparams2;
2337 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx",
/*
 * Operational register read; offsets >= 0x400 are forwarded to the
 * port register handler.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2344 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset)
2348 offset = (offset - XHCI_CAPLEN);
2351 case XHCI_USBCMD: /* 0x00 */
2352 value = sc->opregs.usbcmd;
2355 case XHCI_USBSTS: /* 0x04 */
2356 value = sc->opregs.usbsts;
2359 case XHCI_PAGESIZE: /* 0x08 */
2360 value = sc->opregs.pgsz;
2363 case XHCI_DNCTRL: /* 0x14 */
2364 value = sc->opregs.dnctrl;
/* CRCR reads expose only the CRR bit; pointer bits read as zero. */
2367 case XHCI_CRCR_LO: /* 0x18 */
2368 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR;
2371 case XHCI_CRCR_HI: /* 0x1C */
2375 case XHCI_DCBAAP_LO: /* 0x30 */
2376 value = sc->opregs.dcbaap & 0xFFFFFFFF;
2379 case XHCI_DCBAAP_HI: /* 0x34 */
2380 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF;
2383 case XHCI_CONFIG: /* 0x38 */
2384 value = sc->opregs.config;
2388 if (offset >= 0x400)
2389 value = pci_xhci_portregs_read(sc, offset);
2397 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx",
/* Doorbell registers are write-only; reads always return 0. */
2404 pci_xhci_dbregs_read(struct pci_xhci_softc *sc __unused,
2405 uint64_t offset __unused)
2407 /* read doorbell always returns 0 */
/*
 * Runtime register read: MFINDEX or one of the interrupter registers
 * (indexed as an array of u32 starting at IMAN).
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2412 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2416 offset -= sc->rtsoff;
2419 if (offset == XHCI_MFINDEX) {
2420 value = sc->rtsregs.mfindex;
2421 } else if (offset >= 0x20) {
2428 assert(offset < sizeof(sc->rtsregs.intrreg));
/* Index into the interrupter register set as a flat u32 array. */
2430 p = &sc->rtsregs.intrreg.iman;
2431 p += item / sizeof(uint32_t);
2435 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x",
/*
 * Extended capability read: two "Supported Protocol" capability
 * entries (USB 2.0 then USB 3.0), each declaring its port range.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2442 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset)
2446 offset -= sc->regsend;
2451 /* rev major | rev minor | next-cap | cap-id */
2452 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS;
2455 /* name string = "USB" */
2459 /* psic | proto-defined | compat # | compat offset */
2460 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start;
/* Second entry: next-cap field of 0 terminates the capability list. */
2465 /* rev major | rev minor | next-cap | cap-id */
2466 value = (0x03 << 24) | XHCI_ID_PROTOCOLS;
2469 /* name string = "USB" */
2473 /* psic | proto-defined | compat # | compat offset */
2474 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start;
2479 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset));
2483 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x",
/*
 * BAR0 read entry point: route to capability, operational, doorbell,
 * runtime, or extended-capability handlers by offset range.
 * Serialized by the softc mutex.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2491 pci_xhci_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
2493 struct pci_xhci_softc *sc;
2498 assert(baridx == 0);
2500 pthread_mutex_lock(&sc->mtx);
2501 if (offset < XHCI_CAPLEN)
2502 value = pci_xhci_hostcap_read(sc, offset);
2503 else if (offset < sc->dboff)
2504 value = pci_xhci_hostop_read(sc, offset);
2505 else if (offset < sc->rtsoff)
2506 value = pci_xhci_dbregs_read(sc, offset);
2507 else if (offset < sc->regsend)
2508 value = pci_xhci_rtsregs_read(sc, offset);
/* Extended capability region: two 32-byte protocol entries. */
2509 else if (offset < (sc->regsend + 4*32))
2510 value = pci_xhci_xecp_read(sc, offset);
2513 WPRINTF(("pci_xhci: read invalid offset %ld", offset));
2516 pthread_mutex_unlock(&sc->mtx);
/* Truncate to 32 bits for sub-64-bit accesses. */
2526 value &= 0xFFFFFFFF;
/*
 * Complete a (warm or hot) port reset: re-enable the port at the
 * attached device's speed, latch the change bits, and queue a Port
 * Status Change event for the guest.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2534 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm)
2536 struct pci_xhci_portregs *port;
2537 struct pci_xhci_dev_emu *dev;
2538 struct xhci_trb evtrb;
2541 assert(portn <= XHCI_MAX_DEVS);
2543 DPRINTF(("xhci reset port %d", portn));
2545 port = XHCI_PORTREG_PTR(sc, portn);
2546 dev = XHCI_DEVINST_PTR(sc, portn);
/* Clear link state / reset bits, re-enable at the device's speed. */
2548 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC);
2549 port->portsc |= XHCI_PS_PED |
2550 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
/* Warm reset change bit applies only to USB 3 devices. */
2552 if (warm && dev->dev_ue->ue_usbver == 3) {
2553 port->portsc |= XHCI_PS_WRC;
/* Only raise an event for a fresh reset-change transition. */
2556 if ((port->portsc & XHCI_PS_PRC) == 0) {
2557 port->portsc |= XHCI_PS_PRC;
2559 pci_xhci_set_evtrb(&evtrb, portn,
2560 XHCI_TRB_ERROR_SUCCESS,
2561 XHCI_TRB_EVENT_PORT_STS_CHANGE);
2562 error = pci_xhci_insert_event(sc, &evtrb, 1);
2563 if (error != XHCI_TRB_ERROR_SUCCESS)
2564 DPRINTF(("xhci reset port insert event "
/*
 * Initialize a root-hub port's PORTSC at controller creation time:
 * connected + powered when a device is attached (link state depends
 * on USB 2 vs USB 3), otherwise a powered, detached port.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2571 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn)
2573 struct pci_xhci_portregs *port;
2574 struct pci_xhci_dev_emu *dev;
2576 port = XHCI_PORTREG_PTR(sc, portn);
2577 dev = XHCI_DEVINST_PTR(sc, portn);
2579 port->portsc = XHCI_PS_CCS | /* connected */
2580 XHCI_PS_PP; /* port power */
/* USB 2 ports start in Polling; USB 3 ports come up enabled in U0. */
2582 if (dev->dev_ue->ue_usbver == 2) {
2583 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) |
2584 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2586 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) |
2587 XHCI_PS_PED | /* enabled */
2588 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2591 DPRINTF(("Init port %d 0x%x", portn, port->portsc));
/* No device on this port: powered but waiting for attach (RxDetect). */
2593 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP;
2594 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc));
/*
 * Device-model callback: a device endpoint has data ready.  Wakes a
 * suspended (U3) port with a Port Status Change event if needed, then
 * rings the endpoint's doorbell on the device's behalf.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2599 pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
2601 struct pci_xhci_dev_emu *dev;
2602 struct xhci_dev_ctx *dev_ctx;
2603 struct xhci_trb evtrb;
2604 struct pci_xhci_softc *sc;
2605 struct pci_xhci_portregs *p;
2606 struct xhci_endp_ctx *ep_ctx;
/* Bit 7 of epctx encodes the direction (IN when set). */
2611 dir_in = epctx & 0x80;
2612 epid = epctx & ~0x80;
2614 /* HW endpoint contexts are 0-15; convert to epid based on dir */
2615 epid = (epid * 2) + (dir_in ? 1 : 0);
2617 assert(epid >= 1 && epid <= 31);
2622 /* check if device is ready; OS has to initialise it */
2623 if (sc->rtsregs.erstba_p == NULL ||
2624 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 ||
2625 dev->dev_ctx == NULL)
2628 p = XHCI_PORTREG_PTR(sc, hci->hci_port);
2630 /* raise event if link U3 (suspended) state */
2631 if (XHCI_PS_PLS_GET(p->portsc) == 3) {
2632 p->portsc &= ~XHCI_PS_PLS_MASK;
2633 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME);
2634 if ((p->portsc & XHCI_PS_PLC) != 0)
2637 p->portsc |= XHCI_PS_PLC;
2639 pci_xhci_set_evtrb(&evtrb, hci->hci_port,
2640 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE);
2641 error = pci_xhci_insert_event(sc, &evtrb, 0);
2642 if (error != XHCI_TRB_ERROR_SUCCESS)
2646 dev_ctx = dev->dev_ctx;
2647 ep_ctx = &dev_ctx->ctx_ep[epid];
/* Guest never enabled this endpoint: nothing to deliver. */
2648 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) {
2649 DPRINTF(("xhci device interrupt on disabled endpoint %d",
2654 DPRINTF(("xhci device interrupt on endpoint %d", epid));
2656 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0);
/* Device-model event callback; currently only logs the event. */
2663 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid __unused,
2664 void *param __unused)
2666 DPRINTF(("xhci device event port %d", hci->hci_port));
2671 * Each controller contains a "slot" node which contains a list of
2672 * child nodes each of which is a device. Each slot node's name
2673 * corresponds to a specific controller slot. These nodes
2674 * contain a "device" variable identifying the device model of the
2675 * USB device. For example:
/*
 * Convert the legacy comma-separated option string into the nvlist
 * config layout: a "slot" node with one numbered child per device,
 * each carrying a "device" value (and optional device config).
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2684 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts)
2687 nvlist_t *slots_nvl, *slot_nvl;
2688 char *cp, *opt, *str, *tofree;
2694 slots_nvl = create_relative_config_node(nvl, "slot");
/* Work on a private copy; strsep() modifies the string in place. */
2696 tofree = str = strdup(opts);
2697 while ((opt = strsep(&str, ",")) != NULL) {
2698 /* device[=<config>] */
2699 cp = strchr(opt, '=');
2705 snprintf(node_name, sizeof(node_name), "%d", slot);
2707 slot_nvl = create_relative_config_node(slots_nvl, node_name);
2708 set_config_value_node(slot_nvl, "device", opt);
2711 * NB: Given that we split on commas above, the legacy
2712 * format only supports a single option.
2714 if (cp != NULL && *cp != '\0')
2715 pci_parse_legacy_config(slot_nvl, cp);
/*
 * Instantiate the USB device emulations listed under the "slot"
 * config node: validate each slot number, look up the device model,
 * assign a USB 2 or USB 3 root-hub port, and initialize port state.
 * NOTE(review): interior lines appear elided in this excerpt.
 */
2722 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl)
2724 struct pci_xhci_dev_emu *dev;
2725 struct usb_devemu *ue;
2726 const nvlist_t *slots_nvl, *slot_nvl;
2727 const char *name, *device;
2729 void *devsc, *cookie;
2731 int type, usb3_port, usb2_port, i, ndevices;
2733 usb3_port = sc->usb3_port_start;
2734 usb2_port = sc->usb2_port_start;
2736 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *));
2737 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *));
2741 slots_nvl = find_relative_config_node(nvl, "slot");
2742 if (slots_nvl == NULL)
2746 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) {
/* Stop once either bus (4 USB 2 + 4 USB 3 ports) is fully used. */
2747 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) ||
2748 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) {
2749 WPRINTF(("pci_xhci max number of USB 2 or 3 "
2750 "devices reached, max %d", XHCI_MAX_DEVS/2));
2754 if (type != NV_TYPE_NVLIST) {
2756 "pci_xhci: config variable '%s' under slot node",
/* Slot node names must be integers in 1..XHCI_MAX_SLOTS. */
2761 slot = strtol(name, &cp, 0);
2762 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) {
2763 EPRINTLN("pci_xhci: invalid slot '%s'", name);
2767 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) {
2768 EPRINTLN("pci_xhci: duplicate slot '%s'", name);
2772 slot_nvl = nvlist_get_nvlist(slots_nvl, name);
2773 device = get_config_value_node(slot_nvl, "device");
2774 if (device == NULL) {
2776 "pci_xhci: missing \"device\" value for slot '%s'",
2781 ue = usb_emu_finddev(device);
2783 EPRINTLN("pci_xhci: unknown device model \"%s\"",
2788 DPRINTF(("pci_xhci adding device %s", device));
2790 dev = calloc(1, sizeof(struct pci_xhci_dev_emu));
2792 dev->hci.hci_sc = dev;
2793 dev->hci.hci_intr = pci_xhci_dev_intr;
2794 dev->hci.hci_event = pci_xhci_dev_event;
/* Assign the next free port on the matching-version bus. */
2796 if (ue->ue_usbver == 2) {
2797 if (usb2_port == sc->usb2_port_start +
2798 XHCI_MAX_DEVS / 2) {
2799 WPRINTF(("pci_xhci max number of USB 2 devices "
2800 "reached, max %d", XHCI_MAX_DEVS / 2));
2803 dev->hci.hci_port = usb2_port;
2806 if (usb3_port == sc->usb3_port_start +
2807 XHCI_MAX_DEVS / 2) {
2808 WPRINTF(("pci_xhci max number of USB 3 devices "
2809 "reached, max %d", XHCI_MAX_DEVS / 2));
2812 dev->hci.hci_port = usb3_port;
2815 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev;
2817 dev->hci.hci_address = 0;
2818 devsc = ue->ue_init(&dev->hci, nvl);
2819 if (devsc == NULL) {
2824 dev->dev_sc = devsc;
2826 XHCI_SLOTDEV_PTR(sc, slot) = dev;
2831 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs));
2834 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2835 pci_xhci_init_port(sc, i);
2838 WPRINTF(("pci_xhci no USB devices configured"));
/* Error path: release any device instances created so far. */
2843 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2844 free(XHCI_DEVINST_PTR(sc, i));
/*
 * pci_xhci_init
 *
 * PCI device init entry point for the emulated xHCI controller.  Allocates
 * the softc, parses the configured USB devices, programs the xHCI
 * capability registers and register-space layout (doorbell / runtime
 * offsets with required alignment), sets the PCI config-space identity
 * (Intel 8086:1E31 xHCI, USB 3.0), adds an MSI capability and allocates
 * the 32-bit memory BAR covering the register file plus extended caps.
 *
 * NOTE(review): interior lines are missing from this extract (the
 * single-controller guard's return, error handling after parse_devices,
 * extended-capability setup, final return).
 */
2854 pci_xhci_init(struct pci_devinst *pi, nvlist_t *nvl)
2856 struct pci_xhci_softc *sc;
/* Only one xHCI controller instance is supported. */
2860 WPRINTF(("pci_xhci controller already defined"));
2865 sc = calloc(1, sizeof(struct pci_xhci_softc))
/* Ports 1..XHCI_MAX_DEVS/2 are USB3; the upper half are USB2. */
2869 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1;
2870 sc->usb3_port_start = 1;
2872 /* discover devices */
2873 error = pci_xhci_parse_devices(sc, nvl);
/* Capability registers: CAPLENGTH/HCIVERSION and HCSPARAMS1-3, HCCPARAMS1-2. */
2879 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) |
2880 XHCI_SET_HCIVERSION(0x0100);
2881 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) |
2882 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */
2883 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS);
2884 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) |
2885 XHCI_SET_HCSP2_IST(0x04);
2886 sc->hcsparams3 = 0; /* no latency */
2887 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */
2888 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */
2889 XHCI_SET_HCCP1_SPC(1) | /* short packet */
2890 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX);
2891 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) |
2892 XHCI_SET_HCCP2_U3C(1);
/* Doorbell array follows the capability, operational and port registers. */
2893 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START +
2894 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs));
2896 /* dboff must be 32-bit aligned */
2897 if (sc->dboff & 0x3)
2898 sc->dboff = (sc->dboff + 0x3) & ~0x3;
2900 /* rtsoff must be 32-bytes aligned */
/* One 32-byte doorbell slot per device slot plus slot 0 (command ring). */
2901 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32);
2902 if (sc->rtsoff & 0x1F)
2903 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F;
2905 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff,
/* Controller starts halted; advertise 4K page size. */
2908 sc->opregs.usbsts = XHCI_STS_HCH;
2909 sc->opregs.pgsz = XHCI_PAGESIZE_4K;
2913 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */
2916 * Set extended capabilities pointer to be after regsend;
2917 * value of xecp field is 32-bit offset.
2919 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4);
/* Intel Panther Point xHCI identity (vendor 0x8086, device 0x1E31). */
2921 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31);
2922 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2923 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS);
2924 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB);
2925 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI);
2926 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0);
2928 pci_emul_add_msicap(pi, 1);
2930 /* regsend + xecp registers */
2931 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32);
2932 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32));
2935 pci_lintr_request(pi);
2937 pthread_mutex_init(&sc->mtx, NULL);
2947 #ifdef BHYVE_SNAPSHOT
/*
 * pci_xhci_map_devs_slots
 *
 * Build maps[], indexed by slot number, recording which device-instance
 * index (port) each populated slot refers to, for use by the snapshot
 * code when re-linking sc->slots at restore time.
 *
 * NOTE(review): the loop body that actually compares slot against dev and
 * records the match is missing from this extract, as is the function's
 * close -- presumably maps[i] is set when the pointers match; confirm
 * against the full source.
 */
2949 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[])
2952 struct pci_xhci_dev_emu *dev, *slot;
/* maps[] holds XHCI_MAX_SLOTS entries; clear before populating. */
2954 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS);
2956 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
2957 for (j = 1; j <= XHCI_MAX_DEVS; j++) {
2958 slot = XHCI_SLOTDEV_PTR(sc, i);
2959 dev = XHCI_DEVINST_PTR(sc, j);
/*
 * pci_xhci_snapshot_ep
 *
 * Save or restore the state of one endpoint (index idx) of a device:
 * the usb_data_xfer ring (every transfer block's guest buffer mapping
 * and bookkeeping fields), the pending setup request (ureq), and the
 * ring head/tail/count.  On restore the endpoint is re-initialized
 * first so ep_xfer exists before its contents are filled in.
 *
 * NOTE(review): lines with early-out checks (e.g. when xfer is NULL or
 * ureq is NULL) and the function close are missing from this extract.
 */
2968 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc __unused,
2969 struct pci_xhci_dev_emu *dev, int idx, struct vm_snapshot_meta *meta)
2973 struct usb_data_xfer *xfer;
2974 struct usb_data_xfer_block *xfer_block;
2976 /* some sanity checks */
/* On save, snapshot the live ep_xfer pointer (NULL-ness is part of state). */
2977 if (meta->op == VM_SNAPSHOT_SAVE)
2978 xfer = dev->eps[idx].ep_xfer;
2980 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done);
/* On restore, recreate the endpoint so ep_xfer is allocated before use. */
2986 if (meta->op == VM_SNAPSHOT_RESTORE) {
2987 pci_xhci_init_ep(dev, idx);
2988 xfer = dev->eps[idx].ep_xfer;
2991 /* save / restore proper */
2992 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) {
2993 xfer_block = &xfer->data[k];
/* buf is a guest-physical mapping; translated via guest2host on restore. */
2995 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf,
2996 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret,
2998 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done);
2999 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done);
3000 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done);
3001 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done);
3002 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done);
3003 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done);
3004 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done);
3007 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done);
3009 /* xfer->ureq is not allocated at restore time */
3010 if (meta->op == VM_SNAPSHOT_RESTORE)
3011 xfer->ureq = malloc(sizeof(struct usb_device_request));
3013 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq,
3014 sizeof(struct usb_device_request),
/* Ring bookkeeping: element count plus head/tail indices. */
3018 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done);
3019 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done);
3020 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done);
/*
 * pci_xhci_snapshot
 *
 * Top-level save/restore handler for the xHCI controller.  Serializes,
 * in order: the capability registers, the operational registers (with
 * guest-address translation for the command ring and DCBAA pointers),
 * the runtime/interrupter registers and event-ring pointers, a per-device
 * sanity record (index + device-model name, cross-checked on restore),
 * the per-port registers, the slot-to-device map (rebuilding sc->slots
 * on restore), and finally each slot's device context, endpoints and
 * device-model state.
 *
 * NOTE(review): error-handling lines (goto done, ret checks), several
 * loop closes and the done label/return are missing from this extract.
 */
3027 pci_xhci_snapshot(struct vm_snapshot_meta *meta)
3032 struct pci_devinst *pi;
3033 struct pci_xhci_softc *sc;
3034 struct pci_xhci_portregs *port;
3035 struct pci_xhci_dev_emu *dev;
3036 char dname[SNAP_DEV_NAME_LEN];
3037 int maps[XHCI_MAX_SLOTS + 1];
3039 pi = meta->dev_data;
/* Capability registers. */
3042 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done);
3043 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done);
3044 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done);
3045 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done);
3046 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done);
3047 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done);
3048 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done);
3049 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done);
3050 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done);
/* Operational registers. */
3053 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done);
3054 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done);
3055 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done);
3056 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done);
3057 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done);
3058 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done);
3059 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done);
/* Host pointers into guest memory: command ring and DCBAA. */
3062 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p,
3063 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done);
3065 /* opregs.dcbaa_p */
3066 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p,
3067 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done);
/* Runtime registers (single interrupter). */
3070 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done);
3072 /* rtsregs.intrreg */
3073 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done);
3074 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done);
3075 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done);
3076 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done);
3077 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done);
3078 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done);
3080 /* rtsregs.erstba_p */
3081 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p,
3082 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done);
3084 /* rtsregs.erst_p */
3085 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p,
3086 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done);
3088 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done);
3089 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done);
3090 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done);
3091 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done);
3092 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done);
3094 /* sanity checking */
3095 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3096 dev = XHCI_DEVINST_PTR(sc, i);
/* Record/compare each populated device index so a restore against a
 * differently-configured controller fails loudly. */
3100 if (meta->op == VM_SNAPSHOT_SAVE)
3102 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done);
3104 /* check if the restored device (when restoring) is sane */
3105 if (restore_idx != i) {
3106 fprintf(stderr, "%s: idx not matching: actual: %d, "
3107 "expected: %d\r\n", __func__, restore_idx, i);
/* Device-model name is round-tripped as a fixed-size buffer. */
3112 if (meta->op == VM_SNAPSHOT_SAVE) {
3113 memset(dname, 0, sizeof(dname));
3114 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1);
3117 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done);
3119 if (meta->op == VM_SNAPSHOT_RESTORE) {
3120 dname[sizeof(dname) - 1] = '\0';
3121 if (strcmp(dev->dev_ue->ue_emu, dname)) {
3122 fprintf(stderr, "%s: device names mismatch: "
3123 "actual: %s, expected: %s\r\n",
3124 __func__, dname, dev->dev_ue->ue_emu);
/* Per-port register state. */
3133 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3134 port = XHCI_PORTREG_PTR(sc, i);
3135 dev = XHCI_DEVINST_PTR(sc, i);
3140 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done);
3141 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done);
3142 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done);
3143 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done);
/* Slot-to-device map: computed on save, used to rebuild sc->slots on restore. */
3147 if (meta->op == VM_SNAPSHOT_SAVE)
3148 pci_xhci_map_devs_slots(sc, maps);
3150 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
3151 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done);
3153 if (meta->op == VM_SNAPSHOT_SAVE) {
3154 dev = XHCI_SLOTDEV_PTR(sc, i);
3155 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
3157 dev = XHCI_DEVINST_PTR(sc, maps[i]);
3161 XHCI_SLOTDEV_PTR(sc, i) = dev;
/* Device context lives in guest memory; translate the pointer. */
3171 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx,
3172 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done);
/* Endpoints 1..XHCI_MAX_ENDPOINTS-1 (EP 0 handled via the context). */
3174 if (dev->dev_ctx != NULL) {
3175 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) {
3176 ret = pci_xhci_snapshot_ep(sc, dev, j, meta);
3182 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done);
3184 /* devices[i]->dev_sc */
/* Delegate device-model-private state to the model's snapshot callback.
 * NOTE(review): the return value is not checked here -- verify intent. */
3185 dev->dev_ue->ue_snapshot(dev->dev_sc, meta);
3187 /* devices[i]->hci */
3188 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done);
3189 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done);
/* Port-range bases, needed to re-derive USB2/USB3 port assignment. */
3192 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done);
3193 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done);
/*
 * PCI device emulation descriptor registering this xHCI model with the
 * bhyve PCI emulation framework (via PCI_EMUL_SET).  The snapshot
 * callback is only present in BHYVE_SNAPSHOT builds.
 *
 * NOTE(review): the .pe_emu name field and the #endif line are missing
 * from this extract.
 */
3200 static const struct pci_devemu pci_de_xhci = {
3202 .pe_init = pci_xhci_init,
3203 .pe_legacy_config = pci_xhci_legacy_config,
3204 .pe_barwrite = pci_xhci_write,
3205 .pe_barread = pci_xhci_read,
3206 #ifdef BHYVE_SNAPSHOT
3207 .pe_snapshot = pci_xhci_snapshot,
3210 PCI_EMUL_SET(pci_de_xhci);