2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 tablet USB tablet mouse
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
40 #include <sys/types.h>
41 #include <sys/queue.h>
51 #include <machine/vmm_snapshot.h>
53 #include <dev/usb/usbdi.h>
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usb_freebsd.h>
/*
 * Debug/warning print helpers.  DPRINTF() output is compiled in but gated
 * at runtime by the xhci_debug flag; WPRINTF() (warnings) always prints.
 * NOTE(review): DPRINTF expands to a bare "if" without a do/while(0)
 * wrapper, so it is unsafe directly before an "else".
 */
static int xhci_debug = 0;
#define DPRINTF(params) if (xhci_debug) PRINTLN params
#define WPRINTF(params) PRINTLN params
#define XHCI_NAME		"xhci"
#define XHCI_MAX_DEVS		8	/* 4 USB3 + 4 USB2 devs */

#define XHCI_MAX_SLOTS		64	/* min allowed by Windows drivers */

/*
 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping
 * to 4k to avoid going over the guest physical memory barrier.
 */
#define XHCI_PADDR_SZ		4096	/* paddr_guest2host max size */

#define XHCI_ERST_MAX		0	/* max 2^entries event ring seg tbl */

#define XHCI_CAPLEN		(4*8)	/* offset of op register space */
/* NOTE(review): "HCCPRAMS2" looks like a misspelling of HCCPARAMS2 that is
 * kept for source compatibility; the value is the register offset. */
#define XHCI_HCCPRAMS2		0x1C	/* offset of HCCPARAMS2 register */
#define XHCI_PORTREGS_START	0x400
#define XHCI_DOORBELL_MAX	256

#define XHCI_STREAMS_MAX	1	/* 4-15 in XHCI spec */

/* caplength and hci-version registers */
#define XHCI_SET_CAPLEN(x)		((x) & 0xFF)
#define XHCI_SET_HCIVERSION(x)		(((x) & 0xFFFF) << 16)
#define XHCI_GET_HCIVERSION(x)		(((x) >> 16) & 0xFFFF)
/*
 * Field setters used to compose the (read-only to the guest) capability
 * parameter registers.  Field layouts follow the xHCI specification.
 */

/* hcsparams1 register */
#define XHCI_SET_HCSP1_MAXSLOTS(x)	((x) & 0xFF)
#define XHCI_SET_HCSP1_MAXINTR(x)	(((x) & 0x7FF) << 8)
#define XHCI_SET_HCSP1_MAXPORTS(x)	(((x) & 0xFF) << 24)

/* hcsparams2 register */
#define XHCI_SET_HCSP2_IST(x)		((x) & 0x0F)
#define XHCI_SET_HCSP2_ERSTMAX(x)	(((x) & 0x0F) << 4)
#define XHCI_SET_HCSP2_MAXSCRATCH_HI(x)	(((x) & 0x1F) << 21)
#define XHCI_SET_HCSP2_MAXSCRATCH_LO(x)	(((x) & 0x1F) << 27)

/* hcsparams3 register */
#define XHCI_SET_HCSP3_U1EXITLATENCY(x)	((x) & 0xFF)
#define XHCI_SET_HCSP3_U2EXITLATENCY(x)	(((x) & 0xFFFF) << 16)

/* hccparams1 register */
#define XHCI_SET_HCCP1_AC64(x)		((x) & 0x01)
#define XHCI_SET_HCCP1_BNC(x)		(((x) & 0x01) << 1)
#define XHCI_SET_HCCP1_CSZ(x)		(((x) & 0x01) << 2)
#define XHCI_SET_HCCP1_PPC(x)		(((x) & 0x01) << 3)
#define XHCI_SET_HCCP1_PIND(x)		(((x) & 0x01) << 4)
#define XHCI_SET_HCCP1_LHRC(x)		(((x) & 0x01) << 5)
#define XHCI_SET_HCCP1_LTC(x)		(((x) & 0x01) << 6)
#define XHCI_SET_HCCP1_NSS(x)		(((x) & 0x01) << 7)
#define XHCI_SET_HCCP1_PAE(x)		(((x) & 0x01) << 8)
#define XHCI_SET_HCCP1_SPC(x)		(((x) & 0x01) << 9)
#define XHCI_SET_HCCP1_SEC(x)		(((x) & 0x01) << 10)
#define XHCI_SET_HCCP1_CFC(x)		(((x) & 0x01) << 11)
#define XHCI_SET_HCCP1_MAXPSA(x)	(((x) & 0x0F) << 12)
#define XHCI_SET_HCCP1_XECP(x)		(((x) & 0xFFFF) << 16)

/* hccparams2 register */
#define XHCI_SET_HCCP2_U3C(x)		((x) & 0x01)
#define XHCI_SET_HCCP2_CMC(x)		(((x) & 0x01) << 1)
#define XHCI_SET_HCCP2_FSC(x)		(((x) & 0x01) << 2)
#define XHCI_SET_HCCP2_CTC(x)		(((x) & 0x01) << 3)
#define XHCI_SET_HCCP2_LEC(x)		(((x) & 0x01) << 4)
#define XHCI_SET_HCCP2_CIC(x)		(((x) & 0x01) << 5)
/* other registers */
#define XHCI_SET_DOORBELL(x)		((x) & ~0x03)
#define XHCI_SET_RTSOFFSET(x)		((x) & ~0x0F)

/* PORTSC sub-fields */
#define XHCI_PS_PLS_MASK		(0xF << 5)	/* port link state */
#define XHCI_PS_SPEED_MASK		(0xF << 10)	/* port speed */
#define XHCI_PS_PIC_MASK		(0x3 << 14)	/* port indicator */

/* port register set */
#define XHCI_PORTREGS_BASE		0x400		/* base offset */
/* Ports are numbered from 1, so "port 0" sits one set below the base. */
#define XHCI_PORTREGS_PORT0		0x3F0
#define XHCI_PORTREGS_SETSZ		0x10		/* size of a set */

#define MASK_64_HI(x)			((x) & ~0xFFFFFFFFULL)
#define MASK_64_LO(x)			((x) & 0xFFFFFFFFULL)

/* Replace field (mask m at shift s) of a with value b (b unshifted). */
#define FIELD_REPLACE(a,b,m,s)		(((a) & ~((m) << (s))) | \
					(((b) & (m)) << (s)))
/* Copy an already-in-place field (mask m at shift s) from b into a. */
#define FIELD_COPY(a,b,m,s)		(((a) & ~((m) << (s))) | \
					(((b) & ((m) << (s)))))

/* presumably sized for snapshot/restore device-name strings — TODO confirm */
#define SNAP_DEV_NAME_LEN 128
158 struct pci_xhci_trb_ring {
159 uint64_t ringaddr; /* current dequeue guest address */
160 uint32_t ccs; /* consumer cycle state */
163 /* device endpoint transfer/stream rings */
164 struct pci_xhci_dev_ep {
166 struct xhci_trb *_epu_tr;
167 struct xhci_stream_ctx *_epu_sctx;
169 #define ep_tr _ep_trbsctx._epu_tr
170 #define ep_sctx _ep_trbsctx._epu_sctx
173 struct pci_xhci_trb_ring _epu_trb;
174 struct pci_xhci_trb_ring *_epu_sctx_trbs;
176 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr
177 #define ep_ccs _ep_trb_rings._epu_trb.ccs
178 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs
180 struct usb_data_xfer *ep_xfer; /* transfer chain */
183 /* device context base address array: maps slot->device context */
185 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */
188 /* port status registers */
189 struct pci_xhci_portregs {
190 uint32_t portsc; /* port status and control */
191 uint32_t portpmsc; /* port pwr mgmt status & control */
192 uint32_t portli; /* port link info */
193 uint32_t porthlpmc; /* port hardware LPM control */
195 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10)
197 /* xHC operational registers */
198 struct pci_xhci_opregs {
199 uint32_t usbcmd; /* usb command */
200 uint32_t usbsts; /* usb status */
201 uint32_t pgsz; /* page size */
202 uint32_t dnctrl; /* device notification control */
203 uint64_t crcr; /* command ring control */
204 uint64_t dcbaap; /* device ctx base addr array ptr */
205 uint32_t config; /* configure */
207 /* guest mapped addresses: */
208 struct xhci_trb *cr_p; /* crcr dequeue */
209 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */
212 /* xHC runtime registers */
213 struct pci_xhci_rtsregs {
214 uint32_t mfindex; /* microframe index */
215 struct { /* interrupter register set */
216 uint32_t iman; /* interrupter management */
217 uint32_t imod; /* interrupter moderation */
218 uint32_t erstsz; /* event ring segment table size */
220 uint64_t erstba; /* event ring seg-tbl base addr */
221 uint64_t erdp; /* event ring dequeue ptr */
224 /* guest mapped addresses */
225 struct xhci_event_ring_seg *erstba_p;
226 struct xhci_trb *erst_p; /* event ring segment tbl */
227 int er_deq_seg; /* event ring dequeue segment */
228 int er_enq_idx; /* event ring enqueue index - xHCI */
229 int er_enq_seg; /* event ring enqueue segment */
230 uint32_t er_events_cnt; /* number of events in ER */
231 uint32_t event_pcs; /* producer cycle state flag */
235 struct pci_xhci_softc;
239 * USB device emulation container.
240 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each
241 * emulated device instance.
243 struct pci_xhci_dev_emu {
244 struct pci_xhci_softc *xsc;
247 struct xhci_dev_ctx *dev_ctx;
248 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS];
251 struct usb_devemu *dev_ue; /* USB emulated dev */
252 void *dev_sc; /* device's softc */
257 struct pci_xhci_softc {
258 struct pci_devinst *xsc_pi;
262 uint32_t caplength; /* caplen & hciversion */
263 uint32_t hcsparams1; /* structural parameters 1 */
264 uint32_t hcsparams2; /* structural parameters 2 */
265 uint32_t hcsparams3; /* structural parameters 3 */
266 uint32_t hccparams1; /* capability parameters 1 */
267 uint32_t dboff; /* doorbell offset */
268 uint32_t rtsoff; /* runtime register space offset */
269 uint32_t hccparams2; /* capability parameters 2 */
271 uint32_t regsend; /* end of configuration registers */
273 struct pci_xhci_opregs opregs;
274 struct pci_xhci_rtsregs rtsregs;
276 struct pci_xhci_portregs *portregs;
277 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */
278 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */
/* portregs and devices arrays are set up to start from idx=1 */
#define	XHCI_PORTREG_PTR(x,n)	&(x)->portregs[(n)]
#define	XHCI_DEVINST_PTR(x,n)	(x)->devices[(n)]
#define	XHCI_SLOTDEV_PTR(x,n)	(x)->slots[(n)]

/* true when the controller is halted (USBSTS.HCH set) */
#define	XHCI_HALTED(sc)		((sc)->opregs.usbsts & XHCI_STS_HCH)

/* bytes mappable from (a) without crossing a XHCI_PADDR_SZ boundary */
#define	XHCI_GADDR_SIZE(a)	(XHCI_PADDR_SZ - \
				    (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1)))
/* map guest physical address (a) into host address space */
#define	XHCI_GADDR(sc,a)	paddr_guest2host((sc)->xsc_pi->pi_vmctx, \
				    (a), XHCI_GADDR_SIZE(a))
298 static int xhci_in_use;
300 /* map USB errors to XHCI */
301 static const int xhci_usb_errors[USB_ERR_MAX] = {
302 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS,
303 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE,
304 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
305 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID,
306 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE,
307 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED,
308 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER,
309 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER,
310 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER,
311 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL,
312 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE,
313 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE,
314 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE,
315 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED,
316 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED,
317 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE,
318 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON,
319 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE,
320 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB,
321 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
322 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED,
323 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT,
324 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL,
325 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED,
326 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF,
327 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB,
328 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED,
329 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED,
330 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED,
332 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \
333 XHCI_TRB_ERROR_INVALID)
/* forward declarations for helpers used before their definitions */
static int pci_xhci_insert_event(struct pci_xhci_softc *sc,
    struct xhci_trb *evtrb, int do_intr);
static void pci_xhci_dump_trb(struct xhci_trb *trb);
static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc);
static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot);
static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm);
static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc,
    struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
    struct xhci_endp_ctx *ep_ctx, uint32_t streamid,
    uint64_t ringaddr, int ccs);
347 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode,
350 evtrb->qwTrb0 = port << 24;
351 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode);
352 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype);
356 /* controller reset */
358 pci_xhci_reset(struct pci_xhci_softc *sc)
362 sc->rtsregs.er_enq_idx = 0;
363 sc->rtsregs.er_events_cnt = 0;
364 sc->rtsregs.event_pcs = 1;
366 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
367 pci_xhci_reset_slot(sc, i);
372 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd)
377 if (cmd & XHCI_CMD_RS) {
378 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0;
380 sc->opregs.usbcmd |= XHCI_CMD_RS;
381 sc->opregs.usbsts &= ~XHCI_STS_HCH;
382 sc->opregs.usbsts |= XHCI_STS_PCD;
384 /* Queue port change event on controller run from stop */
386 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
387 struct pci_xhci_dev_emu *dev;
388 struct pci_xhci_portregs *port;
389 struct xhci_trb evtrb;
391 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL)
394 port = XHCI_PORTREG_PTR(sc, i);
395 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS;
396 port->portsc &= ~XHCI_PS_PLS_MASK;
399 * XHCI 4.19.3 USB2 RxDetect->Polling,
402 if (dev->dev_ue->ue_usbver == 2)
404 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL);
407 XHCI_PS_PLS_SET(UPS_PORT_LS_U0);
409 pci_xhci_set_evtrb(&evtrb, i,
410 XHCI_TRB_ERROR_SUCCESS,
411 XHCI_TRB_EVENT_PORT_STS_CHANGE);
413 if (pci_xhci_insert_event(sc, &evtrb, 0) !=
414 XHCI_TRB_ERROR_SUCCESS)
418 sc->opregs.usbcmd &= ~XHCI_CMD_RS;
419 sc->opregs.usbsts |= XHCI_STS_HCH;
420 sc->opregs.usbsts &= ~XHCI_STS_PCD;
423 /* start execution of schedule; stop when set to 0 */
424 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS;
426 if (cmd & XHCI_CMD_HCRST) {
427 /* reset controller */
429 cmd &= ~XHCI_CMD_HCRST;
432 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS);
435 pci_xhci_assert_interrupt(sc);
441 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset,
444 struct xhci_trb evtrb;
445 struct pci_xhci_portregs *p;
447 uint32_t oldpls, newpls;
449 if (sc->portregs == NULL)
452 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ;
453 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ;
455 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx",
456 offset, port, value));
460 if (port > XHCI_MAX_DEVS) {
461 DPRINTF(("pci_xhci: portregs_write port %d > ndevices",
466 if (XHCI_DEVINST_PTR(sc, port) == NULL) {
467 DPRINTF(("pci_xhci: portregs_write to unattached port %d",
471 p = XHCI_PORTREG_PTR(sc, port);
474 /* port reset or warm reset */
475 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) {
476 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR);
480 if ((p->portsc & XHCI_PS_PP) == 0) {
481 WPRINTF(("pci_xhci: portregs_write to unpowered "
486 /* Port status and control register */
487 oldpls = XHCI_PS_PLS_GET(p->portsc);
488 newpls = XHCI_PS_PLS_GET(value);
490 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK |
491 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK;
493 if (XHCI_DEVINST_PTR(sc, port))
494 p->portsc |= XHCI_PS_CCS;
496 p->portsc |= (value &
500 XHCI_PS_PLS_MASK | /* link state */
502 XHCI_PS_PIC_MASK | /* port indicator */
503 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR));
505 /* clear control bits */
506 p->portsc &= ~(value &
516 /* port disable request; for USB3, don't care */
517 if (value & XHCI_PS_PED)
518 DPRINTF(("Disable port %d request", port));
520 if (!(value & XHCI_PS_LWS))
523 DPRINTF(("Port new PLS: %d", newpls));
527 if (oldpls != newpls) {
528 p->portsc &= ~XHCI_PS_PLS_MASK;
529 p->portsc |= XHCI_PS_PLS_SET(newpls) |
532 if (oldpls != 0 && newpls == 0) {
533 pci_xhci_set_evtrb(&evtrb, port,
534 XHCI_TRB_ERROR_SUCCESS,
535 XHCI_TRB_EVENT_PORT_STS_CHANGE);
537 pci_xhci_insert_event(sc, &evtrb, 1);
543 DPRINTF(("Unhandled change port %d PLS %u",
549 /* Port power management status and control register */
553 /* Port link information register */
554 DPRINTF(("pci_xhci attempted write to PORTLI, port %d",
559 * Port hardware LPM control register.
560 * For USB3, this register is reserved.
562 p->porthlpmc = value;
567 struct xhci_dev_ctx *
568 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot)
570 uint64_t devctx_addr;
571 struct xhci_dev_ctx *devctx;
573 assert(slot > 0 && slot <= sc->ndevices);
574 assert(sc->opregs.dcbaa_p != NULL);
576 devctx_addr = sc->opregs.dcbaa_p->dcba[slot];
578 if (devctx_addr == 0) {
579 DPRINTF(("get_dev_ctx devctx_addr == 0"));
583 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx",
585 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL);
591 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb,
594 struct xhci_trb *next;
596 assert(curtrb != NULL);
598 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) {
600 *guestaddr = curtrb->qwTrb0 & ~0xFUL;
602 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL);
605 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL;
614 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc)
617 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY;
618 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND;
619 sc->opregs.usbsts |= XHCI_STS_EINT;
621 /* only trigger interrupt if permitted */
622 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) &&
623 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) {
624 if (pci_msi_enabled(sc->xsc_pi))
625 pci_generate_msi(sc->xsc_pi, 0);
627 pci_lintr_assert(sc->xsc_pi);
632 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc)
635 if (!pci_msi_enabled(sc->xsc_pi))
636 pci_lintr_assert(sc->xsc_pi);
640 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid)
642 struct xhci_dev_ctx *dev_ctx;
643 struct pci_xhci_dev_ep *devep;
644 struct xhci_endp_ctx *ep_ctx;
648 dev_ctx = dev->dev_ctx;
649 ep_ctx = &dev_ctx->ctx_ep[epid];
650 devep = &dev->eps[epid];
651 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0);
653 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams));
654 assert(devep->ep_sctx_trbs == NULL);
656 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 &
657 XHCI_EPCTX_2_TR_DQ_PTR_MASK);
658 devep->ep_sctx_trbs = calloc(pstreams,
659 sizeof(struct pci_xhci_trb_ring));
660 for (i = 0; i < pstreams; i++) {
661 devep->ep_sctx_trbs[i].ringaddr =
662 devep->ep_sctx[i].qwSctx0 &
663 XHCI_SCTX_0_TR_DQ_PTR_MASK;
664 devep->ep_sctx_trbs[i].ccs =
665 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0);
668 DPRINTF(("init_ep %d with no pstreams", epid));
669 devep->ep_ringaddr = ep_ctx->qwEpCtx2 &
670 XHCI_EPCTX_2_TR_DQ_PTR_MASK;
671 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2);
672 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr);
673 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs));
676 if (devep->ep_xfer == NULL) {
677 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer));
678 USB_DATA_XFER_INIT(devep->ep_xfer);
683 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid)
685 struct xhci_dev_ctx *dev_ctx;
686 struct pci_xhci_dev_ep *devep;
687 struct xhci_endp_ctx *ep_ctx;
689 DPRINTF(("pci_xhci disable_ep %d", epid));
691 dev_ctx = dev->dev_ctx;
692 ep_ctx = &dev_ctx->ctx_ep[epid];
693 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED;
695 devep = &dev->eps[epid];
696 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0 &&
697 devep->ep_sctx_trbs != NULL)
698 free(devep->ep_sctx_trbs);
700 if (devep->ep_xfer != NULL) {
701 free(devep->ep_xfer);
702 devep->ep_xfer = NULL;
705 memset(devep, 0, sizeof(struct pci_xhci_dev_ep));
709 /* reset device at slot and data structures related to it */
711 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot)
713 struct pci_xhci_dev_emu *dev;
715 dev = XHCI_SLOTDEV_PTR(sc, slot);
718 DPRINTF(("xhci reset unassigned slot (%d)?", slot));
720 dev->dev_slotstate = XHCI_ST_DISABLED;
723 /* TODO: reset ring buffer pointers */
727 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb,
730 struct pci_xhci_rtsregs *rts;
734 struct xhci_trb *evtrbptr;
736 err = XHCI_TRB_ERROR_SUCCESS;
740 erdp = rts->intrreg.erdp & ~0xF;
741 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) /
742 sizeof(struct xhci_trb);
744 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]",
745 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3));
746 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u",
747 erdp_idx, rts->er_deq_seg, rts->er_enq_idx,
748 rts->er_enq_seg, rts->event_pcs));
749 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)",
750 erdp, rts->erstba_p->qwEvrsTablePtr,
751 rts->erstba_p->dwEvrsTableSize, do_intr));
753 evtrbptr = &rts->erst_p[rts->er_enq_idx];
755 /* TODO: multi-segment table */
756 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) {
757 DPRINTF(("pci_xhci[%d] cannot insert event; ring full",
759 err = XHCI_TRB_ERROR_EV_RING_FULL;
763 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) {
764 struct xhci_trb errev;
766 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) {
768 DPRINTF(("pci_xhci[%d] insert evt err: ring full",
772 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET(
773 XHCI_TRB_ERROR_EV_RING_FULL);
774 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET(
775 XHCI_TRB_EVENT_HOST_CTRL) |
777 rts->er_events_cnt++;
778 memcpy(&rts->erst_p[rts->er_enq_idx], &errev,
779 sizeof(struct xhci_trb));
780 rts->er_enq_idx = (rts->er_enq_idx + 1) %
781 rts->erstba_p->dwEvrsTableSize;
782 err = XHCI_TRB_ERROR_EV_RING_FULL;
788 rts->er_events_cnt++;
791 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT;
792 evtrb->dwTrb3 |= rts->event_pcs;
794 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb));
795 rts->er_enq_idx = (rts->er_enq_idx + 1) %
796 rts->erstba_p->dwEvrsTableSize;
798 if (rts->er_enq_idx == 0)
803 pci_xhci_assert_interrupt(sc);
809 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot)
811 struct pci_xhci_dev_emu *dev;
815 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
816 if (sc->portregs != NULL)
817 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
818 dev = XHCI_SLOTDEV_PTR(sc, i);
819 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) {
821 dev->dev_slotstate = XHCI_ST_ENABLED;
822 cmderr = XHCI_TRB_ERROR_SUCCESS;
823 dev->hci.hci_address = i;
828 DPRINTF(("pci_xhci enable slot (error=%d) slot %u",
829 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot));
835 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot)
837 struct pci_xhci_dev_emu *dev;
840 DPRINTF(("pci_xhci disable slot %u", slot));
842 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
843 if (sc->portregs == NULL)
846 if (slot > sc->ndevices) {
847 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
851 dev = XHCI_SLOTDEV_PTR(sc, slot);
853 if (dev->dev_slotstate == XHCI_ST_DISABLED) {
854 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
856 dev->dev_slotstate = XHCI_ST_DISABLED;
857 cmderr = XHCI_TRB_ERROR_SUCCESS;
858 /* TODO: reset events and endpoints */
867 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot)
869 struct pci_xhci_dev_emu *dev;
870 struct xhci_dev_ctx *dev_ctx;
871 struct xhci_endp_ctx *ep_ctx;
875 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
876 if (sc->portregs == NULL)
879 DPRINTF(("pci_xhci reset device slot %u", slot));
881 dev = XHCI_SLOTDEV_PTR(sc, slot);
882 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED)
883 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
885 dev->dev_slotstate = XHCI_ST_DEFAULT;
887 dev->hci.hci_address = 0;
888 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
891 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
892 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT,
895 /* number of contexts */
896 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
897 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
899 /* reset all eps other than ep-0 */
900 for (i = 2; i <= 31; i++) {
901 ep_ctx = &dev_ctx->ctx_ep[i];
902 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0,
903 XHCI_ST_EPCTX_DISABLED, 0x7, 0);
906 cmderr = XHCI_TRB_ERROR_SUCCESS;
909 pci_xhci_reset_slot(sc, slot);
916 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot,
917 struct xhci_trb *trb)
919 struct pci_xhci_dev_emu *dev;
920 struct xhci_input_dev_ctx *input_ctx;
921 struct xhci_slot_ctx *islot_ctx;
922 struct xhci_dev_ctx *dev_ctx;
923 struct xhci_endp_ctx *ep0_ctx;
926 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
927 islot_ctx = &input_ctx->ctx_slot;
928 ep0_ctx = &input_ctx->ctx_ep[1];
930 cmderr = XHCI_TRB_ERROR_SUCCESS;
932 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,",
933 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
934 DPRINTF((" slot %08x %08x %08x %08x",
935 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
936 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
937 DPRINTF((" ep0 %08x %08x %016lx %08x",
938 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
941 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */
942 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
943 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) {
944 DPRINTF(("pci_xhci: address device, input ctl invalid"));
945 cmderr = XHCI_TRB_ERROR_TRB;
949 /* assign address to slot */
950 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
952 DPRINTF(("pci_xhci: address device, dev ctx"));
953 DPRINTF((" slot %08x %08x %08x %08x",
954 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
955 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
957 dev = XHCI_SLOTDEV_PTR(sc, slot);
960 dev->hci.hci_address = slot;
961 dev->dev_ctx = dev_ctx;
963 if (dev->dev_ue->ue_reset == NULL ||
964 dev->dev_ue->ue_reset(dev->dev_sc) < 0) {
965 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
969 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx));
971 dev_ctx->ctx_slot.dwSctx3 =
972 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) |
973 XHCI_SCTX_3_DEV_ADDR_SET(slot);
975 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx));
976 ep0_ctx = &dev_ctx->ctx_ep[1];
977 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) |
978 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING);
980 pci_xhci_init_ep(dev, 1);
982 dev->dev_slotstate = XHCI_ST_ADDRESSED;
984 DPRINTF(("pci_xhci: address device, output ctx"));
985 DPRINTF((" slot %08x %08x %08x %08x",
986 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
987 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
988 DPRINTF((" ep0 %08x %08x %016lx %08x",
989 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
997 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot,
998 struct xhci_trb *trb)
1000 struct xhci_input_dev_ctx *input_ctx;
1001 struct pci_xhci_dev_emu *dev;
1002 struct xhci_dev_ctx *dev_ctx;
1003 struct xhci_endp_ctx *ep_ctx, *iep_ctx;
1007 cmderr = XHCI_TRB_ERROR_SUCCESS;
1009 DPRINTF(("pci_xhci config_ep slot %u", slot));
1011 dev = XHCI_SLOTDEV_PTR(sc, slot);
1012 assert(dev != NULL);
1014 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) {
1015 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u",
1017 if (dev->dev_ue->ue_stop != NULL)
1018 dev->dev_ue->ue_stop(dev->dev_sc);
1020 dev->dev_slotstate = XHCI_ST_ADDRESSED;
1022 dev->hci.hci_address = 0;
1023 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1025 /* number of contexts */
1026 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
1027 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
1030 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1031 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED,
1034 /* disable endpoints */
1035 for (i = 2; i < 32; i++)
1036 pci_xhci_disable_ep(dev, i);
1038 cmderr = XHCI_TRB_ERROR_SUCCESS;
1043 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) {
1044 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed",
1045 dev->dev_slotstate));
1046 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
1050 /* In addressed/configured state;
1051 * for each drop endpoint ctx flag:
1052 * ep->state = DISABLED
1053 * for each add endpoint ctx flag:
1055 * ep->state = RUNNING
1056 * for each drop+add endpoint flag:
1057 * reset ep resources
1059 * ep->state = RUNNING
1060 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled)
1061 * slot->state = configured
1064 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1065 dev_ctx = dev->dev_ctx;
1066 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x",
1067 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1,
1068 input_ctx->ctx_input.dwInCtx7));
1070 for (i = 2; i <= 31; i++) {
1071 ep_ctx = &dev_ctx->ctx_ep[i];
1073 if (input_ctx->ctx_input.dwInCtx0 &
1074 XHCI_INCTX_0_DROP_MASK(i)) {
1075 DPRINTF((" config ep - dropping ep %d", i));
1076 pci_xhci_disable_ep(dev, i);
1079 if (input_ctx->ctx_input.dwInCtx1 &
1080 XHCI_INCTX_1_ADD_MASK(i)) {
1081 iep_ctx = &input_ctx->ctx_ep[i];
1083 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x",
1084 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1,
1085 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4));
1087 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx));
1089 pci_xhci_init_ep(dev, i);
1092 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1093 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1097 /* slot state to configured */
1098 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1099 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27);
1100 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY(
1101 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27);
1102 dev->dev_slotstate = XHCI_ST_CONFIGURED;
1104 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x "
1106 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1107 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1114 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot,
1115 struct xhci_trb *trb)
1117 struct pci_xhci_dev_emu *dev;
1118 struct pci_xhci_dev_ep *devep;
1119 struct xhci_dev_ctx *dev_ctx;
1120 struct xhci_endp_ctx *ep_ctx;
1121 uint32_t cmderr, epid;
1124 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1126 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot));
1128 cmderr = XHCI_TRB_ERROR_SUCCESS;
1130 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1132 dev = XHCI_SLOTDEV_PTR(sc, slot);
1133 assert(dev != NULL);
1135 if (type == XHCI_TRB_TYPE_STOP_EP &&
1136 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) {
1137 /* XXX suspend endpoint for 10ms */
1140 if (epid < 1 || epid > 31) {
1141 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid));
1142 cmderr = XHCI_TRB_ERROR_TRB;
1146 devep = &dev->eps[epid];
1147 if (devep->ep_xfer != NULL)
1148 USB_DATA_XFER_RESET(devep->ep_xfer);
1150 dev_ctx = dev->dev_ctx;
1151 assert(dev_ctx != NULL);
1153 ep_ctx = &dev_ctx->ctx_ep[epid];
1155 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
1157 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) == 0)
1158 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs;
1160 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x",
1161 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
1164 if (type == XHCI_TRB_TYPE_RESET_EP &&
1165 (dev->dev_ue->ue_reset == NULL ||
1166 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) {
1167 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
1177 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep,
1178 uint32_t streamid, struct xhci_stream_ctx **osctx)
1180 struct xhci_stream_ctx *sctx;
1181 uint32_t maxpstreams;
1183 maxpstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep->dwEpCtx0);
1184 if (maxpstreams == 0)
1185 return (XHCI_TRB_ERROR_TRB);
1187 if (maxpstreams > XHCI_STREAMS_MAX)
1188 return (XHCI_TRB_ERROR_INVALID_SID);
1190 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) {
1191 DPRINTF(("pci_xhci: find_stream; LSA bit not set"));
1192 return (XHCI_TRB_ERROR_INVALID_SID);
1195 /* only support primary stream */
1196 if (streamid > maxpstreams)
1197 return (XHCI_TRB_ERROR_STREAM_TYPE);
1199 sctx = XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + streamid;
1200 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0))
1201 return (XHCI_TRB_ERROR_STREAM_TYPE);
1205 return (XHCI_TRB_ERROR_SUCCESS);
/*
 * Handle a Set TR Dequeue Pointer command (xHCI 4.6.10).  Repoints the
 * endpoint's (or stream's) transfer ring to the dequeue pointer supplied
 * in the TRB and records the new consumer cycle state (DCS), then marks
 * the endpoint Stopped.  Returns an XHCI_TRB_ERROR_* completion code.
 */
1210 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot,
1211 struct xhci_trb *trb)
1213 struct pci_xhci_dev_emu *dev;
1214 struct pci_xhci_dev_ep *devep;
1215 struct xhci_dev_ctx *dev_ctx;
1216 struct xhci_endp_ctx *ep_ctx;
1217 uint32_t cmderr, epid;
1220 cmderr = XHCI_TRB_ERROR_SUCCESS;
1222 dev = XHCI_SLOTDEV_PTR(sc, slot);
1223 assert(dev != NULL);
1225 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u",
1226 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7),
1227 (uint32_t)(trb->qwTrb0 & 0x1)));
1228 DPRINTF((" stream-id %u, slot %u, epid %u, C %u",
1229 (trb->dwTrb2 >> 16) & 0xFFFF,
1230 XHCI_TRB_3_SLOT_GET(trb->dwTrb3),
1231 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1));
/* Valid device context index range for endpoints is 1..31. */
1233 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1234 if (epid < 1 || epid > 31) {
1235 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid));
1236 cmderr = XHCI_TRB_ERROR_TRB;
1240 dev_ctx = dev->dev_ctx;
1241 assert(dev_ctx != NULL);
1243 ep_ctx = &dev_ctx->ctx_ep[epid];
1244 devep = &dev->eps[epid];
/* Command is only legal while the endpoint is Stopped or in Error. */
1246 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) {
1247 case XHCI_ST_EPCTX_STOPPED:
1248 case XHCI_ST_EPCTX_ERROR:
1251 DPRINTF(("pci_xhci cmd set_tr invalid state %x",
1252 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)));
1253 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE;
1257 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2);
1258 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0) {
1259 struct xhci_stream_ctx *sctx;
1262 cmderr = pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx);
1264 assert(devep->ep_sctx != NULL);
/* Update the shadow stream context and per-stream ring cursor. */
1266 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0;
1267 devep->ep_sctx_trbs[streamid].ringaddr =
1269 devep->ep_sctx_trbs[streamid].ccs =
1270 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0);
/* No streams configured: a non-zero stream id is a guest error. */
1273 if (streamid != 0) {
1274 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0",
1277 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL;
1278 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL;
1279 devep->ep_ccs = trb->qwTrb0 & 0x1;
1280 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr);
1282 DPRINTF(("pci_xhci set_tr first TRB:"));
1283 pci_xhci_dump_trb(devep->ep_tr);
/* Force endpoint state to Stopped (low 3 bits of dwEpCtx0). */
1285 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
/*
 * Handle an Evaluate Context command (xHCI 4.6.7).  Copies selected
 * fields (max exit latency, interrupter target, EP0 max packet size)
 * from the guest-supplied input context into the device's output
 * context, depending on the Add Context flags in dwInCtx1.
 */
1292 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot,
1293 struct xhci_trb *trb)
1295 struct xhci_input_dev_ctx *input_ctx;
1296 struct xhci_slot_ctx *islot_ctx;
1297 struct xhci_dev_ctx *dev_ctx;
1298 struct xhci_endp_ctx *ep0_ctx;
/* qwTrb0 holds the guest-physical Input Context pointer (16-byte aligned). */
1301 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1302 islot_ctx = &input_ctx->ctx_slot;
1303 ep0_ctx = &input_ctx->ctx_ep[1];
1305 cmderr = XHCI_TRB_ERROR_SUCCESS;
1306 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,",
1307 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
1308 DPRINTF((" slot %08x %08x %08x %08x",
1309 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
1310 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
1311 DPRINTF((" ep0 %08x %08x %016lx %08x",
1312 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1313 ep0_ctx->dwEpCtx4));
1315 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */
1316 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
1317 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) {
1318 DPRINTF(("pci_xhci: eval ctx, input ctl invalid"));
1319 cmderr = XHCI_TRB_ERROR_TRB;
1323 /* assign address to slot; in this emulation, slot_id = address */
1324 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1326 DPRINTF(("pci_xhci: eval ctx, dev ctx"));
1327 DPRINTF((" slot %08x %08x %08x %08x",
1328 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1329 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1331 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */
1332 /* set max exit latency */
1333 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY(
1334 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1,
1337 /* set interrupter target */
1338 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY(
1339 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2,
1342 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */
1343 /* set max packet size */
1344 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY(
1345 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1,
/* Re-point ep0_ctx at the output context for the debug dump below. */
1348 ep0_ctx = &dev_ctx->ctx_ep[1];
1351 DPRINTF(("pci_xhci: eval ctx, output ctx"));
1352 DPRINTF((" slot %08x %08x %08x %08x",
1353 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1354 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1355 DPRINTF((" ep0 %08x %08x %016lx %08x",
1356 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1357 ep0_ctx->dwEpCtx4));
/*
 * Drain the command ring: walk TRBs starting at the current command ring
 * pointer while the TRB cycle bit matches the consumer cycle state (CCS),
 * dispatch each command type to its handler, and post a Command
 * Completion event (with the command TRB address in qwTrb0) to the event
 * ring for every non-Link TRB.  CRR is set in CRCR while running and
 * cleared when done.
 */
1364 pci_xhci_complete_commands(struct pci_xhci_softc *sc)
1366 struct xhci_trb evtrb;
1367 struct xhci_trb *trb;
1369 uint32_t ccs; /* cycle state (XHCI 4.9.2) */
/* Command Ring Running: set while the ring is being processed. */
1376 sc->opregs.crcr |= XHCI_CRCR_LO_CRR;
1378 trb = sc->opregs.cr_p;
1379 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS;
1380 crcr = sc->opregs.crcr & ~0xF;
1383 sc->opregs.cr_p = trb;
1385 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
/* Stop when the producer hasn't written this TRB yet (cycle mismatch). */
1387 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) !=
1388 (ccs & XHCI_TRB_3_CYCLE_BIT))
1391 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x"
1392 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u",
1393 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3,
1394 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs));
1396 cmderr = XHCI_TRB_ERROR_SUCCESS;
1398 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) |
1399 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE);
1403 case XHCI_TRB_TYPE_LINK: /* 0x06 */
/* Toggle-Cycle bit flips the consumer cycle state at ring wrap. */
1404 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
1405 ccs ^= XHCI_CRCR_LO_RCS;
1408 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */
1409 cmderr = pci_xhci_cmd_enable_slot(sc, &slot);
1412 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */
1413 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1414 cmderr = pci_xhci_cmd_disable_slot(sc, slot);
1417 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */
1418 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1419 cmderr = pci_xhci_cmd_address_device(sc, slot, trb);
1422 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */
1423 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1424 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb);
1427 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */
1428 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1429 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb);
1432 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */
/* NOTE(review): 'slot' is logged before it is parsed from this TRB
 * on the next line, so the DPRINTF shows a stale value. */
1433 DPRINTF(("Reset Endpoint on slot %d", slot));
1434 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1435 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1438 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */
/* NOTE(review): same stale-'slot' logging as RESET_EP above.
 * Stop Endpoint shares the reset handler; the handler distinguishes
 * the two by the TRB type it receives. */
1439 DPRINTF(("Stop Endpoint on slot %d", slot));
1440 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1441 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1444 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */
1445 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1446 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb);
1449 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */
1450 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1451 cmderr = pci_xhci_cmd_reset_device(sc, slot);
1454 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */
1458 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */
1461 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */
1464 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */
1467 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */
1470 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */
1474 DPRINTF(("pci_xhci: unsupported cmd %x", type));
1478 if (type != XHCI_TRB_TYPE_LINK) {
1480 * insert command completion event and assert intr
1482 evtrb.qwTrb0 = crcr;
1483 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr);
1484 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot);
1485 DPRINTF(("pci_xhci: command 0x%x result: 0x%x",
1487 pci_xhci_insert_event(sc, &evtrb, 1);
1490 trb = pci_xhci_trb_next(sc, trb, &crcr);
/* Persist ring position + cycle state; clear Command Ring Running. */
1493 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs;
1494 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR;
/*
 * Debug helper: print a TRB's type (with symbolic name when in range)
 * and its three payload words.  No side effects beyond DPRINTF.
 */
1499 pci_xhci_dump_trb(struct xhci_trb *trb)
1501 static const char *trbtypes[] = {
1529 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1530 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x",
1532 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID",
1533 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3));
/*
 * Walk the completed data blocks of a USB transfer, write back the cycle
 * bit into each guest TRB, advance the endpoint ring, and insert Transfer
 * Event TRBs into the event ring.  Events are generated only for TRBs
 * with IOC set, for short packets with ISP set, or for Event Data TRBs
 * (which report the accumulated EDTLA instead of the residue).
 * *do_intr is set when an interrupt should be asserted by the caller.
 */
1537 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer,
1538 uint32_t slot, uint32_t epid, int *do_intr)
1540 struct pci_xhci_dev_emu *dev;
1541 struct pci_xhci_dev_ep *devep;
1542 struct xhci_dev_ctx *dev_ctx;
1543 struct xhci_endp_ctx *ep_ctx;
1544 struct xhci_trb *trb;
1545 struct xhci_trb evtrb;
1550 dev = XHCI_SLOTDEV_PTR(sc, slot);
1551 devep = &dev->eps[epid];
1552 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1554 assert(dev_ctx != NULL);
1556 ep_ctx = &dev_ctx->ctx_ep[epid];
1558 err = XHCI_TRB_ERROR_SUCCESS;
1562 /* go through list of TRBs and insert event(s) */
1563 for (i = xfer->head; xfer->ndata > 0; ) {
/* hci_data stored the guest-physical address of the originating TRB. */
1564 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data;
1565 trb = XHCI_GADDR(sc, evtrb.qwTrb0);
1566 trbflags = trb->dwTrb3;
1568 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x "
1570 i, xfer->data[i].processed, xfer->data[i].blen,
1571 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0,
1573 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0));
/* Stop at the first block the device has not finished yet. */
1575 if (!xfer->data[i].processed) {
/* Accumulate Event Data Transfer Length for EVENT_DATA TRBs. */
1581 edtla += xfer->data[i].bdone;
/* Hand the TRB back to the guest by updating its cycle bit. */
1583 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs);
1585 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx,
1586 xfer->data[i].streamid, xfer->data[i].trbnext,
1589 /* Only interrupt if IOC or short packet */
1590 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) &&
1591 !((err == XHCI_TRB_ERROR_SHORT_PKT) &&
1592 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) {
1594 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/* TRB Transfer Length (residue) goes in the event's dwTrb2. */
1598 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) |
1599 XHCI_TRB_2_REM_SET(xfer->data[i].blen);
1601 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) |
1602 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid);
1604 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) {
1605 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla));
/* Event Data: report guest cookie + EDTLA with the ED bit set. */
1606 evtrb.qwTrb0 = trb->qwTrb0;
1607 evtrb.dwTrb2 = (edtla & 0xFFFFF) |
1608 XHCI_TRB_2_ERROR_SET(err);
1609 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT;
1615 err = pci_xhci_insert_event(sc, &evtrb, 0);
1616 if (err != XHCI_TRB_ERROR_SUCCESS) {
1620 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/*
 * Record the new transfer-ring position for an endpoint after TRBs have
 * been consumed.  For stream-capable endpoints the per-stream shadow
 * context and ring cursor are updated; otherwise the endpoint's ring
 * address/cycle state and the TR Dequeue Pointer field in the endpoint
 * context (qwEpCtx2) are refreshed.
 */
1627 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev,
1628 struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx,
1629 uint32_t streamid, uint64_t ringaddr, int ccs)
1632 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) {
/* Stream endpoint: store addr|DCS in the shadow stream context. */
1633 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) |
1636 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL;
1637 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1;
1638 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1);
1640 DPRINTF(("xhci update ep-ring stream %d, addr %lx",
1641 streamid, devep->ep_sctx[streamid].qwSctx0));
/* Non-stream endpoint: update cursor and remap the ring. */
1643 devep->ep_ringaddr = ringaddr & ~0xFUL;
1644 devep->ep_ccs = ccs & 0x1;
1645 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL);
1646 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1);
1648 DPRINTF(("xhci update ep-ring, addr %lx",
1649 (devep->ep_ringaddr | devep->ep_ccs)));
 * Outstanding transfer still in progress (device NAK'd earlier) so retry
 * the transfer again to see if it succeeds.
 *
 * Marks the endpoint Running, re-submits the queued data blocks to the
 * device emulation's ue_data callback, and on completion (or cancel of
 * a NAK'd request) generates transfer events and optionally asserts the
 * controller interrupt.  The xfer lock is held for the whole operation.
1658 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc,
1659 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1660 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid)
1662 struct usb_data_xfer *xfer;
1666 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1667 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1672 xfer = devep->ep_xfer;
1673 USB_DATA_XFER_LOCK(xfer);
1675 /* outstanding requests queued up */
1676 if (dev->dev_ue->ue_data != NULL) {
/* Odd epid = IN endpoint, even = OUT; epid/2 is the HW ep number. */
1677 err = dev->dev_ue->ue_data(dev->dev_sc, xfer,
1678 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2);
1679 if (err == USB_ERR_CANCELLED) {
1680 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) ==
1682 err = XHCI_TRB_ERROR_SUCCESS;
1684 err = pci_xhci_xfer_complete(sc, xfer, slot, epid,
1686 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) {
1687 pci_xhci_assert_interrupt(sc);
1691 /* XXX should not do it if error? */
1692 USB_DATA_XFER_RESET(xfer);
1696 USB_DATA_XFER_UNLOCK(xfer);
/*
 * Consume transfer TRBs from an endpoint ring starting at `addr` with
 * cycle state `ccs`, build a usb_data_xfer batch (setup/data/status
 * stages, normal, isoch, event-data, no-op and link TRBs), submit it to
 * the device emulation, then complete the batch back to the guest via
 * transfer events.  Runs with the xfer lock held.
 */
1704 pci_xhci_handle_transfer(struct pci_xhci_softc *sc,
1705 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1706 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot,
1707 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid)
1709 struct xhci_trb *setup_trb;
1710 struct usb_data_xfer *xfer;
1711 struct usb_data_xfer_block *xfer_block;
1717 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0,
1718 XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1720 xfer = devep->ep_xfer;
1721 USB_DATA_XFER_LOCK(xfer);
1723 DPRINTF(("pci_xhci handle_transfer slot %u", slot));
1732 pci_xhci_dump_trb(trb);
1734 trbflags = trb->dwTrb3;
/* A cycle-bit mismatch (except on Link TRBs) means end of ring data. */
1736 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK &&
1737 (trbflags & XHCI_TRB_3_CYCLE_BIT) !=
1738 (ccs & XHCI_TRB_3_CYCLE_BIT)) {
1739 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x",
1740 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs));
1746 switch (XHCI_TRB_3_TYPE_GET(trbflags)) {
1747 case XHCI_TRB_TYPE_LINK:
1748 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
/* Link TRBs carry no data; mark the placeholder block done. */
1751 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1753 xfer_block->processed = 1;
1756 case XHCI_TRB_TYPE_SETUP_STAGE:
/* Setup stage must use immediate data (IDT) of exactly 8 bytes. */
1757 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 ||
1758 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) {
1759 DPRINTF(("pci_xhci: invalid setup trb"));
1760 err = XHCI_TRB_ERROR_TRB;
1767 xfer->ureq = malloc(
1768 sizeof(struct usb_device_request));
1769 memcpy(xfer->ureq, &val,
1770 sizeof(struct usb_device_request));
1772 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1774 xfer_block->processed = 1;
1777 case XHCI_TRB_TYPE_NORMAL:
1778 case XHCI_TRB_TYPE_ISOCH:
/* Normal/Isoch TRBs are illegal inside a control transfer. */
1779 if (setup_trb != NULL) {
1780 DPRINTF(("pci_xhci: trb not supposed to be in "
1782 err = XHCI_TRB_ERROR_TRB;
1787 case XHCI_TRB_TYPE_DATA_STAGE:
/* IDT set: data is inline in qwTrb0; else qwTrb0 is a guest addr. */
1788 xfer_block = usb_data_xfer_append(xfer,
1789 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ?
1790 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)),
1791 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs);
1794 case XHCI_TRB_TYPE_STATUS_STAGE:
1795 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1799 case XHCI_TRB_TYPE_NOOP:
1800 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1802 xfer_block->processed = 1;
1805 case XHCI_TRB_TYPE_EVENT_DATA:
1806 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1808 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) {
1809 xfer_block->processed = 1;
1814 DPRINTF(("pci_xhci: handle xfer unexpected trb type "
1816 XHCI_TRB_3_TYPE_GET(trbflags)));
1817 err = XHCI_TRB_ERROR_TRB;
1821 trb = pci_xhci_trb_next(sc, trb, &addr);
1823 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb));
1826 xfer_block->trbnext = addr;
1827 xfer_block->streamid = streamid;
/* End of batch: no chain bit and not a control transfer in flight. */
1830 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) &&
1831 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) {
1835 /* handle current batch that requires interrupt on complete */
1836 if (trbflags & XHCI_TRB_3_IOC_BIT) {
1837 DPRINTF(("pci_xhci: trb IOC bit set"));
1844 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata));
/* Control endpoint (epid 1): submit via ue_request; else ue_data path. */
1847 err = USB_ERR_NOT_STARTED;
1848 if (dev->dev_ue->ue_request != NULL)
1849 err = dev->dev_ue->ue_request(dev->dev_sc, xfer);
1852 /* handle data transfer */
1853 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1854 err = XHCI_TRB_ERROR_SUCCESS;
1858 err = USB_TO_XHCI_ERR(err);
1859 if ((err == XHCI_TRB_ERROR_SUCCESS) ||
1860 (err == XHCI_TRB_ERROR_SHORT_PKT)) {
1861 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, &do_intr);
1862 if (err != XHCI_TRB_ERROR_SUCCESS)
1867 if (err == XHCI_TRB_ERROR_EV_RING_FULL)
1868 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__));
1871 USB_DATA_XFER_UNLOCK(xfer);
1874 pci_xhci_assert_interrupt(sc);
1877 USB_DATA_XFER_RESET(xfer);
1878 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs",
1884 USB_DATA_XFER_RESET(xfer);
/*
 * Service a guest doorbell ring for a device endpoint: validate slot,
 * endpoint id and (when streams are enabled) the stream id, retry any
 * pending transfer first, then fetch the next TRB from the appropriate
 * ring and hand it to pci_xhci_handle_transfer().
 */
1890 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot,
1891 uint32_t epid, uint32_t streamid)
1893 struct pci_xhci_dev_emu *dev;
1894 struct pci_xhci_dev_ep *devep;
1895 struct xhci_dev_ctx *dev_ctx;
1896 struct xhci_endp_ctx *ep_ctx;
1897 struct pci_xhci_trb_ring *sctx_tr;
1898 struct xhci_trb *trb;
1902 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u",
1903 slot, epid, streamid));
1905 if (slot == 0 || slot > sc->ndevices) {
1906 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot));
1910 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) {
1911 DPRINTF(("pci_xhci: invalid endpoint %u", epid));
1915 dev = XHCI_SLOTDEV_PTR(sc, slot);
1916 devep = &dev->eps[epid];
1917 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1921 ep_ctx = &dev_ctx->ctx_ep[epid];
1925 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x",
1926 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
/* No TR dequeue pointer configured: endpoint ring never set up. */
1929 if (ep_ctx->qwEpCtx2 == 0)
1932 /* handle pending transfers */
1933 if (devep->ep_xfer->ndata > 0) {
1934 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1938 /* get next trb work item */
1939 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) {
1940 struct xhci_stream_ctx *sctx;
1943 * Stream IDs of 0, 65535 (any stream), and 65534
1944 * (prime) are invalid.
1946 if (streamid == 0 || streamid == 65534 || streamid == 65535) {
1947 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1952 pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx);
1954 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1957 sctx_tr = &devep->ep_sctx_trbs[streamid];
1958 ringaddr = sctx_tr->ringaddr;
1960 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL);
1961 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x",
1962 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1963 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* Non-stream endpoint: streamid must be zero. */
1965 if (streamid != 0) {
1966 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1969 ringaddr = devep->ep_ringaddr;
1970 ccs = devep->ep_ccs;
1972 DPRINTF(("doorbell, ccs %lx, trb ccs %x",
1973 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1974 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* TRB type 0 is reserved; nothing valid queued on the ring. */
1977 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) {
1978 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?",
1979 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid));
1983 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid,
1984 ringaddr, ccs, streamid);
/*
 * Doorbell register write.  Doorbell 0 kicks command-ring processing;
 * doorbells 1..n ring a device slot (offset == slot number) with the
 * target endpoint and stream id encoded in the written value.
 * Writes are ignored while the controller is halted.
 */
1988 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset,
/* Convert byte offset into a doorbell index (one u32 per doorbell). */
1992 offset = (offset - sc->dboff) / sizeof(uint32_t);
1994 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx",
1997 if (XHCI_HALTED(sc)) {
1998 DPRINTF(("pci_xhci: controller halted"));
2003 pci_xhci_complete_commands(sc);
2004 else if (sc->portregs != NULL)
2005 pci_xhci_device_doorbell(sc, offset,
2006 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value));
/*
 * Runtime register write: interrupter 0's IMAN/IMOD/ERSTSZ/ERSTBA/ERDP.
 * ERSTBA writes (re)map the event ring segment table and reset the
 * enqueue index; ERDP writes acknowledge consumed events and recompute
 * the outstanding event count.  MFINDEX is read-only.
 */
2010 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset,
2013 struct pci_xhci_rtsregs *rts;
2015 offset -= sc->rtsoff;
2018 DPRINTF(("pci_xhci attempted write to MFINDEX"));
2022 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx",
2025 offset -= 0x20; /* start of intrreg */
/* IMAN: writing 1 to IP clears it (write-1-to-clear); IE is stored. */
2031 if (value & XHCI_IMAN_INTR_PEND)
2032 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2033 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) |
2034 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND);
2036 if (!(value & XHCI_IMAN_INTR_ENA))
2037 pci_xhci_deassert_interrupt(sc);
2042 rts->intrreg.imod = value;
2046 rts->intrreg.erstsz = value & 0xFFFF;
2050 /* ERSTBA low bits */
2051 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) |
2056 /* ERSTBA high bits */
2057 rts->intrreg.erstba = (value << 32) |
2058 MASK_64_LO(sc->rtsregs.intrreg.erstba);
/* Map the segment table (64-byte aligned) and its first segment. */
2060 rts->erstba_p = XHCI_GADDR(sc,
2061 sc->rtsregs.intrreg.erstba & ~0x3FUL);
2063 rts->erst_p = XHCI_GADDR(sc,
2064 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL);
2066 rts->er_enq_idx = 0;
2067 rts->er_events_cnt = 0;
2069 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u",
2071 rts->erstba_p->qwEvrsTablePtr,
2072 rts->erstba_p->dwEvrsTableSize));
2078 MASK_64_HI(sc->rtsregs.intrreg.erdp) |
2079 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) |
/* EHB (event handler busy) is write-1-to-clear; also clears IMAN.IP. */
2081 if (value & XHCI_ERDP_LO_BUSY) {
2082 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY;
2083 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2086 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value);
2091 /* ERDP high bits */
2092 rts->intrreg.erdp = (value << 32) |
2093 MASK_64_LO(sc->rtsregs.intrreg.erdp);
2095 if (rts->er_events_cnt > 0) {
/* Recompute unconsumed event count from the new dequeue pointer. */
2099 erdp = rts->intrreg.erdp & ~0xF;
2100 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) /
2101 sizeof(struct xhci_trb);
2103 if (erdp_i <= rts->er_enq_idx)
2104 rts->er_events_cnt = rts->er_enq_idx - erdp_i;
2106 rts->er_events_cnt =
2107 rts->erstba_p->dwEvrsTableSize -
2108 (erdp_i - rts->er_enq_idx);
2110 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u",
2111 erdp, rts->er_events_cnt));
2117 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx",
/*
 * Read a port register.  Port register banks start at offset 0x3F0 and
 * are 0x10 bytes each; out-of-range ports return a default PORTSC-style
 * value (speed field only).
 */
2124 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2129 if (sc->portregs == NULL)
2132 port = (offset - 0x3F0) / 0x10;
2134 if (port > XHCI_MAX_DEVS) {
2135 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS",
2138 /* return default value for unused port */
2139 return (XHCI_PS_SPEED_SET(3));
/* Offset within this port's 4-register (portsc..) bank. */
2142 offset = (offset - 0x3F0) % 0x10;
2144 p = &sc->portregs[port].portsc;
2145 p += offset / sizeof(uint32_t);
2147 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x",
/*
 * Operational register write (USBCMD/USBSTS/DNCTRL/CRCR/DCBAAP/CONFIG)
 * and, for offsets >= 0x400, the port registers.  Offsets are relative
 * to the end of the capability registers.
 */
2154 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset,
2157 offset -= XHCI_CAPLEN;
2160 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx",
2165 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F);
2169 /* clear bits on write */
2170 sc->opregs.usbsts &= ~(value &
2171 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS|
2172 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR));
2180 sc->opregs.dnctrl = value & 0xFFFF;
/* CRCR low: while the ring runs, only CS/CA may be written. */
2184 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) {
2185 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2186 sc->opregs.crcr |= value &
2187 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2189 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) |
2190 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS));
/* CRCR high: latches the command ring pointer when the ring is idle. */
2195 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) {
2196 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) |
2199 sc->opregs.cr_p = XHCI_GADDR(sc,
2200 sc->opregs.crcr & ~0xF);
2203 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) {
2204 /* Stop operation of Command Ring */
2207 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) {
2213 case XHCI_DCBAAP_LO:
2214 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) |
2215 (value & 0xFFFFFFC0)
2218 case XHCI_DCBAAP_HI:
2219 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) |
/* Map the Device Context Base Address Array (64-byte aligned). */
2221 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL);
2223 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)",
2224 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p));
2228 sc->opregs.config = value & 0x03FF;
2232 if (offset >= 0x400)
2233 pci_xhci_portregs_write(sc, offset, value);
/*
 * PCI BAR0 write entry point: dispatch by offset to capability (RO),
 * operational, doorbell, or runtime register handlers, serialized by
 * the softc mutex.
 */
2241 pci_xhci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2242 int baridx, uint64_t offset, int size, uint64_t value)
2244 struct pci_xhci_softc *sc;
2248 assert(baridx == 0);
2251 pthread_mutex_lock(&sc->mtx);
2252 if (offset < XHCI_CAPLEN) /* read only registers */
2253 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset));
2254 else if (offset < sc->dboff)
2255 pci_xhci_hostop_write(sc, offset, value);
2256 else if (offset < sc->rtsoff)
2257 pci_xhci_dbregs_write(sc, offset, value);
2258 else if (offset < sc->regsend)
2259 pci_xhci_rtsregs_write(sc, offset, value);
2261 WPRINTF(("pci_xhci: write invalid offset %ld", offset));
2263 pthread_mutex_unlock(&sc->mtx);
/*
 * Capability register read: returns the static controller parameters
 * (CAPLENGTH/HCIVERSION, HCSPARAMS1-3, HCCPARAMS1-2, DBOFF, RTSOFF)
 * cached in the softc at init time.
 */
2267 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset)
2272 case XHCI_CAPLENGTH: /* 0x00 */
2273 value = sc->caplength;
2276 case XHCI_HCSPARAMS1: /* 0x04 */
2277 value = sc->hcsparams1;
2280 case XHCI_HCSPARAMS2: /* 0x08 */
2281 value = sc->hcsparams2;
2284 case XHCI_HCSPARAMS3: /* 0x0C */
2285 value = sc->hcsparams3;
/* NOTE(review): header names this case HCSPARAMS0 but offset 0x10 is
 * HCCPARAMS1 per the xHCI register layout; the value returned matches. */
2288 case XHCI_HCSPARAMS0: /* 0x10 */
2289 value = sc->hccparams1;
2292 case XHCI_DBOFF: /* 0x14 */
2296 case XHCI_RTSOFF: /* 0x18 */
2300 case XHCI_HCCPRAMS2: /* 0x1C */
2301 value = sc->hccparams2;
2309 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx",
/*
 * Operational register read; offsets >= 0x400 fall through to the port
 * registers.  Offset is rebased past the capability registers.
 */
2316 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset)
2320 offset = (offset - XHCI_CAPLEN);
2323 case XHCI_USBCMD: /* 0x00 */
2324 value = sc->opregs.usbcmd;
2327 case XHCI_USBSTS: /* 0x04 */
2328 value = sc->opregs.usbsts;
2331 case XHCI_PAGESIZE: /* 0x08 */
2332 value = sc->opregs.pgsz;
2335 case XHCI_DNCTRL: /* 0x14 */
2336 value = sc->opregs.dnctrl;
/* CRCR reads expose only CRR; the ring pointer reads as zero (spec). */
2339 case XHCI_CRCR_LO: /* 0x18 */
2340 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR;
2343 case XHCI_CRCR_HI: /* 0x1C */
2347 case XHCI_DCBAAP_LO: /* 0x30 */
2348 value = sc->opregs.dcbaap & 0xFFFFFFFF;
2351 case XHCI_DCBAAP_HI: /* 0x34 */
2352 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF;
2355 case XHCI_CONFIG: /* 0x38 */
2356 value = sc->opregs.config;
2360 if (offset >= 0x400)
2361 value = pci_xhci_portregs_read(sc, offset);
2369 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx",
/* Doorbell registers are write-only; reads always yield 0. */
2376 pci_xhci_dbregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2379 /* read doorbell always returns 0 */
/*
 * Runtime register read: MFINDEX at offset 0, interrupter 0's register
 * set starting at 0x20 (indexed into the intrreg struct as u32 words).
 */
2384 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2388 offset -= sc->rtsoff;
2391 if (offset == XHCI_MFINDEX) {
2392 value = sc->rtsregs.mfindex;
2393 } else if (offset >= 0x20) {
2400 assert(offset < sizeof(sc->rtsregs.intrreg));
2402 p = &sc->rtsregs.intrreg.iman;
2403 p += item / sizeof(uint32_t);
2407 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x",
/*
 * Extended capability read: synthesizes two xHCI Supported Protocol
 * capability entries, one for USB2 ports and one for USB3 ports (name
 * string "USB", compat port count/offset from the softc).  The USB3
 * entry has next-cap = 0, terminating the list.
 */
2414 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset)
2418 offset -= sc->regsend;
2423 /* rev major | rev minor | next-cap | cap-id */
2424 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS;
2427 /* name string = "USB" */
2431 /* psic | proto-defined | compat # | compat offset */
2432 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start;
2437 /* rev major | rev minor | next-cap | cap-id */
2438 value = (0x03 << 24) | XHCI_ID_PROTOCOLS;
2441 /* name string = "USB" */
2445 /* psic | proto-defined | compat # | compat offset */
2446 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start;
2451 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset));
2455 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x",
/*
 * PCI BAR0 read entry point: dispatch by offset to capability,
 * operational, doorbell, runtime, or extended-capability handlers,
 * serialized by the softc mutex.  Results are truncated to 32 bits
 * (all mapped registers are at most 32 bits wide per access).
 */
2463 pci_xhci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2464 uint64_t offset, int size)
2466 struct pci_xhci_softc *sc;
2471 assert(baridx == 0);
2473 pthread_mutex_lock(&sc->mtx);
2474 if (offset < XHCI_CAPLEN)
2475 value = pci_xhci_hostcap_read(sc, offset);
2476 else if (offset < sc->dboff)
2477 value = pci_xhci_hostop_read(sc, offset);
2478 else if (offset < sc->rtsoff)
2479 value = pci_xhci_dbregs_read(sc, offset);
2480 else if (offset < sc->regsend)
2481 value = pci_xhci_rtsregs_read(sc, offset);
2482 else if (offset < (sc->regsend + 4*32))
2483 value = pci_xhci_xecp_read(sc, offset);
2486 WPRINTF(("pci_xhci: read invalid offset %ld", offset));
2489 pthread_mutex_unlock(&sc->mtx);
2499 value &= 0xFFFFFFFF;
/*
 * Reset a root hub port: clear link state / port reset bits, mark the
 * port enabled with the attached device's speed, set the appropriate
 * reset-change bit (WRC for warm reset of USB3 devices, else PRC), and
 * post a Port Status Change event to the guest.
 */
2507 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm)
2509 struct pci_xhci_portregs *port;
2510 struct pci_xhci_dev_emu *dev;
2511 struct xhci_trb evtrb;
2514 assert(portn <= XHCI_MAX_DEVS);
2516 DPRINTF(("xhci reset port %d", portn));
2518 port = XHCI_PORTREG_PTR(sc, portn);
2519 dev = XHCI_DEVINST_PTR(sc, portn);
2521 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC);
2522 port->portsc |= XHCI_PS_PED |
2523 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2525 if (warm && dev->dev_ue->ue_usbver == 3) {
2526 port->portsc |= XHCI_PS_WRC;
/* Only raise the change event once per reset (PRC not already set). */
2529 if ((port->portsc & XHCI_PS_PRC) == 0) {
2530 port->portsc |= XHCI_PS_PRC;
2532 pci_xhci_set_evtrb(&evtrb, portn,
2533 XHCI_TRB_ERROR_SUCCESS,
2534 XHCI_TRB_EVENT_PORT_STS_CHANGE);
2535 error = pci_xhci_insert_event(sc, &evtrb, 1);
2536 if (error != XHCI_TRB_ERROR_SUCCESS)
2537 DPRINTF(("xhci reset port insert event "
/*
 * Initialize a root hub port's PORTSC at controller init: occupied
 * ports are marked connected and powered — USB2 ports start in polling
 * link state, USB3 ports start enabled in U0; empty ports are set to
 * RxDetect and powered.
 */
2544 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn)
2546 struct pci_xhci_portregs *port;
2547 struct pci_xhci_dev_emu *dev;
2549 port = XHCI_PORTREG_PTR(sc, portn);
2550 dev = XHCI_DEVINST_PTR(sc, portn);
2552 port->portsc = XHCI_PS_CCS | /* connected */
2553 XHCI_PS_PP; /* port power */
2555 if (dev->dev_ue->ue_usbver == 2) {
2556 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) |
2557 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2559 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) |
2560 XHCI_PS_PED | /* enabled */
2561 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2564 DPRINTF(("Init port %d 0x%x", portn, port->portsc));
2566 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP;
2567 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc));
/*
 * Callback from a device emulation to signal data availability on an
 * endpoint.  epctx encodes direction in bit 7 and the HW endpoint
 * number in the low bits; this is converted to the xHCI device-context
 * index.  If the port is suspended (link state U3) a resume + Port
 * Status Change event is raised first, then the endpoint doorbell is
 * rung as if the guest had done so.
 */
2572 pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
2574 struct pci_xhci_dev_emu *dev;
2575 struct xhci_dev_ctx *dev_ctx;
2576 struct xhci_trb evtrb;
2577 struct pci_xhci_softc *sc;
2578 struct pci_xhci_portregs *p;
2579 struct xhci_endp_ctx *ep_ctx;
2584 dir_in = epctx & 0x80;
2585 epid = epctx & ~0x80;
2587 /* HW endpoint contexts are 0-15; convert to epid based on dir */
2588 epid = (epid * 2) + (dir_in ? 1 : 0);
2590 assert(epid >= 1 && epid <= 31);
2595 /* check if device is ready; OS has to initialise it */
2596 if (sc->rtsregs.erstba_p == NULL ||
2597 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 ||
2598 dev->dev_ctx == NULL)
2601 p = XHCI_PORTREG_PTR(sc, hci->hci_port);
2603 /* raise event if link U3 (suspended) state */
2604 if (XHCI_PS_PLS_GET(p->portsc) == 3) {
2605 p->portsc &= ~XHCI_PS_PLS_MASK;
2606 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME);
2607 if ((p->portsc & XHCI_PS_PLC) != 0)
2610 p->portsc |= XHCI_PS_PLC;
2612 pci_xhci_set_evtrb(&evtrb, hci->hci_port,
2613 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE);
2614 error = pci_xhci_insert_event(sc, &evtrb, 0);
2615 if (error != XHCI_TRB_ERROR_SUCCESS)
2619 dev_ctx = dev->dev_ctx;
2620 ep_ctx = &dev_ctx->ctx_ep[epid];
/* Low 3 bits of dwEpCtx0 hold the endpoint state. */
2621 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) {
2622 DPRINTF(("xhci device interrupt on disabled endpoint %d",
2627 DPRINTF(("xhci device interrupt on endpoint %d", epid));
2629 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0);
/* Device event callback from a device emulation; currently only logs. */
2636 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid, void *param)
2639 DPRINTF(("xhci device event port %d", hci->hci_port));
/* Print a usage error for an unrecognized USB device emulation option. */
2646 pci_xhci_device_usage(char *opt)
2649 EPRINTLN("Invalid USB emulation \"%s\"", opt);
/*
 * Parse the comma-separated device option string ("device[=config],..."),
 * instantiate each device emulation via usb_emu_finddev()/ue_init(),
 * assign it a USB2 or USB3 root hub port (by ue_usbver) and a slot, and
 * allocate the port register array.  Returns the number of devices
 * configured, or a negative/zero value on failure (cleanup path frees
 * the device array when ports were exhausted or lookup failed).
 */
2653 pci_xhci_parse_opts(struct pci_xhci_softc *sc, char *opts)
2655 struct pci_xhci_dev_emu **devices;
2656 struct pci_xhci_dev_emu *dev;
2657 struct usb_devemu *ue;
2659 char *uopt, *xopts, *config;
2660 int usb3_port, usb2_port, i;
2663 usb3_port = sc->usb3_port_start - 1;
2664 usb2_port = sc->usb2_port_start - 1;
2670 devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *));
2672 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *));
2673 sc->devices = devices;
/* NOTE(review): uopt = strdup(opts); the free() is not visible in this
 * view — confirm it is released on all paths. strtok mutates the copy. */
2676 uopt = strdup(opts);
2677 for (xopts = strtok(uopt, ",");
2679 xopts = strtok(NULL, ",")) {
2680 if (usb2_port == ((sc->usb2_port_start-1) + XHCI_MAX_DEVS/2) ||
2681 usb3_port == ((sc->usb3_port_start-1) + XHCI_MAX_DEVS/2)) {
2682 WPRINTF(("pci_xhci max number of USB 2 or 3 "
2683 "devices reached, max %d", XHCI_MAX_DEVS/2));
2684 usb2_port = usb3_port = -1;
2688 /* device[=<config>] */
2689 if ((config = strchr(xopts, '=')) == NULL)
2690 config = ""; /* no config */
2694 ue = usb_emu_finddev(xopts);
2696 pci_xhci_device_usage(xopts);
2697 DPRINTF(("pci_xhci device not found %s", xopts));
2698 usb2_port = usb3_port = -1;
2702 DPRINTF(("pci_xhci adding device %s, opts \"%s\"",
2705 dev = calloc(1, sizeof(struct pci_xhci_dev_emu));
2707 dev->hci.hci_sc = dev;
2708 dev->hci.hci_intr = pci_xhci_dev_intr;
2709 dev->hci.hci_event = pci_xhci_dev_event;
/* hci_port is 1-based; devices[] index is 0-based. */
2711 if (ue->ue_usbver == 2) {
2712 dev->hci.hci_port = usb2_port + 1;
2713 devices[usb2_port] = dev;
2716 dev->hci.hci_port = usb3_port + 1;
2717 devices[usb3_port] = dev;
2721 dev->hci.hci_address = 0;
2722 devsc = ue->ue_init(&dev->hci, config);
2723 if (devsc == NULL) {
2724 pci_xhci_device_usage(xopts);
2725 usb2_port = usb3_port = -1;
2730 dev->dev_sc = devsc;
2732 /* assign slot number to device */
2733 sc->slots[sc->ndevices] = dev;
2739 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs));
2741 if (sc->ndevices > 0) {
2742 /* port and slot numbering start from 1 */
2747 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2748 pci_xhci_init_port(sc, i);
2751 WPRINTF(("pci_xhci no USB devices configured"));
/* Error cleanup: a negative port index flags a parse/init failure. */
2756 if (devices != NULL) {
2757 if (usb2_port <= 0 && usb3_port <= 0) {
2759 for (i = 0; devices[i] != NULL; i++)
2767 return (sc->ndevices);
/*
 * PCI device init entry point: allocates the softc (only one xHCI
 * controller instance is allowed), parses device options, programs the
 * capability parameters (ports, slots, streams, interrupters), lays out
 * the register file (doorbells, runtime regs, extended caps), sets PCI
 * config space IDs for an Intel Panther Point xHCI, and allocates BAR0.
 */
2771 pci_xhci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2773 struct pci_xhci_softc *sc;
2777 WPRINTF(("pci_xhci controller already defined"));
2782 sc = calloc(1, sizeof(struct pci_xhci_softc));
/* USB3 ports occupy 1..N/2, USB2 ports N/2+1..N. */
2786 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1;
2787 sc->usb3_port_start = 1;
2789 /* discover devices */
2790 error = pci_xhci_parse_opts(sc, opts);
2796 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) |
2797 XHCI_SET_HCIVERSION(0x0100);
2798 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) |
2799 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */
2800 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS);
2801 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) |
2802 XHCI_SET_HCSP2_IST(0x04);
2803 sc->hcsparams3 = 0; /* no latency */
2804 sc->hccparams1 = XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */
2805 XHCI_SET_HCCP1_SPC(1) | /* short packet */
2806 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX);
2807 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) |
2808 XHCI_SET_HCCP2_U3C(1);
2809 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START +
2810 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs));
2812 /* dboff must be 32-bit aligned */
2813 if (sc->dboff & 0x3)
2814 sc->dboff = (sc->dboff + 0x3) & ~0x3;
2816 /* rtsoff must be 32-bytes aligned */
2817 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32);
2818 if (sc->rtsoff & 0x1F)
2819 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F;
2821 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff,
/* Controller starts halted until the guest sets USBCMD.RS. */
2824 sc->opregs.usbsts = XHCI_STS_HCH;
2825 sc->opregs.pgsz = XHCI_PAGESIZE_4K;
2829 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */
2832 * Set extended capabilities pointer to be after regsend;
2833 * value of xecp field is 32-bit offset.
2835 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4);
/* Present as Intel 8086:1E31 (Panther Point xHCI). */
2837 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31);
2838 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2839 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS);
2840 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB);
2841 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI);
2842 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0);
2844 pci_emul_add_msicap(pi, 1);
2846 /* regsend + xecp registers */
2847 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32);
2848 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32));
2851 pci_lintr_request(pi);
2853 pthread_mutex_init(&sc->mtx, NULL);
#ifdef BHYVE_SNAPSHOT
/*
 * pci_xhci_map_devs_slots: build 'maps', a table (indexed by slot number,
 * 1..XHCI_MAX_SLOTS) recording which device instance each slot points at,
 * by comparing every slot's device pointer against every device instance.
 * The comparison/record statements fall outside this view — TODO confirm.
 * 'maps' must hold at least XHCI_MAX_SLOTS entries of int.
 */
2865 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[])
2868 	struct pci_xhci_dev_emu *dev, *slot;
/* 'maps' decays to a pointer here, so the size is computed per-element. */
2870 	memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS);
2872 	for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
2873 		for (j = 1; j <= XHCI_MAX_DEVS; j++) {
2874 			slot = XHCI_SLOTDEV_PTR(sc, i);
2875 			dev = XHCI_DEVINST_PTR(sc, j);
/*
 * pci_xhci_snapshot_ep: save or restore the transfer state of endpoint
 * 'idx' of device 'dev', driven by meta->op (VM_SNAPSHOT_SAVE vs
 * VM_SNAPSHOT_RESTORE).  Covers the usb_data_xfer pointer itself, each
 * transfer block's guest buffer mapping and bookkeeping fields, the
 * control request (ureq, heap-allocated on restore), and the ring
 * head/tail/count.  Early-exit branches (e.g. NULL xfer) are elided
 * from this view.
 */
2884 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev,
2885     int idx, struct vm_snapshot_meta *meta)
2889 	struct usb_data_xfer *xfer;
2890 	struct usb_data_xfer_block *xfer_block;
2892 	/* some sanity checks */
2893 	if (meta->op == VM_SNAPSHOT_SAVE)
2894 		xfer = dev->eps[idx].ep_xfer;
/* Snapshot the pointer value itself (records whether the ep had a xfer). */
2896 	SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done);
/* On restore, re-create the endpoint before repopulating its xfer. */
2902 	if (meta->op == VM_SNAPSHOT_RESTORE) {
2903 		pci_xhci_init_ep(dev, idx);
2904 		xfer = dev->eps[idx].ep_xfer;
2907 	/* save / restore proper */
2908 	for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) {
2909 		xfer_block = &xfer->data[k];
/* Guest buffer: translated through guest-physical address mapping. */
2911 		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf,
2912 		    XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret,
2914 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done);
2915 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done);
2916 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done);
2917 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done);
2918 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done);
2919 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done);
2920 		SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done);
2923 	SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done);
2925 	/* xfer->ureq is not allocated at restore time */
2926 	if (meta->op == VM_SNAPSHOT_RESTORE)
2927 		xfer->ureq = malloc(sizeof(struct usb_device_request));
2929 	SNAPSHOT_BUF_OR_LEAVE(xfer->ureq,
2930 	    sizeof(struct usb_device_request),
2934 	SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done);
2935 	SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done);
2936 	SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done);
/*
 * pci_xhci_snapshot: save/restore the entire emulated controller for
 * live snapshot/resume.  In order: capability and operational register
 * images, guest-mapped command ring / DCBAA pointers, runtime
 * (interrupter) registers and event-ring bookkeeping, a per-device
 * sanity pass (index and emulation-name match between saved state and
 * the current configuration), per-port registers, the slot->device
 * mapping, then per-device contexts, endpoints and HCI addressing,
 * and finally top-level counters.  Error paths ('done' label, early
 * returns) are elided from this view.
 */
2943 pci_xhci_snapshot(struct vm_snapshot_meta *meta)
2948 	struct pci_devinst *pi;
2949 	struct pci_xhci_softc *sc;
2950 	struct pci_xhci_portregs *port;
2951 	struct pci_xhci_dev_emu *dev;
2952 	char dname[SNAP_DEV_NAME_LEN];
2953 	int maps[XHCI_MAX_SLOTS + 1];
2955 	pi = meta->dev_data;
/* Capability-register images. */
2958 	SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done);
2959 	SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done);
2960 	SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done);
2961 	SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done);
2962 	SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done);
2963 	SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done);
2964 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done);
2965 	SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done);
2966 	SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done);
/* Operational registers. */
2969 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done);
2970 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done);
2971 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done);
2972 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done);
2973 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done);
2974 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done);
2975 	SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done);
/* Command-ring pointer: a guest-physical mapping, re-translated on restore. */
2978 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p,
2979 	    XHCI_GADDR_SIZE(sc->opregs.cr_p), false, meta, ret, done);
2981 	/* opregs.dcbaa_p */
2982 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p,
2983 	    XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), false, meta, ret, done);
2986 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done);
2988 	/* rtsregs.intrreg */
2989 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done);
2990 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done);
2991 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done);
2992 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done);
2993 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done);
2994 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done);
2996 	/* rtsregs.erstba_p */
2997 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p,
2998 	    XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), false, meta, ret, done);
3000 	/* rtsregs.erst_p */
3001 	SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p,
3002 	    XHCI_GADDR_SIZE(sc->rtsregs.erst_p), false, meta, ret, done);
/* Event-ring enqueue/dequeue bookkeeping. */
3004 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done);
3005 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done);
3006 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done);
3007 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done);
3008 	SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done);
3010 	/* sanity checking */
3011 	for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3012 		dev = XHCI_DEVINST_PTR(sc, i);
3016 		if (meta->op == VM_SNAPSHOT_SAVE)
3018 		SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done);
3020 		/* check if the restored device (when restoring) is sane */
3021 		if (restore_idx != i) {
3022 			fprintf(stderr, "%s: idx not matching: actual: %d, "
3023 			    "expected: %d\r\n", __func__, restore_idx, i);
/* Record/compare the emulation name so a restore onto a different
 * device configuration is rejected rather than silently corrupting state. */
3028 		if (meta->op == VM_SNAPSHOT_SAVE) {
3029 			memset(dname, 0, sizeof(dname));
3030 			strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1);
3033 		SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done);
3035 		if (meta->op == VM_SNAPSHOT_RESTORE) {
3036 			dname[sizeof(dname) - 1] = '\0';
3037 			if (strcmp(dev->dev_ue->ue_emu, dname)) {
3038 				fprintf(stderr, "%s: device names mismatch: "
3039 				    "actual: %s, expected: %s\r\n",
3040 				    __func__, dname, dev->dev_ue->ue_emu);
/* Per-port register images (port and device numbering start at 1). */
3049 	for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3050 		port = XHCI_PORTREG_PTR(sc, i);
3051 		dev = XHCI_DEVINST_PTR(sc, i);
3056 		SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done);
3057 		SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done);
3058 		SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done);
3059 		SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done);
/* Slot->device mapping: computed on save, re-applied on restore. */
3063 	if (meta->op == VM_SNAPSHOT_SAVE)
3064 		pci_xhci_map_devs_slots(sc, maps);
3066 	for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
3067 		SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done);
3069 		if (meta->op == VM_SNAPSHOT_SAVE) {
3070 			dev = XHCI_SLOTDEV_PTR(sc, i);
3071 		} else if (meta->op == VM_SNAPSHOT_RESTORE) {
3073 				dev = XHCI_DEVINST_PTR(sc, maps[i]);
3077 			XHCI_SLOTDEV_PTR(sc, i) = dev;
/* Guest-mapped device context, then each endpoint's transfer state. */
3087 		SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx,
3088 		    XHCI_GADDR_SIZE(dev->dev_ctx), false, meta, ret, done);
3090 		for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) {
3091 			ret = pci_xhci_snapshot_ep(sc, dev, j, meta);
3096 		SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done);
3098 		/* devices[i]->dev_sc */
3099 		dev->dev_ue->ue_snapshot(dev->dev_sc, meta);
3101 		/* devices[i]->hci */
3102 		SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done);
3103 		SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done);
3106 	SNAPSHOT_VAR_OR_LEAVE(sc->ndevices, meta, ret, done);
3107 	SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done);
3108 	SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done);
/*
 * PCI device-emulation ops for the xHCI controller, registered with the
 * bhyve PCI emulation framework via PCI_EMUL_SET so it is discovered at
 * startup.  The snapshot hook is only built when BHYVE_SNAPSHOT is set.
 */
3115 struct pci_devemu pci_de_xhci = {
3117 	.pe_init	= pci_xhci_init,
3118 	.pe_barwrite	= pci_xhci_write,
3119 	.pe_barread	= pci_xhci_read,
3120 #ifdef BHYVE_SNAPSHOT
3121 	.pe_snapshot	= pci_xhci_snapshot,
3124 PCI_EMUL_SET(pci_de_xhci);