2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2014 Leon Dang <ldang@nahannisys.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 tablet USB tablet mouse
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
40 #include <sys/types.h>
41 #include <sys/queue.h>
51 #include <machine/vmm_snapshot.h>
53 #include <dev/usb/usbdi.h>
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usb_freebsd.h>
/*
 * Debug toggle and compile-time constants for the emulated xHCI controller:
 * register-space layout offsets and bitfield builders for the capability
 * registers (CAPLENGTH/HCSPARAMS/HCCPARAMS), port-status masks, and small
 * helpers for 64-bit register halves and field replacement.
 */
66 static int xhci_debug = 0;
67 #define DPRINTF(params) if (xhci_debug) PRINTLN params
68 #define WPRINTF(params) PRINTLN params
71 #define XHCI_NAME "xhci"
72 #define XHCI_MAX_DEVS 8 /* 4 USB3 + 4 USB2 devs */
74 #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */
77 * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping
78 * to 4k to avoid going over the guest physical memory barrier.
80 #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */
82 #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */
84 #define XHCI_CAPLEN (4*8) /* offset of op register space */
85 #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */
86 #define XHCI_PORTREGS_START 0x400
87 #define XHCI_DOORBELL_MAX 256
89 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */
91 /* caplength and hci-version registers */
92 #define XHCI_SET_CAPLEN(x) ((x) & 0xFF)
93 #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16)
94 #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF)
96 /* hcsparams1 register */
97 #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF)
98 #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8)
99 #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24)
101 /* hcsparams2 register */
102 #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F)
103 #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4)
104 #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21)
105 #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27)
107 /* hcsparams3 register */
108 #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF)
109 #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16)
111 /* hccparams1 register */
112 #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01)
113 #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1)
114 #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2)
115 #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3)
116 #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4)
117 #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5)
118 #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6)
119 #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7)
120 #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8)
121 #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9)
122 #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10)
123 #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11)
124 #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12)
125 #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16)
127 /* hccparams2 register */
128 #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01)
129 #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1)
130 #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2)
131 #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3)
132 #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4)
133 #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5)
135 /* other registers */
136 #define XHCI_SET_DOORBELL(x) ((x) & ~0x03)
137 #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F)
140 #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */
141 #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */
142 #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */
144 /* port register set */
145 #define XHCI_PORTREGS_BASE 0x400 /* base offset */
146 #define XHCI_PORTREGS_PORT0 0x3F0
147 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */
149 #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL)
150 #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL)
/* Replace field (mask m at shift s) in a with b; COPY keeps b's bits in place. */
152 #define FIELD_REPLACE(a,b,m,s) (((a) & ~((m) << (s))) | \
153 (((b) & (m)) << (s)))
154 #define FIELD_COPY(a,b,m,s) (((a) & ~((m) << (s))) | \
155 (((b) & ((m) << (s)))))
157 #define SNAP_DEV_NAME_LEN 128
/*
 * Core emulation state structures: per-endpoint transfer/stream rings,
 * the device-context base-address array, per-port status registers, and
 * the operational/runtime register files mirrored from the guest view.
 * NOTE(review): several interior lines of these definitions appear elided
 * in this extraction; field lists may be incomplete.
 */
159 struct pci_xhci_trb_ring {
160 uint64_t ringaddr; /* current dequeue guest address */
161 uint32_t ccs; /* consumer cycle state */
164 /* device endpoint transfer/stream rings */
165 struct pci_xhci_dev_ep {
167 struct xhci_trb *_epu_tr;
168 struct xhci_stream_ctx *_epu_sctx;
170 #define ep_tr _ep_trbsctx._epu_tr
171 #define ep_sctx _ep_trbsctx._epu_sctx
174 struct pci_xhci_trb_ring _epu_trb;
175 struct pci_xhci_trb_ring *_epu_sctx_trbs;
177 #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr
178 #define ep_ccs _ep_trb_rings._epu_trb.ccs
179 #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs
181 struct usb_data_xfer *ep_xfer; /* transfer chain */
184 /* device context base address array: maps slot->device context */
186 uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */
189 /* port status registers */
190 struct pci_xhci_portregs {
191 uint32_t portsc; /* port status and control */
192 uint32_t portpmsc; /* port pwr mgmt status & control */
193 uint32_t portli; /* port link info */
194 uint32_t porthlpmc; /* port hardware LPM control */
196 #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10)
198 /* xHC operational registers */
199 struct pci_xhci_opregs {
200 uint32_t usbcmd; /* usb command */
201 uint32_t usbsts; /* usb status */
202 uint32_t pgsz; /* page size */
203 uint32_t dnctrl; /* device notification control */
204 uint64_t crcr; /* command ring control */
205 uint64_t dcbaap; /* device ctx base addr array ptr */
206 uint32_t config; /* configure */
208 /* guest mapped addresses: */
209 struct xhci_trb *cr_p; /* crcr dequeue */
210 struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */
213 /* xHC runtime registers */
214 struct pci_xhci_rtsregs {
215 uint32_t mfindex; /* microframe index */
216 struct { /* interrupter register set */
217 uint32_t iman; /* interrupter management */
218 uint32_t imod; /* interrupter moderation */
219 uint32_t erstsz; /* event ring segment table size */
221 uint64_t erstba; /* event ring seg-tbl base addr */
222 uint64_t erdp; /* event ring dequeue ptr */
225 /* guest mapped addresses */
226 struct xhci_event_ring_seg *erstba_p;
227 struct xhci_trb *erst_p; /* event ring segment tbl */
228 int er_deq_seg; /* event ring dequeue segment */
229 int er_enq_idx; /* event ring enqueue index - xHCI */
230 int er_enq_seg; /* event ring enqueue segment */
231 uint32_t er_events_cnt; /* number of events in ER */
232 uint32_t event_pcs; /* producer cycle state flag */
/*
 * Per-device emulation container and the top-level controller softc,
 * plus accessor macros for port/slot lookup and guest-address mapping.
 * XHCI_GADDR_SIZE() clamps a mapping so it never crosses an
 * XHCI_PADDR_SZ-aligned boundary (see comment at XHCI_PADDR_SZ).
 */
236 struct pci_xhci_softc;
240 * USB device emulation container.
241 * This is referenced from usb_hci->hci_sc; 1 pci_xhci_dev_emu for each
242 * emulated device instance.
244 struct pci_xhci_dev_emu {
245 struct pci_xhci_softc *xsc;
248 struct xhci_dev_ctx *dev_ctx;
249 struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS];
252 struct usb_devemu *dev_ue; /* USB emulated dev */
253 void *dev_sc; /* device's softc */
258 struct pci_xhci_softc {
259 struct pci_devinst *xsc_pi;
263 uint32_t caplength; /* caplen & hciversion */
264 uint32_t hcsparams1; /* structural parameters 1 */
265 uint32_t hcsparams2; /* structural parameters 2 */
266 uint32_t hcsparams3; /* structural parameters 3 */
267 uint32_t hccparams1; /* capability parameters 1 */
268 uint32_t dboff; /* doorbell offset */
269 uint32_t rtsoff; /* runtime register space offset */
270 uint32_t hccparams2; /* capability parameters 2 */
272 uint32_t regsend; /* end of configuration registers */
274 struct pci_xhci_opregs opregs;
275 struct pci_xhci_rtsregs rtsregs;
277 struct pci_xhci_portregs *portregs;
278 struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */
279 struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */
286 /* portregs and devices arrays are set up to start from idx=1 */
287 #define XHCI_PORTREG_PTR(x,n) &(x)->portregs[(n)]
288 #define XHCI_DEVINST_PTR(x,n) (x)->devices[(n)]
289 #define XHCI_SLOTDEV_PTR(x,n) (x)->slots[(n)]
291 #define XHCI_HALTED(sc) ((sc)->opregs.usbsts & XHCI_STS_HCH)
293 #define XHCI_GADDR_SIZE(a) (XHCI_PADDR_SZ - \
294 (((uint64_t) (a)) & (XHCI_PADDR_SZ - 1)))
295 #define XHCI_GADDR(sc,a) paddr_guest2host((sc)->xsc_pi->pi_vmctx, \
296 (a), XHCI_GADDR_SIZE(a))
298 static int xhci_in_use;
300 /* map USB errors to XHCI */
/*
 * Translation table from usb_error_t codes to xHCI TRB completion codes;
 * USB_TO_XHCI_ERR() falls back to XHCI_TRB_ERROR_INVALID for values
 * outside the table.
 */
301 static const int xhci_usb_errors[USB_ERR_MAX] = {
302 [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS,
303 [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE,
304 [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
305 [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID,
306 [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE,
307 [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED,
308 [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER,
309 [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER,
310 [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER,
311 [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL,
312 [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE,
313 [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE,
314 [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE,
315 [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED,
316 [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED,
317 [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE,
318 [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON,
319 [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE,
320 [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB,
321 [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON,
322 [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED,
323 [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT,
324 [USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL,
325 [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED,
326 [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF,
327 [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB,
328 [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED,
329 [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED,
330 [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED,
332 #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \
333 XHCI_TRB_ERROR_INVALID)
/* Forward declarations for helpers defined later in this file. */
335 static int pci_xhci_insert_event(struct pci_xhci_softc *sc,
336 struct xhci_trb *evtrb, int do_intr);
337 static void pci_xhci_dump_trb(struct xhci_trb *trb);
338 static void pci_xhci_assert_interrupt(struct pci_xhci_softc *sc);
339 static void pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot);
340 static void pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm);
341 static void pci_xhci_update_ep_ring(struct pci_xhci_softc *sc,
342 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
343 struct xhci_endp_ctx *ep_ctx, uint32_t streamid,
344 uint64_t ringaddr, int ccs);
/*
 * Populate an event TRB in place: port id goes into bits 24+ of qwTrb0,
 * the completion code into dwTrb2, and the event type into dwTrb3.
 */
347 pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode,
350 evtrb->qwTrb0 = port << 24;
351 evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode);
352 evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype);
356 /* controller reset */
/*
 * Reset controller-wide state: rewind the event ring enqueue bookkeeping,
 * reset the producer cycle state to 1, and reset every device slot.
 */
358 pci_xhci_reset(struct pci_xhci_softc *sc)
362 sc->rtsregs.er_enq_idx = 0;
363 sc->rtsregs.er_events_cnt = 0;
364 sc->rtsregs.event_pcs = 1;
/* Slots are 1-based (see XHCI_SLOTDEV_PTR comment above). */
366 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
367 pci_xhci_reset_slot(sc, i);
/*
 * Handle a guest write to the USBCMD operational register.
 * Run/Stop transitions toggle XHCI_STS_HCH/XHCI_STS_PCD in USBSTS; on a
 * stop->run transition each attached port is marked connected and a
 * PORT_STS_CHANGE event is queued.  HCRST and the save/restore bits are
 * acknowledged by clearing them from the stored command value.
 */
372 pci_xhci_usbcmd_write(struct pci_xhci_softc *sc, uint32_t cmd)
377 if (cmd & XHCI_CMD_RS) {
/* Interrupt only when this write actually starts the controller. */
378 do_intr = (sc->opregs.usbcmd & XHCI_CMD_RS) == 0;
380 sc->opregs.usbcmd |= XHCI_CMD_RS;
381 sc->opregs.usbsts &= ~XHCI_STS_HCH;
382 sc->opregs.usbsts |= XHCI_STS_PCD;
384 /* Queue port change event on controller run from stop */
386 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
387 struct pci_xhci_dev_emu *dev;
388 struct pci_xhci_portregs *port;
389 struct xhci_trb evtrb;
391 if ((dev = XHCI_DEVINST_PTR(sc, i)) == NULL)
394 port = XHCI_PORTREG_PTR(sc, i);
395 port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS;
396 port->portsc &= ~XHCI_PS_PLS_MASK;
399 * XHCI 4.19.3 USB2 RxDetect->Polling,
402 if (dev->dev_ue->ue_usbver == 2)
404 XHCI_PS_PLS_SET(UPS_PORT_LS_POLL);
407 XHCI_PS_PLS_SET(UPS_PORT_LS_U0);
409 pci_xhci_set_evtrb(&evtrb, i,
410 XHCI_TRB_ERROR_SUCCESS,
411 XHCI_TRB_EVENT_PORT_STS_CHANGE);
413 if (pci_xhci_insert_event(sc, &evtrb, 0) !=
414 XHCI_TRB_ERROR_SUCCESS)
/* Run/Stop cleared: halt the controller. */
418 sc->opregs.usbcmd &= ~XHCI_CMD_RS;
419 sc->opregs.usbsts |= XHCI_STS_HCH;
420 sc->opregs.usbsts &= ~XHCI_STS_PCD;
423 /* start execution of schedule; stop when set to 0 */
424 cmd |= sc->opregs.usbcmd & XHCI_CMD_RS;
426 if (cmd & XHCI_CMD_HCRST) {
427 /* reset controller */
429 cmd &= ~XHCI_CMD_HCRST;
/* Controller state save/restore is not supported; drop the request bits. */
432 cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS);
435 pci_xhci_assert_interrupt(sc);
/*
 * Handle a guest write to a port register set (PORTSC/PORTPMSC/PORTLI/
 * PORTHLPMC).  The port index and per-set offset are decoded from the
 * MMIO offset.  PORTSC handling honours write-1-to-set/clear semantics,
 * port reset requests, and link-state writes (which may queue a
 * PORT_STS_CHANGE event on a transition to U0).
 */
441 pci_xhci_portregs_write(struct pci_xhci_softc *sc, uint64_t offset,
444 struct xhci_trb evtrb;
445 struct pci_xhci_portregs *p;
447 uint32_t oldpls, newpls;
449 if (sc->portregs == NULL)
/* Decode 1-based port number and the register offset within its set. */
452 port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ;
453 offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ;
455 DPRINTF(("pci_xhci: portregs wr offset 0x%lx, port %u: 0x%lx",
456 offset, port, value));
460 if (port > XHCI_MAX_DEVS) {
461 DPRINTF(("pci_xhci: portregs_write port %d > ndevices",
466 if (XHCI_DEVINST_PTR(sc, port) == NULL) {
467 DPRINTF(("pci_xhci: portregs_write to unattached port %d",
471 p = XHCI_PORTREG_PTR(sc, port);
474 /* port reset or warm reset */
475 if (value & (XHCI_PS_PR | XHCI_PS_WPR)) {
476 pci_xhci_reset_port(sc, port, value & XHCI_PS_WPR);
480 if ((p->portsc & XHCI_PS_PP) == 0) {
481 WPRINTF(("pci_xhci: portregs_write to unpowered "
486 /* Port status and control register */
487 oldpls = XHCI_PS_PLS_GET(p->portsc);
488 newpls = XHCI_PS_PLS_GET(value);
/* Keep only the RW/state fields; everything else is rebuilt below. */
490 p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK |
491 XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK;
493 if (XHCI_DEVINST_PTR(sc, port))
494 p->portsc |= XHCI_PS_CCS;
496 p->portsc |= (value &
500 XHCI_PS_PLS_MASK | /* link state */
502 XHCI_PS_PIC_MASK | /* port indicator */
503 XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR));
505 /* clear control bits */
506 p->portsc &= ~(value &
516 /* port disable request; for USB3, don't care */
517 if (value & XHCI_PS_PED)
518 DPRINTF(("Disable port %d request", port));
/* Link-state writes are honoured only with the Link Write Strobe set. */
520 if (!(value & XHCI_PS_LWS))
523 DPRINTF(("Port new PLS: %d", newpls));
527 if (oldpls != newpls) {
528 p->portsc &= ~XHCI_PS_PLS_MASK;
529 p->portsc |= XHCI_PS_PLS_SET(newpls) |
532 if (oldpls != 0 && newpls == 0) {
533 pci_xhci_set_evtrb(&evtrb, port,
534 XHCI_TRB_ERROR_SUCCESS,
535 XHCI_TRB_EVENT_PORT_STS_CHANGE);
537 pci_xhci_insert_event(sc, &evtrb, 1);
543 DPRINTF(("Unhandled change port %d PLS %u",
549 /* Port power management status and control register */
553 /* Port link information register */
554 DPRINTF(("pci_xhci attempted write to PORTLI, port %d",
559 * Port hardware LPM control register.
560 * For USB3, this register is reserved.
562 p->porthlpmc = value;
/*
 * Look up the guest device context for a slot via the DCBAA and map it
 * into host memory.  The low 6 bits of the DCBAA entry are masked off
 * before mapping (the entry is 64-byte aligned).
 */
567 struct xhci_dev_ctx *
568 pci_xhci_get_dev_ctx(struct pci_xhci_softc *sc, uint32_t slot)
570 uint64_t devctx_addr;
571 struct xhci_dev_ctx *devctx;
573 assert(slot > 0 && slot <= XHCI_MAX_DEVS);
574 assert(XHCI_SLOTDEV_PTR(sc, slot) != NULL);
575 assert(sc->opregs.dcbaa_p != NULL);
577 devctx_addr = sc->opregs.dcbaa_p->dcba[slot];
579 if (devctx_addr == 0) {
580 DPRINTF(("get_dev_ctx devctx_addr == 0"));
584 DPRINTF(("pci_xhci: get dev ctx, slot %u devctx addr %016lx",
586 devctx = XHCI_GADDR(sc, devctx_addr & ~0x3FUL);
/*
 * Return the TRB following curtrb on a transfer ring, updating
 * *guestaddr to the guest physical address of the returned TRB.
 * A LINK TRB redirects to the 16-byte-aligned target in qwTrb0;
 * otherwise we simply step one TRB forward.
 */
592 pci_xhci_trb_next(struct pci_xhci_softc *sc, struct xhci_trb *curtrb,
595 struct xhci_trb *next;
597 assert(curtrb != NULL);
599 if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) {
601 *guestaddr = curtrb->qwTrb0 & ~0xFUL;
603 next = XHCI_GADDR(sc, curtrb->qwTrb0 & ~0xFUL);
/* sizeof(struct xhci_trb) is 16, so the ~0xF mask leaves it unchanged. */
606 *guestaddr += sizeof(struct xhci_trb) & ~0xFUL;
/*
 * Mark the interrupter busy/pending (ERDP busy, IMAN pending, USBSTS
 * EINT) and raise the guest interrupt via MSI when enabled, otherwise
 * via the legacy INTx line.  Delivery requires both USBCMD.INTE and
 * IMAN.IE to be set.
 */
615 pci_xhci_assert_interrupt(struct pci_xhci_softc *sc)
618 sc->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY;
619 sc->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND;
620 sc->opregs.usbsts |= XHCI_STS_EINT;
622 /* only trigger interrupt if permitted */
623 if ((sc->opregs.usbcmd & XHCI_CMD_INTE) &&
624 (sc->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) {
625 if (pci_msi_enabled(sc->xsc_pi))
626 pci_generate_msi(sc->xsc_pi, 0);
628 pci_lintr_assert(sc->xsc_pi);
/*
 * Deassert the legacy interrupt when MSI is not in use.
 * NOTE(review): this calls pci_lintr_assert(), not a deassert helper,
 * despite the function's name — verify against pci_emul's INTx API
 * whether pci_lintr_deassert() was intended here.
 */
633 pci_xhci_deassert_interrupt(struct pci_xhci_softc *sc)
636 if (!pci_msi_enabled(sc->xsc_pi))
637 pci_lintr_assert(sc->xsc_pi);
/*
 * Initialise endpoint epid of a device from its endpoint context.
 * With primary streams configured, map the stream-context array from
 * guest memory and record each stream's ring address and cycle state;
 * otherwise record the single transfer ring's dequeue pointer and DCS.
 * Lazily allocates the endpoint's usb_data_xfer the first time through.
 */
641 pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid)
643 struct xhci_dev_ctx *dev_ctx;
644 struct pci_xhci_dev_ep *devep;
645 struct xhci_endp_ctx *ep_ctx;
649 dev_ctx = dev->dev_ctx;
650 ep_ctx = &dev_ctx->ctx_ep[epid];
651 devep = &dev->eps[epid];
652 pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0);
654 DPRINTF(("init_ep %d with pstreams %d", epid, pstreams));
655 assert(devep->ep_sctx_trbs == NULL);
657 devep->ep_sctx = XHCI_GADDR(dev->xsc, ep_ctx->qwEpCtx2 &
658 XHCI_EPCTX_2_TR_DQ_PTR_MASK);
/* NOTE(review): calloc() result is not checked before use below. */
659 devep->ep_sctx_trbs = calloc(pstreams,
660 sizeof(struct pci_xhci_trb_ring));
661 for (i = 0; i < pstreams; i++) {
662 devep->ep_sctx_trbs[i].ringaddr =
663 devep->ep_sctx[i].qwSctx0 &
664 XHCI_SCTX_0_TR_DQ_PTR_MASK;
665 devep->ep_sctx_trbs[i].ccs =
666 XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0);
669 DPRINTF(("init_ep %d with no pstreams", epid));
670 devep->ep_ringaddr = ep_ctx->qwEpCtx2 &
671 XHCI_EPCTX_2_TR_DQ_PTR_MASK;
672 devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2);
673 devep->ep_tr = XHCI_GADDR(dev->xsc, devep->ep_ringaddr);
674 DPRINTF(("init_ep tr DCS %x", devep->ep_ccs));
677 if (devep->ep_xfer == NULL) {
678 devep->ep_xfer = malloc(sizeof(struct usb_data_xfer));
679 USB_DATA_XFER_INIT(devep->ep_xfer);
/*
 * Disable endpoint epid: mark its context DISABLED, free the stream
 * ring bookkeeping (if any) and the transfer chain, then clear the
 * host-side endpoint state entirely.
 */
684 pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid)
686 struct xhci_dev_ctx *dev_ctx;
687 struct pci_xhci_dev_ep *devep;
688 struct xhci_endp_ctx *ep_ctx;
690 DPRINTF(("pci_xhci disable_ep %d", epid));
692 dev_ctx = dev->dev_ctx;
693 ep_ctx = &dev_ctx->ctx_ep[epid];
694 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED;
696 devep = &dev->eps[epid];
697 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0 &&
698 devep->ep_sctx_trbs != NULL)
699 free(devep->ep_sctx_trbs);
701 if (devep->ep_xfer != NULL) {
702 free(devep->ep_xfer);
703 devep->ep_xfer = NULL;
/* memset also clears the freed pointers recorded above. */
706 memset(devep, 0, sizeof(struct pci_xhci_dev_ep));
710 /* reset device at slot and data structures related to it */
/*
 * Put the slot's device (if assigned) back into the DISABLED state.
 * Ring buffer pointers are intentionally left alone for now (TODO below).
 */
712 pci_xhci_reset_slot(struct pci_xhci_softc *sc, int slot)
714 struct pci_xhci_dev_emu *dev;
716 dev = XHCI_SLOTDEV_PTR(sc, slot);
719 DPRINTF(("xhci reset unassigned slot (%d)?", slot));
721 dev->dev_slotstate = XHCI_ST_DISABLED;
724 /* TODO: reset ring buffer pointers */
/*
 * Append an event TRB to the (single-segment) event ring and optionally
 * assert the interrupt.  If only one free entry remains, a HOST_CTRL
 * "event ring full" error event is inserted instead and
 * XHCI_TRB_ERROR_EV_RING_FULL is returned; the producer cycle state is
 * stamped into the TRB's cycle bit on insertion.
 */
728 pci_xhci_insert_event(struct pci_xhci_softc *sc, struct xhci_trb *evtrb,
731 struct pci_xhci_rtsregs *rts;
735 struct xhci_trb *evtrbptr;
737 err = XHCI_TRB_ERROR_SUCCESS;
/* Strip the low flag bits from ERDP to get the dequeue address. */
741 erdp = rts->intrreg.erdp & ~0xF;
742 erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) /
743 sizeof(struct xhci_trb);
745 DPRINTF(("pci_xhci: insert event 0[%lx] 2[%x] 3[%x]",
746 evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3));
747 DPRINTF(("\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u",
748 erdp_idx, rts->er_deq_seg, rts->er_enq_idx,
749 rts->er_enq_seg, rts->event_pcs));
750 DPRINTF(("\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)",
751 erdp, rts->erstba_p->qwEvrsTablePtr,
752 rts->erstba_p->dwEvrsTableSize, do_intr));
754 evtrbptr = &rts->erst_p[rts->er_enq_idx];
756 /* TODO: multi-segment table */
757 if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) {
758 DPRINTF(("pci_xhci[%d] cannot insert event; ring full",
760 err = XHCI_TRB_ERROR_EV_RING_FULL;
/* One slot left: consume it with an explicit ring-full error event. */
764 if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) {
765 struct xhci_trb errev;
767 if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) {
769 DPRINTF(("pci_xhci[%d] insert evt err: ring full",
773 errev.dwTrb2 = XHCI_TRB_2_ERROR_SET(
774 XHCI_TRB_ERROR_EV_RING_FULL);
775 errev.dwTrb3 = XHCI_TRB_3_TYPE_SET(
776 XHCI_TRB_EVENT_HOST_CTRL) |
778 rts->er_events_cnt++;
779 memcpy(&rts->erst_p[rts->er_enq_idx], &errev,
780 sizeof(struct xhci_trb));
781 rts->er_enq_idx = (rts->er_enq_idx + 1) %
782 rts->erstba_p->dwEvrsTableSize;
783 err = XHCI_TRB_ERROR_EV_RING_FULL;
789 rts->er_events_cnt++;
/* Stamp the current producer cycle state into the TRB. */
792 evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT;
793 evtrb->dwTrb3 |= rts->event_pcs;
795 memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb));
796 rts->er_enq_idx = (rts->er_enq_idx + 1) %
797 rts->erstba_p->dwEvrsTableSize;
799 if (rts->er_enq_idx == 0)
804 pci_xhci_assert_interrupt(sc);
/*
 * Enable Slot command: find the first DISABLED slot, mark it ENABLED,
 * record the slot number as the device's HCI address, and return the
 * slot via *slot.  Fails with NO_SLOTS when none is available.
 */
810 pci_xhci_cmd_enable_slot(struct pci_xhci_softc *sc, uint32_t *slot)
812 struct pci_xhci_dev_emu *dev;
816 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
817 if (sc->portregs != NULL)
818 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
819 dev = XHCI_SLOTDEV_PTR(sc, i);
820 if (dev && dev->dev_slotstate == XHCI_ST_DISABLED) {
822 dev->dev_slotstate = XHCI_ST_ENABLED;
823 cmderr = XHCI_TRB_ERROR_SUCCESS;
824 dev->hci.hci_address = i;
829 DPRINTF(("pci_xhci enable slot (error=%d) slot %u",
830 cmderr != XHCI_TRB_ERROR_SUCCESS, *slot));
836 pci_xhci_cmd_disable_slot(struct pci_xhci_softc *sc, uint32_t slot)
838 struct pci_xhci_dev_emu *dev;
841 DPRINTF(("pci_xhci disable slot %u", slot));
843 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
844 if (sc->portregs == NULL)
847 if (slot > XHCI_MAX_SLOTS) {
848 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
852 dev = XHCI_SLOTDEV_PTR(sc, slot);
854 if (dev->dev_slotstate == XHCI_ST_DISABLED) {
855 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
857 dev->dev_slotstate = XHCI_ST_DISABLED;
858 cmderr = XHCI_TRB_ERROR_SUCCESS;
859 /* TODO: reset events and endpoints */
862 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
/*
 * Reset Device command: return the slot to the DEFAULT state, clear its
 * HCI address, reset the slot context's state/entry-count fields, and
 * disable every endpoint context except EP0 (index 1).
 */
869 pci_xhci_cmd_reset_device(struct pci_xhci_softc *sc, uint32_t slot)
871 struct pci_xhci_dev_emu *dev;
872 struct xhci_dev_ctx *dev_ctx;
873 struct xhci_endp_ctx *ep_ctx;
877 cmderr = XHCI_TRB_ERROR_NO_SLOTS;
878 if (sc->portregs == NULL)
881 DPRINTF(("pci_xhci reset device slot %u", slot));
883 dev = XHCI_SLOTDEV_PTR(sc, slot);
884 if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED)
885 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
887 dev->dev_slotstate = XHCI_ST_DEFAULT;
889 dev->hci.hci_address = 0;
890 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
/* Slot state -> DEFAULT in the guest-visible slot context. */
893 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
894 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT,
897 /* number of contexts */
898 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
899 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
901 /* reset all eps other than ep-0 */
902 for (i = 2; i <= 31; i++) {
903 ep_ctx = &dev_ctx->ctx_ep[i];
904 ep_ctx->dwEpCtx0 = FIELD_REPLACE( ep_ctx->dwEpCtx0,
905 XHCI_ST_EPCTX_DISABLED, 0x7, 0);
908 cmderr = XHCI_TRB_ERROR_SUCCESS;
911 pci_xhci_reset_slot(sc, slot);
/*
 * Address Device command: validate the input context (drop flags must be
 * 0, add flags must include slot + EP0), reset the emulated device, copy
 * the input slot and EP0 contexts into the output device context, mark
 * the slot ADDRESSED with the slot number as the device address, set EP0
 * RUNNING, and initialise EP0's transfer ring.
 */
918 pci_xhci_cmd_address_device(struct pci_xhci_softc *sc, uint32_t slot,
919 struct xhci_trb *trb)
921 struct pci_xhci_dev_emu *dev;
922 struct xhci_input_dev_ctx *input_ctx;
923 struct xhci_slot_ctx *islot_ctx;
924 struct xhci_dev_ctx *dev_ctx;
925 struct xhci_endp_ctx *ep0_ctx;
/* Input context pointer comes from the command TRB, 16-byte aligned. */
928 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
929 islot_ctx = &input_ctx->ctx_slot;
930 ep0_ctx = &input_ctx->ctx_ep[1];
932 cmderr = XHCI_TRB_ERROR_SUCCESS;
934 DPRINTF(("pci_xhci: address device, input ctl: D 0x%08x A 0x%08x,",
935 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
936 DPRINTF((" slot %08x %08x %08x %08x",
937 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
938 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
939 DPRINTF((" ep0 %08x %08x %016lx %08x",
940 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
943 /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */
944 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
945 (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) {
946 DPRINTF(("pci_xhci: address device, input ctl invalid"));
947 cmderr = XHCI_TRB_ERROR_TRB;
951 /* assign address to slot */
952 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
954 DPRINTF(("pci_xhci: address device, dev ctx"));
955 DPRINTF((" slot %08x %08x %08x %08x",
956 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
957 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
959 dev = XHCI_SLOTDEV_PTR(sc, slot);
962 dev->hci.hci_address = slot;
963 dev->dev_ctx = dev_ctx;
965 if (dev->dev_ue->ue_reset == NULL ||
966 dev->dev_ue->ue_reset(dev->dev_sc) < 0) {
967 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
971 memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx));
973 dev_ctx->ctx_slot.dwSctx3 =
974 XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) |
975 XHCI_SCTX_3_DEV_ADDR_SET(slot);
977 memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx));
/* Re-point ep0_ctx at the output context before marking it RUNNING. */
978 ep0_ctx = &dev_ctx->ctx_ep[1];
979 ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) |
980 XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING);
982 pci_xhci_init_ep(dev, 1);
984 dev->dev_slotstate = XHCI_ST_ADDRESSED;
986 DPRINTF(("pci_xhci: address device, output ctx"));
987 DPRINTF((" slot %08x %08x %08x %08x",
988 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
989 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
990 DPRINTF((" ep0 %08x %08x %016lx %08x",
991 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
/*
 * Configure Endpoint command.  With the deconfigure bit set, stop the
 * device and disable endpoints 2..31, returning the slot to ADDRESSED.
 * Otherwise apply the input context: disable each endpoint with its
 * drop flag set, copy+initialise each endpoint with its add flag set
 * (marking it RUNNING), then move the slot to CONFIGURED.
 */
999 pci_xhci_cmd_config_ep(struct pci_xhci_softc *sc, uint32_t slot,
1000 struct xhci_trb *trb)
1002 struct xhci_input_dev_ctx *input_ctx;
1003 struct pci_xhci_dev_emu *dev;
1004 struct xhci_dev_ctx *dev_ctx;
1005 struct xhci_endp_ctx *ep_ctx, *iep_ctx;
1009 cmderr = XHCI_TRB_ERROR_SUCCESS;
1011 DPRINTF(("pci_xhci config_ep slot %u", slot));
1013 dev = XHCI_SLOTDEV_PTR(sc, slot);
1014 assert(dev != NULL);
1016 if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) {
1017 DPRINTF(("pci_xhci config_ep - deconfigure ep slot %u",
1019 if (dev->dev_ue->ue_stop != NULL)
1020 dev->dev_ue->ue_stop(dev->dev_sc);
1022 dev->dev_slotstate = XHCI_ST_ADDRESSED;
1024 dev->hci.hci_address = 0;
1025 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1027 /* number of contexts */
1028 dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(
1029 dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27);
/* Slot state back to ADDRESSED in the guest context. */
1032 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1033 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED,
1036 /* disable endpoints */
1037 for (i = 2; i < 32; i++)
1038 pci_xhci_disable_ep(dev, i);
1040 cmderr = XHCI_TRB_ERROR_SUCCESS;
1045 if (dev->dev_slotstate < XHCI_ST_ADDRESSED) {
1046 DPRINTF(("pci_xhci: config_ep slotstate x%x != addressed",
1047 dev->dev_slotstate));
1048 cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON;
1052 /* In addressed/configured state;
1053 * for each drop endpoint ctx flag:
1054 * ep->state = DISABLED
1055 * for each add endpoint ctx flag:
1057 * ep->state = RUNNING
1058 * for each drop+add endpoint flag:
1059 * reset ep resources
1061 * ep->state = RUNNING
1062 * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled)
1063 * slot->state = configured
1066 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1067 dev_ctx = dev->dev_ctx;
1068 DPRINTF(("pci_xhci: config_ep inputctx: D:x%08x A:x%08x 7:x%08x",
1069 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1,
1070 input_ctx->ctx_input.dwInCtx7));
1072 for (i = 2; i <= 31; i++) {
1073 ep_ctx = &dev_ctx->ctx_ep[i];
1075 if (input_ctx->ctx_input.dwInCtx0 &
1076 XHCI_INCTX_0_DROP_MASK(i)) {
1077 DPRINTF((" config ep - dropping ep %d", i));
1078 pci_xhci_disable_ep(dev, i);
1081 if (input_ctx->ctx_input.dwInCtx1 &
1082 XHCI_INCTX_1_ADD_MASK(i)) {
1083 iep_ctx = &input_ctx->ctx_ep[i];
1085 DPRINTF((" enable ep[%d] %08x %08x %016lx %08x",
1086 i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1,
1087 iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4));
1089 memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx));
1091 pci_xhci_init_ep(dev, i);
1094 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1095 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1099 /* slot state to configured */
1100 dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(
1101 dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27);
1102 dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY(
1103 dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27);
1104 dev->dev_slotstate = XHCI_ST_CONFIGURED;
1106 DPRINTF(("EP configured; slot %u [0]=0x%08x [1]=0x%08x [2]=0x%08x "
1108 slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1109 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
/*
 * Shared handler for Reset Endpoint and Stop Endpoint commands (the TRB
 * type distinguishes them).  Resets any pending transfer chain, marks
 * the endpoint context STOPPED, and for a non-stream endpoint writes the
 * current ring address + cycle state back into the context.  A Reset
 * Endpoint additionally resets the emulated device.
 */
1116 pci_xhci_cmd_reset_ep(struct pci_xhci_softc *sc, uint32_t slot,
1117 struct xhci_trb *trb)
1119 struct pci_xhci_dev_emu *dev;
1120 struct pci_xhci_dev_ep *devep;
1121 struct xhci_dev_ctx *dev_ctx;
1122 struct xhci_endp_ctx *ep_ctx;
1123 uint32_t cmderr, epid;
1126 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1128 DPRINTF(("pci_xhci: reset ep %u: slot %u", epid, slot));
1130 cmderr = XHCI_TRB_ERROR_SUCCESS;
1132 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
1134 dev = XHCI_SLOTDEV_PTR(sc, slot);
1135 assert(dev != NULL);
1137 if (type == XHCI_TRB_TYPE_STOP_EP &&
1138 (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) {
1139 /* XXX suspend endpoint for 10ms */
1142 if (epid < 1 || epid > 31) {
1143 DPRINTF(("pci_xhci: reset ep: invalid epid %u", epid));
1144 cmderr = XHCI_TRB_ERROR_TRB;
1148 devep = &dev->eps[epid];
1149 if (devep->ep_xfer != NULL)
1150 USB_DATA_XFER_RESET(devep->ep_xfer);
1152 dev_ctx = dev->dev_ctx;
1153 assert(dev_ctx != NULL);
1155 ep_ctx = &dev_ctx->ctx_ep[epid];
1157 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
1159 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) == 0)
1160 ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs;
1162 DPRINTF(("pci_xhci: reset ep[%u] %08x %08x %016lx %08x",
1163 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
1166 if (type == XHCI_TRB_TYPE_RESET_EP &&
1167 (dev->dev_ue->ue_reset == NULL ||
1168 dev->dev_ue->ue_reset(dev->dev_sc) < 0)) {
1169 cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON;
/*
 * Validate a stream id against an endpoint context and return its
 * stream context via *osctx.  Only primary (linear) stream arrays with
 * the LSA bit set are supported, capped at XHCI_STREAMS_MAX.
 */
1179 pci_xhci_find_stream(struct pci_xhci_softc *sc, struct xhci_endp_ctx *ep,
1180 uint32_t streamid, struct xhci_stream_ctx **osctx)
1182 struct xhci_stream_ctx *sctx;
1183 uint32_t maxpstreams;
1185 maxpstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep->dwEpCtx0);
1186 if (maxpstreams == 0)
1187 return (XHCI_TRB_ERROR_TRB);
1189 if (maxpstreams > XHCI_STREAMS_MAX)
1190 return (XHCI_TRB_ERROR_INVALID_SID);
1192 if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) {
1193 DPRINTF(("pci_xhci: find_stream; LSA bit not set"));
1194 return (XHCI_TRB_ERROR_INVALID_SID);
1197 /* only support primary stream */
1198 if (streamid > maxpstreams)
1199 return (XHCI_TRB_ERROR_STREAM_TYPE);
1201 sctx = XHCI_GADDR(sc, ep->qwEpCtx2 & ~0xFUL) + streamid;
1202 if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0))
1203 return (XHCI_TRB_ERROR_STREAM_TYPE);
1207 return (XHCI_TRB_ERROR_SUCCESS);
/*
 * Set TR Dequeue Pointer command: only legal while the endpoint is
 * STOPPED or in ERROR.  For stream endpoints the new pointer is written
 * into the addressed stream context and its host-side ring bookkeeping;
 * otherwise the endpoint's transfer ring address and cycle state are
 * updated directly (stream id must then be 0).  The endpoint ends up
 * STOPPED either way.
 */
1212 pci_xhci_cmd_set_tr(struct pci_xhci_softc *sc, uint32_t slot,
1213 struct xhci_trb *trb)
1215 struct pci_xhci_dev_emu *dev;
1216 struct pci_xhci_dev_ep *devep;
1217 struct xhci_dev_ctx *dev_ctx;
1218 struct xhci_endp_ctx *ep_ctx;
1219 uint32_t cmderr, epid;
1222 cmderr = XHCI_TRB_ERROR_SUCCESS;
1224 dev = XHCI_SLOTDEV_PTR(sc, slot);
1225 assert(dev != NULL);
1227 DPRINTF(("pci_xhci set_tr: new-tr x%016lx, SCT %u DCS %u",
1228 (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7),
1229 (uint32_t)(trb->qwTrb0 & 0x1)));
1230 DPRINTF((" stream-id %u, slot %u, epid %u, C %u",
1231 (trb->dwTrb2 >> 16) & 0xFFFF,
1232 XHCI_TRB_3_SLOT_GET(trb->dwTrb3),
1233 XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1));
1235 epid = XHCI_TRB_3_EP_GET(trb->dwTrb3);
1236 if (epid < 1 || epid > 31) {
1237 DPRINTF(("pci_xhci: set_tr_deq: invalid epid %u", epid));
1238 cmderr = XHCI_TRB_ERROR_TRB;
1242 dev_ctx = dev->dev_ctx;
1243 assert(dev_ctx != NULL);
1245 ep_ctx = &dev_ctx->ctx_ep[epid];
1246 devep = &dev->eps[epid];
1248 switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) {
1249 case XHCI_ST_EPCTX_STOPPED:
1250 case XHCI_ST_EPCTX_ERROR:
1253 DPRINTF(("pci_xhci cmd set_tr invalid state %x",
1254 XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)));
1255 cmderr = XHCI_TRB_ERROR_CONTEXT_STATE;
1259 streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2);
1260 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0) {
1261 struct xhci_stream_ctx *sctx;
1264 cmderr = pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx);
1266 assert(devep->ep_sctx != NULL);
1268 devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0;
1269 devep->ep_sctx_trbs[streamid].ringaddr =
1271 devep->ep_sctx_trbs[streamid].ccs =
1272 XHCI_EPCTX_2_DCS_GET(trb->qwTrb0);
1275 if (streamid != 0) {
1276 DPRINTF(("pci_xhci cmd set_tr streamid %x != 0",
/* Non-stream case: new dequeue pointer and DCS come from the TRB. */
1279 ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL;
1280 devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL;
1281 devep->ep_ccs = trb->qwTrb0 & 0x1;
1282 devep->ep_tr = XHCI_GADDR(sc, devep->ep_ringaddr);
1284 DPRINTF(("pci_xhci set_tr first TRB:"));
1285 pci_xhci_dump_trb(devep->ep_tr);
1287 ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED;
/*
 * Handle the Evaluate Context command (xHCI 4.6.7): copy selected fields
 * (max exit latency, interrupter target, EP0 max packet size) from the
 * guest-supplied input context into the device's output context.
 * Returns an XHCI TRB completion code.
 */
1294 pci_xhci_cmd_eval_ctx(struct pci_xhci_softc *sc, uint32_t slot,
1295 struct xhci_trb *trb)
1297 struct xhci_input_dev_ctx *input_ctx;
1298 struct xhci_slot_ctx *islot_ctx;
1299 struct xhci_dev_ctx *dev_ctx;
1300 struct xhci_endp_ctx *ep0_ctx;
/* qwTrb0 carries the guest-physical address of the input context. */
1303 input_ctx = XHCI_GADDR(sc, trb->qwTrb0 & ~0xFUL);
1304 islot_ctx = &input_ctx->ctx_slot;
1305 ep0_ctx = &input_ctx->ctx_ep[1];
1307 cmderr = XHCI_TRB_ERROR_SUCCESS;
1308 DPRINTF(("pci_xhci: eval ctx, input ctl: D 0x%08x A 0x%08x,",
1309 input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1));
1310 DPRINTF((" slot %08x %08x %08x %08x",
1311 islot_ctx->dwSctx0, islot_ctx->dwSctx1,
1312 islot_ctx->dwSctx2, islot_ctx->dwSctx3));
1313 DPRINTF((" ep0 %08x %08x %016lx %08x",
1314 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1315 ep0_ctx->dwEpCtx4));
1317 /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */
1318 if ((input_ctx->ctx_input.dwInCtx0 != 0) ||
1319 (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) {
1320 DPRINTF(("pci_xhci: eval ctx, input ctl invalid"));
1321 cmderr = XHCI_TRB_ERROR_TRB;
1325 /* assign address to slot; in this emulation, slot_id = address */
1326 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1328 DPRINTF(("pci_xhci: eval ctx, dev ctx"));
1329 DPRINTF((" slot %08x %08x %08x %08x",
1330 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1331 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1333 if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */
1334 /* set max exit latency */
1335 dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY(
1336 dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1,
1339 /* set interrupter target */
1340 dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY(
1341 dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2,
1344 if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */
1345 /* set max packet size */
1346 dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY(
1347 dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1,
/* From here on, dump the (updated) output context rather than the input. */
1350 ep0_ctx = &dev_ctx->ctx_ep[1];
1353 DPRINTF(("pci_xhci: eval ctx, output ctx"));
1354 DPRINTF((" slot %08x %08x %08x %08x",
1355 dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1,
1356 dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3));
1357 DPRINTF((" ep0 %08x %08x %016lx %08x",
1358 ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2,
1359 ep0_ctx->dwEpCtx4));
/*
 * Walk the command ring, executing each command TRB whose cycle bit matches
 * the consumer cycle state, and post a Command Completion event for every
 * command (link TRBs excepted).  CRR is held set while the ring is running.
 */
1366 pci_xhci_complete_commands(struct pci_xhci_softc *sc)
1368 struct xhci_trb evtrb;
1369 struct xhci_trb *trb;
1371 uint32_t ccs; /* cycle state (XHCI 4.9.2) */
1378 sc->opregs.crcr |= XHCI_CRCR_LO_CRR;
1380 trb = sc->opregs.cr_p;
1381 ccs = sc->opregs.crcr & XHCI_CRCR_LO_RCS;
1382 crcr = sc->opregs.crcr & ~0xF;
1385 sc->opregs.cr_p = trb;
1387 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
/* Stop when the producer has not yet written this TRB (cycle mismatch). */
1389 if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) !=
1390 (ccs & XHCI_TRB_3_CYCLE_BIT))
1393 DPRINTF(("pci_xhci: cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x"
1394 " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u",
1395 type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3,
1396 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs));
1398 cmderr = XHCI_TRB_ERROR_SUCCESS;
1400 evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) |
1401 XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE);
1405 case XHCI_TRB_TYPE_LINK: /* 0x06 */
/* Toggle-Cycle flips the consumer cycle state when wrapping the ring. */
1406 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
1407 ccs ^= XHCI_CRCR_LO_RCS;
1410 case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */
1411 cmderr = pci_xhci_cmd_enable_slot(sc, &slot);
1414 case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */
1415 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1416 cmderr = pci_xhci_cmd_disable_slot(sc, slot);
1419 case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */
1420 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1421 cmderr = pci_xhci_cmd_address_device(sc, slot, trb);
1424 case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */
1425 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1426 cmderr = pci_xhci_cmd_config_ep(sc, slot, trb);
1429 case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */
1430 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1431 cmderr = pci_xhci_cmd_eval_ctx(sc, slot, trb);
1434 case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */
/*
 * NOTE(review): this DPRINTF reads `slot` before it is assigned from the
 * TRB on the next line, so it prints the stale value from the previous
 * command — the assignment should come first.  Same issue in STOP_EP below.
 */
1435 DPRINTF(("Reset Endpoint on slot %d", slot));
1436 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1437 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1440 case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */
1441 DPRINTF(("Stop Endpoint on slot %d", slot));
1442 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
/* Stop EP is handled by reset_ep, which keys off the TRB type. */
1443 cmderr = pci_xhci_cmd_reset_ep(sc, slot, trb);
1446 case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */
1447 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1448 cmderr = pci_xhci_cmd_set_tr(sc, slot, trb);
1451 case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */
1452 slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3);
1453 cmderr = pci_xhci_cmd_reset_device(sc, slot);
1456 case XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */
1460 case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */
1463 case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */
1466 case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */
1469 case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */
1472 case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */
1476 DPRINTF(("pci_xhci: unsupported cmd %x", type));
1480 if (type != XHCI_TRB_TYPE_LINK) {
1482 * insert command completion event and assert intr
1484 evtrb.qwTrb0 = crcr;
1485 evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr);
1486 evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot);
1487 DPRINTF(("pci_xhci: command 0x%x result: 0x%x",
1489 pci_xhci_insert_event(sc, &evtrb, 1);
1492 trb = pci_xhci_trb_next(sc, trb, &crcr);
/* Persist dequeue pointer + cycle state, then clear Command Ring Running. */
1495 sc->opregs.crcr = crcr | (sc->opregs.crcr & XHCI_CRCR_LO_CA) | ccs;
1496 sc->opregs.crcr &= ~XHCI_CRCR_LO_CRR;
/* Debug helper: print a TRB's type (symbolically when known) and raw words. */
1501 pci_xhci_dump_trb(struct xhci_trb *trb)
1503 static const char *trbtypes[] = {
1531 type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3);
/* Types above NOOP_CMD are outside the name table; label them INVALID. */
1532 DPRINTF(("pci_xhci: trb[@%p] type x%02x %s 0:x%016lx 2:x%08x 3:x%08x",
1534 type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID",
1535 trb->qwTrb0, trb->dwTrb2, trb->dwTrb3));
/*
 * Walk the completed blocks of a USB transfer, advance the endpoint's
 * transfer ring, and queue Transfer events for IOC/short-packet TRBs.
 * Sets *do_intr when at least one event was inserted so the caller can
 * assert the interrupt after releasing locks.
 */
1539 pci_xhci_xfer_complete(struct pci_xhci_softc *sc, struct usb_data_xfer *xfer,
1540 uint32_t slot, uint32_t epid, int *do_intr)
1542 struct pci_xhci_dev_emu *dev;
1543 struct pci_xhci_dev_ep *devep;
1544 struct xhci_dev_ctx *dev_ctx;
1545 struct xhci_endp_ctx *ep_ctx;
1546 struct xhci_trb *trb;
1547 struct xhci_trb evtrb;
1552 dev = XHCI_SLOTDEV_PTR(sc, slot);
1553 devep = &dev->eps[epid];
1554 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1556 assert(dev_ctx != NULL);
1558 ep_ctx = &dev_ctx->ctx_ep[epid];
1560 err = XHCI_TRB_ERROR_SUCCESS;
1564 /* go through list of TRBs and insert event(s) */
1565 for (i = xfer->head; xfer->ndata > 0; ) {
/* hci_data holds the guest-physical address of the originating TRB. */
1566 evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data;
1567 trb = XHCI_GADDR(sc, evtrb.qwTrb0);
1568 trbflags = trb->dwTrb3;
1570 DPRINTF(("pci_xhci: xfer[%d] done?%u:%d trb %x %016lx %x "
1572 i, xfer->data[i].processed, xfer->data[i].blen,
1573 XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0,
1575 trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 1 : 0));
1577 if (!xfer->data[i].processed) {
/* edtla: Event Data Transfer Length Accumulated (xHCI 4.11.5.2). */
1583 edtla += xfer->data[i].bdone;
/* Hand the TRB back to the guest by writing its cycle bit. */
1585 trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs);
1587 pci_xhci_update_ep_ring(sc, dev, devep, ep_ctx,
1588 xfer->data[i].streamid, xfer->data[i].trbnext,
1591 /* Only interrupt if IOC or short packet */
1592 if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) &&
1593 !((err == XHCI_TRB_ERROR_SHORT_PKT) &&
1594 (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) {
1596 i = (i + 1) % USB_MAX_XFER_BLOCKS;
1600 evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) |
1601 XHCI_TRB_2_REM_SET(xfer->data[i].blen);
1603 evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) |
1604 XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid);
/* Event Data TRBs report the accumulated length instead of the remainder. */
1606 if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) {
1607 DPRINTF(("pci_xhci EVENT_DATA edtla %u", edtla));
1608 evtrb.qwTrb0 = trb->qwTrb0;
1609 evtrb.dwTrb2 = (edtla & 0xFFFFF) |
1610 XHCI_TRB_2_ERROR_SET(err);
1611 evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT;
1617 err = pci_xhci_insert_event(sc, &evtrb, 0);
1618 if (err != XHCI_TRB_ERROR_SUCCESS) {
1622 i = (i + 1) % USB_MAX_XFER_BLOCKS;
/*
 * Record the new transfer-ring position (address + consumer cycle state)
 * for an endpoint — either in the per-stream context for stream-capable
 * endpoints or directly in the endpoint context otherwise.
 */
1629 pci_xhci_update_ep_ring(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev,
1630 struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx,
1631 uint32_t streamid, uint64_t ringaddr, int ccs)
1634 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) {
/* Stream endpoint: dequeue pointer lives in the stream context. */
1635 devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) |
1638 devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL;
1639 devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1;
1640 ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1);
1642 DPRINTF(("xhci update ep-ring stream %d, addr %lx",
1643 streamid, devep->ep_sctx[streamid].qwSctx0));
/* Non-stream endpoint: dequeue pointer lives in the endpoint context. */
1645 devep->ep_ringaddr = ringaddr & ~0xFUL;
1646 devep->ep_ccs = ccs & 0x1;
1647 devep->ep_tr = XHCI_GADDR(sc, ringaddr & ~0xFUL);
1648 ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1);
1650 DPRINTF(("xhci update ep-ring, addr %lx",
1651 (devep->ep_ringaddr | devep->ep_ccs)));
1656 * Outstanding transfer still in progress (device NAK'd earlier) so retry
1657 * the transfer again to see if it succeeds.
/*
 * Re-drive a previously queued transfer on (slot, epid).  The endpoint is
 * marked Running, the device model's ue_data callback is invoked, and on
 * completion the transfer is finalized and the interrupt asserted.
 */
1660 pci_xhci_try_usb_xfer(struct pci_xhci_softc *sc,
1661 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1662 struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid)
1664 struct usb_data_xfer *xfer;
1668 ep_ctx->dwEpCtx0 = FIELD_REPLACE(
1669 ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1674 xfer = devep->ep_xfer;
1675 USB_DATA_XFER_LOCK(xfer);
1677 /* outstanding requests queued up */
1678 if (dev->dev_ue->ue_data != NULL) {
/* Odd epids are IN endpoints, even are OUT; epid/2 is the EP number. */
1679 err = dev->dev_ue->ue_data(dev->dev_sc, xfer,
1680 epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2);
1681 if (err == USB_ERR_CANCELLED) {
1682 if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) ==
1684 err = XHCI_TRB_ERROR_SUCCESS;
1686 err = pci_xhci_xfer_complete(sc, xfer, slot, epid,
1688 if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) {
1689 pci_xhci_assert_interrupt(sc);
1693 /* XXX should not do it if error? */
1694 USB_DATA_XFER_RESET(xfer);
1698 USB_DATA_XFER_UNLOCK(xfer);
/*
 * Consume transfer TRBs from the endpoint ring starting at `addr` with
 * cycle state `ccs`, build a usb_data_xfer from them, hand the request to
 * the device model, and complete/retire the TRBs with Transfer events.
 */
1706 pci_xhci_handle_transfer(struct pci_xhci_softc *sc,
1707 struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep,
1708 struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot,
1709 uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid)
1711 struct xhci_trb *setup_trb;
1712 struct usb_data_xfer *xfer;
1713 struct usb_data_xfer_block *xfer_block;
1719 ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0,
1720 XHCI_ST_EPCTX_RUNNING, 0x7, 0);
1722 xfer = devep->ep_xfer;
1723 USB_DATA_XFER_LOCK(xfer);
1725 DPRINTF(("pci_xhci handle_transfer slot %u", slot));
1734 pci_xhci_dump_trb(trb);
1736 trbflags = trb->dwTrb3;
/* Stop collecting TRBs once the producer cycle no longer matches. */
1738 if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK &&
1739 (trbflags & XHCI_TRB_3_CYCLE_BIT) !=
1740 (ccs & XHCI_TRB_3_CYCLE_BIT)) {
1741 DPRINTF(("Cycle-bit changed trbflags %x, ccs %x",
1742 trbflags & XHCI_TRB_3_CYCLE_BIT, ccs));
1748 switch (XHCI_TRB_3_TYPE_GET(trbflags)) {
1749 case XHCI_TRB_TYPE_LINK:
1750 if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT)
/* Link TRBs occupy a block slot but carry no data. */
1753 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1755 xfer_block->processed = 1;
1758 case XHCI_TRB_TYPE_SETUP_STAGE:
/* Setup TRBs must use Immediate Data and carry exactly 8 bytes. */
1759 if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 ||
1760 XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) {
1761 DPRINTF(("pci_xhci: invalid setup trb"));
1762 err = XHCI_TRB_ERROR_TRB;
/*
 * NOTE(review): malloc() return value is not checked before the
 * memcpy below — an OOM here would dereference NULL.
 */
1769 xfer->ureq = malloc(
1770 sizeof(struct usb_device_request));
1771 memcpy(xfer->ureq, &val,
1772 sizeof(struct usb_device_request));
1774 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1776 xfer_block->processed = 1;
1779 case XHCI_TRB_TYPE_NORMAL:
1780 case XHCI_TRB_TYPE_ISOCH:
/* Normal/isoch TRBs are illegal inside a control transfer sequence. */
1781 if (setup_trb != NULL) {
1782 DPRINTF(("pci_xhci: trb not supposed to be in "
1784 err = XHCI_TRB_ERROR_TRB;
1789 case XHCI_TRB_TYPE_DATA_STAGE:
/* IDT means data is held inline in qwTrb0; otherwise it is a gpa. */
1790 xfer_block = usb_data_xfer_append(xfer,
1791 (void *)(trbflags & XHCI_TRB_3_IDT_BIT ?
1792 &trb->qwTrb0 : XHCI_GADDR(sc, trb->qwTrb0)),
1793 trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs);
1796 case XHCI_TRB_TYPE_STATUS_STAGE:
1797 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1801 case XHCI_TRB_TYPE_NOOP:
1802 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1804 xfer_block->processed = 1;
1807 case XHCI_TRB_TYPE_EVENT_DATA:
1808 xfer_block = usb_data_xfer_append(xfer, NULL, 0,
1810 if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) {
1811 xfer_block->processed = 1;
1816 DPRINTF(("pci_xhci: handle xfer unexpected trb type "
1818 XHCI_TRB_3_TYPE_GET(trbflags)));
1819 err = XHCI_TRB_ERROR_TRB;
1823 trb = pci_xhci_trb_next(sc, trb, &addr);
1825 DPRINTF(("pci_xhci: next trb: 0x%lx", (uint64_t)trb));
1828 xfer_block->trbnext = addr;
1829 xfer_block->streamid = streamid;
/* Chain bit clear on a non-link, non-control TRB ends the TD. */
1832 if (!setup_trb && !(trbflags & XHCI_TRB_3_CHAIN_BIT) &&
1833 XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK) {
1837 /* handle current batch that requires interrupt on complete */
1838 if (trbflags & XHCI_TRB_3_IOC_BIT) {
1839 DPRINTF(("pci_xhci: trb IOC bit set"));
1846 DPRINTF(("pci_xhci[%d]: xfer->ndata %u", __LINE__, xfer->ndata));
1848 if (xfer->ndata <= 0)
/* Control endpoint (epid 1): hand off via ue_request. */
1852 err = USB_ERR_NOT_STARTED;
1853 if (dev->dev_ue->ue_request != NULL)
1854 err = dev->dev_ue->ue_request(dev->dev_sc, xfer);
1857 /* handle data transfer */
1858 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1859 err = XHCI_TRB_ERROR_SUCCESS;
1863 err = USB_TO_XHCI_ERR(err);
1864 if ((err == XHCI_TRB_ERROR_SUCCESS) ||
1865 (err == XHCI_TRB_ERROR_STALL) ||
1866 (err == XHCI_TRB_ERROR_SHORT_PKT)) {
1867 err = pci_xhci_xfer_complete(sc, xfer, slot, epid, &do_intr);
1868 if (err != XHCI_TRB_ERROR_SUCCESS)
1873 if (err == XHCI_TRB_ERROR_EV_RING_FULL)
1874 DPRINTF(("pci_xhci[%d]: event ring full", __LINE__));
/* Interrupt is asserted only after dropping the transfer lock. */
1877 USB_DATA_XFER_UNLOCK(xfer);
1880 pci_xhci_assert_interrupt(sc);
1883 USB_DATA_XFER_RESET(xfer);
1884 DPRINTF(("pci_xhci[%d]: retry:continuing with next TRBs",
1890 USB_DATA_XFER_RESET(xfer);
/*
 * Service a doorbell ring for (slot, epid, streamid): validate the target,
 * resolve the transfer ring to consume (per-stream or per-endpoint), and
 * kick pci_xhci_handle_transfer() at the current ring position.
 */
1896 pci_xhci_device_doorbell(struct pci_xhci_softc *sc, uint32_t slot,
1897 uint32_t epid, uint32_t streamid)
1899 struct pci_xhci_dev_emu *dev;
1900 struct pci_xhci_dev_ep *devep;
1901 struct xhci_dev_ctx *dev_ctx;
1902 struct xhci_endp_ctx *ep_ctx;
1903 struct pci_xhci_trb_ring *sctx_tr;
1904 struct xhci_trb *trb;
1908 DPRINTF(("pci_xhci doorbell slot %u epid %u stream %u",
1909 slot, epid, streamid));
1911 if (slot == 0 || slot > XHCI_MAX_SLOTS) {
1912 DPRINTF(("pci_xhci: invalid doorbell slot %u", slot));
1916 if (epid == 0 || epid >= XHCI_MAX_ENDPOINTS) {
1917 DPRINTF(("pci_xhci: invalid endpoint %u", epid));
1921 dev = XHCI_SLOTDEV_PTR(sc, slot);
1922 devep = &dev->eps[epid];
1923 dev_ctx = pci_xhci_get_dev_ctx(sc, slot);
1927 ep_ctx = &dev_ctx->ctx_ep[epid];
1931 DPRINTF(("pci_xhci: device doorbell ep[%u] %08x %08x %016lx %08x",
1932 epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2,
/* Endpoint never configured (no ring address) — nothing to do. */
1935 if (ep_ctx->qwEpCtx2 == 0)
1938 /* handle pending transfers */
1939 if (devep->ep_xfer->ndata > 0) {
1940 pci_xhci_try_usb_xfer(sc, dev, devep, ep_ctx, slot, epid);
1944 /* get next trb work item */
1945 if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) {
1946 struct xhci_stream_ctx *sctx;
1949 * Stream IDs of 0, 65535 (any stream), and 65534
1950 * (prime) are invalid.
1952 if (streamid == 0 || streamid == 65534 || streamid == 65535) {
1953 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1958 pci_xhci_find_stream(sc, ep_ctx, streamid, &sctx);
1960 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1963 sctx_tr = &devep->ep_sctx_trbs[streamid];
1964 ringaddr = sctx_tr->ringaddr;
1966 trb = XHCI_GADDR(sc, sctx_tr->ringaddr & ~0xFUL);
1967 DPRINTF(("doorbell, stream %u, ccs %lx, trb ccs %x",
1968 streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1969 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* Non-stream endpoint: a non-zero stream id is rejected. */
1971 if (streamid != 0) {
1972 DPRINTF(("pci_xhci: invalid stream %u", streamid));
1975 ringaddr = devep->ep_ringaddr;
1976 ccs = devep->ep_ccs;
1978 DPRINTF(("doorbell, ccs %lx, trb ccs %x",
1979 ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT,
1980 trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT));
/* TRB type 0 is reserved; the ring has no valid work item. */
1983 if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) {
1984 DPRINTF(("pci_xhci: ring %lx trb[%lx] EP %u is RESERVED?",
1985 ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid));
1989 pci_xhci_handle_transfer(sc, dev, devep, ep_ctx, trb, slot, epid,
1990 ringaddr, ccs, streamid);
/*
 * Doorbell register write: doorbell 0 kicks the command ring; doorbells
 * 1..N target a device slot, with the endpoint and stream id encoded in
 * the written value.  Ignored while the controller is halted.
 */
1994 pci_xhci_dbregs_write(struct pci_xhci_softc *sc, uint64_t offset,
/* Convert byte offset into a doorbell index (one uint32_t per doorbell). */
1998 offset = (offset - sc->dboff) / sizeof(uint32_t);
2000 DPRINTF(("pci_xhci: doorbell write offset 0x%lx: 0x%lx",
2003 if (XHCI_HALTED(sc)) {
2004 DPRINTF(("pci_xhci: controller halted"));
2009 pci_xhci_complete_commands(sc);
2010 else if (sc->portregs != NULL)
2011 pci_xhci_device_doorbell(sc, offset,
2012 XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value));
/*
 * Runtime register write: MFINDEX is read-only; the rest are per-interrupter
 * registers (IMAN/IMOD/ERSTSZ/ERSTBA/ERDP).  ERSTBA writes (re)map the
 * event ring segment table; ERDP writes advance the event-ring dequeue
 * pointer and recompute the count of undelivered events.
 */
2016 pci_xhci_rtsregs_write(struct pci_xhci_softc *sc, uint64_t offset,
2019 struct pci_xhci_rtsregs *rts;
2021 offset -= sc->rtsoff;
2024 DPRINTF(("pci_xhci attempted write to MFINDEX"));
2028 DPRINTF(("pci_xhci: runtime regs write offset 0x%lx: 0x%lx",
2031 offset -= 0x20; /* start of intrreg */
/* IMAN: interrupt pending is write-1-to-clear; enable bit is copied. */
2037 if (value & XHCI_IMAN_INTR_PEND)
2038 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2039 rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) |
2040 (rts->intrreg.iman & XHCI_IMAN_INTR_PEND);
2042 if (!(value & XHCI_IMAN_INTR_ENA))
2043 pci_xhci_deassert_interrupt(sc);
2048 rts->intrreg.imod = value;
2052 rts->intrreg.erstsz = value & 0xFFFF;
2056 /* ERSTBA low bits */
2057 rts->intrreg.erstba = MASK_64_HI(sc->rtsregs.intrreg.erstba) |
2062 /* ERSTBA high bits */
2063 rts->intrreg.erstba = (value << 32) |
2064 MASK_64_LO(sc->rtsregs.intrreg.erstba);
/* Map the segment table, then the first segment's event ring. */
2066 rts->erstba_p = XHCI_GADDR(sc,
2067 sc->rtsregs.intrreg.erstba & ~0x3FUL);
2069 rts->erst_p = XHCI_GADDR(sc,
2070 sc->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL);
2072 rts->er_enq_idx = 0;
2073 rts->er_events_cnt = 0;
2075 DPRINTF(("pci_xhci: wr erstba erst (%p) ptr 0x%lx, sz %u",
2077 rts->erstba_p->qwEvrsTablePtr,
2078 rts->erstba_p->dwEvrsTableSize));
2084 MASK_64_HI(sc->rtsregs.intrreg.erdp) |
2085 (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) |
/* EHB (event handler busy) is write-1-to-clear. */
2087 if (value & XHCI_ERDP_LO_BUSY) {
2088 rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY;
2089 rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND;
2092 rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value);
2097 /* ERDP high bits */
2098 rts->intrreg.erdp = (value << 32) |
2099 MASK_64_LO(sc->rtsregs.intrreg.erdp);
/* Recompute outstanding events between enqueue index and new dequeue. */
2101 if (rts->er_events_cnt > 0) {
2105 erdp = rts->intrreg.erdp & ~0xF;
2106 erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) /
2107 sizeof(struct xhci_trb);
2109 if (erdp_i <= rts->er_enq_idx)
2110 rts->er_events_cnt = rts->er_enq_idx - erdp_i;
2112 rts->er_events_cnt =
2113 rts->erstba_p->dwEvrsTableSize -
2114 (erdp_i - rts->er_enq_idx);
2116 DPRINTF(("pci_xhci: erdp 0x%lx, events cnt %u",
2117 erdp, rts->er_events_cnt));
2123 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx",
2123 DPRINTF(("pci_xhci attempted write to RTS offset 0x%lx",
2130 pci_xhci_portregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2135 if (sc->portregs == NULL)
2138 port = (offset - 0x3F0) / 0x10;
2140 if (port > XHCI_MAX_DEVS) {
2141 DPRINTF(("pci_xhci: portregs_read port %d >= XHCI_MAX_DEVS",
2144 /* return default value for unused port */
2145 return (XHCI_PS_SPEED_SET(3));
2148 offset = (offset - 0x3F0) % 0x10;
2150 p = &sc->portregs[port].portsc;
2151 p += offset / sizeof(uint32_t);
2153 DPRINTF(("pci_xhci: portregs read offset 0x%lx port %u -> 0x%x",
/*
 * Operational register write (USBCMD/USBSTS/DNCTRL/CRCR/DCBAAP/CONFIG);
 * offsets >= 0x400 fall through to the port register space.
 */
2160 pci_xhci_hostop_write(struct pci_xhci_softc *sc, uint64_t offset,
2163 offset -= XHCI_CAPLEN;
2166 DPRINTF(("pci_xhci: hostop write offset 0x%lx: 0x%lx",
2171 sc->opregs.usbcmd = pci_xhci_usbcmd_write(sc, value & 0x3F0F);
2175 /* clear bits on write */
2176 sc->opregs.usbsts &= ~(value &
2177 (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS|
2178 XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR));
2186 sc->opregs.dnctrl = value & 0xFFFF;
/* CRCR low: while the ring runs only CS/CA may be set (xHCI 5.4.5). */
2190 if (sc->opregs.crcr & XHCI_CRCR_LO_CRR) {
2191 sc->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2192 sc->opregs.crcr |= value &
2193 (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA);
2195 sc->opregs.crcr = MASK_64_HI(sc->opregs.crcr) |
2196 (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS));
/* CRCR high: pointer may only change while the ring is stopped. */
2201 if (!(sc->opregs.crcr & XHCI_CRCR_LO_CRR)) {
2202 sc->opregs.crcr = MASK_64_LO(sc->opregs.crcr) |
2205 sc->opregs.cr_p = XHCI_GADDR(sc,
2206 sc->opregs.crcr & ~0xF);
2209 if (sc->opregs.crcr & XHCI_CRCR_LO_CS) {
2210 /* Stop operation of Command Ring */
2213 if (sc->opregs.crcr & XHCI_CRCR_LO_CA) {
2219 case XHCI_DCBAAP_LO:
2220 sc->opregs.dcbaap = MASK_64_HI(sc->opregs.dcbaap) |
2221 (value & 0xFFFFFFC0)
2224 case XHCI_DCBAAP_HI:
2225 sc->opregs.dcbaap = MASK_64_LO(sc->opregs.dcbaap) |
/* Full 64-bit DCBAA address now known; map it for host access. */
2227 sc->opregs.dcbaa_p = XHCI_GADDR(sc, sc->opregs.dcbaap & ~0x3FUL);
2229 DPRINTF(("pci_xhci: opregs dcbaap = 0x%lx (vaddr 0x%lx)",
2230 sc->opregs.dcbaap, (uint64_t)sc->opregs.dcbaa_p));
2234 sc->opregs.config = value & 0x03FF;
2238 if (offset >= 0x400)
2239 pci_xhci_portregs_write(sc, offset, value);
/*
 * PCI BAR 0 write dispatcher: route the MMIO write to capability (RO),
 * operational, doorbell, or runtime register handlers by offset range.
 * All register access is serialized by sc->mtx.
 */
2247 pci_xhci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2248 int baridx, uint64_t offset, int size, uint64_t value)
2250 struct pci_xhci_softc *sc;
2254 assert(baridx == 0);
2256 pthread_mutex_lock(&sc->mtx);
2257 if (offset < XHCI_CAPLEN) /* read only registers */
2258 WPRINTF(("pci_xhci: write RO-CAPs offset %ld", offset));
2259 else if (offset < sc->dboff)
2260 pci_xhci_hostop_write(sc, offset, value);
2261 else if (offset < sc->rtsoff)
2262 pci_xhci_dbregs_write(sc, offset, value);
2263 else if (offset < sc->regsend)
2264 pci_xhci_rtsregs_write(sc, offset, value);
2266 WPRINTF(("pci_xhci: write invalid offset %ld", offset));
2268 pthread_mutex_unlock(&sc->mtx);
/* Read a capability register (CAPLENGTH..HCCPARAMS2, DBOFF, RTSOFF). */
2272 pci_xhci_hostcap_read(struct pci_xhci_softc *sc, uint64_t offset)
2277 case XHCI_CAPLENGTH: /* 0x00 */
2278 value = sc->caplength;
2281 case XHCI_HCSPARAMS1: /* 0x04 */
2282 value = sc->hcsparams1;
2285 case XHCI_HCSPARAMS2: /* 0x08 */
2286 value = sc->hcsparams2;
2289 case XHCI_HCSPARAMS3: /* 0x0C */
2290 value = sc->hcsparams3;
/*
 * NOTE(review): the register at offset 0x10 is HCCPARAMS1 per the xHCI
 * spec, and the value returned is hccparams1 — verify the case label
 * macro (XHCI_HCSPARAMS0) resolves to offset 0x10 as intended.
 */
2293 case XHCI_HCSPARAMS0: /* 0x10 */
2294 value = sc->hccparams1;
2297 case XHCI_DBOFF: /* 0x14 */
2301 case XHCI_RTSOFF: /* 0x18 */
2305 case XHCI_HCCPRAMS2: /* 0x1C */
2306 value = sc->hccparams2;
2314 DPRINTF(("pci_xhci: hostcap read offset 0x%lx -> 0x%lx",
/*
 * Operational register read; offsets >= 0x400 are forwarded to the port
 * register space.  CRCR reads expose only the CRR bit (xHCI 5.4.5: the
 * command ring pointer reads as zero).
 */
2321 pci_xhci_hostop_read(struct pci_xhci_softc *sc, uint64_t offset)
2325 offset = (offset - XHCI_CAPLEN);
2328 case XHCI_USBCMD: /* 0x00 */
2329 value = sc->opregs.usbcmd;
2332 case XHCI_USBSTS: /* 0x04 */
2333 value = sc->opregs.usbsts;
2336 case XHCI_PAGESIZE: /* 0x08 */
2337 value = sc->opregs.pgsz;
2340 case XHCI_DNCTRL: /* 0x14 */
2341 value = sc->opregs.dnctrl;
2344 case XHCI_CRCR_LO: /* 0x18 */
2345 value = sc->opregs.crcr & XHCI_CRCR_LO_CRR;
2348 case XHCI_CRCR_HI: /* 0x1C */
2352 case XHCI_DCBAAP_LO: /* 0x30 */
2353 value = sc->opregs.dcbaap & 0xFFFFFFFF;
2356 case XHCI_DCBAAP_HI: /* 0x34 */
2357 value = (sc->opregs.dcbaap >> 32) & 0xFFFFFFFF;
2360 case XHCI_CONFIG: /* 0x38 */
2361 value = sc->opregs.config;
2365 if (offset >= 0x400)
2366 value = pci_xhci_portregs_read(sc, offset);
2374 DPRINTF(("pci_xhci: hostop read offset 0x%lx -> 0x%lx",
/* Doorbell registers are write-only; reads always return 0 (xHCI 5.6). */
2381 pci_xhci_dbregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2384 /* read doorbell always returns 0 */
/*
 * Runtime register read: MFINDEX, then the interrupter register set
 * starting at offset 0x20, accessed as an array of 32-bit words.
 */
2389 pci_xhci_rtsregs_read(struct pci_xhci_softc *sc, uint64_t offset)
2393 offset -= sc->rtsoff;
2396 if (offset == XHCI_MFINDEX) {
2397 value = sc->rtsregs.mfindex;
2398 } else if (offset >= 0x20) {
2405 assert(offset < sizeof(sc->rtsregs.intrreg));
/* Index into the interrupter register set word-by-word. */
2407 p = &sc->rtsregs.intrreg.iman;
2408 p += item / sizeof(uint32_t);
2412 DPRINTF(("pci_xhci: rtsregs read offset 0x%lx -> 0x%x",
/*
 * Extended capability read: synthesize the two Supported Protocol
 * capability structures (USB 2.0 and USB 3.0) that describe which port
 * ranges speak which protocol.
 */
2419 pci_xhci_xecp_read(struct pci_xhci_softc *sc, uint64_t offset)
2423 offset -= sc->regsend;
2428 /* rev major | rev minor | next-cap | cap-id */
2429 value = (0x02 << 24) | (4 << 8) | XHCI_ID_PROTOCOLS;
2432 /* name string = "USB" */
2436 /* psic | proto-defined | compat # | compat offset */
2437 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb2_port_start;
/* Second capability: USB 3.0; next-cap field 0 terminates the list. */
2442 /* rev major | rev minor | next-cap | cap-id */
2443 value = (0x03 << 24) | XHCI_ID_PROTOCOLS;
2446 /* name string = "USB" */
2450 /* psic | proto-defined | compat # | compat offset */
2451 value = ((XHCI_MAX_DEVS/2) << 8) | sc->usb3_port_start;
2456 DPRINTF(("pci_xhci: xecp invalid offset 0x%lx", offset));
2460 DPRINTF(("pci_xhci: xecp read offset 0x%lx -> 0x%x",
/*
 * PCI BAR 0 read dispatcher: route the MMIO read by offset range to the
 * capability, operational, doorbell, runtime, or extended-capability
 * handlers.  Serialized by sc->mtx; results are truncated below per size.
 */
2468 pci_xhci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2469 uint64_t offset, int size)
2471 struct pci_xhci_softc *sc;
2476 assert(baridx == 0);
2478 pthread_mutex_lock(&sc->mtx);
2479 if (offset < XHCI_CAPLEN)
2480 value = pci_xhci_hostcap_read(sc, offset);
2481 else if (offset < sc->dboff)
2482 value = pci_xhci_hostop_read(sc, offset);
2483 else if (offset < sc->rtsoff)
2484 value = pci_xhci_dbregs_read(sc, offset);
2485 else if (offset < sc->regsend)
2486 value = pci_xhci_rtsregs_read(sc, offset);
2487 else if (offset < (sc->regsend + 4*32))
2488 value = pci_xhci_xecp_read(sc, offset);
2491 WPRINTF(("pci_xhci: read invalid offset %ld", offset));
2494 pthread_mutex_unlock(&sc->mtx);
/* Registers are at most 32 bits wide. */
2504 value &= 0xFFFFFFFF;
/*
 * Perform a (warm or cold) reset of port `portn`: re-enable the port at
 * the attached device's speed, latch the appropriate change bits, and
 * queue a Port Status Change event for the guest.
 */
2512 pci_xhci_reset_port(struct pci_xhci_softc *sc, int portn, int warm)
2514 struct pci_xhci_portregs *port;
2515 struct pci_xhci_dev_emu *dev;
2516 struct xhci_trb evtrb;
2519 assert(portn <= XHCI_MAX_DEVS);
2521 DPRINTF(("xhci reset port %d", portn));
2523 port = XHCI_PORTREG_PTR(sc, portn);
2524 dev = XHCI_DEVINST_PTR(sc, portn);
2526 port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC);
2527 port->portsc |= XHCI_PS_PED |
2528 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
/* Warm reset change bit applies only to USB 3 devices. */
2530 if (warm && dev->dev_ue->ue_usbver == 3) {
2531 port->portsc |= XHCI_PS_WRC;
/* Only notify the guest on the first transition of PRC. */
2534 if ((port->portsc & XHCI_PS_PRC) == 0) {
2535 port->portsc |= XHCI_PS_PRC;
2537 pci_xhci_set_evtrb(&evtrb, portn,
2538 XHCI_TRB_ERROR_SUCCESS,
2539 XHCI_TRB_EVENT_PORT_STS_CHANGE);
2540 error = pci_xhci_insert_event(sc, &evtrb, 1);
2541 if (error != XHCI_TRB_ERROR_SUCCESS)
2542 DPRINTF(("xhci reset port insert event "
/*
 * Initialize PORTSC for port `portn` at startup: connected + powered when
 * a device is attached (USB2 ports start in Polling, USB3 in U0/enabled),
 * otherwise powered with the link in RxDetect.
 */
2549 pci_xhci_init_port(struct pci_xhci_softc *sc, int portn)
2551 struct pci_xhci_portregs *port;
2552 struct pci_xhci_dev_emu *dev;
2554 port = XHCI_PORTREG_PTR(sc, portn);
2555 dev = XHCI_DEVINST_PTR(sc, portn);
2557 port->portsc = XHCI_PS_CCS | /* connected */
2558 XHCI_PS_PP; /* port power */
2560 if (dev->dev_ue->ue_usbver == 2) {
2561 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL) |
2562 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2564 port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0) |
2565 XHCI_PS_PED | /* enabled */
2566 XHCI_PS_SPEED_SET(dev->dev_ue->ue_usbspeed);
2569 DPRINTF(("Init port %d 0x%x", portn, port->portsc));
/* No device attached: powered, link in RxDetect, not connected. */
2571 port->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP;
2572 DPRINTF(("Init empty port %d 0x%x", portn, port->portsc));
/*
 * Device-model callback: a device raised an interrupt on endpoint
 * `epctx` (bit 7 = IN direction).  Wake the port from U3 if suspended,
 * then ring the device doorbell for the corresponding endpoint.
 */
2577 pci_xhci_dev_intr(struct usb_hci *hci, int epctx)
2579 struct pci_xhci_dev_emu *dev;
2580 struct xhci_dev_ctx *dev_ctx;
2581 struct xhci_trb evtrb;
2582 struct pci_xhci_softc *sc;
2583 struct pci_xhci_portregs *p;
2584 struct xhci_endp_ctx *ep_ctx;
2589 dir_in = epctx & 0x80;
2590 epid = epctx & ~0x80;
2592 /* HW endpoint contexts are 0-15; convert to epid based on dir */
2593 epid = (epid * 2) + (dir_in ? 1 : 0);
2595 assert(epid >= 1 && epid <= 31);
2600 /* check if device is ready; OS has to initialise it */
2601 if (sc->rtsregs.erstba_p == NULL ||
2602 (sc->opregs.usbcmd & XHCI_CMD_RS) == 0 ||
2603 dev->dev_ctx == NULL)
2606 p = XHCI_PORTREG_PTR(sc, hci->hci_port);
2608 /* raise event if link U3 (suspended) state */
2609 if (XHCI_PS_PLS_GET(p->portsc) == 3) {
2610 p->portsc &= ~XHCI_PS_PLS_MASK;
2611 p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME);
2612 if ((p->portsc & XHCI_PS_PLC) != 0)
2615 p->portsc |= XHCI_PS_PLC;
2617 pci_xhci_set_evtrb(&evtrb, hci->hci_port,
2618 XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE);
2619 error = pci_xhci_insert_event(sc, &evtrb, 0);
2620 if (error != XHCI_TRB_ERROR_SUCCESS)
2624 dev_ctx = dev->dev_ctx;
2625 ep_ctx = &dev_ctx->ctx_ep[epid];
/* Low 3 bits of dwEpCtx0 hold the endpoint state. */
2626 if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) {
2627 DPRINTF(("xhci device interrupt on disabled endpoint %d",
2632 DPRINTF(("xhci device interrupt on endpoint %d", epid));
2634 pci_xhci_device_doorbell(sc, hci->hci_port, epid, 0);
/* Device-model event callback; currently only logs the port number. */
2641 pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid, void *param)
2644 DPRINTF(("xhci device event port %d", hci->hci_port));
2649 * Each controller contains a "slot" node which contains a list of
2650 * child nodes each of which is a device. Each slot node's name
2651 * corresponds to a specific controller slot. These nodes
2652 * contain a "device" variable identifying the device model of the
2653 * USB device. For example:
/*
 * Translate the legacy comma-separated "-s ...,xhci,device[=cfg],..."
 * option string into the nvlist config tree: one child node per slot under
 * "slot", each with a "device" value and optional extra key=value config.
 */
2662 pci_xhci_legacy_config(nvlist_t *nvl, const char *opts)
2665 nvlist_t *slots_nvl, *slot_nvl;
2666 char *cp, *opt, *str, *tofree;
2672 slots_nvl = create_relative_config_node(nvl, "slot");
/* strsep mutates the string; keep the original pointer to free it. */
2674 tofree = str = strdup(opts);
2675 while ((opt = strsep(&str, ",")) != NULL) {
2676 /* device[=<config>] */
2677 cp = strchr(opt, '=');
2683 snprintf(node_name, sizeof(node_name), "%d", slot);
2685 slot_nvl = create_relative_config_node(slots_nvl, node_name);
2686 set_config_value_node(slot_nvl, "device", opt);
2689 * NB: Given that we split on commas above, the legacy
2690 * format only supports a single option.
2692 if (cp != NULL && *cp != '\0')
2693 pci_parse_legacy_config(slot_nvl, cp);
/*
 * Instantiate the USB device emulations configured under the "slot" nvlist
 * node: allocate the device/slot/port tables, create each device model,
 * assign it a USB2 or USB3 port by ue_usbver, and initialize all ports.
 * Returns 0 on success, non-zero on configuration error.
 */
2700 pci_xhci_parse_devices(struct pci_xhci_softc *sc, nvlist_t *nvl)
2702 struct pci_xhci_dev_emu *dev;
2703 struct usb_devemu *ue;
2704 const nvlist_t *slots_nvl, *slot_nvl;
2705 const char *name, *device;
2707 void *devsc, *cookie;
2709 int type, usb3_port, usb2_port, i, ndevices;
2711 usb3_port = sc->usb3_port_start;
2712 usb2_port = sc->usb2_port_start;
/* NOTE(review): calloc results are not checked before use below. */
2714 sc->devices = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_dev_emu *));
2715 sc->slots = calloc(XHCI_MAX_SLOTS, sizeof(struct pci_xhci_dev_emu *));
2717 /* port and slot numbering start from 1 */
2723 slots_nvl = find_relative_config_node(nvl, "slot");
2724 if (slots_nvl == NULL)
2728 while ((name = nvlist_next(slots_nvl, &type, &cookie)) != NULL) {
2729 if (usb2_port == ((sc->usb2_port_start) + XHCI_MAX_DEVS/2) ||
2730 usb3_port == ((sc->usb3_port_start) + XHCI_MAX_DEVS/2)) {
2731 WPRINTF(("pci_xhci max number of USB 2 or 3 "
2732 "devices reached, max %d", XHCI_MAX_DEVS/2));
2736 if (type != NV_TYPE_NVLIST) {
2738 "pci_xhci: config variable '%s' under slot node",
/* Slot node names are the numeric slot ids (1..XHCI_MAX_SLOTS). */
2743 slot = strtol(name, &cp, 0);
2744 if (*cp != '\0' || slot <= 0 || slot > XHCI_MAX_SLOTS) {
2745 EPRINTLN("pci_xhci: invalid slot '%s'", name);
2749 if (XHCI_SLOTDEV_PTR(sc, slot) != NULL) {
2750 EPRINTLN("pci_xhci: duplicate slot '%s'", name);
2754 slot_nvl = nvlist_get_nvlist(slots_nvl, name);
2755 device = get_config_value_node(slot_nvl, "device");
2756 if (device == NULL) {
2758 "pci_xhci: missing \"device\" value for slot '%s'",
2763 ue = usb_emu_finddev(device);
2765 EPRINTLN("pci_xhci: unknown device model \"%s\"",
2770 DPRINTF(("pci_xhci adding device %s", device));
2772 dev = calloc(1, sizeof(struct pci_xhci_dev_emu));
2774 dev->hci.hci_sc = dev;
2775 dev->hci.hci_intr = pci_xhci_dev_intr;
2776 dev->hci.hci_event = pci_xhci_dev_event;
/* USB2 devices occupy the upper port range, USB3 the lower. */
2778 if (ue->ue_usbver == 2) {
2779 if (usb2_port == sc->usb2_port_start +
2780 XHCI_MAX_DEVS / 2) {
2781 WPRINTF(("pci_xhci max number of USB 2 devices "
2782 "reached, max %d", XHCI_MAX_DEVS / 2));
2785 dev->hci.hci_port = usb2_port;
2788 if (usb3_port == sc->usb3_port_start +
2789 XHCI_MAX_DEVS / 2) {
2790 WPRINTF(("pci_xhci max number of USB 3 devices "
2791 "reached, max %d", XHCI_MAX_DEVS / 2));
2794 dev->hci.hci_port = usb3_port;
2797 XHCI_DEVINST_PTR(sc, dev->hci.hci_port) = dev;
2799 dev->hci.hci_address = 0;
/*
 * NOTE(review): ue_init is given the controller-level `nvl`, not
 * `slot_nvl` — per-slot device config values would not be visible
 * to the device model.  Verify against the ue_init contract.
 */
2800 devsc = ue->ue_init(&dev->hci, nvl);
2801 if (devsc == NULL) {
2806 dev->dev_sc = devsc;
2808 XHCI_SLOTDEV_PTR(sc, slot) = dev;
2813 sc->portregs = calloc(XHCI_MAX_DEVS, sizeof(struct pci_xhci_portregs));
2817 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2818 pci_xhci_init_port(sc, i);
2821 WPRINTF(("pci_xhci no USB devices configured"));
/* Error path: tear down any devices created so far. */
2826 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
2827 free(XHCI_DEVINST_PTR(sc, i));
2830 free(sc->devices + 1);
2831 free(sc->slots + 1);
/*
 * pci_xhci_init
 *
 * PCI init entry point (pe_init) for the emulated xHCI controller:
 * allocates the softc, parses the per-slot device config, lays out the
 * MMIO register file (capability, operational, per-port, doorbell,
 * runtime, and extended-capability regions), programs PCI config space,
 * and allocates BAR0 to cover the whole register file.
 */
2837 pci_xhci_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl)
2839 struct pci_xhci_softc *sc;
/* Only a single xHCI controller instance is supported. */
2843 WPRINTF(("pci_xhci controller already defined"));
2848 sc = calloc(1, sizeof(struct pci_xhci_softc));
/* USB3 ports occupy the first half of the root hub, USB2 the second. */
2852 sc->usb2_port_start = (XHCI_MAX_DEVS/2) + 1;
2853 sc->usb3_port_start = 1;
2855 /* discover devices */
2856 error = pci_xhci_parse_devices(sc, nvl);
/* Capability registers: advertise xHCI version 1.0. */
2862 sc->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) |
2863 XHCI_SET_HCIVERSION(0x0100);
2864 sc->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) |
2865 XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */
2866 XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS);
2867 sc->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) |
2868 XHCI_SET_HCSP2_IST(0x04);
2869 sc->hcsparams3 = 0; /* no latency */
2870 sc->hccparams1 = XHCI_SET_HCCP1_AC64(1) | /* 64-bit addrs */
2871 XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */
2872 XHCI_SET_HCCP1_SPC(1) | /* short packet */
2873 XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX);
2874 sc->hccparams2 = XHCI_SET_HCCP2_LEC(1) |
2875 XHCI_SET_HCCP2_U3C(1);
/* Doorbell array follows the capability, operational and port regions. */
2876 sc->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START +
2877 XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs));
2879 /* dboff must be 32-bit aligned */
2880 if (sc->dboff & 0x3)
2881 sc->dboff = (sc->dboff + 0x3) & ~0x3;
2883 /* rtsoff must be 32-bytes aligned */
2884 sc->rtsoff = XHCI_SET_RTSOFFSET(sc->dboff + (XHCI_MAX_SLOTS+1) * 32);
2885 if (sc->rtsoff & 0x1F)
2886 sc->rtsoff = (sc->rtsoff + 0x1F) & ~0x1F;
2888 DPRINTF(("pci_xhci dboff: 0x%x, rtsoff: 0x%x", sc->dboff,
/* Controller starts halted with a 4K page size. */
2891 sc->opregs.usbsts = XHCI_STS_HCH;
2892 sc->opregs.pgsz = XHCI_PAGESIZE_4K;
2896 sc->regsend = sc->rtsoff + 0x20 + 32; /* only 1 intrpter */
2899 * Set extended capabilities pointer to be after regsend;
2900 * value of xecp field is 32-bit offset.
/* XECP is expressed in dwords, hence the divide by 4. */
2902 sc->hccparams1 |= XHCI_SET_HCCP1_XECP(sc->regsend/4);
/*
 * Present as an Intel xHCI controller (vendor 0x8086, device 0x1E31
 * -- presumably the Panther Point part; guests key drivers off this).
 */
2904 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x1E31);
2905 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2906 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_SERIALBUS);
2907 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_SERIALBUS_USB);
2908 pci_set_cfgdata8(pi, PCIR_PROGIF,PCIP_SERIALBUS_USB_XHCI);
2909 pci_set_cfgdata8(pi, PCI_USBREV, PCI_USB_REV_3_0);
/* Single MSI message. */
2911 pci_emul_add_msicap(pi, 1);
2913 /* regsend + xecp registers */
2914 pci_emul_alloc_bar(pi, 0, PCIBAR_MEM32, sc->regsend + 4*32);
2915 DPRINTF(("pci_xhci pci_emu_alloc: %d", sc->regsend + 4*32));
2918 pci_lintr_request(pi);
/* Serializes MMIO access and device-side interrupt delivery. */
2920 pthread_mutex_init(&sc->mtx, NULL);
2930 #ifdef BHYVE_SNAPSHOT
/*
 * pci_xhci_map_devs_slots
 *
 * Build a slot -> device-index map used by pci_xhci_snapshot: maps[i]
 * records which device-table index backs slot i (0 when empty).
 * NOTE(review): the pointer-comparison/assignment lines of the inner
 * loop are not visible in this view -- semantics inferred from the
 * consumer in pci_xhci_snapshot; confirm against full source.
 */
2932 pci_xhci_map_devs_slots(struct pci_xhci_softc *sc, int maps[])
2935 struct pci_xhci_dev_emu *dev, *slot;
/* maps[] is sized XHCI_MAX_SLOTS + 1 by the caller; clear the body. */
2937 memset(maps, 0, sizeof(maps[0]) * XHCI_MAX_SLOTS);
2939 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
2940 for (j = 1; j <= XHCI_MAX_DEVS; j++) {
2941 slot = XHCI_SLOTDEV_PTR(sc, i);
2942 dev = XHCI_DEVINST_PTR(sc, j);
/*
 * pci_xhci_snapshot_ep
 *
 * Save or restore the transfer state of endpoint 'idx' on device 'dev':
 * the ep_xfer pointer (so restore knows whether the endpoint had an
 * active transfer ring), every transfer block's guest buffer mapping and
 * metadata, the pending control request (ureq), and the ring
 * head/tail/count bookkeeping.  On restore the endpoint is
 * re-initialized first and then repopulated from the snapshot stream.
 */
2951 pci_xhci_snapshot_ep(struct pci_xhci_softc *sc, struct pci_xhci_dev_emu *dev,
2952 int idx, struct vm_snapshot_meta *meta)
2956 struct usb_data_xfer *xfer;
2957 struct usb_data_xfer_block *xfer_block;
2959 /* some sanity checks */
2960 if (meta->op == VM_SNAPSHOT_SAVE)
2961 xfer = dev->eps[idx].ep_xfer;
/* The pointer itself is snapshotted; on restore it flags ring presence. */
2963 SNAPSHOT_VAR_OR_LEAVE(xfer, meta, ret, done);
/* Rebuild the endpoint before filling it from the stream. */
2969 if (meta->op == VM_SNAPSHOT_RESTORE) {
2970 pci_xhci_init_ep(dev, idx);
2971 xfer = dev->eps[idx].ep_xfer;
2974 /* save / restore proper */
2975 for (k = 0; k < USB_MAX_XFER_BLOCKS; k++) {
2976 xfer_block = &xfer->data[k];
/* Guest data buffer is stored/recovered as a guest-physical mapping. */
2978 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(xfer_block->buf,
2979 XHCI_GADDR_SIZE(xfer_block->buf), true, meta, ret,
2981 SNAPSHOT_VAR_OR_LEAVE(xfer_block->blen, meta, ret, done);
2982 SNAPSHOT_VAR_OR_LEAVE(xfer_block->bdone, meta, ret, done);
2983 SNAPSHOT_VAR_OR_LEAVE(xfer_block->processed, meta, ret, done);
2984 SNAPSHOT_VAR_OR_LEAVE(xfer_block->hci_data, meta, ret, done);
2985 SNAPSHOT_VAR_OR_LEAVE(xfer_block->ccs, meta, ret, done);
2986 SNAPSHOT_VAR_OR_LEAVE(xfer_block->streamid, meta, ret, done);
2987 SNAPSHOT_VAR_OR_LEAVE(xfer_block->trbnext, meta, ret, done);
2990 SNAPSHOT_VAR_OR_LEAVE(xfer->ureq, meta, ret, done);
2992 /* xfer->ureq is not allocated at restore time */
2993 if (meta->op == VM_SNAPSHOT_RESTORE)
/* NOTE(review): malloc result is not NULL-checked before the
 * SNAPSHOT_BUF copy below. */
2994 xfer->ureq = malloc(sizeof(struct usb_device_request));
2996 SNAPSHOT_BUF_OR_LEAVE(xfer->ureq,
2997 sizeof(struct usb_device_request),
/* Ring bookkeeping: element count and head/tail indices. */
3001 SNAPSHOT_VAR_OR_LEAVE(xfer->ndata, meta, ret, done);
3002 SNAPSHOT_VAR_OR_LEAVE(xfer->head, meta, ret, done);
3003 SNAPSHOT_VAR_OR_LEAVE(xfer->tail, meta, ret, done);
/*
 * pci_xhci_snapshot
 *
 * Save/restore the complete controller state for live snapshot:
 * capability and operational registers, the guest-mapped command ring
 * and DCBAA pointers, runtime/interrupter registers and event-ring
 * state, then per-device identity checks, per-port registers, and
 * per-slot device context plus endpoint state.
 */
3010 pci_xhci_snapshot(struct vm_snapshot_meta *meta)
3015 struct pci_devinst *pi;
3016 struct pci_xhci_softc *sc;
3017 struct pci_xhci_portregs *port;
3018 struct pci_xhci_dev_emu *dev;
3019 char dname[SNAP_DEV_NAME_LEN];
3020 int maps[XHCI_MAX_SLOTS + 1];
3022 pi = meta->dev_data;
/* Capability registers. */
3025 SNAPSHOT_VAR_OR_LEAVE(sc->caplength, meta, ret, done);
3026 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams1, meta, ret, done);
3027 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams2, meta, ret, done);
3028 SNAPSHOT_VAR_OR_LEAVE(sc->hcsparams3, meta, ret, done);
3029 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams1, meta, ret, done);
3030 SNAPSHOT_VAR_OR_LEAVE(sc->dboff, meta, ret, done);
3031 SNAPSHOT_VAR_OR_LEAVE(sc->rtsoff, meta, ret, done);
3032 SNAPSHOT_VAR_OR_LEAVE(sc->hccparams2, meta, ret, done);
3033 SNAPSHOT_VAR_OR_LEAVE(sc->regsend, meta, ret, done);
/* Operational registers. */
3036 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbcmd, meta, ret, done);
3037 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.usbsts, meta, ret, done);
3038 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.pgsz, meta, ret, done);
3039 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dnctrl, meta, ret, done);
3040 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.crcr, meta, ret, done);
3041 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.dcbaap, meta, ret, done);
3042 SNAPSHOT_VAR_OR_LEAVE(sc->opregs.config, meta, ret, done);
/* Command-ring host mapping is re-derived from the guest address. */
3045 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.cr_p,
3046 XHCI_GADDR_SIZE(sc->opregs.cr_p), true, meta, ret, done);
3048 /* opregs.dcbaa_p */
3049 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->opregs.dcbaa_p,
3050 XHCI_GADDR_SIZE(sc->opregs.dcbaa_p), true, meta, ret, done);
/* Runtime registers / interrupter 0. */
3053 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.mfindex, meta, ret, done);
3055 /* rtsregs.intrreg */
3056 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.iman, meta, ret, done);
3057 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.imod, meta, ret, done);
3058 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstsz, meta, ret, done);
3059 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.rsvd, meta, ret, done);
3060 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erstba, meta, ret, done);
3061 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.intrreg.erdp, meta, ret, done);
3063 /* rtsregs.erstba_p */
3064 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erstba_p,
3065 XHCI_GADDR_SIZE(sc->rtsregs.erstba_p), true, meta, ret, done);
3067 /* rtsregs.erst_p */
3068 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(sc->rtsregs.erst_p,
3069 XHCI_GADDR_SIZE(sc->rtsregs.erst_p), true, meta, ret, done);
/* Event-ring producer/consumer bookkeeping. */
3071 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_deq_seg, meta, ret, done);
3072 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_idx, meta, ret, done);
3073 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_enq_seg, meta, ret, done);
3074 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.er_events_cnt, meta, ret, done);
3075 SNAPSHOT_VAR_OR_LEAVE(sc->rtsregs.event_pcs, meta, ret, done);
3077 /* sanity checking */
/*
 * Per-device identity check: both the index and the emulation model
 * name must match between the snapshot and the restored configuration.
 */
3078 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3079 dev = XHCI_DEVINST_PTR(sc, i);
3083 if (meta->op == VM_SNAPSHOT_SAVE)
3085 SNAPSHOT_VAR_OR_LEAVE(restore_idx, meta, ret, done);
3087 /* check if the restored device (when restoring) is sane */
3088 if (restore_idx != i) {
3089 fprintf(stderr, "%s: idx not matching: actual: %d, "
3090 "expected: %d\r\n", __func__, restore_idx, i);
3095 if (meta->op == VM_SNAPSHOT_SAVE) {
3096 memset(dname, 0, sizeof(dname));
3097 strncpy(dname, dev->dev_ue->ue_emu, sizeof(dname) - 1);
3100 SNAPSHOT_BUF_OR_LEAVE(dname, sizeof(dname), meta, ret, done);
3102 if (meta->op == VM_SNAPSHOT_RESTORE) {
/* Defensive: terminate the name read from the snapshot stream. */
3103 dname[sizeof(dname) - 1] = '\0';
3104 if (strcmp(dev->dev_ue->ue_emu, dname)) {
3105 fprintf(stderr, "%s: device names mismatch: "
3106 "actual: %s, expected: %s\r\n",
3107 __func__, dname, dev->dev_ue->ue_emu);
/* Per-port register state. */
3116 for (i = 1; i <= XHCI_MAX_DEVS; i++) {
3117 port = XHCI_PORTREG_PTR(sc, i);
3118 dev = XHCI_DEVINST_PTR(sc, i);
3123 SNAPSHOT_VAR_OR_LEAVE(port->portsc, meta, ret, done);
3124 SNAPSHOT_VAR_OR_LEAVE(port->portpmsc, meta, ret, done);
3125 SNAPSHOT_VAR_OR_LEAVE(port->portli, meta, ret, done);
3126 SNAPSHOT_VAR_OR_LEAVE(port->porthlpmc, meta, ret, done);
/*
 * Slot table: on save, record which device index backs each slot;
 * on restore, rebuild the slot pointers from the saved map.
 */
3130 if (meta->op == VM_SNAPSHOT_SAVE)
3131 pci_xhci_map_devs_slots(sc, maps);
3133 for (i = 1; i <= XHCI_MAX_SLOTS; i++) {
3134 SNAPSHOT_VAR_OR_LEAVE(maps[i], meta, ret, done);
3136 if (meta->op == VM_SNAPSHOT_SAVE) {
3137 dev = XHCI_SLOTDEV_PTR(sc, i);
3138 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
3140 dev = XHCI_DEVINST_PTR(sc, maps[i]);
3144 XHCI_SLOTDEV_PTR(sc, i) = dev;
/* Guest device context mapping for this slot. */
3154 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(dev->dev_ctx,
3155 XHCI_GADDR_SIZE(dev->dev_ctx), true, meta, ret, done);
/* Endpoint state; index 0 is skipped by the loop. */
3157 if (dev->dev_ctx != NULL) {
3158 for (j = 1; j < XHCI_MAX_ENDPOINTS; j++) {
3159 ret = pci_xhci_snapshot_ep(sc, dev, j, meta);
3165 SNAPSHOT_VAR_OR_LEAVE(dev->dev_slotstate, meta, ret, done);
3167 /* devices[i]->dev_sc */
/* NOTE(review): ue_snapshot's return value is ignored here, unlike
 * every other snapshot call in this function -- confirm intentional. */
3168 dev->dev_ue->ue_snapshot(dev->dev_sc, meta);
3170 /* devices[i]->hci */
3171 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_address, meta, ret, done);
3172 SNAPSHOT_VAR_OR_LEAVE(dev->hci.hci_port, meta, ret, done);
/* Controller port-bank layout. */
3175 SNAPSHOT_VAR_OR_LEAVE(sc->usb2_port_start, meta, ret, done);
3176 SNAPSHOT_VAR_OR_LEAVE(sc->usb3_port_start, meta, ret, done);
/*
 * PCI device-emulation ops table for the xHCI controller, registered
 * with the bhyve PCI framework via PCI_EMUL_SET.  BAR0 reads/writes are
 * dispatched to pci_xhci_read/pci_xhci_write; snapshot support is
 * compiled in only under BHYVE_SNAPSHOT.
 */
3183 struct pci_devemu pci_de_xhci = {
3185 .pe_init = pci_xhci_init,
3186 .pe_legacy_config = pci_xhci_legacy_config,
3187 .pe_barwrite = pci_xhci_write,
3188 .pe_barread = pci_xhci_read,
3189 #ifdef BHYVE_SNAPSHOT
3190 .pe_snapshot = pci_xhci_snapshot,
3193 PCI_EMUL_SET(pci_de_xhci);