/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/pciio.h>
#include <sys/ioctl.h>

#include <dev/io/iodev.h>
#include <dev/pci/pcireg.h>

#include <machine/iodev.h>

#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
#include <unistd.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#define	_PATH_DEVPCI	"/dev/pci"
#define	_PATH_DEVIO	"/dev/io"
#define	_PATH_MEM	"/dev/mem"

#define	LEGACY_SUPPORT	1

#define MSIX_TABLE_COUNT(ctrl) (((ctrl) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
#define MSIX_CAPLEN 12
static int pcifd = -1;
static int iofd = -1;
static int memfd = -1;
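/*
 * Per-device software state: the host-side pcisel used to reach the
 * physical function through /dev/pci, a cache of its physical BARs, and
 * the locations of its MSI and MSI-X capabilities in config space.
 */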
struct passthru_softc {
	struct pci_devinst *psc_pi;
	struct pcibar psc_bar[PCI_BARMAX + 1];
	struct {
		int		capoff;
		int		msgctrl;
		int		emulated;
	} psc_msi;
	struct {
		int		capoff;
	} psc_msix;
	struct pcisel psc_sel;
};
static int
msi_caplen(int msgctrl)
{
	int len;

	len = 10;		/* minimum length of msi capability */

	if (msgctrl & PCIM_MSICTRL_64BIT)
		len += 4;

#if 0
	/*
	 * Ignore the 'mask' and 'pending' bits in the MSI capability.
	 * We'll let the guest manipulate them directly.
	 */
	if (msgctrl & PCIM_MSICTRL_VECTOR)
		len += 10;
#endif

	return (len);
}
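/*
 * Helpers that read and write the physical device's config space via
 * the PCIOCREAD/PCIOCWRITE ioctls on /dev/pci.
 */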
static uint32_t
read_config(const struct pcisel *sel, long reg, int width)
{
	struct pci_io pi;

	bzero(&pi, sizeof(pi));
	pi.pi_sel = *sel;
	pi.pi_reg = reg;
	pi.pi_width = width;

	if (ioctl(pcifd, PCIOCREAD, &pi) < 0)
		return (0);				/* XXX */
	else
		return (pi.pi_data);
}
static void
write_config(const struct pcisel *sel, long reg, int width, uint32_t data)
{
	struct pci_io pi;

	bzero(&pi, sizeof(pi));
	pi.pi_sel = *sel;
	pi.pi_reg = reg;
	pi.pi_width = width;
	pi.pi_data = data;

	(void)ioctl(pcifd, PCIOCWRITE, &pi);		/* XXX */
}
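/*
 * With LEGACY_SUPPORT, a device that lacks an MSI capability gets a
 * synthetic one placed in its emulated config space, so the guest can
 * still be delivered virtual MSI driven by the device's legacy INTx.
 */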
#ifdef LEGACY_SUPPORT
static int
passthru_add_msicap(struct pci_devinst *pi, int msgnum, int nextptr)
{
	int capoff, i;
	struct msicap msicap;
	u_char *capdata;

	pci_populate_msicap(&msicap, msgnum, nextptr);

	/*
	 * XXX
	 * Copy the msi capability structure in the last 16 bytes of the
	 * config space. This is wrong because it could shadow something
	 * useful to the device.
	 */
	capoff = 256 - roundup(sizeof(msicap), 4);
	capdata = (u_char *)&msicap;
	for (i = 0; i < sizeof(msicap); i++)
		pci_set_cfgdata8(pi, capoff + i, capdata[i]);

	return (capoff);
}
#endif	/* LEGACY_SUPPORT */
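/*
 * Walk the physical device's capability list, cache the offsets of its
 * MSI and MSI-X capabilities and mirror them into the emulated config
 * space.  For MSI-X the table geometry (BAR, offset, entry count) is
 * recorded in pi_msix as well.
 */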
static int
cfginitmsi(struct passthru_softc *sc)
{
	int i, ptr, capptr, cap, sts, caplen, table_size;
	uint32_t u32;
	struct pcisel sel;
	struct pci_devinst *pi;
	struct msixcap msixcap;
	uint32_t *msixcap_ptr;

	pi = sc->psc_pi;
	sel = sc->psc_sel;

	/*
	 * Parse the capabilities and cache the location of the MSI
	 * and MSI-X capabilities.
	 */
	sts = read_config(&sel, PCIR_STATUS, 2);
	if (sts & PCIM_STATUS_CAPPRESENT) {
		ptr = read_config(&sel, PCIR_CAP_PTR, 1);
		while (ptr != 0 && ptr != 0xff) {
			cap = read_config(&sel, ptr + PCICAP_ID, 1);
			if (cap == PCIY_MSI) {
				/*
				 * Copy the MSI capability into the config
				 * space of the emulated pci device
				 */
				sc->psc_msi.capoff = ptr;
				sc->psc_msi.msgctrl = read_config(&sel,
				    ptr + 2, 2);
				sc->psc_msi.emulated = 0;
				caplen = msi_caplen(sc->psc_msi.msgctrl);
				capptr = ptr;
				while (caplen > 0) {
					u32 = read_config(&sel, capptr, 4);
					pci_set_cfgdata32(pi, capptr, u32);
					caplen -= 4;
					capptr += 4;
				}
			} else if (cap == PCIY_MSIX) {
				/*
				 * Copy the MSI-X capability
				 */
				sc->psc_msix.capoff = ptr;
				caplen = 12;
				msixcap_ptr = (uint32_t*) &msixcap;
				capptr = ptr;
				while (caplen > 0) {
					u32 = read_config(&sel, capptr, 4);
					*msixcap_ptr = u32;
					pci_set_cfgdata32(pi, capptr, u32);
					caplen -= 4;
					capptr += 4;
					msixcap_ptr++;
				}
			}
			ptr = read_config(&sel, ptr + PCICAP_NEXTPTR, 1);
		}
	}

	if (sc->psc_msix.capoff != 0) {
		pi->pi_msix.pba_bar =
		    msixcap.pba_info & PCIM_MSIX_BIR_MASK;
		pi->pi_msix.pba_offset =
		    msixcap.pba_info & ~PCIM_MSIX_BIR_MASK;
		pi->pi_msix.table_bar =
		    msixcap.table_info & PCIM_MSIX_BIR_MASK;
		pi->pi_msix.table_offset =
		    msixcap.table_info & ~PCIM_MSIX_BIR_MASK;
		pi->pi_msix.table_count = MSIX_TABLE_COUNT(msixcap.msgctrl);
		pi->pi_msix.pba_size = PBA_SIZE(pi->pi_msix.table_count);

		/* Allocate the emulated MSI-X table array */
		table_size = pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
		pi->pi_msix.table = calloc(1, table_size);

		/* Mask all table entries */
		for (i = 0; i < pi->pi_msix.table_count; i++) {
			pi->pi_msix.table[i].vector_control |=
						PCIM_MSIX_VCTRL_MASK;
		}
	}

#ifdef LEGACY_SUPPORT
	/*
	 * If the passthrough device does not support MSI then craft a
	 * MSI capability for it. We link the new MSI capability at the
	 * head of the list of capabilities.
	 */
	if ((sts & PCIM_STATUS_CAPPRESENT) != 0 && sc->psc_msi.capoff == 0) {
		int origptr, msiptr;
		origptr = read_config(&sel, PCIR_CAP_PTR, 1);
		msiptr = passthru_add_msicap(pi, 1, origptr);
		sc->psc_msi.capoff = msiptr;
		sc->psc_msi.msgctrl = pci_get_cfgdata16(pi, msiptr + 2);
		sc->psc_msi.emulated = 1;
		pci_set_cfgdata8(pi, PCIR_CAP_PTR, msiptr);
	}
#endif

	/* Make sure one of the capabilities is present */
	if (sc->psc_msi.capoff == 0 && sc->psc_msix.capoff == 0)
		return (-1);
	else
		return (0);
}
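/*
 * Accesses to the BAR containing the MSI-X table are emulated rather
 * than passed through: reads and writes that land in the PBA region go
 * to the mmap'ed physical PBA page, while accesses to the table itself
 * are serviced from the emulated copy in pi_msix.table.
 */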
static uint64_t
msix_table_read(struct passthru_softc *sc, uint64_t offset, int size)
{
	struct pci_devinst *pi;
	struct msix_table_entry *entry;
	uint8_t *src8;
	uint16_t *src16;
	uint32_t *src32;
	uint64_t *src64;
	uint64_t data;
	size_t entry_offset;
	int index;

	pi = sc->psc_pi;
	if (pi->pi_msix.pba_page != NULL && offset >= pi->pi_msix.pba_offset &&
	    offset < pi->pi_msix.pba_offset + pi->pi_msix.pba_size) {
		switch (size) {
		case 1:
			src8 = (uint8_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			data = *src8;
			break;
		case 2:
			src16 = (uint16_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			data = *src16;
			break;
		case 4:
			src32 = (uint32_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			data = *src32;
			break;
		case 8:
			src64 = (uint64_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			data = *src64;
			break;
		default:
			return (-1);
		}
		return (data);
	}

	if (offset < pi->pi_msix.table_offset)
		return (-1);

	offset -= pi->pi_msix.table_offset;
	index = offset / MSIX_TABLE_ENTRY_SIZE;
	if (index >= pi->pi_msix.table_count)
		return (-1);

	entry = &pi->pi_msix.table[index];
	entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;

	switch (size) {
	case 1:
		src8 = (uint8_t *)((void *)entry + entry_offset);
		data = *src8;
		break;
	case 2:
		src16 = (uint16_t *)((void *)entry + entry_offset);
		data = *src16;
		break;
	case 4:
		src32 = (uint32_t *)((void *)entry + entry_offset);
		data = *src32;
		break;
	case 8:
		src64 = (uint64_t *)((void *)entry + entry_offset);
		data = *src64;
		break;
	default:
		return (-1);
	}

	return (data);
}
static void
msix_table_write(struct vmctx *ctx, int vcpu, struct passthru_softc *sc,
    uint64_t offset, int size, uint64_t data)
{
	struct pci_devinst *pi;
	struct msix_table_entry *entry;
	uint8_t *dest8;
	uint16_t *dest16;
	uint32_t *dest32;
	uint64_t *dest64;
	size_t entry_offset;
	uint32_t vector_control;
	int index;

	pi = sc->psc_pi;
	if (pi->pi_msix.pba_page != NULL && offset >= pi->pi_msix.pba_offset &&
	    offset < pi->pi_msix.pba_offset + pi->pi_msix.pba_size) {
		switch (size) {
		case 1:
			dest8 = (uint8_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			*dest8 = data;
			break;
		case 2:
			dest16 = (uint16_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			*dest16 = data;
			break;
		case 4:
			dest32 = (uint32_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			*dest32 = data;
			break;
		case 8:
			dest64 = (uint64_t *)(pi->pi_msix.pba_page + offset -
			    pi->pi_msix.pba_page_offset);
			*dest64 = data;
			break;
		default:
			break;
		}
		return;
	}

	if (offset < pi->pi_msix.table_offset)
		return;

	offset -= pi->pi_msix.table_offset;
	index = offset / MSIX_TABLE_ENTRY_SIZE;
	if (index >= pi->pi_msix.table_count)
		return;

	entry = &pi->pi_msix.table[index];
	entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;

	/* Only 4 byte naturally-aligned writes are supported */
	assert(size == 4);
	assert(entry_offset % 4 == 0);

	vector_control = entry->vector_control;
	dest32 = (uint32_t *)((void *)entry + entry_offset);
	*dest32 = data;
	/* If MSI-X hasn't been enabled, do nothing */
	if (pi->pi_msix.enabled) {
		/* If the entry is masked, don't set it up */
		if ((entry->vector_control & PCIM_MSIX_VCTRL_MASK) == 0 ||
		    (vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
			(void)vm_setup_pptdev_msix(ctx, vcpu,
			    sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
			    sc->psc_sel.pc_func, index, entry->addr,
			    entry->msg_data, entry->vector_control);
		}
	}
}
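/*
 * Decide whether the PBA shares a 4KB page with the MSI-X table.  If it
 * does, map that physical page from /dev/mem so PBA accesses can be
 * forwarded while the table itself remains emulated; otherwise no PBA
 * emulation is needed.
 */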
static int
init_msix_table(struct vmctx *ctx, struct passthru_softc *sc, uint64_t base)
{
	int b, s, f;
	int idx;
	size_t remaining;
	uint32_t table_size, table_offset;
	uint32_t pba_size, pba_offset;
	vm_paddr_t start;
	struct pci_devinst *pi = sc->psc_pi;

	assert(pci_msix_table_bar(pi) >= 0 && pci_msix_pba_bar(pi) >= 0);

	b = sc->psc_sel.pc_bus;
	s = sc->psc_sel.pc_dev;
	f = sc->psc_sel.pc_func;

	/*
	 * If the MSI-X table BAR maps memory intended for
	 * other uses, it is at least assured that the table
	 * either resides in its own page within the region,
	 * or it resides in a page shared with only the PBA.
	 */
	table_offset = rounddown2(pi->pi_msix.table_offset, 4096);

	table_size = pi->pi_msix.table_offset - table_offset;
	table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
	table_size = roundup2(table_size, 4096);

	idx = pi->pi_msix.table_bar;
	start = pi->pi_bar[idx].addr;
	remaining = pi->pi_bar[idx].size;

	if (pi->pi_msix.pba_bar == pi->pi_msix.table_bar) {
		pba_offset = pi->pi_msix.pba_offset;
		pba_size = pi->pi_msix.pba_size;
		if (pba_offset >= table_offset + table_size ||
		    table_offset >= pba_offset + pba_size) {
			/*
			 * If the PBA does not share a page with the MSI-x
			 * tables, no PBA emulation is required.
			 */
			pi->pi_msix.pba_page = NULL;
			pi->pi_msix.pba_page_offset = 0;
		} else {
			/*
			 * The PBA overlaps with either the first or last
			 * page of the MSI-X table region.  Map the
			 * appropriate page.
			 */
			if (pba_offset <= table_offset)
				pi->pi_msix.pba_page_offset = table_offset;
			else
				pi->pi_msix.pba_page_offset = table_offset +
				    table_size - 4096;
			pi->pi_msix.pba_page = mmap(NULL, 4096, PROT_READ |
			    PROT_WRITE, MAP_SHARED, memfd, start +
			    pi->pi_msix.pba_page_offset);
			if (pi->pi_msix.pba_page == MAP_FAILED) {
				warn(
			    "Failed to map PBA page for MSI-X on %d/%d/%d",
				    b, s, f);
				return (-1);
			}
		}
	}

	return (0);
}
static int
cfginitbar(struct vmctx *ctx, struct passthru_softc *sc)
{
	int i, error;
	struct pci_devinst *pi;
	struct pci_bar_io bar;
	enum pcibar_type bartype;
	uint64_t base, size;

	pi = sc->psc_pi;

	/*
	 * Initialize BAR registers
	 */
	for (i = 0; i <= PCI_BARMAX; i++) {
		bzero(&bar, sizeof(bar));
		bar.pbi_sel = sc->psc_sel;
		bar.pbi_reg = PCIR_BAR(i);

		if (ioctl(pcifd, PCIOCGETBAR, &bar) < 0)
			continue;

		if (PCI_BAR_IO(bar.pbi_base)) {
			bartype = PCIBAR_IO;
			base = bar.pbi_base & PCIM_BAR_IO_BASE;
		} else {
			switch (bar.pbi_base & PCIM_BAR_MEM_TYPE) {
			case PCIM_BAR_MEM_64:
				bartype = PCIBAR_MEM64;
				break;
			default:
				bartype = PCIBAR_MEM32;
				break;
			}
			base = bar.pbi_base & PCIM_BAR_MEM_BASE;
		}
		size = bar.pbi_length;

		if (bartype != PCIBAR_IO) {
			if (((base | size) & PAGE_MASK) != 0) {
				warnx("passthru device %d/%d/%d BAR %d: "
				    "base %#lx or size %#lx not page aligned\n",
				    sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
				    sc->psc_sel.pc_func, i, base, size);
				return (-1);
			}
		}

		/* Cache information about the "real" BAR */
		sc->psc_bar[i].type = bartype;
		sc->psc_bar[i].size = size;
		sc->psc_bar[i].addr = base;

		/* Allocate the BAR in the guest I/O or MMIO space */
		error = pci_emul_alloc_bar(pi, i, bartype, size);
		if (error)
			return (-1);

		/* The MSI-X table needs special handling */
		if (i == pci_msix_table_bar(pi)) {
			error = init_msix_table(ctx, sc, base);
			if (error)
				return (-1);
		}

		/*
		 * 64-bit BAR takes up two slots so skip the next one.
		 */
		if (bartype == PCIBAR_MEM64) {
			i++;
			assert(i <= PCI_BARMAX);
			sc->psc_bar[i].type = PCIBAR_MEMHI64;
		}
	}
	return (0);
}
static int
cfginit(struct vmctx *ctx, struct pci_devinst *pi, int bus, int slot, int func)
{
	int error;
	struct passthru_softc *sc;

	error = 1;
	sc = pi->pi_arg;

	bzero(&sc->psc_sel, sizeof(struct pcisel));
	sc->psc_sel.pc_bus = bus;
	sc->psc_sel.pc_dev = slot;
	sc->psc_sel.pc_func = func;

	if (cfginitmsi(sc) != 0) {
		warnx("failed to initialize MSI for PCI %d/%d/%d",
		    bus, slot, func);
		goto done;
	}

	if (cfginitbar(ctx, sc) != 0) {
		warnx("failed to initialize BARs for PCI %d/%d/%d",
		    bus, slot, func);
		goto done;
	}

	pci_set_cfgdata16(pi, PCIR_COMMAND, read_config(&sc->psc_sel,
	    PCIR_COMMAND, 2));

	error = 0;				/* success */
done:
	return (error);
}
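/*
 * Translate the legacy "-s <slot>,passthru,<bus>/<slot>/<func>" option
 * string into config nodes.  For example, an option string of "2/0/0"
 * (hypothetical values) becomes bus=2, slot=0, func=0.
 */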
static int
passthru_legacy_config(nvlist_t *nvl, const char *opts)
{
	char value[16];
	int bus, slot, func;

	if (opts == NULL)
		return (0);

	if (sscanf(opts, "%d/%d/%d", &bus, &slot, &func) != 3) {
		EPRINTLN("passthru: invalid options \"%s\"", opts);
		return (-1);
	}

	snprintf(value, sizeof(value), "%d", bus);
	set_config_value_node(nvl, "bus", value);
	snprintf(value, sizeof(value), "%d", slot);
	set_config_value_node(nvl, "slot", value);
	snprintf(value, sizeof(value), "%d", func);
	set_config_value_node(nvl, "func", value);
	return (0);
}
passthru_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl)
	int bus, slot, func, error, memflags;
	struct passthru_softc *sc;
#ifndef WITHOUT_CAPSICUM
	cap_ioctl_t pci_ioctls[] = { PCIOCREAD, PCIOCWRITE, PCIOCGETBAR };
	cap_ioctl_t io_ioctls[] = { IODEV_PIO };

#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_IOCTL, CAP_READ, CAP_WRITE);

	memflags = vm_get_memflags(ctx);
	if (!(memflags & VM_MEM_F_WIRED)) {
		warnx("passthru requires guest memory to be wired");

	pcifd = open(_PATH_DEVPCI, O_RDWR, 0);
		warn("failed to open %s", _PATH_DEVPCI);

#ifndef WITHOUT_CAPSICUM
	if (caph_rights_limit(pcifd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(pcifd, pci_ioctls, nitems(pci_ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	iofd = open(_PATH_DEVIO, O_RDWR, 0);
		warn("failed to open %s", _PATH_DEVIO);

#ifndef WITHOUT_CAPSICUM
	if (caph_rights_limit(iofd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	if (caph_ioctls_limit(iofd, io_ioctls, nitems(io_ioctls)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

	memfd = open(_PATH_MEM, O_RDWR, 0);
		warn("failed to open %s", _PATH_MEM);

#ifndef WITHOUT_CAPSICUM
	cap_rights_clear(&rights, CAP_IOCTL);
	cap_rights_set(&rights, CAP_MMAP_RW);
	if (caph_rights_limit(memfd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");

#define GET_INT_CONFIG(var, name) do {					\
	value = get_config_value_node(nvl, name);			\
	if (value == NULL) {						\
		EPRINTLN("passthru: missing required %s setting", name); \

	GET_INT_CONFIG(bus, "bus");
	GET_INT_CONFIG(slot, "slot");
	GET_INT_CONFIG(func, "func");

	if (vm_assign_pptdev(ctx, bus, slot, func) != 0) {
		warnx("PCI device at %d/%d/%d is not using the ppt(4) driver",

	sc = calloc(1, sizeof(struct passthru_softc));

	/* initialize config space */
	error = cfginit(ctx, pi, bus, slot, func);
		vm_unassign_pptdev(ctx, bus, slot, func);
static int
bar_access(int coff)
{
	if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1))
		return (1);
	else
		return (0);
}
static int
msicap_access(struct passthru_softc *sc, int coff)
{
	int caplen;

	if (sc->psc_msi.capoff == 0)
		return (0);

	caplen = msi_caplen(sc->psc_msi.msgctrl);

	if (coff >= sc->psc_msi.capoff && coff < sc->psc_msi.capoff + caplen)
		return (1);
	else
		return (0);
}
static int
msixcap_access(struct passthru_softc *sc, int coff)
{
	if (sc->psc_msix.capoff == 0)
		return (0);

	return (coff >= sc->psc_msix.capoff &&
	    coff < sc->psc_msix.capoff + MSIX_CAPLEN);
}
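/*
 * Config-space reads: accesses to the emulated BARs and to the MSI/MSI-X
 * capabilities return -1 so the generic PCI emulation services them from
 * the emulated config space; the command/status pair is synthesized from
 * emulated and physical state; everything else is read from the device.
 */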
static int
passthru_cfgread(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int coff, int bytes, uint32_t *rv)
{
	struct passthru_softc *sc;

	sc = pi->pi_arg;

	/*
	 * PCI BARs and MSI capability is emulated.
	 */
	if (bar_access(coff) || msicap_access(sc, coff))
		return (-1);

#ifdef LEGACY_SUPPORT
	/*
	 * Emulate PCIR_CAP_PTR if this device does not support MSI capability
	 * natively.
	 */
	if (sc->psc_msi.emulated) {
		if (coff >= PCIR_CAP_PTR && coff < PCIR_CAP_PTR + 4)
			return (-1);
	}
#endif

	/*
	 * Emulate the command register.  If a single read reads both the
	 * command and status registers, read the status register from the
	 * device's config space.
	 */
	if (coff == PCIR_COMMAND) {
		if (bytes <= 2)
			return (-1);
		*rv = read_config(&sc->psc_sel, PCIR_STATUS, 2) << 16 |
		    pci_get_cfgdata16(pi, PCIR_COMMAND);
		return (0);
	}

	/* Everything else just read from the device's config space */
	*rv = read_config(&sc->psc_sel, coff, bytes);

	return (0);
}
static int
passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int coff, int bytes, uint32_t val)
{
	int error, msix_table_entries, i;
	struct passthru_softc *sc;
	uint16_t cmd_old;

	sc = pi->pi_arg;

	/*
	 * PCI BARs are emulated
	 */
	if (bar_access(coff))
		return (-1);

	/*
	 * MSI capability is emulated
	 */
	if (msicap_access(sc, coff)) {
		pci_emul_capwrite(pi, coff, bytes, val, sc->psc_msi.capoff,
		    PCIY_MSI);
		error = vm_setup_pptdev_msi(ctx, vcpu, sc->psc_sel.pc_bus,
		    sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
		    pi->pi_msi.addr, pi->pi_msi.msg_data,
		    pi->pi_msi.maxmsgnum);
		if (error != 0)
			err(1, "vm_setup_pptdev_msi");
		return (0);
	}

	if (msixcap_access(sc, coff)) {
		pci_emul_capwrite(pi, coff, bytes, val, sc->psc_msix.capoff,
		    PCIY_MSIX);
		if (pi->pi_msix.enabled) {
			msix_table_entries = pi->pi_msix.table_count;
			for (i = 0; i < msix_table_entries; i++) {
				error = vm_setup_pptdev_msix(ctx, vcpu,
				    sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
				    sc->psc_sel.pc_func, i,
				    pi->pi_msix.table[i].addr,
				    pi->pi_msix.table[i].msg_data,
				    pi->pi_msix.table[i].vector_control);

				if (error)
					err(1, "vm_setup_pptdev_msix");
			}
		} else {
			error = vm_disable_pptdev_msix(ctx, sc->psc_sel.pc_bus,
			    sc->psc_sel.pc_dev, sc->psc_sel.pc_func);
			if (error)
				err(1, "vm_disable_pptdev_msix");
		}
		return (0);
	}

#ifdef LEGACY_SUPPORT
	/*
	 * If this device does not support MSI natively then we cannot let
	 * the guest disable legacy interrupts from the device. It is the
	 * legacy interrupt that is triggering the virtual MSI to the guest.
	 */
	if (sc->psc_msi.emulated && pci_msi_enabled(pi)) {
		if (coff == PCIR_COMMAND && bytes == 2)
			val &= ~PCIM_CMD_INTxDIS;
	}
#endif

	write_config(&sc->psc_sel, coff, bytes, val);
	if (coff == PCIR_COMMAND) {
		cmd_old = pci_get_cfgdata16(pi, PCIR_COMMAND);
		if (bytes == 1)
			pci_set_cfgdata8(pi, PCIR_COMMAND, val);
		else if (bytes == 2)
			pci_set_cfgdata16(pi, PCIR_COMMAND, val);
		pci_emul_cmd_changed(pi, cmd_old);
	}

	return (0);
}
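/*
 * BAR access handlers: accesses to the MSI-X table BAR are emulated by
 * msix_table_write()/msix_table_read(); I/O port BARs are forwarded to
 * the physical device with the IODEV_PIO ioctl on /dev/io.
 */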
static void
passthru_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size, uint64_t value)
{
	struct passthru_softc *sc;
	struct iodev_pio_req pio;

	sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi)) {
		msix_table_write(ctx, vcpu, sc, offset, size, value);
	} else {
		assert(pi->pi_bar[baridx].type == PCIBAR_IO);
		bzero(&pio, sizeof(struct iodev_pio_req));
		pio.access = IODEV_PIO_WRITE;
		pio.port = sc->psc_bar[baridx].addr + offset;
		pio.width = size;
		pio.val = value;

		(void)ioctl(iofd, IODEV_PIO, &pio);
	}
}
static uint64_t
passthru_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size)
{
	struct passthru_softc *sc;
	struct iodev_pio_req pio;
	uint64_t val;

	sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi)) {
		val = msix_table_read(sc, offset, size);
	} else {
		assert(pi->pi_bar[baridx].type == PCIBAR_IO);
		bzero(&pio, sizeof(struct iodev_pio_req));
		pio.access = IODEV_PIO_READ;
		pio.port = sc->psc_bar[baridx].addr + offset;
		pio.width = size;
		pio.val = 0;

		(void)ioctl(iofd, IODEV_PIO, &pio);

		val = pio.val;
	}

	return (val);
}
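/*
 * pe_baraddr callbacks: as the guest enables or reprograms a memory BAR,
 * map or unmap the corresponding host physical range into the guest,
 * carving out the pages that hold the MSI-X table so they stay emulated.
 */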
static void
passthru_msix_addr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
    int enabled, uint64_t address)
{
	struct passthru_softc *sc;
	size_t remaining;
	uint32_t table_size, table_offset;

	sc = pi->pi_arg;
	table_offset = rounddown2(pi->pi_msix.table_offset, 4096);
	if (table_offset > 0) {
		if (!enabled) {
			if (vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
			    sc->psc_sel.pc_dev,
			    sc->psc_sel.pc_func, address,
			    table_offset) != 0)
				warnx("pci_passthru: unmap_pptdev_mmio failed");
		} else {
			if (vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
			    sc->psc_sel.pc_dev,
			    sc->psc_sel.pc_func, address,
			    table_offset,
			    sc->psc_bar[baridx].addr) != 0)
				warnx("pci_passthru: map_pptdev_mmio failed");
		}
	}
	table_size = pi->pi_msix.table_offset - table_offset;
	table_size += pi->pi_msix.table_count * MSIX_TABLE_ENTRY_SIZE;
	table_size = roundup2(table_size, 4096);
	remaining = pi->pi_bar[baridx].size - table_offset - table_size;
	if (remaining > 0) {
		address += table_offset + table_size;
		if (!enabled) {
			if (vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
			    sc->psc_sel.pc_dev,
			    sc->psc_sel.pc_func, address,
			    remaining) != 0)
				warnx("pci_passthru: unmap_pptdev_mmio failed");
		} else {
			if (vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
			    sc->psc_sel.pc_dev,
			    sc->psc_sel.pc_func, address,
			    remaining,
			    sc->psc_bar[baridx].addr +
			    table_offset + table_size) != 0)
				warnx("pci_passthru: map_pptdev_mmio failed");
		}
	}
}
static void
passthru_mmio_addr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
    int enabled, uint64_t address)
{
	struct passthru_softc *sc;

	sc = pi->pi_arg;
	if (!enabled) {
		if (vm_unmap_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
		    sc->psc_sel.pc_dev,
		    sc->psc_sel.pc_func, address,
		    sc->psc_bar[baridx].size) != 0)
			warnx("pci_passthru: unmap_pptdev_mmio failed");
	} else {
		if (vm_map_pptdev_mmio(ctx, sc->psc_sel.pc_bus,
		    sc->psc_sel.pc_dev,
		    sc->psc_sel.pc_func, address,
		    sc->psc_bar[baridx].size,
		    sc->psc_bar[baridx].addr) != 0)
			warnx("pci_passthru: map_pptdev_mmio failed");
	}
}
static void
passthru_addr(struct vmctx *ctx, struct pci_devinst *pi, int baridx,
    int enabled, uint64_t address)
{

	if (pi->pi_bar[baridx].type == PCIBAR_IO)
		return;
	if (baridx == pci_msix_table_bar(pi))
		passthru_msix_addr(ctx, pi, baridx, enabled, address);
	else
		passthru_mmio_addr(ctx, pi, baridx, enabled, address);
}
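/*
 * Register the device model under the name "passthru"; PCI_EMUL_SET()
 * adds it to the linker set of available PCI device emulations.
 */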
struct pci_devemu passthru = {
	.pe_emu = "passthru",
	.pe_init = passthru_init,
	.pe_legacy_config = passthru_legacy_config,
	.pe_cfgwrite = passthru_cfgwrite,
	.pe_cfgread = passthru_cfgread,
	.pe_barwrite = passthru_write,
	.pe_barread = passthru_read,
	.pe_baraddr = passthru_addr,
};
PCI_EMUL_SET(passthru);