 * Copyright (c) 2016, Anish Gupta (anish@freebsd.org)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/resource.h>
#include <machine/vmm.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pci_cfgreg.h>
#include "amdvi_priv.h"
SYSCTL_NODE(_hw_vmm, OID_AUTO, amdvi, CTLFLAG_RW, NULL, NULL);
#define MOD_INC(a, s, m) (((a) + (s)) % ((m) * (s)))
#define MOD_DEC(a, s, m) (((a) - (s)) % ((m) * (s)))
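/*
 * Illustrative example (added note): these macros step an offset through a
 * ring of m entries of s bytes each, wrapping at m * s. Assuming 16-byte
 * commands (s = 16) and a 256-entry command ring (m = 256), the ring spans
 * 4096 bytes and MOD_INC(4080, 16, 256) wraps the offset back to 0.
 */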
/* Print RID or device ID in PCI string format. */
#define RID2PCI_STR(d) PCI_RID2BUS(d), PCI_RID2SLOT(d), PCI_RID2FUNC(d)
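/*
 * Example (added note, assuming the standard PCI RID layout of bus[15:8],
 * slot[7:3], func[2:0]): RID 0x0821 prints as 8.4.1.
 */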
static void amdvi_dump_cmds(struct amdvi_softc *softc);
static void amdvi_print_dev_cap(struct amdvi_softc *softc);
MALLOC_DEFINE(M_AMDVI, "amdvi", "amdvi");
extern device_t *ivhd_devs;
extern int ivhd_count;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, count, CTLFLAG_RDTUN, &ivhd_count,
static int amdvi_enable_user = 0;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, enable, CTLFLAG_RDTUN,
    &amdvi_enable_user, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi_enable", &amdvi_enable_user);
#ifdef AMDVI_ATS_ENABLE
/* XXX: ATS is not tested. */
static int amdvi_enable_iotlb = 1;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, iotlb_enabled, CTLFLAG_RDTUN,
    &amdvi_enable_iotlb, 0, NULL);
TUNABLE_INT("hw.vmm.enable_iotlb", &amdvi_enable_iotlb);
static int amdvi_host_ptp = 1;	/* Use page tables for host. */
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, host_ptp, CTLFLAG_RDTUN,
    &amdvi_host_ptp, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.host_ptp", &amdvi_host_ptp);
/* Page table level used; must be <= that supported by the h/w (v1 = 7). */
static int amdvi_ptp_level = 4;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, ptp_level, CTLFLAG_RDTUN,
    &amdvi_ptp_level, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.ptp_level", &amdvi_ptp_level);
/* Disable fault event reporting. */
static int amdvi_disable_io_fault = 0;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, disable_io_fault, CTLFLAG_RDTUN,
    &amdvi_disable_io_fault, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.disable_io_fault", &amdvi_disable_io_fault);
static uint32_t amdvi_dom_id = 0;	/* 0 is reserved for host. */
SYSCTL_UINT(_hw_vmm_amdvi, OID_AUTO, domain_id, CTLFLAG_RD,
    &amdvi_dom_id, 0, NULL);
 * Device table entry.
 * Bus(256) x Dev(32) x Fun(8) x DTE(256 bits or 32 bytes).
 * = 256 * 2 * PAGE_SIZE.
static struct amdvi_dte amdvi_dte[PCI_NUM_DEV_MAX] __aligned(PAGE_SIZE);
CTASSERT(PCI_NUM_DEV_MAX == 0x10000);
CTASSERT(sizeof(amdvi_dte) == 0x200000);
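/*
 * Sanity arithmetic (added note): 256 buses x 32 slots x 8 functions =
 * 0x10000 possible RIDs; at 32 bytes per DTE the table is 0x10000 * 32 =
 * 0x200000 bytes (2MB), matching the assertions above.
 */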
static SLIST_HEAD (, amdvi_domain) dom_head;
static inline uint32_t
amdvi_pci_read(struct amdvi_softc *softc, int off)
	return (pci_cfgregread(PCI_RID2BUS(softc->pci_rid),
	    PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid),
#ifdef AMDVI_ATS_ENABLE
/* XXX: Should be in pci.c */
 * Check if the device has the ATS capability and it is enabled.
 * If ATS is absent or disabled, return (-1), otherwise ATS
amdvi_find_ats_qlen(uint16_t devid)
	dev = pci_find_bsf(PCI_RID2BUS(devid), PCI_RID2SLOT(devid),
	    PCI_RID2FUNC(devid));
#define PCIM_ATS_EN	BIT(31)
	if (pci_find_extcap(dev, PCIZ_ATS, &off) == 0) {
		cap = pci_read_config(dev, off + 4, 4);
		qlen = qlen ? qlen : 32;
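		/*
		 * Added note: per the PCIe ATS capability definition, an
		 * Invalidate Queue Depth field of 0 encodes a depth of 32,
		 * hence the substitution above.
		 */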
157 printf("AMD-Vi: PCI device %d.%d.%d ATS %s qlen=%d\n",
159 (cap & PCIM_ATS_EN) ? "enabled" : "Disabled",
161 qlen = (cap & PCIM_ATS_EN) ? qlen : -1;
168 * Check if an endpoint device support device IOTLB or ATS.
171 amdvi_dev_support_iotlb(struct amdvi_softc *softc, uint16_t devid)
173 struct ivhd_dev_cfg *cfg;
175 bool pci_ats, ivhd_ats;
177 qlen = amdvi_find_ats_qlen(devid);
181 KASSERT(softc, ("softc is NULL"));
182 cfg = softc->dev_cfg;
185 for (i = 0; i < softc->dev_cfg_cnt; i++) {
186 if ((cfg->start_id <= devid) && (cfg->end_id >= devid)) {
187 ivhd_ats = cfg->enable_ats;
193 pci_ats = (qlen < 0) ? false : true;
194 if (pci_ats != ivhd_ats)
195 device_printf(softc->dev,
196 "BIOS bug: mismatch in ATS setting for %d.%d.%d,"
197 "ATS inv qlen = %d\n", RID2PCI_STR(devid), qlen);
199 /* Ignore IVRS setting and respect PCI setting. */
204 /* Enable IOTLB support for IOMMU if its supported. */
206 amdvi_hw_enable_iotlb(struct amdvi_softc *softc)
208 #ifndef AMDVI_ATS_ENABLE
209 softc->iotlb = false;
213 supported = (softc->ivhd_flag & IVHD_FLAG_IOTLB) ? true : false;
215 if (softc->pci_cap & AMDVI_PCI_CAP_IOTLB) {
217 device_printf(softc->dev, "IOTLB disabled by BIOS.\n");
219 if (supported && !amdvi_enable_iotlb) {
220 device_printf(softc->dev, "IOTLB disabled by user.\n");
226 softc->iotlb = supported;
amdvi_init_cmd(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl = softc->ctrl;
	ctrl->cmd.len = 8;	/* Use 256 command buffer entries. */
	softc->cmd_max = 1 << ctrl->cmd.len;
	softc->cmd = malloc(sizeof(struct amdvi_cmd) *
	    softc->cmd_max, M_AMDVI, M_WAITOK | M_ZERO);
	if ((uintptr_t)softc->cmd & PAGE_MASK)
		panic("AMDVi: Command buffer not aligned on page boundary.");
	ctrl->cmd.base = vtophys(softc->cmd) / PAGE_SIZE;
	 * XXX: Reset the h/w head/tail pointers in case the IOMMU is
	 * restarting; empirically, the h/w does not clear these pointers.
 * Note: Update the tail pointer only after the command has been written,
 * since the tail pointer update causes the h/w to execute new commands; see
 * section 3.3 of the AMD IOMMU spec ver 2.0.
/* Get the command tail pointer w/o updating it. */
static struct amdvi_cmd *
amdvi_get_cmd_tail(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	struct amdvi_cmd *tail;
	KASSERT(softc, ("softc is NULL"));
	KASSERT(softc->cmd != NULL, ("cmd is NULL"));
	KASSERT(ctrl != NULL, ("ctrl is NULL"));
	tail = (struct amdvi_cmd *)((uint8_t *)softc->cmd +
 * Update the command tail pointer, which will start command execution.
amdvi_update_cmd_tail(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	size = sizeof(struct amdvi_cmd);
	KASSERT(softc->cmd != NULL, ("cmd is NULL"));
	KASSERT(ctrl != NULL, ("ctrl is NULL"));
	ctrl->cmd_tail = MOD_INC(ctrl->cmd_tail, size, softc->cmd_max);
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "cmd_tail: %s Tail:0x%x, Head:0x%x.\n",
 * Various commands supported by IOMMU.
/* Completion wait command. */
amdvi_cmd_cmp(struct amdvi_softc *softc, const uint64_t data)
	struct amdvi_cmd *cmd;
	cmd = amdvi_get_cmd_tail(softc);
	KASSERT(cmd != NULL, ("Cmd is NULL"));
	pa = vtophys(&softc->cmp_data);
	cmd->opcode = AMDVI_CMP_WAIT_OPCODE;
	cmd->word0 = (pa & 0xFFFFFFF8) |
	    (AMDVI_CMP_WAIT_STORE);
	    //(AMDVI_CMP_WAIT_FLUSH | AMDVI_CMP_WAIT_STORE);
	cmd->word1 = (pa >> 32) & 0xFFFFF;
	amdvi_update_cmd_tail(softc);
/* Invalidate device table entry. */
amdvi_cmd_inv_dte(struct amdvi_softc *softc, uint16_t devid)
	struct amdvi_cmd *cmd;
	cmd = amdvi_get_cmd_tail(softc);
	KASSERT(cmd != NULL, ("Cmd is NULL"));
	cmd->opcode = AMDVI_INVD_DTE_OPCODE;
	amdvi_update_cmd_tail(softc);
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidated DTE:0x%x\n", devid);
/* Invalidate IOMMU pages; used for invalidation of a domain. */
amdvi_cmd_inv_iommu_pages(struct amdvi_softc *softc, uint16_t domain_id,
    uint64_t addr, bool guest_nested,
	struct amdvi_cmd *cmd;
	cmd = amdvi_get_cmd_tail(softc);
	KASSERT(cmd != NULL, ("Cmd is NULL"));
	cmd->opcode = AMDVI_INVD_PAGE_OPCODE;
	cmd->word1 = domain_id;
	 * Invalidate all addresses for this domain.
	cmd->addr |= pde ? AMDVI_INVD_PAGE_PDE : 0;
	cmd->addr |= page ? AMDVI_INVD_PAGE_S : 0;
	amdvi_update_cmd_tail(softc);
#ifdef AMDVI_ATS_ENABLE
/* Invalidate device IOTLB. */
amdvi_cmd_inv_iotlb(struct amdvi_softc *softc, uint16_t devid)
	struct amdvi_cmd *cmd;
	qlen = amdvi_find_ats_qlen(devid);
		panic("AMDVI: Invalid ATS qlen(%d) for device %d.%d.%d\n",
		    qlen, RID2PCI_STR(devid));
	cmd = amdvi_get_cmd_tail(softc);
	KASSERT(cmd != NULL, ("Cmd is NULL"));
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidate IOTLB devID 0x%x"
	    " Qlen:%d\n", devid, qlen);
	cmd->opcode = AMDVI_INVD_IOTLB_OPCODE;
	cmd->addr = AMDVI_INVD_IOTLB_ALL_ADDR |
	amdvi_update_cmd_tail(softc);
#ifdef notyet	/* For Interrupt Remap. */
amdvi_cmd_inv_intr_map(struct amdvi_softc *softc,
	struct amdvi_cmd *cmd;
	cmd = amdvi_get_cmd_tail(softc);
	KASSERT(cmd != NULL, ("Cmd is NULL"));
	cmd->opcode = AMDVI_INVD_INTR_OPCODE;
	amdvi_update_cmd_tail(softc);
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidate INTR map of devID 0x%x\n", devid);
/* Invalidate a domain using the INVALIDATE_IOMMU_PAGES command. */
amdvi_inv_domain(struct amdvi_softc *softc, uint16_t domain_id)
	struct amdvi_cmd *cmd;
	cmd = amdvi_get_cmd_tail(softc);
	KASSERT(cmd != NULL, ("Cmd is NULL"));
	 * See section 3.3.3 of IOMMU spec rev 2.0, software note
	 * on invalidating a domain.
	amdvi_cmd_inv_iommu_pages(softc, domain_id, AMDVI_INVD_PAGE_ALL_ADDR,
#ifdef AMDVI_DEBUG_CMD
	device_printf(softc->dev, "Invalidate domain:0x%x\n", domain_id);
amdvi_cmp_wait(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	const uint64_t VERIFY = 0xA5A5;
	volatile uint64_t *read;
	read = &softc->cmp_data;
	amdvi_cmd_cmp(softc, VERIFY);
	/* Wait for h/w to update completion data. */
	for (i = 0; i < 100 && (*read != VERIFY); i++) {
		DELAY(1000);	/* 1 ms */
	status = (VERIFY == softc->cmp_data) ? true : false;
#ifdef AMDVI_DEBUG_CMD
		device_printf(softc->dev, "CMD completion DONE Tail:0x%x, "
		    "Head:0x%x, loop:%d.\n", ctrl->cmd_tail,
		    ctrl->cmd_head, loop);
amdvi_wait(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	KASSERT(softc, ("softc is NULL"));
	KASSERT(ctrl != NULL, ("ctrl is NULL"));
	/* Don't wait if h/w is not enabled. */
	if ((ctrl->control & AMDVI_CTRL_EN) == 0)
	for (i = 0; i < 10; i++) {
		if (amdvi_cmp_wait(softc))
	device_printf(softc->dev, "Error: completion failed"
	    " tail:0x%x, head:0x%x.\n",
	    ctrl->cmd_tail, ctrl->cmd_head);
	amdvi_dump_cmds(softc);
amdvi_dump_cmds(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	struct amdvi_cmd *cmd;
	device_printf(softc->dev, "Dump all the commands:\n");
	 * If the h/w is stuck on a completion, the culprit is the previous
	 * command; start dumping from that command onward.
	off = MOD_DEC(ctrl->cmd_head, sizeof(struct amdvi_cmd),
	for (i = 0; off != ctrl->cmd_tail &&
	    i < softc->cmd_max; i++) {
		cmd = (struct amdvi_cmd *)((uint8_t *)softc->cmd + off);
		printf(" [CMD%d, off:0x%x] opcode= 0x%x 0x%x"
		    " 0x%x 0x%lx\n", i, off, cmd->opcode,
		    cmd->word0, cmd->word1, cmd->addr);
		off = (off + sizeof(struct amdvi_cmd)) %
		    (softc->cmd_max * sizeof(struct amdvi_cmd));
amdvi_init_event(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	softc->event_max = 1 << ctrl->event.len;
	softc->event = malloc(sizeof(struct amdvi_event) *
	    softc->event_max, M_AMDVI, M_WAITOK | M_ZERO);
	if ((uintptr_t)softc->event & PAGE_MASK) {
		device_printf(softc->dev, "Event buffer not page aligned.");
	ctrl->event.base = vtophys(softc->event) / PAGE_SIZE;
	/* Reset the pointers. */
amdvi_decode_evt_flag(uint16_t flag)
	flag &= AMDVI_EVENT_FLAG_MASK;
	printf(" 0x%b]\n", flag,
/* See section 2.5.4 of AMD IOMMU spec ver 2.62. */
amdvi_decode_evt_flag_type(uint8_t type)
	switch (AMDVI_EVENT_FLAG_TYPE(type)) {
		printf("Master Abort\n");
		printf("Target Abort\n");
		printf("Data Err\n");
amdvi_decode_inv_dte_evt(uint16_t devid, uint16_t domid, uint64_t addr,
	printf("\t[INVALID_DTE EVT: devId:0x%x DomId:0x%x"
	amdvi_decode_evt_flag(flag);
amdvi_decode_pf_evt(uint16_t devid, uint16_t domid, uint64_t addr,
	printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x"
	amdvi_decode_evt_flag(flag);
amdvi_decode_dte_hwerr_evt(uint16_t devid, uint16_t domid,
    uint64_t addr, uint16_t flag)
	printf("\t[DEV_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x"
	    " Addr:0x%lx", devid, domid, addr);
	amdvi_decode_evt_flag(flag);
	amdvi_decode_evt_flag_type(flag);
amdvi_decode_page_hwerr_evt(uint16_t devid, uint16_t domid, uint64_t addr,
	printf("\t[PAGE_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x"
	    " Addr:0x%lx", devid, domid, addr);
	amdvi_decode_evt_flag(flag);
	amdvi_decode_evt_flag_type(AMDVI_EVENT_FLAG_TYPE(flag));
amdvi_decode_evt(struct amdvi_event *evt)
	struct amdvi_cmd *cmd;
	switch (evt->opcode) {
	case AMDVI_EVENT_INVALID_DTE:
		amdvi_decode_inv_dte_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
	case AMDVI_EVENT_PFAULT:
		amdvi_decode_pf_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
	case AMDVI_EVENT_DTE_HW_ERROR:
		amdvi_decode_dte_hwerr_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
	case AMDVI_EVENT_PAGE_HW_ERROR:
		amdvi_decode_page_hwerr_evt(evt->devid, evt->pasid_domid,
		    evt->addr, evt->flag);
	case AMDVI_EVENT_ILLEGAL_CMD:
	case AMDVI_EVENT_CMD_HW_ERROR:
		printf("\t[%s EVT]\n", (evt->opcode == AMDVI_EVENT_ILLEGAL_CMD) ?
		    "ILLEGAL CMD" : "CMD HW ERR");
		cmd = (struct amdvi_cmd *)PHYS_TO_DMAP(evt->addr);
		printf("\tCMD opcode= 0x%x 0x%x 0x%x 0x%lx\n",
		    cmd->opcode, cmd->word0, cmd->word1, cmd->addr);
	case AMDVI_EVENT_IOTLB_TIMEOUT:
		printf("\t[IOTLB_INV_TIMEOUT devid:0x%x addr:0x%lx]\n",
		    evt->devid, evt->addr);
	case AMDVI_EVENT_INVALID_DTE_REQ:
		printf("\t[INV_DTE devid:0x%x addr:0x%lx type:0x%x tr:%d]\n",
		    evt->devid, evt->addr, evt->flag >> 9,
		    (evt->flag >> 8) & 1);
	case AMDVI_EVENT_INVALID_PPR_REQ:
	case AMDVI_EVENT_COUNTER_ZERO:
		printf("AMD-Vi: v2 events.\n");
		printf("Unsupported AMD-Vi event:%d\n", evt->opcode);
amdvi_print_events(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	struct amdvi_event *event;
	size = sizeof(struct amdvi_event);
	for (i = 0; i < softc->event_max; i++) {
		event = &softc->event[ctrl->evt_head / size];
		device_printf(softc->dev, "\t[Event%d: Head:0x%x Tail:0x%x]\n",
		    i, ctrl->evt_head, ctrl->evt_tail);
		amdvi_decode_evt(event);
		ctrl->evt_head = MOD_INC(ctrl->evt_head, size,
amdvi_init_dte(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	ctrl->dte.base = vtophys(amdvi_dte) / PAGE_SIZE;
	ctrl->dte.size = 0x1FF;	/* 2MB device table. */
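	/*
	 * Added note (per the AMD IOMMU spec): the size field encodes the
	 * table length in 4KB pages minus one, so 0x1FF selects 512 pages,
	 * i.e. the 2MB amdvi_dte table defined above.
	 */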
 * Not all capabilities of the IOMMU are available in the ACPI IVHD flag
 * or EFR entry, so read them directly from the device.
amdvi_print_pci_cap(device_t dev)
	struct amdvi_softc *softc;
	softc = device_get_softc(dev);
	off = softc->cap_off;
	 * Section 3.7.1 of IOMMU spec rev 2.0.
	 * Read capability from device.
	cap = amdvi_pci_read(softc, off);
	/* Make sure capability type[18:16] is 3. */
	KASSERT((((cap >> 16) & 0x7) == 0x3),
	    ("Not an IOMMU capability 0x%x@0x%x", cap, off));
	softc->pci_cap = cap >> 24;
	device_printf(softc->dev, "PCI cap 0x%x@0x%x feature:%b\n",
	    cap, off, softc->pci_cap,
	    "\20\1IOTLB\2HT\3NPCache\4EFR\5CapExt");
amdvi_event_intr(void *arg)
	struct amdvi_softc *softc;
	struct amdvi_ctrl *ctrl;
	softc = (struct amdvi_softc *)arg;
	device_printf(softc->dev, "EVT INTR %ld Status:0x%x"
	    " EVT Head:0x%x Tail:0x%x]\n", softc->event_intr_cnt++,
	    ctrl->status, ctrl->evt_head, ctrl->evt_tail);
	printf(" [CMD Total 0x%lx] Tail:0x%x, Head:0x%x.\n",
	    softc->total_cmd, ctrl->cmd_tail, ctrl->cmd_head);
	amdvi_print_events(softc);
	ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR;
amdvi_free_evt_intr_res(device_t dev)
	struct amdvi_softc *softc;
	softc = device_get_softc(dev);
	if (softc->event_tag != NULL) {
		bus_teardown_intr(dev, softc->event_res, softc->event_tag);
	if (softc->event_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, softc->event_rid,
	bus_delete_resource(dev, SYS_RES_IRQ, softc->event_rid);
	PCIB_RELEASE_MSI(device_get_parent(device_get_parent(dev)),
	    dev, 1, &softc->event_irq);
amdvi_alloc_intr_resources(struct amdvi_softc *softc)
	struct amdvi_ctrl *ctrl;
	pcib = device_get_parent(device_get_parent(dev));
	mmio_dev = pci_find_bsf(PCI_RID2BUS(softc->pci_rid),
	    PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid));
	if (device_is_attached(mmio_dev)) {
		    "warning: IOMMU device is claimed by another driver %s\n",
		    device_get_driver(mmio_dev)->name);
	softc->event_irq = -1;
	softc->event_rid = 0;
	 * Section 3.7.1 of IOMMU rev 2.0. With MSI, there is only one
	 * interrupt. XXX: Enable MSI/X support.
	err = PCIB_ALLOC_MSI(pcib, dev, 1, 1, &softc->event_irq);
		    "Couldn't find event MSI IRQ resource.\n");
	err = bus_set_resource(dev, SYS_RES_IRQ, softc->event_rid,
	    softc->event_irq, 1);
		device_printf(dev, "Couldn't set event MSI resource.\n");
	softc->event_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &softc->event_rid, RF_ACTIVE);
	if (!softc->event_res) {
		    "Unable to allocate event INTR resource.\n");
	if (bus_setup_intr(dev, softc->event_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, amdvi_event_intr,
	    softc, &softc->event_tag)) {
		device_printf(dev, "Failed to set up event interrupt.\n");
		bus_release_resource(softc->dev, SYS_RES_IRQ,
		    softc->event_rid, softc->event_res);
		softc->event_res = NULL;
	bus_describe_intr(dev, softc->event_res, softc->event_tag,
	err = PCIB_MAP_MSI(pcib, dev, softc->event_irq, &msi_addr,
		    "Event interrupt config failed, err=%d.\n",
		amdvi_free_evt_intr_res(softc->dev);
	/* Clear interrupt status bits. */
	ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR;
	/* Now enable MSI interrupt. */
	pci_enable_msi(mmio_dev, msi_addr, msi_data);
amdvi_print_dev_cap(struct amdvi_softc *softc)
	struct ivhd_dev_cfg *cfg;
	cfg = softc->dev_cfg;
	for (i = 0; i < softc->dev_cfg_cnt; i++) {
		device_printf(softc->dev, "device [0x%x - 0x%x] "
		    "config:%b%s\n", cfg->start_id, cfg->end_id,
		    "\020\001INIT\002ExtInt\003NMI"
		    "\007LINT0\010LINT1",
		    cfg->enable_ats ? "ATS enabled" : "");
amdvi_handle_sysctl(SYSCTL_HANDLER_ARGS)
	struct amdvi_softc *softc;
	int result, type, error = 0;
	softc = (struct amdvi_softc *)arg1;
		result = softc->ctrl->cmd_head;
		error = sysctl_handle_int(oidp, &result, 0,
		result = softc->ctrl->cmd_tail;
		error = sysctl_handle_int(oidp, &result, 0,
		result = softc->ctrl->evt_head;
		error = sysctl_handle_int(oidp, &result, 0,
		result = softc->ctrl->evt_tail;
		error = sysctl_handle_int(oidp, &result, 0,
		device_printf(softc->dev, "Unknown sysctl:%d\n", type);
amdvi_add_sysctl(struct amdvi_softc *softc)
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "event_intr_count", CTLFLAG_RD,
	    &softc->event_intr_cnt, "Event interrupt count");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "command_count", CTLFLAG_RD,
	    &softc->total_cmd, "Command submitted count");
	SYSCTL_ADD_U16(ctx, child, OID_AUTO, "pci_rid", CTLFLAG_RD,
	    &softc->pci_rid, 0, "IOMMU RID");
	SYSCTL_ADD_U16(ctx, child, OID_AUTO, "start_dev_rid", CTLFLAG_RD,
	    &softc->start_dev_rid, 0, "First device RID under this IOMMU");
	SYSCTL_ADD_U16(ctx, child, OID_AUTO, "end_dev_rid", CTLFLAG_RD,
	    &softc->end_dev_rid, 0, "Last device RID under this IOMMU");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_head",
	    CTLTYPE_UINT | CTLFLAG_RD, softc, 0,
	    amdvi_handle_sysctl, "IU", "Command head");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_tail",
	    CTLTYPE_UINT | CTLFLAG_RD, softc, 1,
	    amdvi_handle_sysctl, "IU", "Command tail");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_head",
	    CTLTYPE_UINT | CTLFLAG_RD, softc, 2,
	    amdvi_handle_sysctl, "IU", "Event head");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_tail",
	    CTLTYPE_UINT | CTLFLAG_RD, softc, 3,
	    amdvi_handle_sysctl, "IU", "Event tail");
amdvi_setup_hw(struct amdvi_softc *softc)
	amdvi_hw_enable_iotlb(softc);
	amdvi_print_dev_cap(softc);
	if ((status = amdvi_print_pci_cap(dev)) != 0) {
		device_printf(dev, "Couldn't read PCI capability.\n");
	if ((status = amdvi_init_cmd(softc)) != 0) {
		device_printf(dev, "Couldn't configure command buffer.\n");
	if ((status = amdvi_init_event(softc)) != 0) {
		device_printf(dev, "Couldn't configure event buffer.\n");
	if ((status = amdvi_init_dte(softc)) != 0) {
		device_printf(dev, "Couldn't configure device table.\n");
	if ((status = amdvi_alloc_intr_resources(softc)) != 0) {
	amdvi_add_sysctl(softc);
amdvi_teardown_hw(struct amdvi_softc *softc)
	 * Called after disable; the h/w is stopped by now, so free all the
	 * resources.
	amdvi_free_evt_intr_res(dev);
	free(softc->cmd, M_AMDVI);
	free(softc->event, M_AMDVI);
/*********** bhyve interfaces *********************/
	if (!amdvi_enable_user && ivhd_count) {
		printf("bhyve: Found %d AMD-Vi/IOMMU device(s), "
		    "use hw.vmm.amdvi.enable=1 to enable pass-through.\n",
amdvi_domainId(void)
	 * If we hit the maximum domain limit, roll over, leaving the host
	 * XXX: make sure that this domain is not used.
	if (amdvi_dom_id == AMDVI_MAX_DOMAIN)
	return ((uint16_t)amdvi_dom_id++);
amdvi_do_inv_domain(uint16_t domain_id, bool create)
	struct amdvi_softc *softc;
	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		KASSERT(softc, ("softc is NULL"));
		 * If not-present pages are cached, invalidate the page after
		if (create && ((softc->pci_cap & AMDVI_PCI_CAP_NPCACHE) == 0))
		amdvi_inv_domain(softc, domain_id);
amdvi_create_domain(vm_paddr_t maxaddr)
	struct amdvi_domain *dom;
	dom = malloc(sizeof(struct amdvi_domain), M_AMDVI, M_ZERO | M_WAITOK);
	dom->id = amdvi_domainId();
	//dom->maxaddr = maxaddr;
#ifdef AMDVI_DEBUG_CMD
	printf("Created domain #%d\n", dom->id);
	 * The host domain (#0) doesn't create a translation table
	if (dom->id || amdvi_host_ptp)
		dom->ptp = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO);
	dom->ptp_level = amdvi_ptp_level;
	amdvi_do_inv_domain(dom->id, true);
	SLIST_INSERT_HEAD(&dom_head, dom, next);
amdvi_free_ptp(uint64_t *ptp, int level)
	for (i = 0; i < NPTEPG; i++) {
		if ((ptp[i] & AMDVI_PT_PRESENT) == 0)
		/* XXX: Add super-page or PTE mapping > 4KB. */
		/* Super-page mapping. */
		if (AMDVI_PD_SUPER(ptp[i]))
		amdvi_free_ptp((uint64_t *)PHYS_TO_DMAP(ptp[i]
		    & AMDVI_PT_MASK), level - 1);
amdvi_destroy_domain(void *arg)
	struct amdvi_domain *domain;
	domain = (struct amdvi_domain *)arg;
	KASSERT(domain, ("domain is NULL"));
#ifdef AMDVI_DEBUG_CMD
	printf("Destroying domain %d\n", domain->id);
	amdvi_free_ptp(domain->ptp, domain->ptp_level);
	amdvi_do_inv_domain(domain->id, false);
	SLIST_REMOVE(&dom_head, domain, amdvi_domain, next);
	free(domain, M_AMDVI);
amdvi_set_pt(uint64_t *pt, int level, vm_paddr_t gpa,
    vm_paddr_t hpa, uint64_t pg_size, bool create)
	const int PT_SHIFT = 9;
	const int PT_INDEX_MASK = (1 << PT_SHIFT) - 1;	/* Based on PT_SHIFT */
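	/*
	 * Illustrative note (added): each level resolves PT_SHIFT (9) bits of
	 * the guest-physical address, so the default 4-level table
	 * (amdvi_ptp_level = 4) covers 4 * 9 + 12 = 48 bits of GPA.
	 */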
	if (hpa & (pg_size - 1)) {
		printf("HPA is not size aligned.\n");
	if (gpa & (pg_size - 1)) {
		printf("GPA is not size aligned.\n");
	while ((shift > PAGE_SHIFT) && (pg_size < (1UL << shift))) {
		index = (gpa >> shift) & PT_INDEX_MASK;
		if ((pt[index] == 0) && create) {
			page = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO);
			pt[index] = pa | AMDVI_PT_PRESENT | AMDVI_PT_RW |
			    ((level - 1) << AMDVI_PD_LEVEL_SHIFT);
#ifdef AMDVI_DEBUG_PTE
		if ((gpa % 0x1000000) == 0)
			printf("[level%d, shift = %d]PTE:0x%lx\n",
			    level, shift, pt[index]);
#define PTE2PA(x)	((uint64_t)(x) & AMDVI_PT_MASK)
		pa = PTE2PA(pt[index]);
		pt = (uint64_t *)PHYS_TO_DMAP(pa);
	index = (gpa >> shift) & PT_INDEX_MASK;
	pt[index] = hpa | AMDVI_PT_RW | AMDVI_PT_PRESENT;
#ifdef AMDVI_DEBUG_PTE
	if ((gpa % 0x1000000) == 0)
		printf("[Last level%d, shift = %d]PTE:0x%lx\n",
		    level, shift, pt[index]);
	return (1ULL << shift);
amdvi_update_mapping(struct amdvi_domain *domain, vm_paddr_t gpa,
    vm_paddr_t hpa, uint64_t size, bool create)
	uint64_t mapped, *ptp, len;
	KASSERT(domain, ("domain is NULL"));
	level = domain->ptp_level;
	KASSERT(level, ("Page table level is 0"));
	KASSERT(ptp, ("PTP is NULL"));
	while (mapped < size) {
		len = amdvi_set_pt(ptp, level, gpa + mapped, hpa + mapped,
			printf("Error: Couldn't map HPA:0x%lx GPA:0x%lx\n",
amdvi_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa,
	struct amdvi_domain *domain;
	domain = (struct amdvi_domain *)arg;
	if (domain->id && !domain->ptp) {
		printf("ptp is NULL\n");
	 * If the host domain was created w/o a page table, skip the IOMMU page
	return (amdvi_update_mapping(domain, gpa, hpa, len, true));
amdvi_destroy_mapping(void *arg, vm_paddr_t gpa, uint64_t len)
	struct amdvi_domain *domain;
	domain = (struct amdvi_domain *)arg;
	 * If the host domain was created w/o a page table, skip the IOMMU page
	return (amdvi_update_mapping(domain, gpa, 0, len, false));
static struct amdvi_softc *
amdvi_find_iommu(uint16_t devid)
	struct amdvi_softc *softc;
	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		if ((devid >= softc->start_dev_rid) &&
		    (devid <= softc->end_dev_rid))
	 * XXX: BIOS bug: device not in IVRS table, assume it's from the first IOMMU.
	printf("BIOS bug: device (%d.%d.%d) doesn't have an IVHD entry.\n",
	    RID2PCI_STR(devid));
	return (device_get_softc(ivhd_devs[0]));
 * Set up the device table entry.
 * IOMMU spec Rev 2.0, section 3.2.2.2: some of the fields must
 * be set concurrently, e.g. read and write bits.
amdvi_set_dte(struct amdvi_domain *domain, uint16_t devid, bool enable)
	struct amdvi_softc *softc;
	struct amdvi_dte *temp;
	KASSERT(domain, ("domain is NULL for pci_rid:0x%x\n", devid));
	softc = amdvi_find_iommu(devid);
	KASSERT(softc, ("softc is NULL for pci_rid:0x%x\n", devid));
	temp = &amdvi_dte[devid];
#ifdef AMDVI_ATS_ENABLE
	/* If IOMMU and device support IOTLB, enable it. */
	if (amdvi_dev_support_iotlb(softc, devid) && softc->iotlb)
		temp->iotlb_enable = 1;
	/* Avoid duplicate I/O faults. */
	temp->sup_second_io_fault = 1;
	temp->sup_all_io_fault = amdvi_disable_io_fault;
	temp->domain_id = domain->id;
	temp->pt_base = vtophys(domain->ptp) >> 12;
	temp->pt_level = amdvi_ptp_level;
	 * XXX: Page table valid[TV] bit must be set even if host domain
	 * page tables are not enabled.
	temp->read_allow = 1;
	temp->write_allow = 1;
amdvi_inv_device(uint16_t devid)
	struct amdvi_softc *softc;
	softc = amdvi_find_iommu(devid);
	KASSERT(softc, ("softc is NULL"));
	amdvi_cmd_inv_dte(softc, devid);
#ifdef AMDVI_ATS_ENABLE
	if (amdvi_dev_support_iotlb(softc, devid))
		amdvi_cmd_inv_iotlb(softc, devid);
amdvi_add_device(void *arg, uint16_t devid)
	struct amdvi_domain *domain;
	domain = (struct amdvi_domain *)arg;
	KASSERT(domain != NULL, ("domain is NULL"));
#ifdef AMDVI_DEBUG_CMD
	printf("Assigning device(%d.%d.%d) to domain:%d\n",
	    RID2PCI_STR(devid), domain->id);
	amdvi_set_dte(domain, devid, true);
	amdvi_inv_device(devid);
amdvi_remove_device(void *arg, uint16_t devid)
	struct amdvi_domain *domain;
	domain = (struct amdvi_domain *)arg;
#ifdef AMDVI_DEBUG_CMD
	printf("Remove device(0x%x) from domain:%d\n",
	amdvi_set_dte(domain, devid, false);
	amdvi_inv_device(devid);
	struct amdvi_ctrl *ctrl;
	struct amdvi_softc *softc;
	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		KASSERT(softc, ("softc is NULL\n"));
		KASSERT(ctrl, ("ctrl is NULL\n"));
		val = (AMDVI_CTRL_EN |
		    AMDVI_CTRL_ELOGINT |
		    AMDVI_CTRL_INV_TO_1S);
		if (softc->ivhd_flag & IVHD_FLAG_COH)
			val |= AMDVI_CTRL_COH;
		if (softc->ivhd_flag & IVHD_FLAG_HTT)
			val |= AMDVI_CTRL_HTT;
		if (softc->ivhd_flag & IVHD_FLAG_RPPW)
			val |= AMDVI_CTRL_RPPW;
		if (softc->ivhd_flag & IVHD_FLAG_PPW)
			val |= AMDVI_CTRL_PPW;
		if (softc->ivhd_flag & IVHD_FLAG_ISOC)
			val |= AMDVI_CTRL_ISOC;
		ctrl->control = val;
	struct amdvi_ctrl *ctrl;
	struct amdvi_softc *softc;
	for (i = 0; i < ivhd_count; i++) {
		softc = device_get_softc(ivhd_devs[i]);
		KASSERT(softc, ("softc is NULL\n"));
		KASSERT(ctrl, ("ctrl is NULL\n"));
amdvi_inv_tlb(void *arg)
	struct amdvi_domain *domain;
	domain = (struct amdvi_domain *)arg;
	KASSERT(domain, ("domain is NULL"));
	amdvi_do_inv_domain(domain->id, false);
struct iommu_ops iommu_ops_amd = {
	amdvi_create_domain,
	amdvi_destroy_domain,
	amdvi_create_mapping,
	amdvi_destroy_mapping,
	amdvi_remove_device,