2 * Copyright (c) 2008, 2013 Citrix Systems, Inc.
3 * Copyright (c) 2012 Spectra Logic Corporation
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
37 #include <sys/systm.h>
42 #include <dev/pci/pcivar.h>
44 #include <machine/cpufunc.h>
45 #include <machine/cpu.h>
46 #include <machine/smp.h>
48 #include <x86/apicreg.h>
50 #include <xen/xen-os.h>
51 #include <xen/features.h>
52 #include <xen/gnttab.h>
53 #include <xen/hypervisor.h>
55 #include <xen/xen_intr.h>
57 #include <xen/interface/hvm/params.h>
58 #include <xen/interface/vcpu.h>
60 /*--------------------------- Forward Declarations ---------------------------*/
/*
 * PV IPI filter handlers that stand in for the native APIC vectors when
 * running as a Xen HVM guest, plus the Xen-specific cpu_ops hooks.
 * NOTE(review): this file appears to be a partial extraction; some
 * declaration lines are missing between the numbered lines.
 */
62 static driver_filter_t xen_smp_rendezvous_action;
63 static driver_filter_t xen_invltlb;
64 static driver_filter_t xen_invlpg;
65 static driver_filter_t xen_invlrng;
66 static driver_filter_t xen_invlcache;
68 static driver_filter_t xen_lazypmap;
70 static driver_filter_t xen_ipi_bitmap_handler;
71 static driver_filter_t xen_cpustop_handler;
72 static driver_filter_t xen_cpususpend_handler;
73 static driver_filter_t xen_cpustophard_handler;
/* Xen replacement for lapic_ipi_vectored() and the per-CPU init/resume ops. */
75 static void xen_ipi_vectored(u_int vector, int dest);
76 static void xen_hvm_cpu_resume(void);
77 static void xen_hvm_cpu_init(void);
79 /*---------------------------- Extern Declarations ---------------------------*/
80 /* Variables used by mp_machdep to perform the MMU related IPIs */
81 extern volatile int smp_tlb_wait;
82 extern vm_offset_t smp_tlb_addr2;
84 extern vm_offset_t smp_tlb_addr1;
86 extern struct invpcid_descr smp_tlb_invpcid;
87 extern uint64_t pcid_cr3;
88 extern int invpcid_works;
89 extern int pmap_pcid_enabled;
90 extern pmap_t smp_tlb_pmap;
94 extern void pmap_lazyfix_action(void);
97 /* Variables used by mp_machdep to perform the bitmap IPI */
98 extern volatile u_int cpu_ipi_pending[MAXCPU];
100 /*---------------------------------- Macros ----------------------------------*/
/* Map an IPI vector number to an index into the xen_ipis[] table. */
101 #define IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)
103 /*-------------------------------- Local Types -------------------------------*/
/*
 * How xen_hvm_init() was entered: cold boot, resume, or a cancelled
 * suspend.  NOTE(review): some enumerators and the closing brace are
 * missing from this extraction.
 */
104 enum xen_hvm_init_type {
106 XEN_HVM_INIT_CANCELLED_SUSPEND,
/* One entry per PV IPI: the filter to run and a short description string. */
110 struct xen_ipi_handler
112 driver_filter_t *filter;
113 const char *description;
116 /*-------------------------------- Global Data -------------------------------*/
117 enum xen_domain_type xen_domain_type = XEN_NATIVE;
/*
 * Xen-flavoured cpu_ops; installed over the native ops by xen_hvm_init().
 * ipi_vectored starts as the native lapic routine and is swapped for
 * xen_ipi_vectored later (see cpu_ops.ipi_vectored assignment below).
 */
119 struct cpu_ops xen_hvm_cpu_ops = {
120 .ipi_vectored = lapic_ipi_vectored,
121 .cpu_init = xen_hvm_cpu_init,
122 .cpu_resume = xen_hvm_cpu_resume
125 static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");
/* Table of PV IPI handlers, indexed by IPI_TO_IDX(vector). */
128 static struct xen_ipi_handler xen_ipis[] =
130 [IPI_TO_IDX(IPI_RENDEZVOUS)] = { xen_smp_rendezvous_action, "r" },
131 [IPI_TO_IDX(IPI_INVLTLB)] = { xen_invltlb, "itlb"},
132 [IPI_TO_IDX(IPI_INVLPG)] = { xen_invlpg, "ipg" },
133 [IPI_TO_IDX(IPI_INVLRNG)] = { xen_invlrng, "irg" },
134 [IPI_TO_IDX(IPI_INVLCACHE)] = { xen_invlcache, "ic" },
136 [IPI_TO_IDX(IPI_LAZYPMAP)] = { xen_lazypmap, "lp" },
138 [IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler, "b" },
139 [IPI_TO_IDX(IPI_STOP)] = { xen_cpustop_handler, "st" },
140 [IPI_TO_IDX(IPI_SUSPEND)] = { xen_cpususpend_handler, "sp" },
141 [IPI_TO_IDX(IPI_STOP_HARD)] = { xen_cpustophard_handler, "sth" },
146 * If non-zero, the hypervisor has been configured to use a direct
147 * IDT event callback for interrupt injection.
149 int xen_vector_callback_enabled;
151 /*------------------------------- Per-CPU Data -------------------------------*/
/* Per-CPU vcpu_info storage and the pointer actually used by upcall code. */
152 DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
153 DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
/* Per-CPU array of event-channel handles, one per entry in xen_ipis[]. */
155 DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
158 /*------------------ Hypervisor Access Shared Memory Regions -----------------*/
159 /** Hypercall table accessed via HYPERVISOR_*_op() methods. */
160 char *hypercall_stubs;
161 shared_info_t *HYPERVISOR_shared_info;
164 /*---------------------------- XEN PV IPI Handlers ---------------------------*/
166 * These are C clones of the ASM functions found in apic_vector.s
/*
 * Bitmap IPI filter: forwards the trap frame to the common
 * ipi_bitmap_handler().  NOTE(review): the return type, braces and the
 * "frame = arg" assignment are missing from this extraction.
 */
169 xen_ipi_bitmap_handler(void *arg)
171 struct trapframe *frame;
174 ipi_bitmap_handler(*frame);
175 return (FILTER_HANDLED);
/*
 * Rendezvous IPI filter: bump the optional COUNT_IPIS statistic and run
 * the common smp_rendezvous_action().  NOTE(review): interior lines are
 * missing from this extraction.
 */
179 xen_smp_rendezvous_action(void *arg)
184 cpu = PCPU_GET(cpuid);
185 (*ipi_rendezvous_counts[cpu])++;
186 #endif /* COUNT_IPIS */
188 smp_rendezvous_action();
189 return (FILTER_HANDLED);
/*
 * Full-TLB-shootdown IPI filter (non-PCID path).  NOTE(review): the line
 * performing the actual invltlb() flush appears to be missing from this
 * extraction; only the statistics and completion accounting remain.
 */
193 xen_invltlb(void *arg)
195 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
198 cpu = PCPU_GET(cpuid);
199 #ifdef COUNT_XINVLTLB_HITS
201 #endif /* COUNT_XINVLTLB_HITS */
203 (*ipi_invltlb_counts[cpu])++;
204 #endif /* COUNT_IPIS */
205 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
/* Signal shootdown completion to the initiating CPU. */
208 atomic_add_int(&smp_tlb_wait, 1);
209 return (FILTER_HANDLED);
/*
 * Full-TLB-shootdown IPI filter, PCID-aware variant.  Uses INVPCID when a
 * specific non-kernel PCID is targeted, otherwise falls back to reloading
 * %cr3.  NOTE(review): interior lines (the invpcid_works branch, the cr3
 * read/reload, braces) are missing from this extraction.
 */
214 xen_invltlb_pcid(void *arg)
217 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
220 cpu = PCPU_GET(cpuid);
221 #ifdef COUNT_XINVLTLB_HITS
223 #endif /* COUNT_XINVLTLB_HITS */
225 (*ipi_invltlb_counts[cpu])++;
226 #endif /* COUNT_IPIS */
227 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
/* A pcid of -1 means "all contexts"; 0 is the kernel pmap. */
230 if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
231 smp_tlb_invpcid.pcid != 0) {
234 invpcid(&smp_tlb_invpcid, INVPCID_CTX);
236 /* Otherwise reload %cr3 twice. */
237 if (cr3 != pcid_cr3) {
239 cr3 |= CR3_PCID_SAVE;
/* Drop this CPU from the pmap's saved-context set. */
246 if (smp_tlb_pmap != NULL)
247 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &smp_tlb_pmap->pm_save);
249 atomic_add_int(&smp_tlb_wait, 1);
250 return (FILTER_HANDLED);
/*
 * Single-page-invalidation IPI filter (non-PCID path).  NOTE(review): the
 * conditional selecting between smp_tlb_addr1 and smp_tlb_invpcid.addr is
 * missing from this extraction.
 */
255 xen_invlpg(void *arg)
257 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
260 cpu = PCPU_GET(cpuid);
261 #ifdef COUNT_XINVLTLB_HITS
263 #endif /* COUNT_XINVLTLB_HITS */
265 (*ipi_invlpg_counts[cpu])++;
266 #endif /* COUNT_IPIS */
267 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
270 invlpg(smp_tlb_addr1);
272 invlpg(smp_tlb_invpcid.addr);
274 atomic_add_int(&smp_tlb_wait, 1);
275 return (FILTER_HANDLED);
/*
 * Single-page-invalidation IPI filter, PCID-aware variant.  Prefers
 * INVPCID_ADDR; for a foreign PCID without INVPCID support it temporarily
 * switches %cr3 to the target address space, INVLPGs, and switches back.
 * NOTE(review): several interior lines (invpcid_works test, cr3 read,
 * braces) are missing from this extraction.
 */
280 xen_invlpg_pcid(void *arg)
282 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
285 cpu = PCPU_GET(cpuid);
286 #ifdef COUNT_XINVLTLB_HITS
288 #endif /* COUNT_XINVLTLB_HITS */
290 (*ipi_invlpg_counts[cpu])++;
291 #endif /* COUNT_IPIS */
292 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
295 invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
296 } else if (smp_tlb_invpcid.pcid == 0) {
297 invlpg(smp_tlb_invpcid.addr);
298 } else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
304 * PCID supported, but INVPCID is not.
305 * Temporarily switch to the target address
306 * space and do INVLPG.
/* CR3_PCID_SAVE keeps the TLB contents for the PCID across the reload. */
310 load_cr3(pcid_cr3 | CR3_PCID_SAVE);
311 invlpg(smp_tlb_invpcid.addr);
312 load_cr3(cr3 | CR3_PCID_SAVE);
315 atomic_add_int(&smp_tlb_wait, 1);
316 return (FILTER_HANDLED);
/*
 * Invalidate every page in [start, end) with INVLPG.  NOTE(review): the
 * loop body (invlpg(start); start += PAGE_SIZE;) is missing from this
 * extraction — only the header and the do/while terminator remain.
 */
321 invlpg_range(vm_offset_t start, vm_offset_t end)
326 } while (start < end);
/*
 * Range-invalidation IPI filter.  Walks [addr, smp_tlb_addr2) either via
 * INVPCID_ADDR per page (PCID + INVPCID), plain INVLPG (kernel pmap), or
 * a temporary %cr3 switch for a foreign PCID.  NOTE(review): several
 * interior lines (descriptor setup, loop increments, braces) are missing
 * from this extraction.
 */
330 xen_invlrng(void *arg)
333 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
336 cpu = PCPU_GET(cpuid);
337 #ifdef COUNT_XINVLTLB_HITS
339 #endif /* COUNT_XINVLTLB_HITS */
341 (*ipi_invlrng_counts[cpu])++;
342 #endif /* COUNT_IPIS */
343 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
346 addr = smp_tlb_addr1;
347 invlpg_range(addr, smp_tlb_addr2);
349 addr = smp_tlb_invpcid.addr;
350 if (pmap_pcid_enabled) {
352 struct invpcid_descr d;
356 invpcid(&d, INVPCID_ADDR);
358 } while (d.addr < smp_tlb_addr2);
359 } else if (smp_tlb_invpcid.pcid == 0) {
361 * kernel pmap - use invlpg to invalidate
364 invlpg_range(addr, smp_tlb_addr2);
365 } else if (smp_tlb_invpcid.pcid != (uint64_t)-1) {
/* Detach this CPU from the target pmap's saved-context set. */
367 if (smp_tlb_pmap != NULL) {
368 CPU_CLR_ATOMIC(PCPU_GET(cpuid),
369 &smp_tlb_pmap->pm_save);
376 load_cr3(pcid_cr3 | CR3_PCID_SAVE);
377 invlpg_range(addr, smp_tlb_addr2);
378 load_cr3(cr3 | CR3_PCID_SAVE);
381 invlpg_range(addr, smp_tlb_addr2);
385 atomic_add_int(&smp_tlb_wait, 1);
386 return (FILTER_HANDLED);
/*
 * Cache-invalidation IPI filter.  NOTE(review): the actual wbinvd() line
 * appears to be missing from this extraction.  Also note the redundant
 * second "cpu = PCPU_GET(cpuid);" right after the initialized declaration
 * — harmless, but one of the two is unnecessary.
 */
390 xen_invlcache(void *arg)
393 int cpu = PCPU_GET(cpuid);
395 cpu = PCPU_GET(cpuid);
396 (*ipi_invlcache_counts[cpu])++;
397 #endif /* COUNT_IPIS */
400 atomic_add_int(&smp_tlb_wait, 1);
401 return (FILTER_HANDLED);
/* Lazy-pmap IPI filter: defers to the pmap layer's fixup routine. */
406 xen_lazypmap(void *arg)
409 pmap_lazyfix_action();
410 return (FILTER_HANDLED);
/*
 * Stop/suspend/stop-hard IPI filters.  NOTE(review): the bodies are
 * incomplete in this extraction; the cpustop and cpustophard variants
 * presumably call cpustop_handler() / cpustophard_handler() on the
 * missing lines — confirm against the full source.
 */
415 xen_cpustop_handler(void *arg)
419 return (FILTER_HANDLED);
423 xen_cpususpend_handler(void *arg)
426 cpususpend_handler();
427 return (FILTER_HANDLED);
431 xen_cpustophard_handler(void *arg)
435 return (FILTER_HANDLED);
438 /* Xen PV IPI sender */
440 xen_ipi_vectored(u_int vector, int dest)
442 xen_intr_handle_t *ipi_handle;
443 int ipi_idx, to_cpu, self;
445 ipi_idx = IPI_TO_IDX(vector);
446 if (ipi_idx > nitems(xen_ipis))
447 panic("IPI out of range");
450 case APIC_IPI_DEST_SELF:
451 ipi_handle = DPCPU_GET(ipi_handle);
452 xen_intr_signal(ipi_handle[ipi_idx]);
454 case APIC_IPI_DEST_ALL:
455 CPU_FOREACH(to_cpu) {
456 ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
457 xen_intr_signal(ipi_handle[ipi_idx]);
460 case APIC_IPI_DEST_OTHERS:
461 self = PCPU_GET(cpuid);
462 CPU_FOREACH(to_cpu) {
463 if (to_cpu != self) {
464 ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
465 xen_intr_signal(ipi_handle[ipi_idx]);
470 to_cpu = apic_cpuid(dest);
471 ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
472 xen_intr_signal(ipi_handle[ipi_idx]);
477 /* XEN diverged cpu operations */
/*
 * cpu_resume hook: clear stale pending bitmap IPIs (Xen does not preserve
 * pending event channels across migration) and re-register the vcpu_info
 * area.  NOTE(review): the trailing call (presumably xen_hvm_cpu_init())
 * is missing from this extraction.
 */
479 xen_hvm_cpu_resume(void)
481 u_int cpuid = PCPU_GET(cpuid);
484 * Reset pending bitmap IPIs, because Xen doesn't preserve pending
485 * event channels on migration.
487 cpu_ipi_pending[cpuid] = 0;
489 /* register vcpu_info area */
/*
 * Bind one event channel per entry of xen_ipis[] for the given CPU,
 * storing the handles in the CPU's per-CPU ipi_handle[] array.  Panics if
 * a port cannot be allocated.  NOTE(review): variable declarations
 * (dev, idx, rc), braces and the rc error test are missing from this
 * extraction.
 */
494 xen_cpu_ipi_init(int cpu)
496 xen_intr_handle_t *ipi_handle;
497 const struct xen_ipi_handler *ipi;
501 ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
502 dev = pcpu_find(cpu)->pc_device;
503 KASSERT((dev != NULL), ("NULL pcpu device_t"));
505 for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
/* Gaps in the IPI vector space have no filter; leave the slot empty. */
507 if (ipi->filter == NULL) {
508 ipi_handle[idx] = NULL;
512 rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
513 INTR_TYPE_TTY, &ipi_handle[idx]);
515 panic("Unable to allocate a XEN IPI port");
516 xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
/*
 * NOTE(review): the function header is missing from this extraction;
 * judging by the SYSINIT at the bottom of the file this is the interior
 * of xen_setup_cpus().  It bails unless running as an HVM domain with
 * vector callbacks, swaps in the PCID-aware TLB handlers when PCID is
 * enabled, (presumably) calls xen_cpu_ipi_init() for each CPU on the
 * missing lines, and installs the PV IPI sender.
 */
525 if (!xen_hvm_domain() || !xen_vector_callback_enabled)
529 if (pmap_pcid_enabled) {
530 xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
531 xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
537 /* Set the xen pv ipi ops to replace the native ones */
538 cpu_ops.ipi_vectored = xen_ipi_vectored;
542 /*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
544 xen_hvm_cpuid_base(void)
546 uint32_t base, regs[4];
548 for (base = 0x40000000; base < 0x40010000; base += 0x100) {
549 do_cpuid(base, regs);
550 if (!memcmp("XenVMMXenVMM", ®s[1], 12)
551 && (regs[0] - base) >= 2)
558 * Allocate and fill in the hypercall page.
/*
 * Probe for Xen via CPUID, allocate the hypercall region sized by CPUID
 * leaf base+2 (EAX = page count), and ask the hypervisor to fill each
 * page by writing its physical address (plus page index) to the MSR
 * reported in EBX.  NOTE(review): the early-return on base == 0, the
 * loop variable declaration and braces are missing from this extraction.
 */
561 xen_hvm_init_hypercall_stubs(void)
563 uint32_t base, regs[4];
566 base = xen_hvm_cpuid_base();
570 if (hypercall_stubs == NULL) {
571 do_cpuid(base + 1, regs);
572 printf("XEN: Hypervisor version %d.%d detected.\n",
573 regs[0] >> 16, regs[0] & 0xffff);
577 * Find the hypercall pages.
579 do_cpuid(base + 2, regs);
/* Only allocate on first call; resume reuses the existing region. */
581 if (hypercall_stubs == NULL) {
582 size_t call_region_size;
584 call_region_size = regs[0] * PAGE_SIZE;
585 hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
586 if (hypercall_stubs == NULL)
587 panic("Unable to allocate Xen hypercall region");
/* regs[1] is the hypercall-page MSR; low bits select the page index. */
590 for (i = 0; i < regs[0]; i++)
591 wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);
/*
 * Allocate (once) a page for the Xen shared_info structure and ask the
 * hypervisor to map it at that guest frame via XENMEM_add_to_physmap.
 * Panics on allocation or hypercall failure.
 */
597 xen_hvm_init_shared_info_page(void)
599 struct xen_add_to_physmap xatp;
601 if (HYPERVISOR_shared_info == NULL) {
602 HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
603 if (HYPERVISOR_shared_info == NULL)
604 panic("Unable to allocate Xen shared info page")
607 xatp.domid = DOMID_SELF;
609 xatp.space = XENMAPSPACE_shared_info;
610 xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
611 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
612 panic("HYPERVISOR_memory_op failed");
616 * Tell the hypervisor how to contact us for event channel callbacks.
/*
 * Register the event-channel callback with Xen via HVMOP_set_param:
 * prefer a direct IDT vector (HVM_CALLBACK_VECTOR) when the hypervisor
 * supports it, otherwise fall back to a GSI or PCI INTx callback derived
 * from the xenpci device.  NOTE(review): several interior lines (early
 * return, branch/brace structure, irq/slot/pin declarations) are missing
 * from this extraction.
 */
619 xen_hvm_set_callback(device_t dev)
621 struct xen_hvm_param xhp;
/* Already registered a vector callback — nothing to do. */
624 if (xen_vector_callback_enabled)
627 xhp.domid = DOMID_SELF;
628 xhp.index = HVM_PARAM_CALLBACK_IRQ;
629 if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
632 xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
633 error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
635 xen_vector_callback_enabled = 1;
638 printf("Xen HVM callback vector registration failed (%d). "
639 "Falling back to emulated device interrupt\n", error);
641 xen_vector_callback_enabled = 0;
644 * Called from early boot or resume.
645 * xenpci will invoke us again later.
650 irq = pci_get_irq(dev);
652 xhp.value = HVM_CALLBACK_GSI(irq);
/* INTx pins are 1-based in PCI config space, 0-based for Xen. */
657 slot = pci_get_slot(dev);
658 pin = pci_get_intpin(dev) - 1;
659 xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
662 if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
663 panic("Can't set evtchn callback");
/*
 * Magic I/O port used by the Xen platform PCI device to unplug emulated
 * devices once PV drivers take over.  NOTE(review): the XMI_MAGIC
 * enumerator and the enum's opening/closing lines are missing from this
 * extraction.
 */
666 #define XEN_MAGIC_IOPORT 0x10
669 XMI_UNPLUG_IDE_DISKS = 0x01,
670 XMI_UNPLUG_NICS = 0x02,
671 XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
/*
 * If the magic unplug port responds with the expected magic value, tell
 * QEMU to unplug its emulated IDE disks and NICs so the PV frontends can
 * claim the devices instead.
 */
675 xen_hvm_disable_emulated_devices(void)
677 if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
681 printf("XEN: Disabling emulated block and network devices\n");
682 outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
/*
 * Central HVM setup, called on cold boot, resume and cancelled suspend.
 * Initializes hypercall stubs, then (by init type) sets up Xen features,
 * cpu_ops and the guest type, or clears stale per-CPU vcpu_info on
 * resume; finally maps the shared info page, registers the event-channel
 * callback and unplugs emulated devices.  NOTE(review): the switch
 * statement header, break statements, loop header and braces are missing
 * from this extraction.
 */
686 xen_hvm_init(enum xen_hvm_init_type init_type)
/* A cancelled suspend leaves the pre-suspend state valid — nothing to do. */
691 if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
694 error = xen_hvm_init_hypercall_stubs();
697 case XEN_HVM_INIT_COLD:
701 setup_xen_features();
702 cpu_ops = xen_hvm_cpu_ops;
703 vm_guest = VM_GUEST_XEN;
705 case XEN_HVM_INIT_RESUME:
707 panic("Unable to init Xen hypercall stubs on resume");
709 /* Clear stale vcpu_info. */
711 DPCPU_ID_SET(i, vcpu_info, NULL);
714 panic("Unsupported HVM initialization type");
/* Callback must be re-registered from scratch after init/resume. */
717 xen_vector_callback_enabled = 0;
718 xen_domain_type = XEN_HVM_DOMAIN;
719 xen_hvm_init_shared_info_page();
720 xen_hvm_set_callback(NULL);
721 xen_hvm_disable_emulated_devices();
/*
 * Suspend hook (body missing from this extraction) and resume hook:
 * re-run xen_hvm_init() with the appropriate init type, then re-register
 * CPU0's vcpu_info (the call on the line after the comment is missing
 * from this extraction).
 */
725 xen_hvm_suspend(void)
730 xen_hvm_resume(bool suspend_cancelled)
733 xen_hvm_init(suspend_cancelled ?
734 XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);
736 /* Register vcpu_info area for CPU#0. */
/* SYSINIT wrapper: perform the cold-boot HVM initialization. */
741 xen_hvm_sysinit(void *arg __unused)
743 xen_hvm_init(XEN_HVM_INIT_COLD);
/*
 * Set each CPU's Xen VCPU ID from its ACPI ID.  NOTE(review): the
 * CPU_FOREACH loop, the pcpu lookup assigning 'pc' and the printf
 * arguments are missing from this extraction.
 */
747 xen_set_vcpu_id(void)
752 /* Set vcpu_id to acpi_id */
755 pc->pc_vcpu_id = pc->pc_acpi_id;
757 printf("XEN: CPU %u has VCPU ID %u\n",
/*
 * cpu_init hook: register this CPU's per-CPU vcpu_local_info with the
 * hypervisor (VCPUOP_register_vcpu_info).  On hypercall failure it falls
 * back to the legacy shared_info->vcpu_info slot; on success the local
 * copy is used.  NOTE(review): the early return, cpu/rc declarations and
 * the rc test selecting between the two DPCPU_SET calls are missing from
 * this extraction.
 */
763 xen_hvm_cpu_init(void)
765 struct vcpu_register_vcpu_info info;
766 struct vcpu_info *vcpu_info;
772 if (DPCPU_GET(vcpu_info) != NULL) {
774 * vcpu_info is already set. We're resuming
775 * from a failed migration and our pre-suspend
776 * configuration is still valid.
781 vcpu_info = DPCPU_PTR(vcpu_local_info);
782 cpu = PCPU_GET(vcpu_id);
783 info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
784 info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));
786 rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
788 DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
790 DPCPU_SET(vcpu_info, vcpu_info);
/*
 * Boot-time hook registrations, ordered by SYSINIT subsystem: hypervisor
 * probe first, then per-CPU vcpu IDs, interrupt-time CPU init, and the
 * SMP-time PV IPI setup.
 */
793 SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
795 SYSINIT(xen_setup_cpus, SI_SUB_SMP, SI_ORDER_FIRST, xen_setup_cpus, NULL);
797 SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL);
798 SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);