/*
 * Copyright (c) 2008, 2013 Citrix Systems, Inc.
 * Copyright (c) 2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcivar.h>

#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <x86/apicreg.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/gnttab.h>
#include <xen/hypervisor.h>
#include <xen/hvm.h>
#include <xen/xen_intr.h>

#include <xen/interface/hvm/params.h>
#include <xen/interface/vcpu.h>

/*--------------------------- Forward Declarations ---------------------------*/
#ifdef SMP
static driver_filter_t xen_smp_rendezvous_action;
static driver_filter_t xen_invltlb;
static driver_filter_t xen_invlpg;
static driver_filter_t xen_invlrng;
static driver_filter_t xen_invlcache;
#ifdef __i386__
static driver_filter_t xen_lazypmap;
#endif
static driver_filter_t xen_ipi_bitmap_handler;
static driver_filter_t xen_cpustop_handler;
static driver_filter_t xen_cpususpend_handler;
static driver_filter_t xen_cpustophard_handler;
#endif
static void xen_ipi_vectored(u_int vector, int dest);
static void xen_hvm_cpu_resume(void);
static void xen_hvm_cpu_init(void);

/*---------------------------- Extern Declarations ---------------------------*/
#ifdef __i386__
extern void pmap_lazyfix_action(void);
#endif
#ifdef __amd64__
extern int pmap_pcid_enabled;
#endif

/* Variables used by mp_machdep to perform the bitmap IPI */
extern volatile u_int cpu_ipi_pending[MAXCPU];

/*---------------------------------- Macros ----------------------------------*/
#define IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)

/*-------------------------------- Local Types -------------------------------*/
enum xen_hvm_init_type {
        XEN_HVM_INIT_COLD,
        XEN_HVM_INIT_CANCELLED_SUSPEND,
        XEN_HVM_INIT_RESUME
};

struct xen_ipi_handler
{
        driver_filter_t *filter;
        const char      *description;
};

/*-------------------------------- Global Data -------------------------------*/
enum xen_domain_type xen_domain_type = XEN_NATIVE;

struct cpu_ops xen_hvm_cpu_ops = {
        .ipi_vectored   = lapic_ipi_vectored,
        .cpu_init       = xen_hvm_cpu_init,
        .cpu_resume     = xen_hvm_cpu_resume
};

static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");

#ifdef SMP
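/*
 * Table mapping each FreeBSD IPI vector (offset by APIC_IPI_INTS via
 * IPI_TO_IDX) to the C filter routine and short description used when
 * one event channel per IPI is bound on every CPU in xen_cpu_ipi_init().
 */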
static struct xen_ipi_handler xen_ipis[] =
{
        [IPI_TO_IDX(IPI_RENDEZVOUS)]    = { xen_smp_rendezvous_action,  "r"   },
        [IPI_TO_IDX(IPI_INVLTLB)]       = { xen_invltlb,                "itlb"},
        [IPI_TO_IDX(IPI_INVLPG)]        = { xen_invlpg,                 "ipg" },
        [IPI_TO_IDX(IPI_INVLRNG)]       = { xen_invlrng,                "irg" },
        [IPI_TO_IDX(IPI_INVLCACHE)]     = { xen_invlcache,              "ic"  },
#ifdef __i386__
        [IPI_TO_IDX(IPI_LAZYPMAP)]      = { xen_lazypmap,               "lp"  },
#endif
        [IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,     "b"   },
        [IPI_TO_IDX(IPI_STOP)]          = { xen_cpustop_handler,        "st"  },
        [IPI_TO_IDX(IPI_SUSPEND)]       = { xen_cpususpend_handler,     "sp"  },
        [IPI_TO_IDX(IPI_STOP_HARD)]     = { xen_cpustophard_handler,    "sth" },
};
#endif

/**
 * If non-zero, the hypervisor has been configured to use a direct
 * IDT event callback for interrupt injection.
 */
int xen_vector_callback_enabled;

/*------------------------------- Per-CPU Data -------------------------------*/
DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
#ifdef SMP
DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
#endif

/*------------------ Hypervisor Access Shared Memory Regions -----------------*/
/** Hypercall table accessed via HYPERVISOR_*_op() methods. */
char *hypercall_stubs;
shared_info_t *HYPERVISOR_shared_info;
start_info_t *HYPERVISOR_start_info;

#ifdef SMP
/*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the ASM IPI handlers found in apic_vector.s.
 */
static int
xen_ipi_bitmap_handler(void *arg)
{
        struct trapframe *frame;

        frame = arg;
        ipi_bitmap_handler(*frame);
        return (FILTER_HANDLED);
}

static int
xen_smp_rendezvous_action(void *arg)
{
#ifdef COUNT_IPIS
        (*ipi_rendezvous_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

        smp_rendezvous_action();
        return (FILTER_HANDLED);
}

static int
xen_invltlb(void *arg)
{

        invltlb_handler();
        return (FILTER_HANDLED);
}

#ifdef __amd64__
static int
xen_invltlb_pcid(void *arg)
{

        invltlb_pcid_handler();
        return (FILTER_HANDLED);
}
#endif

static int
xen_invlpg(void *arg)
{

        invlpg_handler();
        return (FILTER_HANDLED);
}

#ifdef __amd64__
static int
xen_invlpg_pcid(void *arg)
{

        invlpg_pcid_handler();
        return (FILTER_HANDLED);
}
#endif

static int
xen_invlrng(void *arg)
{

        invlrng_handler();
        return (FILTER_HANDLED);
}

static int
xen_invlcache(void *arg)
{

        invlcache_handler();
        return (FILTER_HANDLED);
}

#ifdef __i386__
static int
xen_lazypmap(void *arg)
{

        pmap_lazyfix_action();
        return (FILTER_HANDLED);
}
#endif

static int
xen_cpustop_handler(void *arg)
{

        cpustop_handler();
        return (FILTER_HANDLED);
}

static int
xen_cpususpend_handler(void *arg)
{

        cpususpend_handler();
        return (FILTER_HANDLED);
}

static int
xen_cpustophard_handler(void *arg)
{

        ipi_nmi_handler();
        return (FILTER_HANDLED);
}

/* Xen PV IPI sender */
static void
xen_ipi_vectored(u_int vector, int dest)
{
        xen_intr_handle_t *ipi_handle;
        int ipi_idx, to_cpu, self;

        ipi_idx = IPI_TO_IDX(vector);
        if (ipi_idx >= nitems(xen_ipis))
                panic("IPI out of range");

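        /*
         * 'dest' is either one of the APIC_IPI_DEST_* shorthands or a
         * specific APIC ID; instead of programming the local APIC, signal
         * the event channel that was bound to this IPI on each target CPU.
         */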
        switch (dest) {
        case APIC_IPI_DEST_SELF:
                ipi_handle = DPCPU_GET(ipi_handle);
                xen_intr_signal(ipi_handle[ipi_idx]);
                break;
        case APIC_IPI_DEST_ALL:
                CPU_FOREACH(to_cpu) {
                        ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
                        xen_intr_signal(ipi_handle[ipi_idx]);
                }
                break;
        case APIC_IPI_DEST_OTHERS:
                self = PCPU_GET(cpuid);
                CPU_FOREACH(to_cpu) {
                        if (to_cpu != self) {
                                ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
                                xen_intr_signal(ipi_handle[ipi_idx]);
                        }
                }
                break;
        default:
                to_cpu = apic_cpuid(dest);
                ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
                xen_intr_signal(ipi_handle[ipi_idx]);
                break;
        }
}

/* XEN diverged cpu operations */
static void
xen_hvm_cpu_resume(void)
{
        u_int cpuid = PCPU_GET(cpuid);

        /*
         * Reset pending bitmap IPIs, because Xen doesn't preserve pending
         * event channels on migration.
         */
        cpu_ipi_pending[cpuid] = 0;

        /* register vcpu_info area */
        xen_hvm_cpu_init();
}

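/*
 * Allocate and bind an event channel port for every IPI in xen_ipis[] on
 * the given CPU, stashing the resulting handles in the per-CPU ipi_handle
 * array so that xen_ipi_vectored() can signal them later.
 */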
static void
xen_cpu_ipi_init(int cpu)
{
        xen_intr_handle_t *ipi_handle;
        const struct xen_ipi_handler *ipi;
        device_t dev;
        int idx, rc;

        ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
        dev = pcpu_find(cpu)->pc_device;
        KASSERT((dev != NULL), ("NULL pcpu device_t"));

        for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {

                if (ipi->filter == NULL) {
                        ipi_handle[idx] = NULL;
                        continue;
                }

                rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
                    INTR_TYPE_TTY, &ipi_handle[idx]);
                if (rc != 0)
                        panic("Unable to allocate a XEN IPI port");
                xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
        }
}

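/*
 * Once all CPUs are up, bind the IPI event channels for every CPU and
 * switch the machine-dependent IPI delivery hook over to event channels.
 * On amd64 with PCID enabled, the PCID-aware TLB shoot-down handlers are
 * installed in xen_ipis[] first.
 */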
static void
xen_setup_cpus(void)
{
        int i;

        if (!xen_hvm_domain() || !xen_vector_callback_enabled)
                return;

#ifdef __amd64__
        if (pmap_pcid_enabled) {
                xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
                xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
        }
#endif
        CPU_FOREACH(i)
                xen_cpu_ipi_init(i);

        /* Set the xen pv ipi ops to replace the native ones */
        cpu_ops.ipi_vectored = xen_ipi_vectored;
}
#endif

/*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
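/*
 * Scan the hypervisor CPUID leaves (0x40000000 upwards, in 0x100 steps,
 * since the Xen leaves may sit at an offset when other hypervisor
 * interfaces are exposed first) for the "XenVMMXenVMM" signature returned
 * in EBX:ECX:EDX.  A base leaf is only accepted if it advertises at least
 * the version and hypercall-page leaves (base + 1 and base + 2).
 */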
static uint32_t
xen_hvm_cpuid_base(void)
{
        uint32_t base, regs[4];

        for (base = 0x40000000; base < 0x40010000; base += 0x100) {
                do_cpuid(base, regs);
                if (!memcmp("XenVMMXenVMM", &regs[1], 12)
                    && (regs[0] - base) >= 2)
                        return (base);
        }
        return (0);
}

/*
 * Allocate and fill in the hypercall pages.
 */
static int
xen_hvm_init_hypercall_stubs(void)
{
        uint32_t base, regs[4];
        int i;

        base = xen_hvm_cpuid_base();
        if (base == 0)
                return (ENXIO);

        if (hypercall_stubs == NULL) {
                do_cpuid(base + 1, regs);
                printf("XEN: Hypervisor version %d.%d detected.\n",
                    regs[0] >> 16, regs[0] & 0xffff);
        }

        /*
         * Find the hypercall pages.
         */
        do_cpuid(base + 2, regs);

        if (hypercall_stubs == NULL) {
                size_t call_region_size;

                call_region_size = regs[0] * PAGE_SIZE;
                hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
                if (hypercall_stubs == NULL)
                        panic("Unable to allocate Xen hypercall region");
        }

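        /*
         * CPUID leaf base + 2 reports the number of hypercall pages in
         * EAX and the MSR used to install them in EBX.  Writing the
         * physical address of each page, or'd with its index, to that MSR
         * asks the hypervisor to fill the page with hypercall trampolines.
         */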
        for (i = 0; i < regs[0]; i++)
                wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);

        return (0);
}

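/*
 * Ask the hypervisor to map its shared_info page at the physical frame
 * backing HYPERVISOR_shared_info, using the XENMEM_add_to_physmap
 * hypercall with the XENMAPSPACE_shared_info space.
 */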
static void
xen_hvm_init_shared_info_page(void)
{
        struct xen_add_to_physmap xatp;

        if (HYPERVISOR_shared_info == NULL) {
                HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
                if (HYPERVISOR_shared_info == NULL)
                        panic("Unable to allocate Xen shared info page");
        }

        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                panic("HYPERVISOR_memory_op failed");
}

/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 */
void
xen_hvm_set_callback(device_t dev)
{
        struct xen_hvm_param xhp;
        int irq;

        if (xen_vector_callback_enabled)
                return;

        xhp.domid = DOMID_SELF;
        xhp.index = HVM_PARAM_CALLBACK_IRQ;
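        /*
         * Prefer direct vector injection through the IDT.  If the
         * hypervisor does not advertise XENFEAT_hvm_callback_vector, fall
         * back to the emulated xenpci device interrupt: a legacy GSI when
         * the routed IRQ is below 16, otherwise the device's PCI INTx pin.
         */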
        if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
                int error;

                xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
                error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
                if (error == 0) {
                        xen_vector_callback_enabled = 1;
                        return;
                }
                printf("Xen HVM callback vector registration failed (%d). "
                    "Falling back to emulated device interrupt\n", error);
        }
        xen_vector_callback_enabled = 0;
        if (dev == NULL) {
                /*
                 * Called from early boot or resume.
                 * xenpci will invoke us again later.
                 */
                return;
        }

        irq = pci_get_irq(dev);
        if (irq < 16) {
                xhp.value = HVM_CALLBACK_GSI(irq);
        } else {
                u_int slot;
                u_int pin;

                slot = pci_get_slot(dev);
                pin = pci_get_intpin(dev) - 1;
                xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
        }

        if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
                panic("Can't set evtchn callback");
}

#define XEN_MAGIC_IOPORT 0x10
enum {
        XMI_MAGIC                        = 0x49d2,
        XMI_UNPLUG_IDE_DISKS             = 0x01,
        XMI_UNPLUG_NICS                  = 0x02,
        XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
};

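/*
 * The Xen platform device implements an "unplug" protocol on the magic
 * I/O port: a 16-bit read returning XMI_MAGIC indicates support, and a
 * 16-bit write of an XMI_UNPLUG_* mask disconnects the matching emulated
 * devices so that the PV disk and network drivers can attach instead.
 */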
static void
xen_hvm_disable_emulated_devices(void)
{
        if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
                return;

        if (bootverbose)
                printf("XEN: Disabling emulated block and network devices\n");
        outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
}

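/*
 * Common HVM initialization, run both at cold boot and after a resume.
 * XEN_HVM_INIT_COLD probes for the hypervisor and installs the Xen cpu_ops;
 * XEN_HVM_INIT_RESUME re-installs the hypercall pages and drops cached
 * vcpu_info pointers after a migration; a cancelled suspend needs nothing
 * redone.  In the non-cancelled cases the shared info page, event channel
 * callback and device unplug are then set up again.
 */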
static void
xen_hvm_init(enum xen_hvm_init_type init_type)
{
        int error;
        int i;

        if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
                return;

        error = xen_hvm_init_hypercall_stubs();

        switch (init_type) {
        case XEN_HVM_INIT_COLD:
                if (error != 0)
                        return;

                setup_xen_features();
                cpu_ops = xen_hvm_cpu_ops;
                vm_guest = VM_GUEST_XEN;
                break;
        case XEN_HVM_INIT_RESUME:
                if (error != 0)
                        panic("Unable to init Xen hypercall stubs on resume");

                /* Clear stale vcpu_info. */
                CPU_FOREACH(i)
                        DPCPU_ID_SET(i, vcpu_info, NULL);
                break;
        default:
                panic("Unsupported HVM initialization type");
        }

        xen_vector_callback_enabled = 0;
        xen_domain_type = XEN_HVM_DOMAIN;
        xen_hvm_init_shared_info_page();
        xen_hvm_set_callback(NULL);
        xen_hvm_disable_emulated_devices();
}

void
xen_hvm_suspend(void)
{
}

void
xen_hvm_resume(bool suspend_cancelled)
{

        xen_hvm_init(suspend_cancelled ?
            XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);

        /* Register vcpu_info area for CPU#0. */
        xen_hvm_cpu_init();
}

static void
xen_hvm_sysinit(void *arg __unused)
{
        xen_hvm_init(XEN_HVM_INIT_COLD);
}

static void
xen_set_vcpu_id(void)
{
        struct pcpu *pc;
        int i;

        /* Set vcpu_id to acpi_id */
        CPU_FOREACH(i) {
                pc = pcpu_find(i);
                pc->pc_vcpu_id = pc->pc_acpi_id;
                if (bootverbose)
                        printf("XEN: CPU %u has VCPU ID %u\n",
                               i, pc->pc_vcpu_id);
        }
}

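/*
 * Register this CPU's vcpu_info area with the hypervisor through
 * VCPUOP_register_vcpu_info so that event and time information is
 * delivered to per-CPU memory rather than to the fixed slots in
 * shared_info.  If registration fails, fall back to the legacy
 * shared_info->vcpu_info[] entry for this VCPU ID.
 */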
static void
xen_hvm_cpu_init(void)
{
        struct vcpu_register_vcpu_info info;
        struct vcpu_info *vcpu_info;
        int cpu, rc;

        if (!xen_domain())
                return;

        if (DPCPU_GET(vcpu_info) != NULL) {
                /*
                 * vcpu_info is already set.  We're resuming
                 * from a failed migration and our pre-suspend
                 * configuration is still valid.
                 */
                return;
        }

        vcpu_info = DPCPU_PTR(vcpu_local_info);
        cpu = PCPU_GET(vcpu_id);
        info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
        info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));

        rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
        if (rc != 0)
                DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
        else
                DPCPU_SET(vcpu_info, vcpu_info);
}

SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
#ifdef SMP
SYSINIT(xen_setup_cpus, SI_SUB_SMP, SI_ORDER_FIRST, xen_setup_cpus, NULL);
#endif
SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL);
SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);