/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <x86/specialreg.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>

#include "vmm_ktr.h"
#include "vmm_lapic.h"
#include "vlapic.h"
/*
 * Some MSI message definitions (Intel SDM Vol 3, "Message Address
 * Register Format"): an MSI write targets the 0xFEExxxxx window.
 */
#define	MSI_X86_ADDR_MASK	0xfff00000	/* bits selecting the MSI window */
#define	MSI_X86_ADDR_BASE	0xfee00000	/* x86 MSI address window base */
#define	MSI_X86_ADDR_RH		0x00000008	/* Redirection Hint */
#define	MSI_X86_ADDR_LOG	0x00000004	/* Destination Mode */
/*
 * Post a fixed interrupt 'vector' to the local APIC of virtual CPU 'cpu',
 * edge- or level-triggered according to 'level', and kick the vcpu if the
 * interrupt actually changed its pending state.
 *
 * Returns 0 on success or EINVAL if 'cpu' or 'vector' is out of range.
 */
int
lapic_set_intr(struct vm *vm, int cpu, int vector, bool level)
{
	struct vlapic *vlapic;

	if (cpu < 0 || cpu >= vm_get_maxcpus(vm))
		return (EINVAL);

	/*
	 * According to section "Maskable Hardware Interrupts" in Intel SDM
	 * vectors 16 through 255 can be delivered through the local APIC.
	 */
	if (vector < 16 || vector > 255)
		return (EINVAL);

	vlapic = vm_lapic(vm, cpu);
	if (vlapic_set_intr_ready(vlapic, vector, level))
		vcpu_notify_event(vm, cpu, true);
	return (0);
}
76 lapic_set_local_intr(struct vm *vm, int cpu, int vector)
78 struct vlapic *vlapic;
82 if (cpu < -1 || cpu >= vm_get_maxcpus(vm))
86 dmask = vm_active_cpus(vm);
88 CPU_SETOF(cpu, &dmask);
90 while ((cpu = CPU_FFS(&dmask)) != 0) {
93 vlapic = vm_lapic(vm, cpu);
94 error = vlapic_trigger_lvt(vlapic, vector);
103 lapic_intr_msi(struct vm *vm, uint64_t addr, uint64_t msg)
109 VM_CTR2(vm, "lapic MSI addr: %#lx msg: %#lx", addr, msg);
111 if ((addr & MSI_X86_ADDR_MASK) != MSI_X86_ADDR_BASE) {
112 VM_CTR1(vm, "lapic MSI invalid addr %#lx", addr);
117 * Extract the x86-specific fields from the MSI addr/msg
118 * params according to the Intel Arch spec, Vol3 Ch 10.
120 * The PCI specification does not support level triggered
121 * MSI/MSI-X so ignore trigger level in 'msg'.
123 * The 'dest' is interpreted as a logical APIC ID if both
124 * the Redirection Hint and Destination Mode are '1' and
125 * physical otherwise.
127 dest = (addr >> 12) & 0xff;
128 phys = ((addr & (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG)) !=
129 (MSI_X86_ADDR_RH | MSI_X86_ADDR_LOG));
130 delmode = msg & APIC_DELMODE_MASK;
133 VM_CTR3(vm, "lapic MSI %s dest %#x, vec %d",
134 phys ? "physical" : "logical", dest, vec);
136 vlapic_deliver_intr(vm, LAPIC_TRIG_EDGE, dest, phys, delmode, vec);
141 x2apic_msr(u_int msr)
143 return (msr >= 0x800 && msr <= 0xBFF);
147 x2apic_msr_to_regoff(u_int msr)
150 return ((msr - 0x800) << 4);
157 return (x2apic_msr(msr) || msr == MSR_APICBASE);
161 lapic_rdmsr(struct vm *vm, int cpu, u_int msr, uint64_t *rval, bool *retu)
165 struct vlapic *vlapic;
167 vlapic = vm_lapic(vm, cpu);
169 if (msr == MSR_APICBASE) {
170 *rval = vlapic_get_apicbase(vlapic);
173 offset = x2apic_msr_to_regoff(msr);
174 error = vlapic_read(vlapic, 0, offset, rval, retu);
181 lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t val, bool *retu)
185 struct vlapic *vlapic;
187 vlapic = vm_lapic(vm, cpu);
189 if (msr == MSR_APICBASE) {
190 error = vlapic_set_apicbase(vlapic, val);
192 offset = x2apic_msr_to_regoff(msr);
193 error = vlapic_write(vlapic, 0, offset, val, retu);
200 lapic_mmio_write(void *vm, int cpu, uint64_t gpa, uint64_t wval, int size,
205 struct vlapic *vlapic;
207 off = gpa - DEFAULT_APIC_BASE;
210 * Memory mapped local apic accesses must be 4 bytes wide and
211 * aligned on a 16-byte boundary.
213 if (size != 4 || off & 0xf)
216 vlapic = vm_lapic(vm, cpu);
217 error = vlapic_write(vlapic, 1, off, wval, arg);
222 lapic_mmio_read(void *vm, int cpu, uint64_t gpa, uint64_t *rval, int size,
227 struct vlapic *vlapic;
229 off = gpa - DEFAULT_APIC_BASE;
232 * Memory mapped local apic accesses should be aligned on a
233 * 16-byte boundary. They are also suggested to be 4 bytes
234 * wide, alas not all OSes follow suggestions.
240 vlapic = vm_lapic(vm, cpu);
241 error = vlapic_read(vlapic, 1, off, rval, arg);