/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "vmm_ktr.h"

#include "vmcb.h"
#include "svm.h"
#include "svm_softc.h"
/*
 * The VMCB aka Virtual Machine Control Block is a 4KB aligned page
 * in memory that describes the virtual machine.
 *
 * The VMCB contains:
 * - instructions or events in the guest to intercept
 * - control bits that modify execution environment of the guest
 * - guest processor state (e.g. general purpose registers)
 */
56 * Return VMCB segment area.
58 static struct vmcb_segment *
59 vmcb_segptr(struct vmcb *vmcb, int type)
61 struct vmcb_state *state;
62 struct vmcb_segment *seg;
91 case VM_REG_GUEST_GDTR:
95 case VM_REG_GUEST_IDTR:
99 case VM_REG_GUEST_LDTR:
103 case VM_REG_GUEST_TR:
116 vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
123 vmcb = svm_get_vmcb(softc, vcpu);
124 off = VMCB_ACCESS_OFFSET(ident);
125 bytes = VMCB_ACCESS_BYTES(ident);
127 if ((off + bytes) >= sizeof (struct vmcb))
140 memcpy(ptr + off, val, bytes);
142 memcpy(val, ptr + off, bytes);
145 VCPU_CTR1(softc->vm, vcpu,
146 "Invalid size %d for VMCB access: %d", bytes);
150 /* Invalidate all VMCB state cached by h/w. */
152 svm_set_dirty(softc, vcpu, 0xffffffff);
158 * Read from segment selector, control and general purpose register of VMCB.
161 vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
164 struct vmcb_state *state;
165 struct vmcb_segment *seg;
168 vmcb = svm_get_vmcb(sc, vcpu);
169 state = &vmcb->state;
172 if (VMCB_ACCESS_OK(ident))
173 return (vmcb_access(sc, vcpu, 0, ident, retval));
176 case VM_REG_GUEST_CR0:
177 *retval = state->cr0;
180 case VM_REG_GUEST_CR2:
181 *retval = state->cr2;
184 case VM_REG_GUEST_CR3:
185 *retval = state->cr3;
188 case VM_REG_GUEST_CR4:
189 *retval = state->cr4;
192 case VM_REG_GUEST_DR6:
193 *retval = state->dr6;
196 case VM_REG_GUEST_DR7:
197 *retval = state->dr7;
200 case VM_REG_GUEST_EFER:
201 *retval = state->efer;
204 case VM_REG_GUEST_RAX:
205 *retval = state->rax;
208 case VM_REG_GUEST_RFLAGS:
209 *retval = state->rflags;
212 case VM_REG_GUEST_RIP:
213 *retval = state->rip;
216 case VM_REG_GUEST_RSP:
217 *retval = state->rsp;
220 case VM_REG_GUEST_CS:
221 case VM_REG_GUEST_DS:
222 case VM_REG_GUEST_ES:
223 case VM_REG_GUEST_FS:
224 case VM_REG_GUEST_GS:
225 case VM_REG_GUEST_SS:
226 case VM_REG_GUEST_LDTR:
227 case VM_REG_GUEST_TR:
228 seg = vmcb_segptr(vmcb, ident);
229 KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
231 *retval = seg->selector;
234 case VM_REG_GUEST_GDTR:
235 case VM_REG_GUEST_IDTR:
236 /* GDTR and IDTR don't have segment selectors */
248 * Write to segment selector, control and general purpose register of VMCB.
251 vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
254 struct vmcb_state *state;
255 struct vmcb_segment *seg;
258 vmcb = svm_get_vmcb(sc, vcpu);
259 state = &vmcb->state;
263 if (VMCB_ACCESS_OK(ident))
264 return (vmcb_access(sc, vcpu, 1, ident, &val));
267 case VM_REG_GUEST_CR0:
269 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
272 case VM_REG_GUEST_CR2:
274 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
277 case VM_REG_GUEST_CR3:
279 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
282 case VM_REG_GUEST_CR4:
284 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
287 case VM_REG_GUEST_DR6:
289 svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
292 case VM_REG_GUEST_DR7:
294 svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
297 case VM_REG_GUEST_EFER:
298 /* EFER_SVM must always be set when the guest is executing */
299 state->efer = val | EFER_SVM;
300 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
303 case VM_REG_GUEST_RAX:
307 case VM_REG_GUEST_RFLAGS:
311 case VM_REG_GUEST_RIP:
315 case VM_REG_GUEST_RSP:
319 case VM_REG_GUEST_CS:
320 case VM_REG_GUEST_DS:
321 case VM_REG_GUEST_ES:
322 case VM_REG_GUEST_SS:
323 dirtyseg = 1; /* FALLTHROUGH */
324 case VM_REG_GUEST_FS:
325 case VM_REG_GUEST_GS:
326 case VM_REG_GUEST_LDTR:
327 case VM_REG_GUEST_TR:
328 seg = vmcb_segptr(vmcb, ident);
329 KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
333 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
336 case VM_REG_GUEST_GDTR:
337 case VM_REG_GUEST_IDTR:
338 /* GDTR and IDTR don't have segment selectors */
350 vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
352 struct vmcb_segment *seg;
354 seg = vmcb_segptr(vmcb, ident);
356 bcopy(seg, seg2, sizeof(struct vmcb_segment));
364 vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
367 struct svm_softc *sc;
368 struct vmcb_segment *seg;
372 vmcb = svm_get_vmcb(sc, vcpu);
374 seg = vmcb_segptr(vmcb, reg);
375 KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
378 seg->base = desc->base;
379 seg->limit = desc->limit;
380 if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
382 * Map seg_desc access to VMCB attribute format.
384 * SVM uses the 'P' bit in the segment attributes to indicate a
385 * NULL segment so clear it if the segment is marked unusable.
387 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
388 if (SEG_DESC_UNUSABLE(desc->access)) {
391 seg->attrib = attrib;
394 VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
395 "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
398 case VM_REG_GUEST_CS:
399 case VM_REG_GUEST_DS:
400 case VM_REG_GUEST_ES:
401 case VM_REG_GUEST_SS:
402 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
404 case VM_REG_GUEST_GDTR:
405 case VM_REG_GUEST_IDTR:
406 svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
416 vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
419 struct svm_softc *sc;
420 struct vmcb_segment *seg;
423 vmcb = svm_get_vmcb(sc, vcpu);
424 seg = vmcb_segptr(vmcb, reg);
425 KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
428 desc->base = seg->base;
429 desc->limit = seg->limit;
432 if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
433 /* Map seg_desc access to VMCB attribute format */
434 desc->access = ((seg->attrib & 0xF00) << 4) |
435 (seg->attrib & 0xFF);
438 * VT-x uses bit 16 to indicate a segment that has been loaded
439 * with a NULL selector (aka unusable). The 'desc->access'
440 * field is interpreted in the VT-x format by the
441 * processor-independent code.
443 * SVM uses the 'P' bit to convey the same information so
444 * convert it into the VT-x format. For more details refer to
445 * section "Segment State in the VMCB" in APMv2.
447 if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
448 if ((desc->access & 0x80) == 0)
449 desc->access |= 0x10000; /* Unusable segment */