2 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <machine/segments.h>
34 #include <machine/specialreg.h>
35 #include <machine/vmm.h>
41 #include "svm_softc.h"
44 * The VMCB aka Virtual Machine Control Block is a 4KB aligned page
45 * in memory that describes the virtual machine.
48 * - instructions or events in the guest to intercept
49 * - control bits that modify execution environment of the guest
50 * - guest processor state (e.g. general purpose registers)
54 * Return VMCB segment area.
/*
 * vmcb_segptr -- translate a VM_REG_GUEST_* identifier into a pointer to
 * the matching vmcb_segment within the VMCB state save area.
 *
 * NOTE(review): this extract is missing interior lines. The switch head,
 * the per-case 'seg = &state->...' assignments, and the default path
 * (presumably returning NULL for identifiers with no segment area -- the
 * KASSERTs at the call sites suggest NULL is the failure value; confirm
 * against the full file) are not visible here.
 */
56 static struct vmcb_segment *
57 vmcb_segptr(struct vmcb *vmcb, int type)
/* 'state' aliases the VMCB register save area; 'seg' is the result. */
59 struct vmcb_state *state;
60 struct vmcb_segment *seg;
/* Visible cases: descriptor-table and system-segment registers. */
89 case VM_REG_GUEST_GDTR:
93 case VM_REG_GUEST_IDTR:
97 case VM_REG_GUEST_LDTR:
101 case VM_REG_GUEST_TR:
/*
 * vmcb_access -- raw byte-level read or write of the VMCB.
 *
 * 'ident' encodes both the byte offset (VMCB_ACCESS_OFFSET) and the
 * access width (VMCB_ACCESS_BYTES). 'write' selects direction: nonzero
 * copies from 'val' into the VMCB, zero copies out into 'val'.
 * After any write the entire hardware VMCB cache is invalidated
 * (svm_set_dirty with an all-ones mask) since an arbitrary-offset store
 * may touch any cached field.
 *
 * NOTE(review): interior lines (locals, the size-validation switch, the
 * error returns) are missing from this extract.
 *
 * NOTE(review): the bounds check uses '>=', which also rejects an access
 * ending exactly at sizeof(struct vmcb); '>' would permit touching the
 * final byte -- confirm whether this is intentionally conservative.
 *
 * NOTE(review): the trace format string below contains two '%d'
 * conversions but only one visible argument ('bytes') is passed via
 * VCPU_CTR1 -- looks like a missing argument (or stale format); verify
 * against the full file.
 */
114 vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
121 vmcb = svm_get_vmcb(softc, vcpu);
122 off = VMCB_ACCESS_OFFSET(ident);
123 bytes = VMCB_ACCESS_BYTES(ident);
/* Reject accesses that would run past the end of the VMCB page. */
125 if ((off + bytes) >= sizeof (struct vmcb))
/* write path: val -> VMCB; read path: VMCB -> val */
138 memcpy(ptr + off, val, bytes);
140 memcpy(val, ptr + off, bytes);
143 VCPU_CTR1(softc->vm, vcpu,
144 "Invalid size %d for VMCB access: %d", bytes);
148 /* Invalidate all VMCB state cached by h/w. */
150 svm_set_dirty(softc, vcpu, 0xffffffff);
156 * Read from segment selector, control and general purpose register of VMCB.
/*
 * vmcb_read -- fetch one guest register value out of the vcpu's VMCB and
 * store it in '*retval'. Encoded raw accesses (VMCB_ACCESS_OK) are
 * delegated to vmcb_access() with write=0; everything else is dispatched
 * on the VM_REG_GUEST_* identifier.
 *
 * NOTE(review): the switch head, 'break' statements, the error/default
 * return, and the closing brace are missing from this extract; the
 * visible lines show only the per-case loads.
 */
159 vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
162 struct vmcb_state *state;
163 struct vmcb_segment *seg;
166 vmcb = svm_get_vmcb(sc, vcpu);
167 state = &vmcb->state;
/* Raw offset/size-encoded access bypasses the symbolic switch below. */
170 if (VMCB_ACCESS_OK(ident))
171 return (vmcb_access(sc, vcpu, 0, ident, retval));
/* Control registers. */
174 case VM_REG_GUEST_CR0:
175 *retval = state->cr0;
178 case VM_REG_GUEST_CR2:
179 *retval = state->cr2;
182 case VM_REG_GUEST_CR3:
183 *retval = state->cr3;
186 case VM_REG_GUEST_CR4:
187 *retval = state->cr4;
/* Debug registers. */
190 case VM_REG_GUEST_DR6:
191 *retval = state->dr6;
194 case VM_REG_GUEST_DR7:
195 *retval = state->dr7;
198 case VM_REG_GUEST_EFER:
199 *retval = state->efer;
/* General-purpose / flags / instruction pointer saved in the VMCB. */
202 case VM_REG_GUEST_RAX:
203 *retval = state->rax;
206 case VM_REG_GUEST_RFLAGS:
207 *retval = state->rflags;
210 case VM_REG_GUEST_RIP:
211 *retval = state->rip;
214 case VM_REG_GUEST_RSP:
215 *retval = state->rsp;
/*
 * Segment registers: return only the selector; base/limit/attributes
 * are read through vmcb_getdesc() instead.
 */
218 case VM_REG_GUEST_CS:
219 case VM_REG_GUEST_DS:
220 case VM_REG_GUEST_ES:
221 case VM_REG_GUEST_FS:
222 case VM_REG_GUEST_GS:
223 case VM_REG_GUEST_SS:
224 case VM_REG_GUEST_LDTR:
225 case VM_REG_GUEST_TR:
226 seg = vmcb_segptr(vmcb, ident);
227 KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
229 *retval = seg->selector;
232 case VM_REG_GUEST_GDTR:
233 case VM_REG_GUEST_IDTR:
234 /* GDTR and IDTR don't have segment selectors */
246 * Write to segment selector, control and general purpose register of VMCB.
/*
 * vmcb_write -- store 'val' into one guest register in the vcpu's VMCB.
 * Mirrors vmcb_read(): encoded raw accesses go through vmcb_access()
 * with write=1; symbolic identifiers dispatch on VM_REG_GUEST_*.
 * Each store also marks the corresponding VMCB cache section dirty
 * (VMCB_CACHE_CR/CR2/DR/SEG/...) so hardware reloads it on VMRUN.
 *
 * NOTE(review): the switch head, the per-case 'state->... = val'
 * assignments, 'break' statements, and the error/default return are
 * missing from this extract; only the dirty-marking lines and case
 * labels are visible.
 */
249 vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
252 struct vmcb_state *state;
253 struct vmcb_segment *seg;
256 vmcb = svm_get_vmcb(sc, vcpu);
257 state = &vmcb->state;
/* Raw offset/size-encoded access bypasses the symbolic switch below. */
261 if (VMCB_ACCESS_OK(ident))
262 return (vmcb_access(sc, vcpu, 1, ident, &val));
/* Control registers: dirty the CR cache section (CR2 has its own). */
265 case VM_REG_GUEST_CR0:
267 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
270 case VM_REG_GUEST_CR2:
272 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
275 case VM_REG_GUEST_CR3:
277 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
280 case VM_REG_GUEST_CR4:
282 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
/* Debug registers share the DR cache section. */
285 case VM_REG_GUEST_DR6:
287 svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
290 case VM_REG_GUEST_DR7:
292 svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
295 case VM_REG_GUEST_EFER:
296 /* EFER_SVM must always be set when the guest is executing */
297 state->efer = val | EFER_SVM;
298 svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
/* GPRs/flags/RIP/RSP: stores not visible in this extract. */
301 case VM_REG_GUEST_RAX:
305 case VM_REG_GUEST_RFLAGS:
309 case VM_REG_GUEST_RIP:
313 case VM_REG_GUEST_RSP:
/*
 * Segment selectors. CS/DS/ES/SS set 'dirtyseg' before falling
 * through, which presumably gates the VMCB_CACHE_SEG dirtying below
 * (FS/GS/LDTR/TR are not hardware-cached the same way) -- the 'if'
 * around the svm_set_dirty call is not visible here; confirm.
 */
317 case VM_REG_GUEST_CS:
318 case VM_REG_GUEST_DS:
319 case VM_REG_GUEST_ES:
320 case VM_REG_GUEST_SS:
321 dirtyseg = 1; /* FALLTHROUGH */
322 case VM_REG_GUEST_FS:
323 case VM_REG_GUEST_GS:
324 case VM_REG_GUEST_LDTR:
325 case VM_REG_GUEST_TR:
326 seg = vmcb_segptr(vmcb, ident);
327 KASSERT(seg != NULL, ("%s: unable to get segment %d from VMCB",
331 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
334 case VM_REG_GUEST_GDTR:
335 case VM_REG_GUEST_IDTR:
336 /* GDTR and IDTR don't have segment selectors */
/*
 * vmcb_seg -- copy the vmcb_segment for 'ident' out of the VMCB into the
 * caller-supplied 'seg2'.
 *
 * NOTE(review): the NULL check on vmcb_segptr()'s result and the return
 * value(s) are missing from this extract -- an unconditional bcopy of a
 * NULL 'seg' would be a fatal dereference, so presumably the full file
 * guards it and returns an error for unknown idents; confirm.
 */
348 vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
350 struct vmcb_segment *seg;
352 seg = vmcb_segptr(vmcb, ident);
354 bcopy(seg, seg2, sizeof(struct vmcb_segment));
/*
 * vmcb_setdesc -- install a processor-independent seg_desc (base, limit,
 * VT-x-style access bits) into the VMCB segment area for 'reg', then
 * dirty the matching VMCB cache section (SEG for data/code segments,
 * DT for GDTR/IDTR).
 *
 * 'arg' is the opaque softc pointer ('sc = arg' assignment not visible
 * in this extract); the switch head, breaks, and final return are also
 * missing.
 */
362 vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
365 struct svm_softc *sc;
366 struct vmcb_segment *seg;
370 vmcb = svm_get_vmcb(sc, vcpu);
372 seg = vmcb_segptr(vmcb, reg);
373 KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
376 seg->base = desc->base;
377 seg->limit = desc->limit;
/* GDTR/IDTR carry only base+limit; attributes apply to real segments. */
378 if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
380 * Map seg_desc access to VMCB attribute format.
382 * SVM uses the 'P' bit in the segment attributes to indicate a
383 * NULL segment so clear it if the segment is marked unusable.
/*
 * Pack VT-x access bits (flags at 15:12, type/S/DPL/P at 7:0) into
 * SVM's contiguous 12-bit attribute format; inverse of the unpack
 * in vmcb_getdesc().
 */
385 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
386 if (SEG_DESC_UNUSABLE(desc->access)) {
389 seg->attrib = attrib;
392 VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
393 "attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
/* Mark the relevant hardware cache section stale for the next VMRUN. */
396 case VM_REG_GUEST_CS:
397 case VM_REG_GUEST_DS:
398 case VM_REG_GUEST_ES:
399 case VM_REG_GUEST_SS:
400 svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
402 case VM_REG_GUEST_GDTR:
403 case VM_REG_GUEST_IDTR:
404 svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
/*
 * vmcb_getdesc -- read a segment descriptor out of the VMCB and convert
 * it into the processor-independent (VT-x formatted) seg_desc used by
 * the rest of vmm. Inverse of vmcb_setdesc().
 *
 * NOTE(review): this function runs past the end of the extract; the
 * 'sc = arg' assignment, intervening lines, and the tail (closing
 * braces / return) are not visible here.
 */
414 vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
417 struct svm_softc *sc;
418 struct vmcb_segment *seg;
421 vmcb = svm_get_vmcb(sc, vcpu);
422 seg = vmcb_segptr(vmcb, reg);
423 KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
426 desc->base = seg->base;
427 desc->limit = seg->limit;
/* GDTR/IDTR have no attribute byte; only real segments are unpacked. */
430 if (reg != VM_REG_GUEST_GDTR && reg != VM_REG_GUEST_IDTR) {
431 /* Map seg_desc access to VMCB attribute format */
/* Unpack SVM's 12-bit attrib back into VT-x layout (flags to 15:12). */
432 desc->access = ((seg->attrib & 0xF00) << 4) |
433 (seg->attrib & 0xFF);
436 * VT-x uses bit 16 to indicate a segment that has been loaded
437 * with a NULL selector (aka unusable). The 'desc->access'
438 * field is interpreted in the VT-x format by the
439 * processor-independent code.
441 * SVM uses the 'P' bit to convey the same information so
442 * convert it into the VT-x format. For more details refer to
443 * section "Segment State in the VMCB" in APMv2.
/* CS and TR are always usable, so the P-bit translation skips them. */
445 if (reg != VM_REG_GUEST_CS && reg != VM_REG_GUEST_TR) {
446 if ((desc->access & 0x80) == 0)
447 desc->access |= 0x10000; /* Unusable segment */