/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int vm86pa;
extern struct pcb *vm86pcb;

static struct mtx vm86_lock;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame *);

#define	HLT	0xf4
#define	CLI	0xfa
#define	STI	0xfb
#define	PUSHF	0x9c
#define	POPF	0x9d
#define	INTn	0xcd
#define	IRET	0xcf
#define	OPERAND_SIZE_PREFIX	0x66
#define	ADDRESS_SIZE_PREFIX	0x67
#define	PUSH_MASK	~(PSL_VM | PSL_RF | PSL_I)
#define	POP_MASK	~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)
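
/*
 * PUSH_MASK strips PSL_VM, PSL_RF and the real PSL_I from the flags
 * image that PUSHF emulation stores on the vm86 stack; the virtual
 * interrupt flag is substituted for PSL_I.  POP_MASK strips the bits
 * that vm86 code must not be allowed to change via POPF or IRET.
 */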

static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
	return ((caddr_t)((sel << 4) + off));
}

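/*
 * MAKE_ADDR() implements the real-mode rule linear = (sel << 4) + off;
 * for example, 0x0040:0x006C and 0x0000:0x046C both name linear 0x46C.
 */
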
static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
	*sel = vec >> 16;
	*off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
	return ((sel << 16) | off);
}

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 2;
	suword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 4;
	suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
	u_short x = fuword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 2;
	return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
	u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 4;
	return (x);
}

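/*
 * Emulate the instructions that fault while running in vm86 mode:
 * CLI, STI, PUSHF, POPF, INTn and IRET, maintaining a virtual
 * interrupt flag in software (or with the CPU's VME extensions when
 * present).  Returns 0 if the instruction was handled here, or a
 * signal number to be delivered to the process otherwise.
 */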
int
vm86_emulate(struct vm86frame *vmf)
{
	struct vm86_kernel *vm86;
	caddr_t addr;
	u_char i_byte;
	u_int temp_flags;
	int inc_ip = 1;
	int retcode = 0;

	/*
	 * pcb_ext contains the address of the extension area, or zero if
	 * the extension is not present.  (This check should not be needed,
	 * as we can't enter vm86 mode until we set up an extension area)
	 */
	if (curpcb->pcb_ext == 0)
		return (SIGBUS);
	vm86 = &curpcb->pcb_ext->ext_vm86;

	if (vmf->vmf_eflags & PSL_T)
		retcode = SIGTRAP;

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	i_byte = fubyte(addr);
	if (i_byte == ADDRESS_SIZE_PREFIX) {
		i_byte = fubyte(++addr);
		inc_ip++;
	}

	if (vm86->vm86_has_vme) {
		switch (i_byte) {
		case OPERAND_SIZE_PREFIX:
			i_byte = fubyte(++addr);
			inc_ip++;
			switch (i_byte) {
			case PUSHF:
				if (vmf->vmf_eflags & PSL_VIF)
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL | PSL_I, vmf);
				else
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL, vmf);
				vmf->vmf_ip += inc_ip;
				return (retcode);

			case POPF:
				temp_flags = POPL(vmf) & POP_MASK;
				vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
				    | temp_flags | PSL_VM | PSL_I;
				vmf->vmf_ip += inc_ip;
				if (temp_flags & PSL_I) {
					vmf->vmf_eflags |= PSL_VIF;
					if (vmf->vmf_eflags & PSL_VIP)
						break;
				} else
					vmf->vmf_eflags &= ~PSL_VIF;
				return (retcode);
			}
			break;

		/* VME faults here if VIP is set, but does not set VIF. */
		case STI:
			vmf->vmf_eflags |= PSL_VIF;
			vmf->vmf_ip += inc_ip;
			if ((vmf->vmf_eflags & PSL_VIP) == 0) {
				uprintf("fatal sti\n");
				return (SIGKILL);
			}
			break;

		/* VME if no redirection support */
		case CLI:
			break;

		/* VME if trying to set PSL_T, or PSL_I when VIP is set */
		case POPF:
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else
				vmf->vmf_eflags &= ~PSL_VIF;
			return (retcode);

		/* VME if trying to set PSL_T, or PSL_I when VIP is set */
		case IRET:
			vmf->vmf_ip = POP(vmf);
			vmf->vmf_cs = POP(vmf);
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else
				vmf->vmf_eflags &= ~PSL_VIF;
			return (retcode);
		}
		return (SIGBUS);
	}

	switch (i_byte) {
	case OPERAND_SIZE_PREFIX:
		i_byte = fubyte(++addr);
		inc_ip++;
		switch (i_byte) {
		case PUSHF:
			if (vm86->vm86_eflags & PSL_VIF)
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL | PSL_I, vmf);
			else
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL, vmf);
			vmf->vmf_ip += inc_ip;
			return (retcode);

		case POPF:
			temp_flags = POPL(vmf) & POP_MASK;
			vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vm86->vm86_eflags |= PSL_VIF;
				if (vm86->vm86_eflags & PSL_VIP)
					break;
			} else
				vm86->vm86_eflags &= ~PSL_VIF;
			return (retcode);
		}
		return (SIGBUS);

	case CLI:
		vm86->vm86_eflags &= ~PSL_VIF;
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case STI:
		/* if there is a pending interrupt, go to the emulator */
		vm86->vm86_eflags |= PSL_VIF;
		vmf->vmf_ip += inc_ip;
		if (vm86->vm86_eflags & PSL_VIP)
			break;
		return (retcode);

	case PUSHF:
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case INTn:
		i_byte = fubyte(addr + 1);
		if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
			break;
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		PUSH(vmf->vmf_cs, vmf);
		PUSH(vmf->vmf_ip + inc_ip + 1, vmf);	/* increment IP */
		GET_VEC(fuword((caddr_t)(i_byte * 4)),
		    &vmf->vmf_cs, &vmf->vmf_ip);
		vmf->vmf_flags &= ~PSL_T;
		vm86->vm86_eflags &= ~PSL_VIF;
		return (retcode);

	case IRET:
		vmf->vmf_ip = POP(vmf);
		vmf->vmf_cs = POP(vmf);
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else
			vm86->vm86_eflags &= ~PSL_VIF;
		return (retcode);

	case POPF:
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		vmf->vmf_ip += inc_ip;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else
			vm86->vm86_eflags &= ~PSL_VIF;
		return (retcode);
	}
	return (SIGBUS);
}

#define PGTABLE_SIZE	((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE	32
#define IOMAP_SIZE	ctob(IOPAGES)
#define TSS_SIZE \
	(sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
	INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
	pt_entry_t	vml_pgtbl[PGTABLE_SIZE];
	struct	pcb vml_pcb;
	struct	pcb_ext vml_ext;
	char	vml_intmap[INTMAP_SIZE];
	char	vml_iomap[IOMAP_SIZE];
	char	vml_iomap_trailer;
};

void
vm86_initialize(void)
{
	int i;
	u_int *addr;
	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
	struct pcb *pcb;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		0,			/* length (overwritten) */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 16 size */
		0			/* granularity */
	};

	/*
	 * this should be a compile time error, but cpp doesn't grok sizeof().
	 */
	if (sizeof(struct vm86_layout) > ctob(3))
		panic("struct vm86_layout exceeds space allocated in locore.s");

	/*
	 * Below is the memory layout that we use for the vm86 region.
	 *
	 * +--------+ +--------+ <--------- vm86paddr
	 * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
	 * |        | +--------+
	 * |        | |  PCB   | size: ~240 bytes
	 * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
	 * |        | |        |
	 * |        | | stack  |
	 * |        | |        |
	 * +--------+ +--------+
	 * |        | | intmap | size: 32 bytes
	 * | page 2 | +--------+
	 * |        | |        |
	 * |        | | iomap  | size: 8192 bytes
	 * +--------+ | bitmap |
	 * |        | |        |
	 * | page 3 | |        |
	 * +--------+ +--------+
	 */

	/*
	 * A rudimentary PCB must be installed, in order to get to the
	 * PCB extension area.  We use the PCB area as a scratchpad for
	 * data storage, the layout of which is shown below.
	 *
	 * pcb_esi	= new PTD entry 0
	 * pcb_ebp	= pointer to frame on vm86 stack
	 * pcb_esp	= stack frame pointer at time of switch
	 * pcb_ebx	= va of vm86 page table
	 * pcb_eip	= argument pointer to initial call
	 * pcb_spare[0]	= saved TSS descriptor, word 0
	 * pcb_spare[1]	= saved TSS descriptor, word 1
	 */
#define new_ptd		pcb_esi
#define vm86_frame	pcb_ebp
#define pgtable_va	pcb_ebx

	pcb = &vml->vml_pcb;
	ext = &vml->vml_ext;

	mtx_init(&vm86_lock, "vm86 lock", NULL, MTX_DEF);

	bzero(pcb, sizeof(struct pcb));
	pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
	pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
	pcb->pgtable_va = vm86paddr;
	pcb->pcb_flags = PCB_VM86CALL;
	pcb->pcb_ext = ext;

	bzero(ext, sizeof(struct pcb_ext));
	ext->ext_tss.tss_esp0 = vm86paddr;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	ext->ext_tss.tss_ioopt =
	    ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
	ext->ext_iomap = vml->vml_iomap;
	ext->ext_vm86.vm86_intmap = vml->vml_intmap;

	if (cpu_feature & CPUID_VME)
		ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

	addr = (u_int *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
		*addr++ = 0;
	vml->vml_iomap_trailer = 0xff;

	ssd.ssd_base = (u_int)&ext->ext_tss;
	ssd.ssd_limit = TSS_SIZE - 1;
	ssdtosd(&ssd, &ext->ext_tssd);

	vm86pcb = pcb;

#if 0
	/*
	 * use whatever is leftover of the vm86 page layout as a
	 * message buffer so we can capture early output.
	 */
	msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
	    ctob(3) - sizeof(struct vm86_layout));
#endif
}

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			return (vmc->pmap[i].kva);
	return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
	int i, flags = 0;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			goto overlap;

	if (vmc->npages == VM86_PMAPSIZE)
		goto full;			/* XXX grow map? */

	if (kva == 0) {
		kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
		flags = VMAP_MALLOC;
	}

	i = vmc->npages++;
	vmc->pmap[i].flags = flags;
	vmc->pmap[i].kva = kva;
	vmc->pmap[i].pte_num = pagenum;
	return (kva);

overlap:
	panic("vm86_addpage: overlap");

full:
	panic("vm86_addpage: not enough room");
}

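/*
 * A typical caller (illustrative sketch; real consumers include the
 * BIOS and VESA call code) lets vm86_addpage() malloc a kernel copy of
 * guest page 1, which vm86_datacall() will then map at linear 0x1000:
 *
 *	struct vm86context vmc;
 *	vm_offset_t buf;
 *
 *	bzero(&vmc, sizeof(vmc));
 *	buf = vm86_addpage(&vmc, 1, 0);
 */
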
/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame *vmf)
{
	struct vm86_kernel *vm86;
	uint32_t *stack;
	uint8_t *code;

	code = (void *)0xa00;
	stack = (void *)(0x1000 - 2);	/* keep aligned */
	if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
		/* interrupt call requested */
		code[0] = INTn;
		code[1] = vmf->vmf_trapno & 0xff;
		code[2] = HLT;
		vmf->vmf_ip = (uintptr_t)code;
		vmf->vmf_cs = 0;
	} else {
		code[0] = HLT;
		stack--;
		stack[0] = MAKE_VEC(0, (uintptr_t)code);
		vmf->vmf_cs = vmf->vmf_trapno >> 16;
		vmf->vmf_ip = vmf->vmf_trapno & 0xffff;
	}
	vmf->vmf_ss = 0;
	vmf->vmf_sp = (uintptr_t)stack;

	vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = 0;
	vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;

	vm86 = &curpcb->pcb_ext->ext_vm86;
	if (!vm86->vm86_has_vme)
		vm86->vm86_eflags = vmf->vmf_eflags;	/* save VIF, VIP */
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
void
vm86_trap(struct vm86frame *vmf)
{
	caddr_t addr;

	/* "should not happen" */
	if ((vmf->vmf_eflags & PSL_VM) == 0)
		panic("vm86_trap called, but not in vm86 mode");

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	if (*(u_char *)addr == HLT)
		vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
	else
		vmf->vmf_trapno = vmf->vmf_trapno << 16;

	vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
	int retval;

	if (intnum < 0 || intnum > 0xff)
		return (EINVAL);

	vmf->vmf_trapno = intnum;
	mtx_lock(&vm86_lock);
	critical_enter();
	retval = vm86_bioscall(vmf);
	critical_exit();
	mtx_unlock(&vm86_lock);
	return (retval);
}

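/*
 * For example (illustrative), BIOS int 0x12 returns the base memory
 * size, in kilobytes, in %ax:
 *
 *	struct vm86frame vmf;
 *
 *	bzero(&vmf, sizeof(vmf));
 *	if (vm86_intcall(0x12, &vmf) == 0)
 *		basemem = vmf.vmf_ax;
 */
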
/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
	pt_entry_t *pte = (pt_entry_t *)vm86paddr;
	vm_paddr_t page;
	int i, entry, retval;

	mtx_lock(&vm86_lock);
	for (i = 0; i < vmc->npages; i++) {
		page = vtophys(vmc->pmap[i].kva & PG_FRAME);
		entry = vmc->pmap[i].pte_num;
		vmc->pmap[i].old_pte = pte[entry];
		pte[entry] = page | PG_V | PG_RW | PG_U;
		pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
	}

	vmf->vmf_trapno = intnum;
	critical_enter();
	retval = vm86_bioscall(vmf);
	critical_exit();

	for (i = 0; i < vmc->npages; i++) {
		entry = vmc->pmap[i].pte_num;
		pte[entry] = vmc->pmap[i].old_pte;
		pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
	}
	mtx_unlock(&vm86_lock);

	return (retval);
}

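/*
 * A sketch of a data call (names illustrative): with buf mapped in as
 * guest page 1 by vm86_addpage(), a VESA "get mode info" request is
 * roughly:
 *
 *	bzero(&vmf, sizeof(vmf));
 *	vmf.vmf_ax = 0x4f01;
 *	vm86_getptr(&vmc, buf, &vmf.vmf_es, &vmf.vmf_di);
 *	error = vm86_datacall(0x10, &vmf, &vmc);
 */
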
vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
	int i, page;
	vm_offset_t addr;

	addr = (vm_offset_t)MAKE_ADDR(sel, off);
	page = addr >> PAGE_SHIFT;
	for (i = 0; i < vmc->npages; i++)
		if (page == vmc->pmap[i].pte_num)
			return (vmc->pmap[i].kva + (addr & PAGE_MASK));
	return (0);
}

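/*
 * The reverse mapping: convert a kernel virtual address inside one of
 * the mapped pages back into a vm86 seg:off pair.  Guest page n sits
 * at linear address n << PAGE_SHIFT, and a real-mode segment covers
 * linear sel << 4, hence sel = pte_num << 8 with 4K pages.
 */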
int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (kva >= vmc->pmap[i].kva &&
		    kva < vmc->pmap[i].kva + PAGE_SIZE) {
			*off = kva - vmc->pmap[i].kva;
			*sel = vmc->pmap[i].pte_num << 8;
			return (1);
		}
	return (0);
}

int
vm86_sysarch(struct thread *td, char *args)
{
	int error = 0;
	struct i386_vm86_args ua;
	struct vm86_kernel *vm86;

	if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
		return (error);

	if (td->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(td)) != 0)
			return (error);
	vm86 = &td->td_pcb->pcb_ext->ext_vm86;

	switch (ua.sub_op) {
	case VM86_INIT: {
		struct vm86_init_args sa;

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (cpu_feature & CPUID_VME)
			vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
		else
			vm86->vm86_has_vme = 0;
		vm86->vm86_inited = 1;
		vm86->vm86_debug = sa.debug;
		bcopy(&sa.int_map, vm86->vm86_intmap, 32);
		}
		break;

#if 0
	case VM86_SET_VME: {
		struct vm86_vme_args sa;

		if ((cpu_feature & CPUID_VME) == 0)
			return (ENODEV);

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (sa.state)
			load_cr4(rcr4() | CR4_VME);
		else
			load_cr4(rcr4() & ~CR4_VME);
		}
		break;
#endif

	case VM86_GET_VME: {
		struct vm86_vme_args sa;

		sa.state = (rcr4() & CR4_VME ? 1 : 0);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	case VM86_INTCALL: {
		struct vm86_intcall_args sa;

		if ((error = priv_check(td, PRIV_VM86_INTCALL)) != 0)
			return (error);
		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if ((error = vm86_intcall(sa.intnum, &sa.vmf)) != 0)
			return (error);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	default:
		error = EINVAL;
	}
	return (error);
}