/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int vm86pa;
extern struct pcb *vm86pcb;

static struct mtx vm86_lock;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame *);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

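/*
 * Real-mode (vm86) addressing helpers.  A 16-bit segment:offset pair
 * maps to the linear address (sel << 4) + off, so e.g. 0xc000:0x0010
 * resolves to 0xc0010.  MAKE_VEC/GET_VEC pack and unpack a real-mode
 * interrupt vector, stored as a 32-bit segment:offset pair.
 */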
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

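/*
 * 16- and 32-bit push/pop on the vm86 stack.  The real-mode stack grows
 * down from ss:sp; the fuword/suword primitives are used because the
 * stack lives in user-mapped vm86 space, so a bad pointer must fault
 * gracefully rather than panic the kernel.
 */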
static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        suword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fuword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

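/*
 * Emulate the instructions that trap out of vm86 mode: CLI/STI,
 * PUSHF/POPF, INTn and IRET.  The interrupt flag is virtualized:
 * PSL_VIF tracks the guest's view of PSL_I while PSL_VIP marks a
 * pending interrupt, and any case the emulator does not (or must not)
 * complete falls through to return SIGBUS.  On CPUs with VME the
 * hardware handles the common cases itself and we only see the ones
 * it cannot.
 */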
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (curpcb->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &curpcb->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (retcode);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (retcode);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_T, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_T, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                     &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

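/*
 * Sizing for the statically allocated vm86 area.  The page table must
 * cover the 1MB real-mode space plus 64KB for the segment wrap-around
 * (HMA) region, and the TSS is followed by the interrupt redirection
 * bitmap and the I/O permission bitmap plus the mandatory terminating
 * 0xff byte (the "+ 1" below, filled in by vm86_initialize()).
 */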
#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

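/*
 * Layout of that area: the vm86 page table, the scratch PCB used while
 * switched into vm86 mode, its extension (which embeds the TSS), the
 * interrupt map and the I/O bitmap sit back to back, matching the
 * picture in vm86_initialize() below.
 */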
struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* privilege level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * This should be a compile-time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      =    stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      =    argument pointer to initial call
         * pcb_spare[0] =    saved TSS descriptor, word 0
         * pcb_spare[1] =    saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        mtx_init(&vm86_lock, "vm86 lock", NULL, MTX_DEF);

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = vm86paddr;
        pcb->pcb_flags = PCB_VM86CALL;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

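/*
 * vm86_getpage() and vm86_addpage() maintain the per-context mapping
 * from real-mode page numbers to the kernel VAs backing them.  If no
 * kva is supplied to vm86_addpage(), a page is malloc'ed and flagged
 * VMAP_MALLOC to record that it was allocated here.
 */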
vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto overlap;

        if (vmc->npages == VM86_PMAPSIZE)
                goto full;                      /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
overlap:
        panic("vm86_addpage: overlap");
full:
        panic("vm86_addpage: not enough room");
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
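/*
 * For an interrupt call, a two-byte "INT n" followed by HLT is
 * assembled at linear address 0xa00 and cs:ip is pointed at it;
 * otherwise the caller's cs:ip is left in the frame and a bare HLT is
 * pushed as the return address.  Either way the HLT traps back into
 * vm86_trap() when the routine finishes.
 */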
void
vm86_prepcall(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        uint32_t *stack;
        uint8_t *code;

        code = (void *)0xa00;
        stack = (void *)(0x1000 - 2);   /* keep aligned */
        if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                code[0] = INTn;
                code[1] = vmf->vmf_trapno & 0xff;
                code[2] = HLT;
                vmf->vmf_ip = (uintptr_t)code;
                vmf->vmf_cs = 0;
        } else {
                code[0] = HLT;
                stack--;
                stack[0] = MAKE_VEC(0, (uintptr_t)code);
        }
        vmf->vmf_sp = (uintptr_t)stack;
        vmf->vmf_ss = 0;
        vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = 0;
        vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;

        vm86 = &curpcb->pcb_ext->ext_vm86;
        if (!vm86->vm86_has_vme)
                vm86->vm86_eflags = vmf->vmf_eflags;  /* save VIF, VIP */
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
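/*
 * A HLT at cs:ip is the normal return path planted by vm86_prepcall(),
 * and the carry flag is the conventional BIOS failure indicator, so it
 * is returned in vmf_trapno; any other trap is unexpected and its
 * number is reported in the high 16 bits instead.
 */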
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        vm86_biosret(vmf);
}

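/*
 * Execute a software interrupt in vm86 mode.  vm86_lock serializes use
 * of the shared vm86 PCB, and the critical section keeps the thread on
 * this CPU while the low-level trampoline has switched stacks.  A
 * hypothetical caller, e.g. asking the video BIOS whether VBE is
 * supported, might look like:
 *
 *      struct vm86frame vmf;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      vmf.vmf_ax = 0x4f00;
 *      if (vm86_intcall(0x10, &vmf) == 0 && vmf.vmf_ax == 0x004f)
 *              ... VBE is present ...
 */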
int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int retval;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        vmf->vmf_trapno = intnum;
        mtx_lock(&vm86_lock);
        critical_enter();
        retval = vm86_bioscall(vmf);
        critical_exit();
        mtx_unlock(&vm86_lock);
        return (retval);
}

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
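/*
 * While the call is in progress each page in vmc is wired into the
 * vm86 page table: the old PTE is saved and the backing physical page
 * installed, with a TLB invalidation, and the originals are restored
 * once vm86_bioscall() returns.
 */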
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t *pte = (pt_entry_t *)vm86paddr;
        vm_paddr_t page;
        int i, entry, retval;

        mtx_lock(&vm86_lock);
        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
                pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
        }

        vmf->vmf_trapno = intnum;
        critical_enter();
        retval = vm86_bioscall(vmf);
        critical_exit();

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
                pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
        }
        mtx_unlock(&vm86_lock);

        return (retval);
}

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

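/*
 * Inverse of vm86_getaddr(): translate a kernel VA inside one of the
 * context's pages back into a real-mode sel:off pair.  The selector is
 * the page number shifted left 8 bits, so that (sel << 4) lands on the
 * page's base address.
 */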
int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}

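/*
 * Handler for the vm86-related sysarch(2) sub-operations: VM86_INIT
 * sets up the per-thread vm86 state (allocating the PCB extension on
 * first use), VM86_GET_VME reports whether CR4.VME is enabled, and
 * VM86_INTCALL lets suitably privileged callers issue BIOS interrupts
 * directly.
 */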
int
vm86_sysarch(struct thread *td, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (td->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(td)) != 0)
                        return (error);
        vm86 = &td->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = priv_check(td, PRIV_VM86_INTCALL)))
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}