/*
 * Copyright (c) 1990 The Regents of the University of California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#include <vm/vm_kern.h>		/* for kernel_map */
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num)  ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define NULL_LDT_BASE	((caddr_t)NULL)
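
/*
 * Illustrative arithmetic for the macros above (not part of the original
 * source): a union descriptor is 8 bytes, so LD_PER_PAGE (512) descriptors
 * fill exactly one 4 KB page.  NEW_MAX_LD() rounds a descriptor count up to
 * a multiple of LD_PER_PAGE (and, as written, reserves a full extra page
 * when the count is already an exact multiple), while SIZE_FROM_LARGEST_LD()
 * converts that slot count into a byte size, the "<< 3" being a multiply by
 * sizeof(union descriptor).  For example:
 *
 *	NEW_MAX_LD(1)   == 512		SIZE_FROM_LARGEST_LD(1)   == 4096
 *	NEW_MAX_LD(513) == 1024	SIZE_FROM_LARGEST_LD(513) == 8192
 */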
static void set_user_ldt_rv(struct vmspace *vmsp);
static int i386_set_ldt_data(struct thread *, int start, int num,
    union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);
#ifndef _SYS_SYSPROTO_H_

    register struct sysarch_args *uap;

    struct i386_ldt_args largs;
    struct i386_ioperm_args iargs;
    struct segment_descriptor sd, *sdp;

    AUDIT_ARG(cmd, uap->op);
    case I386_SET_IOPERM:
        if ((error = copyin(uap->parms, &kargs.iargs,
            sizeof(struct i386_ioperm_args))) != 0)

        if ((error = copyin(uap->parms, &kargs.largs,
            sizeof(struct i386_ldt_args))) != 0)

        if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)

        error = i386_get_ldt(td, &kargs.largs);

        if (kargs.largs.descs != NULL) {
            lp = (union descriptor *)kmem_alloc(kernel_map,
                kargs.largs.num * sizeof(union descriptor));

            error = copyin(kargs.largs.descs, lp,
                kargs.largs.num * sizeof(union descriptor));

            error = i386_set_ldt(td, &kargs.largs, lp);
            kmem_free(kernel_map, (vm_offset_t)lp,
                kargs.largs.num * sizeof(union descriptor));

            error = i386_set_ldt(td, &kargs.largs, NULL);

    case I386_GET_IOPERM:
        error = i386_get_ioperm(td, &kargs.iargs);

        error = copyout(&kargs.iargs, uap->parms,
            sizeof(struct i386_ioperm_args));

    case I386_SET_IOPERM:
        error = i386_set_ioperm(td, &kargs.iargs);

        error = vm86_sysarch(td, uap->parms);
    case I386_GET_FSBASE:
        sdp = &td->td_pcb->pcb_fsd;
        base = sdp->sd_hibase << 24 | sdp->sd_lobase;
        error = copyout(&base, uap->parms, sizeof(base));

    case I386_SET_FSBASE:
        error = copyin(uap->parms, &base, sizeof(base));

        /*
         * Construct a descriptor and store it in the pcb for
         * the next context switch.  Also store it in the gdt
         * so that the load of tf_fs into %fs will activate it
         * at return to userland.
         */
        sd.sd_lobase = base & 0xffffff;
        sd.sd_hibase = (base >> 24) & 0xff;
        sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */

        sd.sd_type = SDT_MEMRWA;

        td->td_pcb->pcb_fsd = sd;
        PCPU_GET(fsgs_gdt)[0] = sd;

        td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
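
        /*
         * Worked example (illustrative only, not in the original source):
         * the 32-bit base splits 24/8 across the descriptor fields, e.g.
         * a hypothetical base of 0xbfbfe000 gives sd_lobase == 0xbfe000
         * and sd_hibase == 0xbf.  I386_GET_FSBASE above reassembles the
         * same value as (sd_hibase << 24 | sd_lobase).
         */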
    case I386_GET_GSBASE:
        sdp = &td->td_pcb->pcb_gsd;
        base = sdp->sd_hibase << 24 | sdp->sd_lobase;
        error = copyout(&base, uap->parms, sizeof(base));

    case I386_SET_GSBASE:
        error = copyin(uap->parms, &base, sizeof(base));

        /*
         * Construct a descriptor and store it in the pcb for
         * the next context switch.  Also store it in the gdt
         * because we have to do a load_gs() right now.
         */
        sd.sd_lobase = base & 0xffffff;
        sd.sd_hibase = (base >> 24) & 0xff;
        sd.sd_lolimit = 0xffff;	/* 4GB limit, wraps around */

        sd.sd_type = SDT_MEMRWA;

        td->td_pcb->pcb_gsd = sd;
        PCPU_GET(fsgs_gdt)[1] = sd;

        load_gs(GSEL(GUGS_SEL, SEL_UPL));
i386_extend_pcb(struct thread *td)

    struct soft_segment_descriptor ssd = {
        0,			/* segment base address (overwritten) */
        ctob(IOPAGES + 1) - 1,	/* length */
        SDT_SYS386TSS,		/* segment type */
        0,			/* priority level */
        1,			/* descriptor present */

        0,			/* default 32 size */

    if (td->td_proc->p_flag & P_SA)
        return (EINVAL);	/* XXXKSE */
    /* XXXKSE: all the code below only works for 1:1 threading; needs changing */
    ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));

    bzero(ext, sizeof(struct pcb_ext));
    /* -16 is so we can convert a trapframe into vm86trapframe inplace */
    ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
        sizeof(struct pcb) - 16;
    ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
    /*
     * The last byte of the i/o map must be followed by an 0xff byte.
     * We arbitrarily allocate 16 bytes here, to keep the starting
     * address on a doubleword boundary.
     */
    offset = PAGE_SIZE - 16;
    ext->ext_tss.tss_ioopt =
        (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
    ext->ext_iomap = (caddr_t)ext + offset;
    ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

    addr = (u_long *)ext->ext_vm86.vm86_intmap;
    for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
        *addr++ = ~0;	/* start with every bit set */
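
    /*
     * Rough layout sketch (illustrative, not part of the original source):
     * the pcb_ext allocation spans IOPAGES + 1 pages.  The first page holds
     * the structure itself (TSS, vm86 state); the 32-byte vm86 interrupt
     * bitmap starts at offset PAGE_SIZE - 48 and is immediately followed at
     * PAGE_SIZE - 16 by the TSS I/O permission bitmap, which runs on for
     * ctob(IOPAGES) bytes plus the 16 trailing bytes that supply the
     * required terminating 0xff.  tss_ioopt carries the bitmap's offset
     * from the start of the TSS in its upper 16 bits, which is how the CPU
     * locates it.  Starting with every bit set means no port access is
     * granted until i386_set_ioperm() clears the corresponding bits.
     */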
    ssd.ssd_base = (unsigned)&ext->ext_tss;
    ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
    ssdtosd(&ssd, &ext->ext_tssd);

    KASSERT(td == curthread, ("giving TSS to !curthread"));
    KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));

    /* Switch to the new TSS. */
    td->td_pcb->pcb_ext = ext;
    PCPU_SET(private_tss, 1);
    *PCPU_GET(tss_gdt) = ext->ext_tssd;
    ltr(GSEL(GPROC0_SEL, SEL_KPL));
i386_set_ioperm(td, uap)
    struct i386_ioperm_args *uap;

    if ((error = priv_check(td, PRIV_IO)) != 0)

    if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
    /*
     * While this is restricted to root, we should probably figure out
     * whether any other driver is using this i/o address, so as not to
     * cause confusion.  This probably requires a global 'usage registry'.
     */

    if (td->td_pcb->pcb_ext == 0)
        if ((error = i386_extend_pcb(td)) != 0)
    iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

    if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)

    for (i = uap->start; i < uap->start + uap->length; i++) {
        if (uap->enable)
            iomap[i >> 3] &= ~(1 << (i & 7));
        else
            iomap[i >> 3] |= (1 << (i & 7));
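
        /*
         * Worked example (illustrative, not in the original source):
         * granting access to port 0x3f8 means i == 1016, so byte
         * 1016 >> 3 == 127 and bit 1016 & 7 == 0; the enable branch clears
         * bit 0 of iomap[127].  The CPU consults this TSS I/O permission
         * bitmap on IN/OUT from user mode and faults when the bit is set,
         * so a clear bit grants the port and a set bit revokes it.
         */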
i386_get_ioperm(td, uap)
    struct i386_ioperm_args *uap;

    if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)

    if (td->td_pcb->pcb_ext == 0) {

    iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

    state = (iomap[i >> 3] >> (i & 7)) & 1;
    uap->enable = !state;

    for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
        if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  dt_lock is acquired and released here as needed; the
 * caller may or may not already hold it.
 */
set_user_ldt(struct mdproc *mdp)

    struct proc_ldt *pldt;

    if (!mtx_owned(&dt_lock)) {
        mtx_lock_spin(&dt_lock);

    gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;

    gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;

    lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
    PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));

    mtx_unlock_spin(&dt_lock);

set_user_ldt_rv(struct vmspace *vmsp)

    if (vmsp != td->td_proc->p_vmspace)

    set_user_ldt(&td->td_proc->p_md);
/*
 * dt_lock must be held. Returns with dt_lock held.
 */
user_ldt_alloc(struct mdproc *mdp, int len)

    struct proc_ldt *pldt, *new_ldt;

    mtx_assert(&dt_lock, MA_OWNED);
    mtx_unlock_spin(&dt_lock);
    MALLOC(new_ldt, struct proc_ldt *, sizeof(struct proc_ldt),
        M_SUBPROC, M_WAITOK);

    new_ldt->ldt_len = len = NEW_MAX_LD(len);
    new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
        len * sizeof(union descriptor));
    if (new_ldt->ldt_base == NULL) {
        FREE(new_ldt, M_SUBPROC);
        mtx_lock_spin(&dt_lock);

    new_ldt->ldt_refcnt = 1;
    new_ldt->ldt_active = 0;

    mtx_lock_spin(&dt_lock);
    gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
    gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
    ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

    if ((pldt = mdp->md_ldt) != NULL) {
        if (len > pldt->ldt_len)

        bcopy(pldt->ldt_base, new_ldt->ldt_base,
            len * sizeof(union descriptor));

        bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
/*
 * Must be called with dt_lock held.  Returns with dt_lock released.
 */
user_ldt_free(struct thread *td)

    struct mdproc *mdp = &td->td_proc->p_md;
    struct proc_ldt *pldt;

    mtx_assert(&dt_lock, MA_OWNED);
    if ((pldt = mdp->md_ldt) == NULL) {
        mtx_unlock_spin(&dt_lock);

    if (td == PCPU_GET(curthread)) {

        PCPU_SET(currentldt, _default_ldt);

    user_ldt_deref(pldt);

user_ldt_deref(struct proc_ldt *pldt)

    mtx_assert(&dt_lock, MA_OWNED);
    if (--pldt->ldt_refcnt == 0) {
        mtx_unlock_spin(&dt_lock);
        kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
            pldt->ldt_len * sizeof(union descriptor));
        FREE(pldt, M_SUBPROC);

        mtx_unlock_spin(&dt_lock);
/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
i386_get_ldt(td, uap)
    struct i386_ldt_args *uap;

    struct proc_ldt *pldt;

    union descriptor *lp;

    printf("i386_get_ldt: start=%d num=%d descs=%p\n",
        uap->start, uap->num, (void *)uap->descs);

    mtx_lock_spin(&dt_lock);
    if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
        nldt = pldt->ldt_len;
        lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
        mtx_unlock_spin(&dt_lock);
        num = min(uap->num, nldt);

        mtx_unlock_spin(&dt_lock);
        nldt = sizeof(ldt)/sizeof(ldt[0]);
        num = min(uap->num, nldt);
        lp = &ldt[uap->start];

    if ((uap->start > (unsigned int)nldt) ||
        ((unsigned int)num > (unsigned int)nldt) ||
        ((unsigned int)(uap->start + num) > (unsigned int)nldt))

    error = copyout(lp, uap->descs, num * sizeof(union descriptor));

    td->td_retval[0] = num;
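
    /*
     * Illustrative note (not in the original source): the unsigned
     * comparisons above bound both the starting slot and the count against
     * the table size.  For example, with nldt == 8192, a request with
     * uap->start == 8000 and num == 8192 fails the third test
     * (8000 + 8192 > 8192), so the copyout can never run past the end of
     * the LDT.
     */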
i386_set_ldt(td, uap, descs)
    struct i386_ldt_args *uap;
    union descriptor *descs;

    struct mdproc *mdp = &td->td_proc->p_md;
    struct proc_ldt *pldt;
    union descriptor *dp;

    printf("i386_set_ldt: start=%d num=%d descs=%p\n",
        uap->start, uap->num, (void *)uap->descs);

    /* Free descriptors */
    if (uap->start == 0 && uap->num == 0) {
        /*
         * Treat this as a special case, so userland needn't
         * know magic number NLDT.
         */

        uap->num = MAX_LD - NLDT;
        mtx_lock_spin(&dt_lock);
        if ((pldt = mdp->md_ldt) == NULL ||
            uap->start >= pldt->ldt_len) {
            mtx_unlock_spin(&dt_lock);

        largest_ld = uap->start + uap->num;
        if (largest_ld > pldt->ldt_len)
            largest_ld = pldt->ldt_len;
        i = largest_ld - uap->start;
        bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
            sizeof(union descriptor) * i);
        mtx_unlock_spin(&dt_lock);

    if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
        /* verify range of descriptors to modify */
        largest_ld = uap->start + uap->num;
        if (uap->start >= MAX_LD ||
            uap->num < 0 || largest_ld > MAX_LD) {

    /* Check descriptors for access violations */
    for (i = 0; i < uap->num; i++) {

        switch (dp->sd.sd_type) {
        case SDT_SYSNULL:	/* system null */

        case SDT_SYS286TSS:	/* system 286 TSS available */
        case SDT_SYSLDT:	/* system local descriptor table */
        case SDT_SYS286BSY:	/* system 286 TSS busy */
        case SDT_SYSTASKGT:	/* system task gate */
        case SDT_SYS286IGT:	/* system 286 interrupt gate */
        case SDT_SYS286TGT:	/* system 286 trap gate */
        case SDT_SYSNULL2:	/* undefined by Intel */
        case SDT_SYS386TSS:	/* system 386 TSS available */
        case SDT_SYSNULL3:	/* undefined by Intel */
        case SDT_SYS386BSY:	/* system 386 TSS busy */
        case SDT_SYSNULL4:	/* undefined by Intel */
        case SDT_SYS386IGT:	/* system 386 interrupt gate */
        case SDT_SYS386TGT:	/* system 386 trap gate */
        case SDT_SYS286CGT:	/* system 286 call gate */
        case SDT_SYS386CGT:	/* system 386 call gate */
            /* I can't think of any reason to allow a user proc
             * to create a segment of these types.  They are
             * for OS use only.
             */
        /* memory segment types */
        case SDT_MEMEC:		/* memory execute only conforming */
        case SDT_MEMEAC:	/* memory execute only accessed conforming */
        case SDT_MEMERC:	/* memory execute read conforming */
        case SDT_MEMERAC:	/* memory execute read accessed conforming */
            /* Must be "present" if executable and conforming. */
            if (dp->sd.sd_p == 0)

        case SDT_MEMRO:		/* memory read only */
        case SDT_MEMROA:	/* memory read only accessed */
        case SDT_MEMRW:		/* memory read write */
        case SDT_MEMRWA:	/* memory read write accessed */
        case SDT_MEMROD:	/* memory read only expand dwn limit */
        case SDT_MEMRODA:	/* memory read only expand dwn lim accessed */
        case SDT_MEMRWD:	/* memory read write expand dwn limit */
        case SDT_MEMRWDA:	/* memory read write expand dwn lim accessed */
        case SDT_MEME:		/* memory execute only */
        case SDT_MEMEA:		/* memory execute only accessed */
        case SDT_MEMER:		/* memory execute read */
        case SDT_MEMERA:	/* memory execute read accessed */

        /* Only user (ring-3) descriptors may be present. */
        if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
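
        /*
         * Illustrative sketch (not in the original source) of the kind of
         * descriptor that passes the checks above, e.g. the small ring-3
         * data segment a Wine- or TLS-style setup might install; the values
         * are hypothetical:
         *
         *	union descriptor d;
         *	bzero(&d, sizeof(d));
         *	d.sd.sd_lobase  = base & 0xffffff;
         *	d.sd.sd_hibase  = (base >> 24) & 0xff;
         *	d.sd.sd_lolimit = limit & 0xffff;
         *	d.sd.sd_hilimit = (limit >> 16) & 0xf;
         *	d.sd.sd_type    = SDT_MEMRWA;	allowed memory type
         *	d.sd.sd_dpl     = SEL_UPL;	ring 3, as required above
         *	d.sd.sd_p       = 1;		present
         *	d.sd.sd_def32   = 1;		32-bit segment
         *	d.sd.sd_gran    = 1;		limit counted in pages
         */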
    if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
        /* Allocate a free slot */
        mtx_lock_spin(&dt_lock);
        if ((pldt = mdp->md_ldt) == NULL) {
            if ((error = i386_ldt_grow(td, NLDT + 1))) {
                mtx_unlock_spin(&dt_lock);
        /*
         * start scanning a bit up to leave room for NVidia and
         * Wine, which still use the "Blat" method of allocation.
         */
        dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
        for (i = NLDT; i < pldt->ldt_len; ++i) {
            if (dp->sd.sd_type == SDT_SYSNULL)

        if (i >= pldt->ldt_len) {
            if ((error = i386_ldt_grow(td, pldt->ldt_len+1))) {
                mtx_unlock_spin(&dt_lock);

        error = i386_set_ldt_data(td, i, 1, descs);
        mtx_unlock_spin(&dt_lock);
        largest_ld = uap->start + uap->num;
        mtx_lock_spin(&dt_lock);
        if (!(error = i386_ldt_grow(td, largest_ld))) {
            error = i386_set_ldt_data(td, uap->start, uap->num,
                descs);

        mtx_unlock_spin(&dt_lock);
    td->td_retval[0] = uap->start;

i386_set_ldt_data(struct thread *td, int start, int num,
    union descriptor *descs)

    struct mdproc *mdp = &td->td_proc->p_md;
    struct proc_ldt *pldt = mdp->md_ldt;

    mtx_assert(&dt_lock, MA_OWNED);
    bcopy(descs,
        &((union descriptor *)(pldt->ldt_base))[start],
        num * sizeof(union descriptor));
i386_ldt_grow(struct thread *td, int len)

    struct mdproc *mdp = &td->td_proc->p_md;
    struct proc_ldt *new_ldt, *pldt;
    caddr_t old_ldt_base = NULL_LDT_BASE;

    mtx_assert(&dt_lock, MA_OWNED);
    /* Allocate a user ldt. */
    if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
        new_ldt = user_ldt_alloc(mdp, len);

        if (new_ldt->ldt_len <= pldt->ldt_len) {
            /*
             * We just lost the race for allocation, so
             * free the new object and return.
             */
            mtx_unlock_spin(&dt_lock);
            kmem_free(kernel_map,
                (vm_offset_t)new_ldt->ldt_base,
                new_ldt->ldt_len * sizeof(union descriptor));
            FREE(new_ldt, M_SUBPROC);
            mtx_lock_spin(&dt_lock);

            /*
             * We have to substitute the current LDT entry for
             * curproc with the new one since its size grew.
             */
            old_ldt_base = pldt->ldt_base;
            old_ldt_len = pldt->ldt_len;
            pldt->ldt_sd = new_ldt->ldt_sd;
            pldt->ldt_base = new_ldt->ldt_base;
            pldt->ldt_len = new_ldt->ldt_len;

            mdp->md_ldt = pldt = new_ldt;
        /*
         * Signal other cpus to reload ldt.  We need to unlock dt_lock
         * here because the other CPUs will contend for it: their
         * curthreads do not hold the lock and would block when trying
         * to acquire it.
         */
        mtx_unlock_spin(&dt_lock);
        smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
            NULL, td->td_proc->p_vmspace);
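
        /*
         * Illustrative note (not in the original source): in the SMP build,
         * smp_rendezvous() runs set_user_ldt_rv() on every CPU; that helper
         * (defined above) reloads the LDT only on CPUs currently running a
         * thread from this process's vmspace, so every processor executing
         * the process picks up the grown table.  In the non-SMP build the
         * local set_user_ldt() call below suffices.
         */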
        set_user_ldt(&td->td_proc->p_md);
        mtx_unlock_spin(&dt_lock);

        if (old_ldt_base != NULL_LDT_BASE) {
            kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
                old_ldt_len * sizeof(union descriptor));
            FREE(new_ldt, M_SUBPROC);

        mtx_lock_spin(&dt_lock);