 * Copyright (c) 1990 The Regents of the University of California.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_kstack_pages.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>
#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>
#include <vm/vm_kern.h> /* for kernel_map */
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num) ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
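/*
 * Illustrative note (not in the original source): NEW_MAX_LD() rounds a
 * descriptor count up to a whole page of 8-byte descriptors, e.g.
 * NEW_MAX_LD(1) == 512 and NEW_MAX_LD(513) == 1024; a count that is already
 * a multiple of 512 still gains a full extra page (NEW_MAX_LD(512) == 1024).
 * SIZE_FROM_LARGEST_LD() converts that rounded count to bytes (<< 3, since
 * sizeof(union descriptor) == 8).
 */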
static int i386_set_ldt_data(struct thread *, int start, int num,
union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);
static void set_user_ldt_rv(struct thread *);
#ifndef _SYS_SYSPROTO_H_
register struct sysarch_args *uap;
struct i386_ldt_args largs;
struct i386_ioperm_args iargs;
struct segment_descriptor sd, *sdp;
AUDIT_ARG(cmd, uap->op);
case I386_GET_IOPERM:
case I386_SET_IOPERM:
if ((error = copyin(uap->parms, &kargs.iargs,
sizeof(struct i386_ioperm_args))) != 0)
if ((error = copyin(uap->parms, &kargs.largs,
sizeof(struct i386_ldt_args))) != 0)
if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
error = i386_get_ldt(td, &kargs.largs);
if (kargs.largs.descs != NULL) {
lp = (union descriptor *)kmem_alloc(kernel_map,
kargs.largs.num * sizeof(union descriptor));
error = copyin(kargs.largs.descs, lp,
kargs.largs.num * sizeof(union descriptor));
error = i386_set_ldt(td, &kargs.largs, lp);
kmem_free(kernel_map, (vm_offset_t)lp,
kargs.largs.num * sizeof(union descriptor));
error = i386_set_ldt(td, &kargs.largs, NULL);
case I386_GET_IOPERM:
error = i386_get_ioperm(td, &kargs.iargs);
error = copyout(&kargs.iargs, uap->parms,
sizeof(struct i386_ioperm_args));
case I386_SET_IOPERM:
error = i386_set_ioperm(td, &kargs.iargs);
error = vm86_sysarch(td, uap->parms);
case I386_GET_FSBASE:
sdp = &td->td_pcb->pcb_fsd;
base = sdp->sd_hibase << 24 | sdp->sd_lobase;
error = copyout(&base, uap->parms, sizeof(base));
case I386_SET_FSBASE:
error = copyin(uap->parms, &base, sizeof(base));
 * Construct a descriptor and store it in the pcb for
 * the next context switch. Also store it in the gdt
 * so that the load of tf_fs into %fs will activate it
 * at return to userland.
sd.sd_lobase = base & 0xffffff;
sd.sd_hibase = (base >> 24) & 0xff;
sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
sd.sd_type = SDT_MEMRWA;
td->td_pcb->pcb_fsd = sd;
PCPU_GET(fsgs_gdt)[0] = sd;
td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
case I386_GET_GSBASE:
sdp = &td->td_pcb->pcb_gsd;
base = sdp->sd_hibase << 24 | sdp->sd_lobase;
error = copyout(&base, uap->parms, sizeof(base));
case I386_SET_GSBASE:
error = copyin(uap->parms, &base, sizeof(base));
 * Construct a descriptor and store it in the pcb for
 * the next context switch. Also store it in the gdt
 * because we have to do a load_gs() right now.
sd.sd_lobase = base & 0xffffff;
sd.sd_hibase = (base >> 24) & 0xff;
sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
sd.sd_type = SDT_MEMRWA;
td->td_pcb->pcb_gsd = sd;
PCPU_GET(fsgs_gdt)[1] = sd;
load_gs(GSEL(GUGS_SEL, SEL_UPL));
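/*
 * Illustrative note (not in the original source): from userland these cases
 * are reached through sysarch(2) with uap->parms pointing at the base value,
 * e.g. roughly
 *
 *	unsigned int base = ...;
 *	sysarch(I386_SET_GSBASE, &base);
 *
 * and the I386_GET_* variants copy the current base back out the same way.
 */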
i386_extend_pcb(struct thread *td)
struct soft_segment_descriptor ssd = {
0, /* segment base address (overwritten) */
ctob(IOPAGES + 1) - 1, /* length */
SDT_SYS386TSS, /* segment type */
0, /* priority level */
1, /* descriptor present */
0, /* default 32 size */
if (td->td_proc->p_flag & P_SA)
return (EINVAL); /* XXXKSE */
/* XXXKSE: all the code below only works for 1:1 threading and needs changing */
ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));
bzero(ext, sizeof(struct pcb_ext));
/* -16 is so we can convert a trapframe into a vm86trapframe in place */
ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
sizeof(struct pcb) - 16;
ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
 * The last byte of the i/o map must be followed by an 0xff byte.
 * We arbitrarily allocate 16 bytes here, to keep the starting
 * address on a doubleword boundary.
offset = PAGE_SIZE - 16;
ext->ext_tss.tss_ioopt =
(offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
ext->ext_iomap = (caddr_t)ext + offset;
ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;
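/*
 * Illustrative note (not in the original source): the initialization loop
 * below presumably fills both maps with all-one bits. In the TSS i/o
 * permission bitmap a set bit denies access to the corresponding port, so a
 * freshly extended pcb denies every port until i386_set_ioperm() clears
 * individual bits.
 */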
addr = (u_long *)ext->ext_vm86.vm86_intmap;
for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
ssd.ssd_base = (unsigned)&ext->ext_tss;
ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
ssdtosd(&ssd, &ext->ext_tssd);
KASSERT(td == curthread, ("giving TSS to !curthread"));
KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));
/* Switch to the new TSS. */
td->td_pcb->pcb_ext = ext;
PCPU_SET(private_tss, 1);
*PCPU_GET(tss_gdt) = ext->ext_tssd;
ltr(GSEL(GPROC0_SEL, SEL_KPL));
i386_set_ioperm(td, uap)
struct i386_ioperm_args *uap;
if ((error = mac_check_sysarch_ioperm(td->td_ucred)) != 0)
if ((error = priv_check(td, PRIV_IO)) != 0)
if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
 * While this is restricted to root, we should probably figure out
 * whether any other driver is using this i/o address, so as not to
 * cause confusion. This probably requires a global 'usage registry'.
if (td->td_pcb->pcb_ext == 0)
if ((error = i386_extend_pcb(td)) != 0)
iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
for (i = uap->start; i < uap->start + uap->length; i++) {
iomap[i >> 3] &= ~(1 << (i & 7));
iomap[i >> 3] |= (1 << (i & 7));
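/*
 * Illustrative note (not in the original source): a cleared bit in the i/o
 * permission bitmap grants access to that port, which is why enabling access
 * clears bits here and why i386_get_ioperm() below reports the inverse of
 * the stored bit.
 */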
i386_get_ioperm(td, uap)
struct i386_ioperm_args *uap;
if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
if (td->td_pcb->pcb_ext == 0) {
iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
state = (iomap[i >> 3] >> (i & 7)) & 1;
uap->enable = !state;
for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * This must be called with sched_lock held. Unfortunately, we can't use a
 * mtx_assert() here because cpu_switch() calls this function after changing
 * curproc but before sched_lock's owner is updated in mi_switch().
set_user_ldt(struct mdproc *mdp)
struct proc_ldt *pldt;
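/*
 * Illustrative note (not in the original source): under SMP each CPU has its
 * own NGDT-entry slice of gdt[], so the user LDT descriptor is patched into
 * the slice belonging to the current CPU; a UP kernel has a single gdt[] and
 * uses GUSERLDT_SEL directly.
 */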
gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
set_user_ldt_rv(struct thread *td)
if (td->td_proc != curthread->td_proc)
set_user_ldt(&td->td_proc->p_md);
 * Must be called with either sched_lock free or held but not recursed.
 * If it does not return NULL, it will return with sched_lock owned.
user_ldt_alloc(struct mdproc *mdp, int len)
struct proc_ldt *pldt, *new_ldt;
if (mtx_owned(&sched_lock))
mtx_unlock_spin(&sched_lock);
mtx_assert(&sched_lock, MA_NOTOWNED);
MALLOC(new_ldt, struct proc_ldt *, sizeof(struct proc_ldt),
M_SUBPROC, M_WAITOK);
new_ldt->ldt_len = len = NEW_MAX_LD(len);
new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
len * sizeof(union descriptor));
if (new_ldt->ldt_base == NULL) {
FREE(new_ldt, M_SUBPROC);
new_ldt->ldt_refcnt = 1;
new_ldt->ldt_active = 0;
mtx_lock_spin(&sched_lock);
gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);
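/*
 * Illustrative note (not in the original source): the gdt_segs[GUSERLDT_SEL]
 * template is used as scratch space to describe the new LDT's base and
 * limit; ssdtosd() packs it into the hardware descriptor format that
 * set_user_ldt() later installs in the per-CPU GDT.
 */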
if ((pldt = mdp->md_ldt)) {
if (len > pldt->ldt_len)
bcopy(pldt->ldt_base, new_ldt->ldt_base,
len * sizeof(union descriptor));
bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
 * Must be called either with sched_lock free or held but not recursed.
 * If md_ldt is not NULL, it will return with sched_lock released.
user_ldt_free(struct thread *td)
struct mdproc *mdp = &td->td_proc->p_md;
struct proc_ldt *pldt = mdp->md_ldt;
if (!mtx_owned(&sched_lock))
mtx_lock_spin(&sched_lock);
mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
if (td == PCPU_GET(curthread)) {
PCPU_SET(currentldt, _default_ldt);
if (--pldt->ldt_refcnt == 0) {
mtx_unlock_spin(&sched_lock);
kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
pldt->ldt_len * sizeof(union descriptor));
FREE(pldt, M_SUBPROC);
mtx_unlock_spin(&sched_lock);
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
i386_get_ldt(td, uap)
struct i386_ldt_args *uap;
struct proc_ldt *pldt = td->td_proc->p_md.md_ldt;
union descriptor *lp;
printf("i386_get_ldt: start=%d num=%d descs=%p\n",
uap->start, uap->num, (void *)uap->descs);
nldt = pldt->ldt_len;
num = min(uap->num, nldt);
lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
nldt = sizeof(ldt)/sizeof(ldt[0]);
num = min(uap->num, nldt);
lp = &ldt[uap->start];
if ((uap->start > (unsigned int)nldt) ||
((unsigned int)num > (unsigned int)nldt) ||
((unsigned int)(uap->start + num) > (unsigned int)nldt))
error = copyout(lp, uap->descs, num * sizeof(union descriptor));
td->td_retval[0] = num;
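/*
 * Illustrative note (not in the original source): userland normally reaches
 * i386_get_ldt()/i386_set_ldt() through the libc wrappers documented in
 * i386_set_ldt(3), roughly
 *
 *	union descriptor desc;
 *	int slot = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
 *	i386_get_ldt(slot, &desc, 1);
 *
 * where the wrapper marshals the arguments into struct i386_ldt_args and
 * calls sysarch(2); see the man page for the exact return-value semantics.
 */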
static int ldt_warnings;
#define NUM_LDT_WARNINGS 10
i386_set_ldt(td, uap, descs)
struct i386_ldt_args *uap;
union descriptor *descs;
struct mdproc *mdp = &td->td_proc->p_md;
struct proc_ldt *pldt;
union descriptor *dp;
printf("i386_set_ldt: start=%d num=%d descs=%p\n",
uap->start, uap->num, (void *)uap->descs);
/* Free descriptors */
if (uap->start == 0 && uap->num == 0) {
 * Treat this as a special case, so userland needn't
 * know the magic number NLDT.
uap->num = MAX_LD - NLDT;
mtx_lock_spin(&sched_lock);
if (pldt == NULL || uap->start >= pldt->ldt_len) {
mtx_unlock_spin(&sched_lock);
largest_ld = uap->start + uap->num;
if (largest_ld > pldt->ldt_len)
largest_ld = pldt->ldt_len;
i = largest_ld - uap->start;
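/*
 * Illustrative note (not in the original source): zeroed descriptors read
 * back with sd_type == SDT_SYSNULL, so entries freed here can be found and
 * reused by the LDT_AUTO_ALLOC scan further down.
 */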
bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
sizeof(union descriptor) * i);
mtx_unlock_spin(&sched_lock);
if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
/* complain for a while if using old methods */
if (ldt_warnings++ < NUM_LDT_WARNINGS) {
printf("Warning: pid %d used static ldt allocation.\n",
printf("See the i386_set_ldt man page for more info\n");
/* verify range of descriptors to modify */
largest_ld = uap->start + uap->num;
if (uap->start >= MAX_LD ||
uap->num < 0 || largest_ld > MAX_LD) {
/* Check descriptors for access violations */
for (i = 0; i < uap->num; i++) {
switch (dp->sd.sd_type) {
case SDT_SYSNULL: /* system null */
case SDT_SYS286TSS: /* system 286 TSS available */
case SDT_SYSLDT: /* system local descriptor table */
case SDT_SYS286BSY: /* system 286 TSS busy */
case SDT_SYSTASKGT: /* system task gate */
case SDT_SYS286IGT: /* system 286 interrupt gate */
case SDT_SYS286TGT: /* system 286 trap gate */
case SDT_SYSNULL2: /* undefined by Intel */
case SDT_SYS386TSS: /* system 386 TSS available */
case SDT_SYSNULL3: /* undefined by Intel */
case SDT_SYS386BSY: /* system 386 TSS busy */
case SDT_SYSNULL4: /* undefined by Intel */
case SDT_SYS386IGT: /* system 386 interrupt gate */
case SDT_SYS386TGT: /* system 386 trap gate */
case SDT_SYS286CGT: /* system 286 call gate */
case SDT_SYS386CGT: /* system 386 call gate */
/* I can't think of any reason to allow a user proc
 * to create a segment of these types. They are
/* memory segment types */
case SDT_MEMEC: /* memory execute only conforming */
case SDT_MEMEAC: /* memory execute only accessed conforming */
case SDT_MEMERC: /* memory execute read conforming */
case SDT_MEMERAC: /* memory execute read accessed conforming */
/* Must be "present" if executable and conforming. */
if (dp->sd.sd_p == 0)
case SDT_MEMRO: /* memory read only */
case SDT_MEMROA: /* memory read only accessed */
case SDT_MEMRW: /* memory read write */
case SDT_MEMRWA: /* memory read write accessed */
case SDT_MEMROD: /* memory read only expand dwn limit */
case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
case SDT_MEMRWD: /* memory read write expand dwn limit */
case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
case SDT_MEME: /* memory execute only */
case SDT_MEMEA: /* memory execute only accessed */
case SDT_MEMER: /* memory execute read */
case SDT_MEMERA: /* memory execute read accessed */
/* Only user (ring-3) descriptors may be present. */
if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
/* Allocate a free slot */
error = i386_ldt_grow(td, NLDT + 1);
mtx_lock_spin(&sched_lock);
 * start scanning a bit up to leave room for NVidia and
 * Wine, which still use the "Blat" method of allocation.
dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
for (i = NLDT; i < pldt->ldt_len; ++i) {
if (dp->sd.sd_type == SDT_SYSNULL)
if (i >= pldt->ldt_len) {
mtx_unlock_spin(&sched_lock);
error = i386_ldt_grow(td, pldt->ldt_len+1);
error = i386_set_ldt_data(td, i, 1, descs);
mtx_unlock_spin(&sched_lock);
largest_ld = uap->start + uap->num;
error = i386_ldt_grow(td, largest_ld);
mtx_lock_spin(&sched_lock);
error = i386_set_ldt_data(td, uap->start, uap->num,
mtx_unlock_spin(&sched_lock);
td->td_retval[0] = uap->start;
i386_set_ldt_data(struct thread *td, int start, int num,
union descriptor *descs)
struct mdproc *mdp = &td->td_proc->p_md;
struct proc_ldt *pldt = mdp->md_ldt;
mtx_assert(&sched_lock, MA_OWNED);
&((union descriptor *)(pldt->ldt_base))[start],
num * sizeof(union descriptor));
i386_ldt_grow(struct thread *td, int len)
struct mdproc *mdp = &td->td_proc->p_md;
struct proc_ldt *pldt;
caddr_t old_ldt_base;
/* Allocate a user ldt. */
if (!pldt || len > pldt->ldt_len) {
struct proc_ldt *new_ldt;
new_ldt = user_ldt_alloc(mdp, len);
/* sched_lock was acquired by user_ldt_alloc. */
if (new_ldt->ldt_len > pldt->ldt_len) {
old_ldt_base = pldt->ldt_base;
old_ldt_len = pldt->ldt_len;
pldt->ldt_sd = new_ldt->ldt_sd;
pldt->ldt_base = new_ldt->ldt_base;
pldt->ldt_len = new_ldt->ldt_len;
mtx_unlock_spin(&sched_lock);
kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
old_ldt_len * sizeof(union descriptor));
FREE(new_ldt, M_SUBPROC);
mtx_lock_spin(&sched_lock);
 * If other threads already did the work,
mtx_unlock_spin(&sched_lock);
kmem_free(kernel_map,
(vm_offset_t)new_ldt->ldt_base,
new_ldt->ldt_len * sizeof(union descriptor));
FREE(new_ldt, M_SUBPROC);
mdp->md_ldt = pldt = new_ldt;
mtx_unlock_spin(&sched_lock);
/* signal other cpus to reload ldt */
smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
mtx_unlock_spin(&sched_lock);