/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#include <vm/vm_kern.h>		/* for kernel_map */
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num)  rounddown2(num + LD_PER_PAGE, LD_PER_PAGE)
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define	NULL_LDT_BASE	((caddr_t)NULL)
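
/*
 * Worked example of the sizing macros above: each LDT descriptor is 8
 * bytes, so LD_PER_PAGE (512) descriptors fill exactly one 4K page.
 * NEW_MAX_LD(1) rounds up to 512, and SIZE_FROM_LARGEST_LD(1) is then
 * 512 << 3 == 4096 bytes (one page); NEW_MAX_LD(513) == 1024 (two pages).
 */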
#ifdef SMP
static void set_user_ldt_rv(void *arg);
#endif
static int i386_set_ldt_data(struct thread *, int start, int num,
    union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);
static void
fill_based_sd(struct segment_descriptor *sdp, uint32_t base)
{

	sdp->sd_lobase = base & 0xffffff;
	sdp->sd_hibase = (base >> 24) & 0xff;
	sdp->sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sdp->sd_hilimit = 0xf;
	sdp->sd_type = SDT_MEMRWA;
	sdp->sd_dpl = SEL_UPL;
	sdp->sd_p = 1;
	sdp->sd_xx = 0;
	sdp->sd_def32 = 1;
	sdp->sd_gran = 1;
}
/*
 * Construct special descriptors for "base" selectors.  Store them in
 * the PCB for later use by cpu_switch().  Store them in the GDT for
 * more immediate use.  The GDT entries are part of the current
 * context.  Callers must load related segment registers to complete
 * setting up the current context.
 */
void
set_fsbase(struct thread *td, uint32_t base)
{
	struct segment_descriptor sd;

	fill_based_sd(&sd, base);
	critical_enter();
	td->td_pcb->pcb_fsd = sd;
	PCPU_GET(fsgs_gdt)[0] = sd;
	critical_exit();
}

void
set_gsbase(struct thread *td, uint32_t base)
{
	struct segment_descriptor sd;

	fill_based_sd(&sd, base);
	critical_enter();
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	critical_exit();
}
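
/*
 * Illustrative userland counterpart (not part of this file, not
 * compiled): the i386_get_gsbase()/i386_set_gsbase() libc wrappers
 * declared in <machine/sysarch.h> reach the I386_GET_GSBASE and
 * I386_SET_GSBASE cases of sysarch() below, which in turn call
 * set_gsbase() above.  A minimal sketch:
 */
#if 0
#include <machine/sysarch.h>

static void
example_gsbase(void)
{
	void *base;

	i386_get_gsbase(&base);	/* copies the PCB descriptor's base out */
	i386_set_gsbase(base);	/* rebuilds the descriptor via set_gsbase() */
}
#endif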
#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif

int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error;
	union descriptor *lp;
	union {
		struct i386_ldt_args largs;
		struct i386_ioperm_args iargs;
		struct i386_get_xfpustate xfpu;
	} kargs;
	uint32_t base;
	struct segment_descriptor *sdp;

	AUDIT_ARG_CMD(uap->op);

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &kargs.iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_LDT:
	case I386_SET_LDT:
		if ((error = copyin(uap->parms, &kargs.largs,
		    sizeof(struct i386_ldt_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &kargs.xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_LDT:
		error = i386_get_ldt(td, &kargs.largs);
		break;
	case I386_SET_LDT:
		if (kargs.largs.descs != NULL) {
			if (kargs.largs.num > MAX_LD)
				return (EINVAL);
			lp = malloc(kargs.largs.num * sizeof(union descriptor),
			    M_TEMP, M_WAITOK);
			error = copyin(kargs.largs.descs, lp,
			    kargs.largs.num * sizeof(union descriptor));
			if (error == 0)
				error = i386_set_ldt(td, &kargs.largs, lp);
			free(lp, M_TEMP);
		} else {
			error = i386_set_ldt(td, &kargs.largs, NULL);
		}
		break;
	case I386_GET_IOPERM:
		error = i386_get_ioperm(td, &kargs.iargs);
		if (error == 0)
			error = copyout(&kargs.iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = i386_set_ioperm(td, &kargs.iargs);
		break;
	case I386_VM86:
		error = vm86_sysarch(td, uap->parms);
		break;
	case I386_GET_FSBASE:
		sdp = &td->td_pcb->pcb_fsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct the special descriptor for fsbase
			 * and arrange for doreti to load its selector
			 * soon enough.
			 */
			set_fsbase(td, base);
			td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
		}
		break;
	case I386_GET_GSBASE:
		sdp = &td->td_pcb->pcb_gsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct the special descriptor for gsbase.
			 * The selector is loaded immediately, since we
			 * normally only reload %gs on context switches.
			 */
			set_gsbase(td, base);
			load_gs(GSEL(GUGS_SEL, SEL_UPL));
		}
		break;
	case I386_GET_XFPUSTATE:
		if (kargs.xfpu.len > cpu_max_ext_state_size -
		    sizeof(union savefpu))
			return (EINVAL);
		npxgetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    kargs.xfpu.addr, kargs.xfpu.len);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
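
/*
 * Illustrative (not compiled): in capability mode the switch above
 * permits the get/set-base operations but rejects I386_SET_IOPERM
 * with ECAPMODE.  A hypothetical userland probe:
 */
#if 0
#include <sys/capsicum.h>
#include <machine/sysarch.h>
#include <assert.h>
#include <errno.h>

static void
example_capmode(void)
{
	void *base;

	cap_enter();				/* enter capability mode */
	assert(i386_get_fsbase(&base) == 0);	/* still permitted */
	assert(i386_set_ioperm(0x378, 1, 1) == -1 && errno == ECAPMODE);
}
#endif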
int
i386_extend_pcb(struct thread *td)
{
	int i, offset;
	u_long *addr;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		ctob(IOPAGES + 1) - 1,	/* length */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 32 size */
		0			/* granularity */
	};

	ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES + 1),
	    M_WAITOK | M_ZERO);
	/* -16 is so we can convert a trapframe into vm86trapframe in place */
	ext->ext_tss.tss_esp0 = (vm_offset_t)td->td_pcb - 16;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	/*
	 * The last byte of the i/o map must be followed by an 0xff byte.
	 * We arbitrarily allocate 16 bytes here, to keep the starting
	 * address on a doubleword boundary.
	 */
	offset = PAGE_SIZE - 16;
	ext->ext_tss.tss_ioopt =
	    (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
	ext->ext_iomap = (caddr_t)ext + offset;
	ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

	addr = (u_long *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
		*addr++ = ~0;

	ssd.ssd_base = (unsigned)&ext->ext_tss;
	ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
	ssdtosd(&ssd, &ext->ext_tssd);

	KASSERT(td == curthread, ("giving TSS to !curthread"));
	KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));

	/* Switch to the new TSS. */
	critical_enter();
	td->td_pcb->pcb_ext = ext;
	PCPU_SET(private_tss, 1);
	*PCPU_GET(tss_gdt) = ext->ext_tssd;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
	critical_exit();

	return (0);
}
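
/*
 * Resulting layout of the ctob(IOPAGES + 1) allocation (illustrative):
 * the pcb_ext structure sits at the start of the first page; the
 * 32-byte vm86 interrupt redirection map ends 16 bytes short of the
 * first page boundary; and the I/O bitmap proper starts at offset
 * PAGE_SIZE - 16 and runs for ctob(IOPAGES) bytes.  That leaves the
 * final 16 bytes of the allocation, which the ~0 fill above sets to
 * 0xff, satisfying the required bitmap terminator byte.
 */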
int
i386_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */

	if (td->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(td)) != 0)
			return (error);
	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}
int
i386_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	if (td->td_pcb->pcb_ext == 0) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}
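
/*
 * Illustrative userland counterpart (not compiled): the libc wrappers
 * i386_set_ioperm()/i386_get_ioperm() from <machine/sysarch.h> funnel
 * into the two functions above via sysarch(2).  A minimal sketch
 * (example_probe_port is a hypothetical name):
 */
#if 0
#include <machine/sysarch.h>

static int
example_probe_port(unsigned int port)
{
	unsigned int length;
	int enable;

	/* Grant access to one port; needs root and securelevel <= 0. */
	if (i386_set_ioperm(port, 1, 1) == -1)
		return (-1);
	/* Read back the run-length-encoded state of that port. */
	return (i386_get_ioperm(port, &length, &enable));
}
#endif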
/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  Manage dt_lock holding/unholding autonomously.
 */
static void
set_user_ldt_locked(struct mdproc *mdp)
{
	struct proc_ldt *pldt;
	int gdt_idx;

	mtx_assert(&dt_lock, MA_OWNED);

	pldt = mdp->md_ldt;
	gdt_idx = GUSERLDT_SEL;
	gdt_idx += PCPU_GET(cpuid) * NGDT;	/* always 0 on UP */
	gdt[gdt_idx].sd = pldt->ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
	PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
}

void
set_user_ldt(struct mdproc *mdp)
{

	mtx_lock_spin(&dt_lock);
	set_user_ldt_locked(mdp);
	mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
static void
set_user_ldt_rv(void *arg)
{
	struct proc *p;

	p = curproc;
	if (arg == p->p_vmspace)
		set_user_ldt(&p->p_md);
}
#endif
/*
 * dt_lock must be held.  Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
	struct proc_ldt *pldt, *new_ldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mtx_unlock_spin(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);

	new_ldt->ldt_len = len = NEW_MAX_LD(len);
	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
	    len * sizeof(union descriptor), M_WAITOK | M_ZERO);
	new_ldt->ldt_refcnt = 1;
	new_ldt->ldt_active = 0;

	mtx_lock_spin(&dt_lock);
	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
	gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
	ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

	if ((pldt = mdp->md_ldt) != NULL) {
		if (len > pldt->ldt_len)
			len = pldt->ldt_len;
		bcopy(pldt->ldt_base, new_ldt->ldt_base,
		    len * sizeof(union descriptor));
	} else
		bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));

	return (new_ldt);
}
/*
 * Must be called with dt_lock held.  Returns with dt_lock unheld.
 */
void
user_ldt_free(struct thread *td)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &td->td_proc->p_md;
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock_spin(&dt_lock);
		return;
	}

	if (td == curthread) {
		lldt(_default_ldt);
		PCPU_SET(currentldt, _default_ldt);
	}

	mdp->md_ldt = NULL;
	user_ldt_deref(pldt);
}
void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	if (--pldt->ldt_refcnt == 0) {
		mtx_unlock_spin(&dt_lock);
		kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
		    pldt->ldt_len * sizeof(union descriptor));
		free(pldt, M_SUBPROC);
	} else
		mtx_unlock_spin(&dt_lock);
}
/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
i386_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	char *data;
	u_int nldt, num;
	int error;

#ifdef DEBUG
	printf("i386_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	num = min(uap->num, MAX_LD);
	data = malloc(num * sizeof(union descriptor), M_TEMP, M_WAITOK);
	mtx_lock_spin(&dt_lock);
	pldt = td->td_proc->p_md.md_ldt;
	nldt = pldt != NULL ? pldt->ldt_len : nitems(ldt);
	if (uap->start >= nldt) {
		num = 0;
	} else {
		num = min(num, nldt - uap->start);
		bcopy(pldt != NULL ?
		    &((union descriptor *)(pldt->ldt_base))[uap->start] :
		    &ldt[uap->start], data, num * sizeof(union descriptor));
	}
	mtx_unlock_spin(&dt_lock);
	error = copyout(data, uap->descs, num * sizeof(union descriptor));
	if (error == 0)
		td->td_retval[0] = num;
	free(data, M_TEMP);
	return (error);
}
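
/*
 * Illustrative (not compiled): reading descriptors back through the
 * i386_get_ldt(3) libc wrapper.  num is clamped to MAX_LD and to the
 * current table length; the count actually copied is returned via
 * td_retval[0]:
 */
#if 0
#include <machine/segments.h>
#include <machine/sysarch.h>

static int
example_read_ldt(void)
{
	union descriptor descs[8];

	/* Returns the number of descriptors copied, or -1 on error. */
	return (i386_get_ldt(0, descs, 8));
}
#endif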
int
i386_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    union descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	union descriptor *dp;
	u_int largest_ld, i;
	int error;

#ifdef DEBUG
	printf("i386_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	error = 0;
	mdp = &td->td_proc->p_md;

	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0) {
			/*
			 * Treat this as a special case, so userland needn't
			 * know magic number NLDT.
			 */
			uap->start = NLDT;
			uap->num = MAX_LD - NLDT;
		}
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= pldt->ldt_len) {
			mtx_unlock_spin(&dt_lock);
			return (0);
		}
		largest_ld = uap->start + uap->num;
		if (largest_ld > pldt->ldt_len)
			largest_ld = pldt->ldt_len;
		for (i = uap->start; i < largest_ld; i++)
			atomic_store_rel_64(&((uint64_t *)(pldt->ldt_base))[i],
			    0);
		mtx_unlock_spin(&dt_lock);
		return (0);
	}

	if (uap->start != LDT_AUTO_ALLOC || uap->num != 1) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= MAX_LD || largest_ld > MAX_LD)
			return (EINVAL);
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd.sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd.sd_p = 0;
			break;
		case SDT_SYS286TSS: /* system 286 TSS available */
		case SDT_SYSLDT:    /* system local descriptor table */
		case SDT_SYS286BSY: /* system 286 TSS busy */
		case SDT_SYSTASKGT: /* system task gate */
		case SDT_SYS286IGT: /* system 286 interrupt gate */
		case SDT_SYS286TGT: /* system 286 trap gate */
		case SDT_SYSNULL2:  /* undefined by Intel */
		case SDT_SYS386TSS: /* system 386 TSS available */
		case SDT_SYSNULL3:  /* undefined by Intel */
		case SDT_SYS386BSY: /* system 386 TSS busy */
		case SDT_SYSNULL4:  /* undefined by Intel */
		case SDT_SYS386IGT: /* system 386 interrupt gate */
		case SDT_SYS386TGT: /* system 386 trap gate */
		case SDT_SYS286CGT: /* system 286 call gate */
		case SDT_SYS386CGT: /* system 386 call gate */
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:   /* memory execute only conforming */
		case SDT_MEMEAC:  /* memory execute only accessed conforming */
		case SDT_MEMERC:  /* memory execute read conforming */
		case SDT_MEMERAC: /* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd.sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:   /* memory read only */
		case SDT_MEMROA:  /* memory read only accessed */
		case SDT_MEMRW:   /* memory read write */
		case SDT_MEMRWA:  /* memory read write accessed */
		case SDT_MEMROD:  /* memory read only expand dwn limit */
		case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:  /* memory read write expand dwn limit */
		case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
		case SDT_MEME:    /* memory execute only */
		case SDT_MEMEA:   /* memory execute only accessed */
		case SDT_MEMER:   /* memory execute read */
		case SDT_MEMERA:  /* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if (dp->sd.sd_p != 0 && dp->sd.sd_dpl != SEL_UPL)
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL) {
			if ((error = i386_ldt_grow(td, NLDT + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			pldt = mdp->md_ldt;
		}
again:
		/*
		 * start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
		for (i = NLDT; i < pldt->ldt_len; ++i) {
			if (dp->sd.sd_type == SDT_SYSNULL)
				break;
			dp++;
		}
		if (i >= pldt->ldt_len) {
			if ((error = i386_ldt_grow(td, pldt->ldt_len + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			goto again;
		}
		uap->start = i;
		error = i386_set_ldt_data(td, i, 1, descs);
		mtx_unlock_spin(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		mtx_lock_spin(&dt_lock);
		if (!(error = i386_ldt_grow(td, largest_ld))) {
			error = i386_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock_spin(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}
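
/*
 * Illustrative (not compiled): allocating a fresh slot through the
 * LDT_AUTO_ALLOC path above using the i386_set_ldt(3) libc wrapper;
 * the slot index chosen by the scan lands in td_retval[0] and is
 * returned to the caller:
 */
#if 0
#include <machine/segments.h>
#include <machine/sysarch.h>

static int
example_alloc_ldt_slot(union descriptor *sd)
{

	/* start == LDT_AUTO_ALLOC with num == 1: kernel picks the slot. */
	return (i386_set_ldt(LDT_AUTO_ALLOC, sd, 1));
}
#endif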
static int
i386_set_ldt_data(struct thread *td, int start, int num,
    union descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (uint64_t *)(pldt->ldt_base);
	src = (uint64_t *)descs;

	/*
	 * Atomic(9) is used only to get 64bit atomic store with
	 * cmpxchg8b when available.  There is no op without release
	 * semantics.
	 */
	for (i = 0; i < num; i++)
		atomic_store_rel_64(&dst[start + i], src[i]);
	return (0);
}
static int
i386_ldt_grow(struct thread *td, int len)
{
	struct mdproc *mdp;
	struct proc_ldt *new_ldt, *pldt;
	caddr_t old_ldt_base;
	int old_ldt_len;

	mtx_assert(&dt_lock, MA_OWNED);

	if (len > MAX_LD)
		return (ENOMEM);
	if (len < NLDT + 1)
		len = NLDT + 1;

	mdp = &td->td_proc->p_md;
	old_ldt_base = NULL_LDT_BASE;
	old_ldt_len = 0;

	/* Allocate a user ldt. */
	if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
		new_ldt = user_ldt_alloc(mdp, len);
		if (new_ldt == NULL)
			return (ENOMEM);
		pldt = mdp->md_ldt;

		if (pldt != NULL) {
			if (new_ldt->ldt_len <= pldt->ldt_len) {
				/*
				 * We just lost the race for allocation, so
				 * free the new object and return.
				 */
				mtx_unlock_spin(&dt_lock);
				kmem_free(kernel_arena,
				    (vm_offset_t)new_ldt->ldt_base,
				    new_ldt->ldt_len * sizeof(union descriptor));
				free(new_ldt, M_SUBPROC);
				mtx_lock_spin(&dt_lock);
				return (0);
			}

			/*
			 * We have to substitute the current LDT entry for
			 * curproc with the new one since its size grew.
			 */
			old_ldt_base = pldt->ldt_base;
			old_ldt_len = pldt->ldt_len;
			pldt->ldt_sd = new_ldt->ldt_sd;
			pldt->ldt_base = new_ldt->ldt_base;
			pldt->ldt_len = new_ldt->ldt_len;
		} else
			mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
		/*
		 * Signal other cpus to reload ldt.  We need to unlock dt_lock
		 * here because other CPUs will contend for it since their
		 * curthreads won't hold the lock and will block when trying
		 * to acquire it.
		 */
		mtx_unlock_spin(&dt_lock);
		smp_rendezvous(NULL, set_user_ldt_rv, NULL,
		    td->td_proc->p_vmspace);
#else
		set_user_ldt_locked(&td->td_proc->p_md);
		mtx_unlock_spin(&dt_lock);
#endif
		if (old_ldt_base != NULL_LDT_BASE) {
			kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
			    old_ldt_len * sizeof(union descriptor));
			free(new_ldt, M_SUBPROC);
		}
		mtx_lock_spin(&dt_lock);
	}
	return (0);
}