/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/tss.h>
#include <machine/vmparam.h>

#include <security/audit/audit.h>

static void user_ldt_deref(struct proc_ldt *pldt);
static void user_ldt_derefl(struct proc_ldt *pldt);

#define	MAX_LD		8192

int max_ldt_segment = 512;
SYSCTL_INT(_machdep, OID_AUTO, max_ldt_segment, CTLFLAG_RDTUN,
    &max_ldt_segment, 0,
    "Maximum number of allowed LDT segments in the single address space");
static void
max_ldt_segment_init(void *arg __unused)
{

	if (max_ldt_segment <= 0)
		max_ldt_segment = 1;
	if (max_ldt_segment > MAX_LD)
		max_ldt_segment = MAX_LD;
}
SYSINIT(maxldt, SI_SUB_VM_CONF, SI_ORDER_ANY, max_ldt_segment_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif
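
/*
 * Handle the I386_GET_LDT and I386_SET_LDT operations.  The argument
 * structure is copied in when it arrives from userspace; kernel callers
 * pass a kernel pointer directly.
 */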
int
sysarch_ldt(struct thread *td, struct sysarch_args *uap, int uap_space)
{
	struct i386_ldt_args *largs, la;
	struct user_segment_descriptor *lp;
	int error = 0;

	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	if (uap_space == UIO_USERSPACE) {
		error = copyin(uap->parms, &la, sizeof(struct i386_ldt_args));
		if (error != 0)
			return (error);
		largs = &la;
	} else
		largs = (struct i386_ldt_args *)uap->parms;

	switch (uap->op) {
	case I386_GET_LDT:
		error = amd64_get_ldt(td, largs);
		break;
	case I386_SET_LDT:
		if (largs->descs != NULL && largs->num > max_ldt_segment)
			return (EINVAL);
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
		if (largs->descs != NULL) {
			lp = malloc(largs->num * sizeof(struct
			    user_segment_descriptor), M_TEMP, M_WAITOK);
			error = copyin(largs->descs, lp, largs->num *
			    sizeof(struct user_segment_descriptor));
			if (error == 0)
				error = amd64_set_ldt(td, largs, lp);
			free(lp, M_TEMP);
		} else {
			error = amd64_set_ldt(td, largs, NULL);
		}
		break;
	}
	return (error);
}
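
/*
 * Rewrite the 32-bit %gs descriptor in this CPU's GDT so that a 32-bit
 * thread picks up the new segment base on the next descriptor reload.
 */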
void
update_gdt_gsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(gs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}
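
/* As above, but for the 32-bit %fs descriptor. */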
void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}
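
/*
 * The sysarch() system call: the entry point for the machine-dependent
 * operations enumerated in <machine/sysarch.h>.
 */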
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error = 0;
	struct pcb *pcb = curthread->td_pcb;
	uint32_t i386base;
	uint64_t a64base;
	struct i386_ioperm_args iargs;
	struct i386_get_xfpustate i386xfpu;
	struct amd64_get_xfpustate a64xfpu;

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
		case AMD64_GET_FSBASE:
		case AMD64_SET_FSBASE:
		case AMD64_GET_GSBASE:
		case AMD64_SET_GSBASE:
		case AMD64_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	if (uap->op == I386_GET_LDT || uap->op == I386_SET_LDT)
		return (sysarch_ldt(td, uap, UIO_USERSPACE));
	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &i386xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
		a64xfpu.len = i386xfpu.len;
		break;
	case AMD64_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &a64xfpu,
		    sizeof(struct amd64_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_IOPERM:
		error = amd64_get_ioperm(td, &iargs);
		if (error == 0)
			error = copyout(&iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = amd64_set_ioperm(td, &iargs);
		break;
	case I386_GET_FSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_fsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (error == 0) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_fsbase = i386base;
			td->td_frame->tf_fs = _ufssel;
			update_gdt_fsbase(td, i386base);
		}
		break;
	case I386_GET_GSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_gsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (error == 0) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_gsbase = i386base;
			td->td_frame->tf_gs = _ugssel;
			update_gdt_gsbase(td, i386base);
		}
		break;
	case AMD64_GET_FSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_fsbase, uap->parms,
		    sizeof(pcb->pcb_fsbase));
		break;

	case AMD64_SET_FSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (error == 0) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_fsbase = a64base;
				td->td_frame->tf_fs = _ufssel;
			} else
				error = EINVAL;
		}
		break;

	case AMD64_GET_GSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_gsbase, uap->parms,
		    sizeof(pcb->pcb_gsbase));
		break;

	case AMD64_SET_GSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (error == 0) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_gsbase = a64base;
				td->td_frame->tf_gs = _ugssel;
			} else
				error = EINVAL;
		}
		break;

	case I386_GET_XFPUSTATE:
	case AMD64_GET_XFPUSTATE:
		if (a64xfpu.len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		fpugetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    a64xfpu.addr, a64xfpu.len);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
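
/*
 * Grant or revoke user access to a range of I/O ports.  The first call
 * gives the process a private TSS with an I/O permission bitmap and
 * loads it on the current CPU.
 */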
int
amd64_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	struct amd64tss *tssp;
	struct system_segment_descriptor *tss_sd;
	struct pcb *pcb;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */
	pcb = td->td_pcb;
	if (pcb->pcb_tssp == NULL) {
		tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
		    ctob(IOPAGES + 1), M_WAITOK);
		pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
		    ctob(IOPAGES + 1), false);
		iomap = (char *)&tssp[1];
		memset(iomap, 0xff, IOPERM_BITMAP_SIZE);
		critical_enter();
		/* Takes care of tss_rsp0. */
		memcpy(tssp, &common_tss[PCPU_GET(cpuid)],
		    sizeof(struct amd64tss));
		tssp->tss_iobase = sizeof(*tssp);
		pcb->pcb_tssp = tssp;
		tss_sd = PCPU_GET(tss);
		tss_sd->sd_lobase = (u_long)tssp & 0xffffff;
		tss_sd->sd_hibase = ((u_long)tssp >> 24) & 0xfffffffffful;
		tss_sd->sd_type = SDT_SYSTSS;
		ltr(GSEL(GPROC0_SEL, SEL_KPL));
		PCPU_SET(tssp, tssp);
		critical_exit();
	} else
		iomap = (char *)&pcb->pcb_tssp[1];
	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}
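
/*
 * Report the access state of the I/O ports starting at uap->start:
 * uap->enable reflects the first port and uap->length counts the run
 * of ports sharing that state.
 */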
int
amd64_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);
	if (td->td_pcb->pcb_tssp == NULL) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)&td->td_pcb->pcb_tssp[1];

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.
 */
static void
set_user_ldt(struct mdproc *mdp)
{

	*PCPU_GET(ldt) = mdp->md_ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
}
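
/*
 * smp_rendezvous() callback: load the new LDT on every CPU that is
 * currently running a thread from the target address space.
 */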
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
	struct thread *td;

	td = curthread;
	if (vmsp != td->td_proc->p_vmspace)
		return;

	set_user_ldt(&td->td_proc->p_md);
}
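
/*
 * Allocate an LDT for the process, or return the existing one unless a
 * fresh table is forced.  Entered with dt_lock held; the lock is dropped
 * around the allocations and re-taken before the new table is installed.
 */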
struct proc_ldt *
user_ldt_alloc(struct proc *p, int force)
{
	struct proc_ldt *pldt, *new_ldt;
	struct mdproc *mdp;
	struct soft_segment_descriptor sldt;
	vm_offset_t sva;
	vm_size_t sz;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &p->p_md;
	if (!force && mdp->md_ldt != NULL)
		return (mdp->md_ldt);
	mtx_unlock(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
	sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
	sva = kmem_malloc(kernel_arena, sz, M_WAITOK | M_ZERO);
	new_ldt->ldt_base = (caddr_t)sva;
	pmap_pti_add_kva(sva, sva + sz, false);
	new_ldt->ldt_refcnt = 1;
	sldt.ssd_base = sva;
	sldt.ssd_limit = sz - 1;
	sldt.ssd_type = SDT_SYSLDT;
	sldt.ssd_dpl = SEL_KPL;
	sldt.ssd_p = 1;
	sldt.ssd_long = 0;
	sldt.ssd_def32 = 0;
	sldt.ssd_gran = 0;
	mtx_lock(&dt_lock);
	pldt = mdp->md_ldt;
	if (pldt != NULL && !force) {
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(kernel_arena, sva, sz);
		free(new_ldt, M_SUBPROC);
		return (pldt);
	}

	if (pldt != NULL) {
		bcopy(pldt->ldt_base, new_ldt->ldt_base, max_ldt_segment *
		    sizeof(struct user_segment_descriptor));
		user_ldt_derefl(pldt);
	}
	critical_enter();
	ssdtosyssd(&sldt, &p->p_md.md_ldt_sd);
	atomic_thread_fence_rel();
	mdp->md_ldt = new_ldt;
	critical_exit();
	smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv, NULL,
	    p->p_vmspace);

	return (mdp->md_ldt);
}
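
/*
 * Detach the LDT from the process on exit or exec and drop its
 * reference.  Entered with dt_lock held; the lock is released before
 * returning.
 */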
void
user_ldt_free(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct mdproc *mdp = &p->p_md;
	struct proc_ldt *pldt;

	mtx_assert(&dt_lock, MA_OWNED);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock(&dt_lock);
		return;
	}

	critical_enter();
	mdp->md_ldt = NULL;
	atomic_thread_fence_rel();
	bzero(&mdp->md_ldt_sd, sizeof(mdp->md_ldt_sd));
	if (td == curthread)
		lldt(GSEL(GNULL_SEL, SEL_KPL));
	critical_exit();
	user_ldt_deref(pldt);
}
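
/*
 * Drop one reference to the LDT; the final reference frees the
 * descriptor table.  The caller holds dt_lock.
 */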
static void
user_ldt_derefl(struct proc_ldt *pldt)
{
	vm_offset_t sva;
	vm_size_t sz;

	if (--pldt->ldt_refcnt == 0) {
		sva = (vm_offset_t)pldt->ldt_base;
		sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(kernel_arena, sva, sz);
		free(pldt, M_SUBPROC);
	}
}
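
/* Locked wrapper around user_ldt_derefl() that also drops dt_lock. */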
static void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	user_ldt_derefl(pldt);
	mtx_unlock(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
amd64_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	struct user_segment_descriptor *lp;
	uint64_t *data;
	u_int i, num;
	int error;

#ifdef	DEBUG
	printf("amd64_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	pldt = td->td_proc->p_md.md_ldt;
	if (pldt == NULL || uap->start >= max_ldt_segment || uap->num == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	num = min(uap->num, max_ldt_segment - uap->start);
	lp = &((struct user_segment_descriptor *)(pldt->ldt_base))[uap->start];
	data = malloc(num * sizeof(struct user_segment_descriptor), M_TEMP,
	    M_WAITOK);
	mtx_lock(&dt_lock);
	for (i = 0; i < num; i++)
		data[i] = ((volatile uint64_t *)lp)[i];
	mtx_unlock(&dt_lock);
	error = copyout(data, uap->descs, num *
	    sizeof(struct user_segment_descriptor));
	free(data, M_TEMP);
	if (error == 0)
		td->td_retval[0] = num;
	return (error);
}
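
/*
 * Install user-supplied descriptors into the process LDT, allocating
 * the LDT on first use.  A NULL descs pointer requests that the given
 * range of slots be cleared instead.
 */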
int
amd64_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	struct user_segment_descriptor *dp;
	struct proc *p;
	u_int largest_ld, i;
	int error;

#ifdef	DEBUG
	printf("amd64_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	mdp = &td->td_proc->p_md;
	error = 0;

	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	p = td->td_proc;
	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0)
			uap->num = max_ldt_segment;
		if (uap->num == 0)
			return (EINVAL);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= max_ldt_segment)
			return (0);
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			largest_ld = max_ldt_segment;
		if (largest_ld < uap->start)
			return (EINVAL);
		mtx_lock(&dt_lock);
		for (i = uap->start; i < largest_ld; i++)
			((volatile uint64_t *)(pldt->ldt_base))[i] = 0;
		mtx_unlock(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= max_ldt_segment ||
		    largest_ld > max_ldt_segment ||
		    largest_ld < uap->start)
			return (EINVAL);
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd_p = 0;
			break;
		case SDT_SYS286TSS:
		case SDT_SYSLDT:
		case SDT_SYS286BSY:
		case SDT_SYS286CGT:
		case SDT_SYSTASKGT:
		case SDT_SYS286IGT:
		case SDT_SYS286TGT:
		case SDT_SYSNULL2:
		case SDT_SYSTSS:
		case SDT_SYSNULL3:
		case SDT_SYSBSY:
		case SDT_SYSCGT:
		case SDT_SYSNULL4:
		case SDT_SYSIGT:
		case SDT_SYSTGT:
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:   /* memory execute only conforming */
		case SDT_MEMEAC:  /* memory execute only accessed conforming */
		case SDT_MEMERC:  /* memory execute read conforming */
		case SDT_MEMERAC: /* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:   /* memory read only */
		case SDT_MEMROA:  /* memory read only accessed */
		case SDT_MEMRW:   /* memory read write */
		case SDT_MEMRWA:  /* memory read write accessed */
		case SDT_MEMROD:  /* memory read only expand dwn limit */
		case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:  /* memory read write expand dwn limit */
		case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
		case SDT_MEME:    /* memory execute only */
		case SDT_MEMEA:   /* memory execute only accessed */
		case SDT_MEMER:   /* memory execute read */
		case SDT_MEMERA:  /* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd_p != 0) && (dp->sd_dpl != SEL_UPL))
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock(&dt_lock);
		pldt = user_ldt_alloc(p, 0);
		if (pldt == NULL) {
			mtx_unlock(&dt_lock);
			return (ENOMEM);
		}

		/*
		 * start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		i = 16;
		dp = &((struct user_segment_descriptor *)(pldt->ldt_base))[i];
		for (; i < max_ldt_segment; ++i, ++dp) {
			if (dp->sd_type == SDT_SYSNULL)
				break;
		}
		if (i >= max_ldt_segment) {
			mtx_unlock(&dt_lock);
			return (ENOSPC);
		}
		uap->start = i;
		error = amd64_set_ldt_data(td, i, 1, descs);
		mtx_unlock(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			return (EINVAL);
		mtx_lock(&dt_lock);
		if (user_ldt_alloc(p, 0) != NULL) {
			error = amd64_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}
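
/*
 * Copy descriptors into the LDT using 64-bit stores so that each entry
 * is updated atomically with respect to CPUs walking the table.
 */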
int
amd64_set_ldt_data(struct thread *td, int start, int num,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	volatile uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (volatile uint64_t *)(pldt->ldt_base);
	src = (volatile uint64_t *)descs;
	for (i = 0; i < num; i++)
		dst[start + i] = src[i];
	return (0);
}