/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/signalvar.h>

#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>
#include <x86/ifunc.h>
/*
 * Floating point support.
 */
#if defined(__GNUCLIKE_ASM) && !defined(lint)

#define	fldcw(cw)	__asm __volatile("fldcw %0" : : "m" (cw))
#define	fnclex()	__asm __volatile("fnclex")
#define	fninit()	__asm __volatile("fninit")
#define	fnstcw(addr)	__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)	__asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define	fxrstor(addr)	__asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)	__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	ldmxcsr(csr)	__asm __volatile("ldmxcsr %0" : : "m" (csr))
#define	stmxcsr(addr)	__asm __volatile("stmxcsr %0" : : "m" (*(addr)))
static __inline void
xrstor(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static __inline void
xsave(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}

static __inline void
xsaveopt(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}
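/*
 * Illustrative note: the XSAVE family of instructions takes its
 * requested-feature bitmap in %edx:%eax, which is why the wrappers
 * above split the 64-bit mask.  For example (bit values per the
 * architecture manuals; x87 is bit 0, SSE is bit 1):
 *
 *	xsave(area, XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE);
 *
 * loads %eax with 0x3 and %edx with 0x0 before the instruction runs.
 */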
#else	/* !(__GNUCLIKE_ASM && !lint) */

void	fldcw(u_short cw);
void	fnclex(void);
void	fninit(void);
void	fnstcw(caddr_t addr);
void	fnstsw(caddr_t addr);
void	fxsave(caddr_t addr);
void	fxrstor(caddr_t addr);
void	ldmxcsr(u_int csr);
void	stmxcsr(u_int *csr);
void	xrstor(char *addr, uint64_t mask);
void	xsave(char *addr, uint64_t mask);
void	xsaveopt(char *addr, uint64_t mask);

#endif /* __GNUCLIKE_ASM && !lint */
#define	start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	stop_emulating()	clts()
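/*
 * With CR0.TS set, the next x87/SSE/AVX instruction raises #NM
 * (device-not-available), which fpudna() below catches to load the
 * proper context lazily; clts clears TS so FPU instructions execute
 * normally again.
 */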
CTASSERT(sizeof(struct savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);
/*
 * This requirement is to make it easier for asm code to calculate
 * the offset of the FPU save area from the PCB address.  The FPU
 * save area must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding
 * area of struct savefpu.
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savefpu, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savefpu));
static void fpu_clean_state(void);

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, 1, "Floating point instructions executed in hardware");
int lazy_fpu_switch = 0;
SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &lazy_fpu_switch, 0,
    "Lazily load FPU context after context switch");
int use_xsave;			/* non-static for cpu_switch.S */
uint64_t xsave_mask;		/* the same */
static	uma_zone_t fpu_save_area_zone;
static	struct savefpu *fpu_initialstate;

struct xsave_area_elm_descr {
	u_int	offset;
	u_int	size;
} *xsave_area_desc;
static void
fpusave_xsaveopt(void *addr)
{

	xsaveopt((char *)addr, xsave_mask);
}

static void
fpusave_xsave(void *addr)
{

	xsave((char *)addr, xsave_mask);
}

static void
fpurestore_xrstor(void *addr)
{

	xrstor((char *)addr, xsave_mask);
}

static void
fpusave_fxsave(void *addr)
{

	fxsave((char *)addr);
}

static void
fpurestore_fxrstor(void *addr)
{

	fxrstor((char *)addr);
}
static void
init_xsave(void)
{

	if (use_xsave)
		return;
	if ((cpu_feature2 & CPUID2_XSAVE) == 0)
		return;
	use_xsave = 1;
	TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
}

DEFINE_IFUNC(, void, fpusave, (void *), static)
{

	init_xsave();
	if (use_xsave)
		return ((cpu_stdext_feature & CPUID_EXTSTATE_XSAVEOPT) != 0 ?
		    fpusave_xsaveopt : fpusave_xsave);
	return (fpusave_fxsave);
}
DEFINE_IFUNC(, void, fpurestore, (void *), static)
{

	init_xsave();
	return (use_xsave ? fpurestore_xrstor : fpurestore_fxrstor);
}
void
fpususpend(void *addr)
{
	u_long cr0;

	cr0 = rcr0();
	stop_emulating();
	fpusave(addr);
	load_cr0(cr0);
}

void
fpuresume(void *addr)
{
	u_long cr0;

	cr0 = rcr0();
	stop_emulating();
	fninit();
	if (use_xsave)
		load_xcr(XCR0, xsave_mask);
	fpurestore(addr);
	load_cr0(cr0);
}
/*
 * Enable XSAVE if supported and allowed by user.
 * Calculate the xsave_mask.
 */
static void
fpuinit_bsp1(void)
{
	u_int cp[4];
	uint64_t xsave_mask_user;
	bool old_wp;

	TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
	if (!use_xsave)
		return;
	cpuid_count(0xd, 0x0, cp);
	xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	if ((cp[0] & xsave_mask) != xsave_mask)
		panic("CPU0 does not support X87 or SSE: %x", cp[0]);
	xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
	xsave_mask_user = xsave_mask;
	TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
	xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	xsave_mask &= xsave_mask_user;
	if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
		xsave_mask &= ~XFEATURE_AVX512;
	if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
		xsave_mask &= ~XFEATURE_MPX;
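
	/*
	 * Note on the two checks above: AVX512 and MPX each span
	 * several XSAVE state components (e.g. the AVX512 opmask,
	 * ZMM_Hi256 and Hi16_ZMM bits) that are only useful together,
	 * so the mask keeps each group all-or-nothing: if the user
	 * tunable left only part of a group enabled, the whole group
	 * is dropped.
	 */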
	cpuid_count(0xd, 0x1, cp);
	if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
		/*
		 * Patch the XSAVE instruction in the cpu_switch code
		 * to XSAVEOPT.  We assume that XSAVE encoding used
		 * REX byte, and set the bit 4 of the r/m byte.
		 *
		 * It seems that some BIOSes give control to the OS
		 * with CR0.WP already set, making the kernel text
		 * read-only before cpu_startup().
		 */
		old_wp = disable_wp();
		ctx_switch_xsave[3] |= 0x10;
		restore_wp(old_wp);
	}
}
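/*
 * Sketch of the patch above, assuming cpu_switch.S encodes, e.g.,
 * "xsave (%r8)" as 41 0f ae 20 (REX, 0f ae, ModRM): XSAVE is
 * 0f ae /4 and XSAVEOPT is 0f ae /6, so OR-ing 0x10 into the ModRM
 * byte (ctx_switch_xsave[3]) turns /4 into /6, i.e. 41 0f ae 30,
 * which is "xsaveopt (%r8)".
 */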
/*
 * Calculate the fpu save area size.
 */
static void
fpuinit_bsp2(void)
{
	u_int cp[4];

	if (use_xsave) {
		cpuid_count(0xd, 0x0, cp);
		cpu_max_ext_state_size = cp[1];

		/*
		 * Reload the cpu_feature2, since we enabled OSXSAVE.
		 */
		do_cpuid(1, cp);
		cpu_feature2 = cp[2];
	} else
		cpu_max_ext_state_size = sizeof(struct savefpu);
}
/*
 * Initialize the floating point unit.
 */
void
fpuinit(void)
{
	register_t saveintr;
	u_int mxcsr;
	u_short control;

	if (IS_BSP())
		fpuinit_bsp1();

	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		load_xcr(XCR0, xsave_mask);
	}

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (IS_BSP())
		fpuinit_bsp2();

	/*
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	stop_emulating();
	fninit();
	control = __INITIAL_FPUCW__;
	fldcw(control);
	mxcsr = __INITIAL_MXCSR__;
	ldmxcsr(mxcsr);
	start_emulating();
	intr_restore(saveintr);
}
/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 * process.
 */
static void
fpuinitstate(void *arg __unused)
{
	uint64_t *xstate_bv;
	register_t saveintr;
	int cp[4], i, max_ext_n;

	fpu_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	saveintr = intr_disable();
	stop_emulating();

	fpusave_fxsave(fpu_initialstate);
	if (fpu_initialstate->sv_env.en_mxcsr_mask)
		cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
	else
		cpu_mxcsr_mask = 0xFFBF;
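
	/*
	 * 0xFFBF is the architectural default documented for the case
	 * where the MXCSR_MASK field of the fxsave image reads as
	 * zero: it is the all-ones mask with bit 6 (DAZ, not supported
	 * on such CPUs) cleared.
	 */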
	/*
	 * The fninit instruction does not modify XMM registers or x87
	 * registers (MM/ST).  The fpusave call dumped the garbage
	 * contained in the registers after reset to the initial state
	 * saved.  Clear XMM and x87 registers file image to make the
	 * startup program state and signal handler XMM/x87 register
	 * content predictable.
	 */
	bzero(fpu_initialstate->sv_fp, sizeof(fpu_initialstate->sv_fp));
	bzero(fpu_initialstate->sv_xmm, sizeof(fpu_initialstate->sv_xmm));

	/*
	 * Create a table describing the layout of the CPU Extended
	 * Save Area.
	 */
	if (use_xsave) {
		xstate_bv = (uint64_t *)((char *)(fpu_initialstate + 1) +
		    offsetof(struct xstate_hdr, xstate_bv));
		*xstate_bv = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;

		max_ext_n = flsl(xsave_mask);
		xsave_area_desc = malloc(max_ext_n * sizeof(struct
		    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
		/* x87 state */
		xsave_area_desc[0].offset = 0;
		xsave_area_desc[0].size = 160;
		/* XMM */
		xsave_area_desc[1].offset = 160;
		xsave_area_desc[1].size = 288 - 160;

		for (i = 2; i < max_ext_n; i++) {
			cpuid_count(0xd, i, cp);
			xsave_area_desc[i].offset = cp[1];
			xsave_area_desc[i].size = cp[0];
		}
	}

	fpu_save_area_zone = uma_zcreate("FPU_save_area",
	    cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
	    XSAVE_AREA_ALIGN - 1, 0);
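
	/*
	 * Note on the uma_zcreate() call above: XSAVE demands a
	 * 64-byte aligned save area, and UMA expresses alignment as a
	 * mask, hence XSAVE_AREA_ALIGN - 1 (i.e. 63) rather than 64.
	 */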
	start_emulating();
	intr_restore(saveintr);
}
/* EFIRT needs this to be initialized before we can enter our EFI environment */
SYSINIT(fpuinitstate, SI_SUB_DRIVERS, SI_ORDER_FIRST, fpuinitstate, NULL);
/*
 * Free coprocessor (if we have it).
 */
void
fpuexit(struct thread *td)
{

	critical_enter();
	if (curthread == PCPU_GET(fpcurthread)) {
		stop_emulating();
		fpusave(curpcb->pcb_save);
		start_emulating();
		PCPU_SET(fpcurthread, NULL);
	}
	critical_exit();
}

int
fpuformat(void)
{

	return (_MC_FPFMT_XMM);
}
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_... macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *  1  Invalid operation (FP_X_INV)
 *	1a   Stack underflow
 *	1b   Stack overflow
 *	1c   Operand of unsupported format
 *	1d   SNaN operand.
 *  2  QNaN operand (not an exception, irrelevant here)
 *  3  Any other invalid-operation not mentioned above or zero divide
 *	(FP_X_INV, FP_X_DZ)
 *  4  Denormal operand (FP_X_DNML)
 *  5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *  6  Inexact result (FP_X_IMP)
 */
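/*
 * Worked example (hypothetical values): a process unmasks only the
 * zero-divide exception, so CW = 0x037b (the default 0x037f with bit
 * 2 cleared).  A divide by zero then sets SW bits DZ|IMP = 0x24.
 * The lookup in fputrap_x87() computes
 * 0x24 & ((~0x037b & 0x3f) | 0x40) = 0x04, and fpetable[0x04] is
 * FPE_FLTDIV: exactly the one exception the process unmasked.
 */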
static char fpetable[128] = {
	0,		/* 0 - none */
	FPE_FLTINV,	/* 1 - INV */
	FPE_FLTUND,	/* 2 - DNML */
	FPE_FLTINV,	/* 3 - INV | DNML */
	FPE_FLTDIV,	/* 4 - DZ */
	FPE_FLTINV,	/* 5 - INV | DZ */
	FPE_FLTDIV,	/* 6 - DNML | DZ */
	FPE_FLTINV,	/* 7 - INV | DNML | DZ */
	FPE_FLTOVF,	/* 8 - OFL */
	FPE_FLTINV,	/* 9 - INV | OFL */
	FPE_FLTUND,	/* A - DNML | OFL */
	FPE_FLTINV,	/* B - INV | DNML | OFL */
	FPE_FLTDIV,	/* C - DZ | OFL */
	FPE_FLTINV,	/* D - INV | DZ | OFL */
	FPE_FLTDIV,	/* E - DNML | DZ | OFL */
	FPE_FLTINV,	/* F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};
/*
 * Read the FP status and control words, then generate si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX there.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  The
 * usermode code which understands the FPU hardware enough to enable
 * the exceptions, can also handle clearing the exception state in the
 * handler.  The only consequence of not clearing the exception is the
 * rethrow of the SIGFPE on return from the signal handler and
 * reexecution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */
int
fputrap_x87(void)
{
	struct savefpu *pcb_save;
	u_short control, status;

	critical_enter();

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory.  Fetch the relevant parts of the state from
	 * wherever they are.
	 */
	if (PCPU_GET(fpcurthread) != curthread) {
		pcb_save = curpcb->pcb_save;
		control = pcb_save->sv_env.en_cw;
		status = pcb_save->sv_env.en_sw;
	} else {
		fnstcw(&control);
		fnstsw(&status);
	}
	critical_exit();
	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}

int
fputrap_sse(void)
{
	u_int mxcsr;

	critical_enter();
	if (PCPU_GET(fpcurthread) != curthread)
		mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;
	else
		stmxcsr(&mxcsr);
	critical_exit();
	return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
}
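/*
 * Illustrative example (made-up values): MXCSR keeps exception flags
 * in bits 0-5 and the corresponding mask bits in bits 7-12, so
 * (~mxcsr >> 7) lines the complement of the masks up with the flags.
 * With only zero-divide unmasked and its flag raised,
 * mxcsr = 0x1d84, and 0x1d84 & (~0x1d84 >> 7) & 0x3f = 0x04, which
 * selects FPE_FLTDIV from fpetable.
 */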
static void
restore_fpu_curthread(struct thread *td)
{
	struct pcb *pcb;

	/*
	 * Record new context early in case frstor causes a trap.
	 */
	PCPU_SET(fpcurthread, td);

	stop_emulating();
	fpu_clean_state();
	pcb = td->td_pcb;

	if ((pcb->pcb_flags & PCB_FPUINITDONE) == 0) {
		/*
		 * This is the first time this thread has used the FPU or
		 * the PCB doesn't contain a clean FPU state.  Explicitly
		 * load an initial state.
		 *
		 * We prefer to restore the state from the actual save
		 * area in PCB instead of directly loading from
		 * fpu_initialstate, to ignite the XSAVEOPT
		 * tracking mechanism.
		 */
		bcopy(fpu_initialstate, pcb->pcb_save,
		    cpu_max_ext_state_size);
		fpurestore(pcb->pcb_save);
		if (pcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
			fldcw(pcb->pcb_initial_fpucw);
		if (PCB_USER_FPU(pcb))
			set_pcb_flags(pcb, PCB_FPUINITDONE |
			    PCB_USERFPUINITDONE);
		else
			set_pcb_flags(pcb, PCB_FPUINITDONE);
	} else
		fpurestore(pcb->pcb_save);
}
/*
 * Device Not Available (DNA, #NM) exception handler.
 *
 * It would be better to switch FP context here (if curthread !=
 * fpcurthread) and not necessarily for every context switch, but it
 * is too hard to access foreign pcb's.
 */
void
fpudna(void)
{
	struct thread *td;

	td = curthread;
	/*
	 * This handler is entered with interrupts enabled, so context
	 * switches may occur before critical_enter() is executed.  If
	 * a context switch occurs, then when we regain control, our
	 * state will have been completely restored.  The CPU may
	 * change underneath us, but the only part of our context that
	 * lives in the CPU is CR0.TS and that will be "restored" by
	 * setting it on the new CPU.
	 */
	critical_enter();

	KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
	    ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
	if (__predict_false(PCPU_GET(fpcurthread) == td)) {
		/*
		 * Some virtual machines seem to set %cr0.TS at
		 * arbitrary moments.  Silently clear the TS bit
		 * regardless of the eager/lazy FPU context switch
		 * mode.
		 */
		stop_emulating();
	} else {
		if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
			panic(
		    "fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
			    PCPU_GET(fpcurthread),
			    PCPU_GET(fpcurthread)->td_tid, td, td->td_tid);
		}
		restore_fpu_curthread(td);
	}
	critical_exit();
}
void fpu_activate_sw(struct thread *td); /* Called from the context switch */
void
fpu_activate_sw(struct thread *td)
{

	if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
	    !PCB_USER_FPU(td->td_pcb)) {
		PCPU_SET(fpcurthread, NULL);
		start_emulating();
	} else if (PCPU_GET(fpcurthread) != td) {
		restore_fpu_curthread(td);
	}
}
void
fpudrop(void)
{
	struct thread *td;

	td = PCPU_GET(fpcurthread);
	KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
	CRITICAL_ASSERT(td);
	PCPU_SET(fpcurthread, NULL);
	clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
	start_emulating();
}
/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 * status.
 */
int
fpugetregs(struct thread *td)
{
	struct pcb *pcb;
	uint64_t *xstate_bv, bit;
	char *sa;
	int max_ext_n, i, owned;

	pcb = td->td_pcb;
	critical_enter();
	if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
		bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
		    cpu_max_ext_state_size);
		get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
		    pcb->pcb_initial_fpucw;
		fpuuserinited(td);
		critical_exit();
		return (_MC_FPOWNED_PCB);
	}
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		fpusave(get_pcb_user_save_pcb(pcb));
		owned = _MC_FPOWNED_FPU;
	} else {
		owned = _MC_FPOWNED_PCB;
	}
	if (use_xsave) {
		/*
		 * Handle partially saved state.
		 */
		sa = (char *)get_pcb_user_save_pcb(pcb);
		xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
		    offsetof(struct xstate_hdr, xstate_bv));
		max_ext_n = flsl(xsave_mask);
		for (i = 0; i < max_ext_n; i++) {
			bit = 1ULL << i;
			if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
				continue;
			bcopy((char *)fpu_initialstate +
			    xsave_area_desc[i].offset,
			    sa + xsave_area_desc[i].offset,
			    xsave_area_desc[i].size);
			*xstate_bv |= bit;
		}
	}
	critical_exit();
	return (owned);
}
void
fpuuserinited(struct thread *td)
{
	struct pcb *pcb;

	CRITICAL_ASSERT(td);
	pcb = td->td_pcb;
	if (PCB_USER_FPU(pcb))
		set_pcb_flags(pcb,
		    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
	else
		set_pcb_flags(pcb, PCB_FPUINITDONE);
}
static int
fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
{
	struct xstate_hdr *hdr, *ehdr;
	size_t len, max_len;
	uint64_t bv;

	/* XXXKIB should we clear all extended state in xstate_bv instead ? */
	if (xfpustate == NULL)
		return (0);
	if (!use_xsave)
		return (EOPNOTSUPP);

	len = xfpustate_size;
	if (len < sizeof(struct xstate_hdr))
		return (EINVAL);
	max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
	if (len > max_len)
		return (EINVAL);

	ehdr = (struct xstate_hdr *)xfpustate;
	bv = ehdr->xstate_bv;

	/*
	 * Avoid #gp.
	 */
	if (bv & ~xsave_mask)
		return (EINVAL);

	hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

	hdr->xstate_bv = bv;
	bcopy(xfpustate + sizeof(struct xstate_hdr),
	    (char *)(hdr + 1), len - sizeof(struct xstate_hdr));

	return (0);
}
/*
 * Set the state of the FPU.
 */
int
fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
    size_t xfpustate_size)
{
	struct pcb *pcb;
	int error;

	addr->sv_env.en_mxcsr &= cpu_mxcsr_mask;
	pcb = td->td_pcb;
	error = 0;
	critical_enter();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		error = fpusetxstate(td, xfpustate, xfpustate_size);
		if (error == 0) {
			bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
			fpurestore(get_pcb_user_save_td(td));
			set_pcb_flags(pcb, PCB_FPUINITDONE |
			    PCB_USERFPUINITDONE);
		}
	} else {
		error = fpusetxstate(td, xfpustate, xfpustate_size);
		if (error == 0) {
			bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
			fpuuserinited(td);
		}
	}
	critical_exit();
	return (error);
}
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
	static float dummy_variable = 0.0;
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}
/*
 * This really sucks.  We want the acpi version only, but it requires
 * the isa_if.h file in order to get the definitions.
 */
#ifdef DEV_ISA
#include <isa/isavar.h>
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id fpupnp_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
	{ 0 }
};

static int
fpupnp_probe(device_t dev)
{
	int result;

	result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);
	if (result <= 0)
		device_quiet(dev);
	return (result);
}

static int
fpupnp_attach(device_t dev)
{

	return (0);
}

static device_method_t fpupnp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fpupnp_probe),
	DEVMETHOD(device_attach,	fpupnp_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),
	{ 0, 0 }
};

static driver_t fpupnp_driver = {
	"fpupnp",
	fpupnp_methods,
	1,			/* no softc */
};

static devclass_t fpupnp_devclass;

DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, fpupnp_devclass, 0, 0);
ISA_PNP_INFO(fpupnp_ids);
#endif	/* DEV_ISA */
static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");

#define	FPU_KERN_CTX_FPUINITDONE 0x01
#define	FPU_KERN_CTX_DUMMY	 0x02	/* avoided save for the kern thread */
#define	FPU_KERN_CTX_INUSE	 0x04

struct fpu_kern_ctx {
	struct savefpu *prev;
	uint32_t flags;
	char hwstate1[];
};
struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
	struct fpu_kern_ctx *res;
	size_t sz;

	sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
	    cpu_max_ext_state_size;
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);
	return (res);
}
void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

	KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
	/* XXXKIB clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}
static struct savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
{
	vm_offset_t p;

	p = (vm_offset_t)&ctx->hwstate1;
	p = roundup2(p, XSAVE_AREA_ALIGN);
	return ((struct savefpu *)p);
}
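/*
 * Note on fpu_kern_ctx_savefpu() above: hwstate1[] is merely the
 * start of the over-allocated tail (fpu_kern_alloc_ctx() adds
 * XSAVE_AREA_ALIGN bytes of slack), so rounding the pointer up to
 * the next 64-byte boundary always stays inside the allocation.
 */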
void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		critical_enter();
		stop_emulating();
		if (curthread == PCPU_GET(fpcurthread)) {
			fpusave(curpcb->pcb_save);
			PCPU_SET(fpcurthread, NULL);
		} else {
			KASSERT(PCPU_GET(fpcurthread) == NULL,
			    ("invalid fpcurthread"));
		}

		/*
		 * This breaks XSAVEOPT tracker, but
		 * PCB_FPUNOSAVE state is supposed to never need to
		 * save FPU context at all.
		 */
		fpurestore(fpu_initialstate);
		set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |
		    PCB_FPUINITDONE);
		return;
	}
	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
		return;
	}
	critical_enter();
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
	ctx->flags = FPU_KERN_CTX_INUSE;
	if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
	fpuexit(td);
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
	set_pcb_flags(pcb, PCB_KERNFPU);
	clear_pcb_flags(pcb, PCB_FPUINITDONE);
	critical_exit();
}
int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
		CRITICAL_ASSERT(td);

		clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE);
		start_emulating();
	} else {
		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("leaving not inuse ctx"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
			return (0);
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
		    ("dummy ctx"));
		critical_enter();
		if (curthread == PCPU_GET(fpcurthread))
			fpudrop();
		pcb->pcb_save = ctx->prev;
	}

	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
		if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
			set_pcb_flags(pcb, PCB_FPUINITDONE);
			clear_pcb_flags(pcb, PCB_KERNFPU);
		} else
			clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);
	} else {
		if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
			set_pcb_flags(pcb, PCB_FPUINITDONE);
		else
			clear_pcb_flags(pcb, PCB_FPUINITDONE);
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
	}
	critical_exit();
	return (0);
}
int
fpu_kern_thread(u_int flags)
{

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
	    ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

	set_pcb_flags(curpcb, PCB_KERNFPU);
	return (0);
}
int
is_fpu_kern_thread(u_int flags)
{

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	return ((curpcb->pcb_flags & PCB_KERNFPU) != 0);
}
/*
 * FPU save area alloc/free/init utility routines
 */
struct savefpu *
fpu_save_area_alloc(void)
{

	return (uma_zalloc(fpu_save_area_zone, 0));
}

void
fpu_save_area_free(struct savefpu *fsa)
{

	uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(struct savefpu *fsa)
{

	bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);
}
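
/*
 * Illustrative usage sketch (not compiled; my_simd_op() is a
 * hypothetical stand-in for real SIMD work): a typical consumer
 * brackets kernel FPU use with the fpu_kern API like this.
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	my_simd_op(dst, src, len);
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */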