/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/syslog.h>
#include <sys/signalvar.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>
#include <machine/intr_machdep.h>

#ifdef XEN
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#endif

#ifdef DEV_ISA
#include <isa/isavar.h>
#endif
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */
#if defined(__GNUCLIKE_ASM) && !defined(lint)

#define	fldcw(cw)		__asm __volatile("fldcw %0" : : "m" (cw))
#define	fnclex()		__asm __volatile("fnclex")
#define	fninit()		__asm __volatile("fninit")
#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define	fp_divide_by_0()	__asm __volatile( \
				    "fldz; fld1; fdiv %st,%st(1); fnop")
#define	frstor(addr)		__asm __volatile("frstor %0" : : "m" (*(addr)))
#ifdef CPU_ENABLE_SSE
#define	fxrstor(addr)		__asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	ldmxcsr(csr)		__asm __volatile("ldmxcsr %0" : : "m" (csr))
#define	stmxcsr(addr)		__asm __volatile("stmxcsr %0" : : "m" (*(addr)))

static void
xrstor(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static void
xsave(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}

static void
xsaveopt(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}
#endif /* CPU_ENABLE_SSE */

#else /* !(__GNUCLIKE_ASM && !lint) */

void	fldcw(u_short cw);
void	fnclex(void);
void	fninit(void);
void	fnsave(caddr_t addr);
void	fnstcw(caddr_t addr);
void	fnstsw(caddr_t addr);
void	fp_divide_by_0(void);
void	frstor(caddr_t addr);
#ifdef CPU_ENABLE_SSE
void	fxsave(caddr_t addr);
void	fxrstor(caddr_t addr);
void	ldmxcsr(u_int csr);
void	stmxcsr(u_int *csr);
void	xrstor(char *addr, uint64_t mask);
void	xsave(char *addr, uint64_t mask);
void	xsaveopt(char *addr, uint64_t mask);
#endif /* CPU_ENABLE_SSE */

#endif /* __GNUCLIKE_ASM && !lint */
#ifdef XEN
#define	start_emulating()	(HYPERVISOR_fpu_taskswitch(1))
#define	stop_emulating()	(HYPERVISOR_fpu_taskswitch(0))
#else
#define	start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	stop_emulating()	clts()
#endif
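
/*
 * Setting CR0.TS makes the next FPU instruction raise #NM (the DNA
 * trap), which npxdna() catches to lazily restore the owning thread's
 * context; clts clears the bit once the state has been loaded.  The
 * Xen variant asks the hypervisor to toggle the same bit.
 */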
#ifdef CPU_ENABLE_SSE
#define GET_FPU_CW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) do { \
	if (cpu_fxsr) \
		(savefpu)->sv_xmm.sv_env.en_cw = (value); \
	else \
		(savefpu)->sv_87.sv_env.en_cw = (value); \
} while (0)
#else /* CPU_ENABLE_SSE */
#define GET_FPU_CW(thread) \
	(thread->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(thread->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) \
	(savefpu)->sv_87.sv_env.en_cw = (value)
#endif /* CPU_ENABLE_SSE */
#ifdef CPU_ENABLE_SSE
CTASSERT(sizeof(union savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);

/*
 * This requirement is to make it easier for asm code to calculate
 * the offset of the FPU save area from the pcb address.  The FPU save
 * area must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);

/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding.
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savexmm, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savexmm));
#endif /* CPU_ENABLE_SSE */
static void fpu_clean_state(void);
static void fpusave(union savefpu *);
static void fpurstor(union savefpu *);

int hw_float;
SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &hw_float, 0, "Floating point instructions executed in hardware");

int lazy_fpu_switch = 0;
SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &lazy_fpu_switch, 0,
    "Lazily load FPU context after context switch");
#ifdef CPU_ENABLE_SSE
int use_xsave;
uint64_t xsave_mask;
#endif
static uma_zone_t fpu_save_area_zone;
static union savefpu *npx_initialstate;

#ifdef CPU_ENABLE_SSE
struct xsave_area_elm_descr {
	u_int	offset;
	u_int	size;
} *xsave_area_desc;

static int use_xsaveopt;
#endif

static volatile u_int npx_traps_while_probing;

alias_for_inthand_t probetrap;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
" __XSTRING(CNAME(probetrap)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
	fnclex							\n\
	iret							\n\
");
/*
 * Determine if an FPU is present and how to use it.
 */
static int
npx_probe(void)
{
	struct gate_descriptor save_idt_npxtrap;
	u_short control, status;

	/*
	 * Modern CPUs all have an FPU that uses the INT16 interface
	 * and provide a simple way to verify that, so handle the
	 * common case right away.
	 */
	if (cpu_feature & CPUID_FPU) {
		hw_float = 1;
		return (1);
	}

	save_idt_npxtrap = idt[IDT_MF];
	setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	/*
	 * Don't trap while we're probing.
	 */
	stop_emulating();

	/*
	 * Finish resetting the coprocessor, if any.  If there is an error
	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
	 * it OK.  Bogus halts have never been observed, but we enabled
	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
	 */
	fninit();

	/*
	 * Don't use fwait here because it might hang.
	 * Don't use fnop here because it usually hangs if there is no FPU.
	 */
	DELAY(1000);		/* wait for any IRQ13 */
	if (npx_traps_while_probing != 0)
		printf("fninit caused %u bogus npx trap(s)\n",
		    npx_traps_while_probing);

	/*
	 * Check for a status of mostly zero.
	 */
	status = 0x5a5a;
	fnstsw(&status);
	if ((status & 0xb8ff) == 0) {
		/*
		 * Good, now check for a proper control word.
		 */
		control = 0x5a5a;
		fnstcw(&control);
		if ((control & 0x1f3f) == 0x033f) {
			/*
			 * We have an npx, now divide by 0 to see if exception
			 * 16 works.
			 */
			control &= ~(1 << 2);	/* enable divide by 0 trap */
			fldcw(control);
#ifdef FPU_ERROR_BROKEN
			/*
			 * FPU error signal doesn't work on some CPU
			 * accelerator boards.
			 */
			hw_float = 1;
			return (1);
#endif
			npx_traps_while_probing = 0;
			fp_divide_by_0();
			if (npx_traps_while_probing != 0) {
				/*
				 * Good, exception 16 works.
				 */
				hw_float = 1;
				goto cleanup;
			}
			printf(
	"FPU does not use exception 16 for error reporting\n");
			goto cleanup;
		}
	}

	/*
	 * Probe failed.  Floating point simply won't work.
	 * Notify user and disable FPU/MMX/SSE instruction execution.
	 */
	printf("WARNING: no FPU!\n");
	__asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : :
	    "n" (CR0_EM | CR0_MP) : "ax");

cleanup:
	idt[IDT_MF] = save_idt_npxtrap;
	return (hw_float);
}
#ifdef CPU_ENABLE_SSE
/*
 * Enable XSAVE if supported and allowed by user.
 * Calculate the xsave_mask.
 */
static void
npxinit_bsp1(void)
{
	u_int cp[4];
	uint64_t xsave_mask_user;

	TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
	if (cpu_fxsr && (cpu_feature2 & CPUID2_XSAVE) != 0) {
		use_xsave = 1;
		TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
	}
	if (!use_xsave)
		return;

	cpuid_count(0xd, 0x0, cp);
	xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	if ((cp[0] & xsave_mask) != xsave_mask)
		panic("CPU0 does not support X87 or SSE: %x", cp[0]);
	xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
	xsave_mask_user = xsave_mask;
	TUNABLE_QUAD_FETCH("hw.xsave_mask", &xsave_mask_user);
	xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	xsave_mask &= xsave_mask_user;
	if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
		xsave_mask &= ~XFEATURE_AVX512;
	if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
		xsave_mask &= ~XFEATURE_MPX;

	cpuid_count(0xd, 0x1, cp);
	if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0)
		use_xsaveopt = 1;
}
#endif
/*
 * Calculate the fpu save area size.
 */
static void
npxinit_bsp2(void)
{
#ifdef CPU_ENABLE_SSE
	u_int cp[4];

	if (use_xsave) {
		cpuid_count(0xd, 0x0, cp);
		cpu_max_ext_state_size = cp[1];

		/*
		 * Reload the cpu_feature2, since we enabled OSXSAVE.
		 */
		do_cpuid(1, cp);
		cpu_feature2 = cp[2];
	} else
#endif
		cpu_max_ext_state_size = sizeof(union savefpu);
}
/*
 * Initialize floating point unit.
 */
void
npxinit(bool bsp)
{
	static union savefpu dummy;
	register_t saveintr;
#ifdef CPU_ENABLE_SSE
	u_int mxcsr;
	u_short control;
#endif

	if (bsp) {
		if (!npx_probe())
			return;
#ifdef CPU_ENABLE_SSE
		npxinit_bsp1();
#endif
	}

#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		load_xcr(XCR0, xsave_mask);
	}
#endif

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (bsp)
		npxinit_bsp2();

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  fpusave() initializes
	 * the fpu.
	 *
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	stop_emulating();
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fninit();
	else
#endif
		fnsave(&dummy);
	control = __INITIAL_NPXCW__;
	fldcw(control);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		mxcsr = __INITIAL_MXCSR__;
		ldmxcsr(mxcsr);
	}
#endif
	start_emulating();
	intr_restore(saveintr);
}
/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 * process.
 */
static void
npxinitstate(void *arg __unused)
{
	register_t saveintr;
#ifdef CPU_ENABLE_SSE
	int cp[4], i, max_ext_n;
#endif

	if (!hw_float)
		return;

	npx_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	saveintr = intr_disable();
	stop_emulating();

	fpusave(npx_initialstate);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		if (npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask)
			cpu_mxcsr_mask =
			    npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask;
		else
			cpu_mxcsr_mask = 0xFFBF;

		/*
		 * The fninit instruction does not modify XMM
		 * registers or x87 registers (MM/ST).  The fpusave
		 * call dumped the garbage contained in the registers
		 * after reset to the initial state saved.  Clear XMM
		 * and x87 registers file image to make the startup
		 * program state and signal handler XMM/x87 register
		 * content predictable.
		 */
		bzero(npx_initialstate->sv_xmm.sv_fp,
		    sizeof(npx_initialstate->sv_xmm.sv_fp));
		bzero(npx_initialstate->sv_xmm.sv_xmm,
		    sizeof(npx_initialstate->sv_xmm.sv_xmm));
	} else
#endif
		bzero(npx_initialstate->sv_87.sv_ac,
		    sizeof(npx_initialstate->sv_87.sv_ac));

#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		/*
		 * Create a table describing the layout of the CPU Extended
		 * Save Area.
		 */
		if (xsave_mask >> 32 != 0)
			max_ext_n = fls(xsave_mask >> 32) + 32;
		else
			max_ext_n = fls(xsave_mask);
		xsave_area_desc = malloc(max_ext_n * sizeof(struct
		    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
		/* x87 state */
		xsave_area_desc[0].offset = 0;
		xsave_area_desc[0].size = 160;
		/* XMM */
		xsave_area_desc[1].offset = 160;
		xsave_area_desc[1].size = 288 - 160;

		for (i = 2; i < max_ext_n; i++) {
			cpuid_count(0xd, i, cp);
			xsave_area_desc[i].offset = cp[1];
			xsave_area_desc[i].size = cp[0];
		}
	}

	fpu_save_area_zone = uma_zcreate("FPU_save_area",
	    cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
	    XSAVE_AREA_ALIGN - 1, 0);
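	/*
	 * uma_zcreate() takes its alignment argument as a byte mask,
	 * so XSAVE_AREA_ALIGN - 1 above requests the 64-byte alignment
	 * that the XSAVE area requires.
	 */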
#endif

	start_emulating();
	intr_restore(saveintr);
}
SYSINIT(npxinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, npxinitstate, NULL);
/*
 * Free coprocessor (if we have it).
 */
void
npxexit(struct thread *td)
{

	critical_enter();
	if (curthread == PCPU_GET(fpcurthread)) {
		stop_emulating();
		fpusave(curpcb->pcb_save);
		start_emulating();
		PCPU_SET(fpcurthread, NULL);
	}
	critical_exit();
#ifdef NPX_DEBUG
	if (hw_float) {
		u_int masked_exceptions;

		masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    td->td_proc->p_pid, td->td_proc->p_comm,
			    masked_exceptions);
	}
#endif
}

int
npxformat(void)
{

	if (!hw_float)
		return (_MC_FPFMT_NODEV);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		return (_MC_FPFMT_XMM);
#endif
	return (_MC_FPFMT_387);
}
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *  1  Invalid operation (FP_X_INV)
 *  1a   Stack underflow
 *  1b   Stack overflow
 *  1c   Operand of unsupported format
 *  1d   SNaN operand.
 *  2  QNaN operand (not an exception, irrelevant here)
 *  3  Any other invalid-operation not mentioned above or zero divide
 *       (FP_X_INV, FP_X_DZ)
 *  4  Denormal operand (FP_X_DNML)
 *  5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *  6  Inexact result (FP_X_IMP)
 */
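
/*
 * Worked example (illustrative): with the default control word 0x037f
 * every exception is masked, so npxtrap_x87() computes
 * status & ((~0x037f & 0x3f) | 0x40) = status & 0x40, and only a stack
 * fault can select a table entry.  If the user unmasks divide-by-0
 * (control word bit 2 cleared) and that exception fires, the index is
 * 0x04 and the table yields FPE_FLTDIV.
 */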
static char fpetable[128] = {
	0,		/* 0 - none */
	FPE_FLTINV,	/* 1 - INV */
	FPE_FLTUND,	/* 2 - DNML */
	FPE_FLTINV,	/* 3 - INV | DNML */
	FPE_FLTDIV,	/* 4 - DZ */
	FPE_FLTINV,	/* 5 - INV | DZ */
	FPE_FLTDIV,	/* 6 - DNML | DZ */
	FPE_FLTINV,	/* 7 - INV | DNML | DZ */
	FPE_FLTOVF,	/* 8 - OFL */
	FPE_FLTINV,	/* 9 - INV | OFL */
	FPE_FLTUND,	/* A - DNML | OFL */
	FPE_FLTINV,	/* B - INV | DNML | OFL */
	FPE_FLTDIV,	/* C - DZ | OFL */
	FPE_FLTINV,	/* D - INV | DZ | OFL */
	FPE_FLTDIV,	/* E - DNML | DZ | OFL */
	FPE_FLTINV,	/* F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};
/*
 * Read the FP status and control words, then generate si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX here.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  The
 * usermode code which understands the FPU hardware enough to enable
 * the exceptions, can also handle clearing the exception state in the
 * handler.  The only consequence of not clearing the exception is the
 * rethrow of the SIGFPE on return from the signal handler and
 * reexecution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */
int
npxtrap_x87(void)
{
	u_short control, status;

	if (!hw_float) {
		printf(
	"npxtrap_x87: fpcurthread = %p, curthread = %p, hw_float = %d\n",
		    PCPU_GET(fpcurthread), curthread, hw_float);
		panic("npxtrap from nowhere");
	}
	critical_enter();

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory.  Fetch the relevant parts of the state from
	 * wherever they are.
	 */
	if (PCPU_GET(fpcurthread) != curthread) {
		control = GET_FPU_CW(curthread);
		status = GET_FPU_SW(curthread);
	} else {
		fnstcw(&control);
		fnstsw(&status);
	}
	critical_exit();
	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}
#ifdef CPU_ENABLE_SSE
int
npxtrap_sse(void)
{
	u_int mxcsr;

	if (!hw_float) {
		printf(
	"npxtrap_sse: fpcurthread = %p, curthread = %p, hw_float = %d\n",
		    PCPU_GET(fpcurthread), curthread, hw_float);
		panic("npxtrap from nowhere");
	}
	critical_enter();
	if (PCPU_GET(fpcurthread) != curthread)
		mxcsr = curthread->td_pcb->pcb_save->sv_xmm.sv_env.en_mxcsr;
	else
		stmxcsr(&mxcsr);
	critical_exit();
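	/*
	 * MXCSR keeps the exception flags in bits 0-5 and the
	 * corresponding mask bits in bits 7-12, so ~mxcsr >> 7 lines
	 * the unmasked-exception bits up with the flags before
	 * indexing fpetable.
	 */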
	return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
}
#endif
static void
restore_npx_curthread(struct thread *td, struct pcb *pcb)
{

	/*
	 * Record new context early in case frstor causes a trap.
	 */
	PCPU_SET(fpcurthread, td);

	stop_emulating();
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fpu_clean_state();
#endif

	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		/*
		 * This is the first time this thread has used the FPU or
		 * the PCB doesn't contain a clean FPU state.  Explicitly
		 * load an initial state.
		 *
		 * We prefer to restore the state from the actual save
		 * area in PCB instead of directly loading from
		 * npx_initialstate, to ignite the XSAVEOPT tracking
		 * mechanism.
		 */
		bcopy(npx_initialstate, pcb->pcb_save, cpu_max_ext_state_size);
		fpurstor(pcb->pcb_save);
		if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
			fldcw(pcb->pcb_initial_npxcw);
		pcb->pcb_flags |= PCB_NPXINITDONE;
		if (PCB_USER_FPU(pcb))
			pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	} else
		fpurstor(pcb->pcb_save);
}
/*
 * Implement device not available (DNA) exception.
 *
 * It would be better to switch FP context here (if curthread != fpcurthread)
 * and not necessarily for every context switch, but it is too hard to
 * access foreign pcb's.
 */
int
npxdna(void)
{
	struct thread *td;

	if (!hw_float)
		return (0);
	td = curthread;
	critical_enter();
	if (__predict_false(PCPU_GET(fpcurthread) == td)) {
		/*
		 * Some virtual machines seem to set %cr0.TS at
		 * arbitrary moments.  Silently clear the TS bit
		 * regardless of the eager/lazy FPU context switch
		 * mode.
		 */
		stop_emulating();
	} else {
		if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
			printf(
		    "npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
			    PCPU_GET(fpcurthread),
			    PCPU_GET(fpcurthread)->td_proc->p_pid,
			    td, td->td_proc->p_pid);
			panic("npxdna");
		}
		restore_npx_curthread(td, td->td_pcb);
	}
	critical_exit();
	return (1);
}
/*
 * Wrapper for fpusave() called from context switch routines.
 *
 * npxsave() must be called with interrupts disabled, so that it clears
 * fpcurthread atomically with saving the state.  We require callers to do the
 * disabling, since most callers need to disable interrupts anyway to call
 * npxsave() atomically with checking fpcurthread.
 */
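/*
 * A minimal caller sketch (illustrative; the real call sites are the
 * context-switch and suspend paths):
 *
 *	saveintr = intr_disable();
 *	if (PCPU_GET(fpcurthread) == td)
 *		npxsave(td->td_pcb->pcb_save);
 *	intr_restore(saveintr);
 */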
void
npxsave(union savefpu *addr)
{

	stop_emulating();
#ifdef CPU_ENABLE_SSE
	if (use_xsaveopt)
		xsaveopt((char *)addr, xsave_mask);
	else
#endif
		fpusave(addr);
	start_emulating();
	PCPU_SET(fpcurthread, NULL);
}
void npxswitch(struct thread *td, struct pcb *pcb);
void
npxswitch(struct thread *td, struct pcb *pcb)
{

	if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
	    !PCB_USER_FPU(pcb)) {
		start_emulating();
		PCPU_SET(fpcurthread, NULL);
	} else if (PCPU_GET(fpcurthread) != td) {
		restore_npx_curthread(td, pcb);
	}
}
/*
 * Unconditionally save the current co-processor state across suspend and
 * resume.
 */
void
npxsuspend(union savefpu *addr)
{
	register_t cr0;

	if (!hw_float)
		return;
	if (PCPU_GET(fpcurthread) == NULL) {
		bcopy(npx_initialstate, addr, cpu_max_ext_state_size);
		return;
	}
	cr0 = rcr0();
	stop_emulating();
	fpusave(addr);
	load_cr0(cr0);
}

void
npxresume(union savefpu *addr)
{
	register_t cr0;

	if (!hw_float)
		return;

	cr0 = rcr0();
	npxinit(false);
	stop_emulating();
	fpurstor(addr);
	load_cr0(cr0);
}
void
npxdrop(void)
{
	struct thread *td;

	/*
	 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
	 * ones don't cause a panic on the next frstor.
	 */
#ifdef CPU_ENABLE_SSE
	if (!cpu_fxsr)
#endif
		fnclex();

	td = PCPU_GET(fpcurthread);
	KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
	CRITICAL_ASSERT(td);
	PCPU_SET(fpcurthread, NULL);
	td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	start_emulating();
}
/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 * status.
 */
int
npxgetregs(struct thread *td)
{
	struct pcb *pcb;
#ifdef CPU_ENABLE_SSE
	uint64_t *xstate_bv, bit;
	char *sa;
	int max_ext_n, i;
#endif
	int owned;

	if (!hw_float)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		bcopy(npx_initialstate, get_pcb_user_save_pcb(pcb),
		    cpu_max_ext_state_size);
		SET_FPU_CW(get_pcb_user_save_pcb(pcb), pcb->pcb_initial_npxcw);
		npxuserinited(td);
		return (_MC_FPOWNED_PCB);
	}
	critical_enter();
	if (td == PCPU_GET(fpcurthread)) {
		fpusave(get_pcb_user_save_pcb(pcb));
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		owned = _MC_FPOWNED_FPU;
	} else {
		owned = _MC_FPOWNED_PCB;
	}
	critical_exit();
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		/*
		 * Handle partially saved state.
		 */
		sa = (char *)get_pcb_user_save_pcb(pcb);
		xstate_bv = (uint64_t *)(sa + sizeof(union savefpu) +
		    offsetof(struct xstate_hdr, xstate_bv));
		if (xsave_mask >> 32 != 0)
			max_ext_n = fls(xsave_mask >> 32) + 32;
		else
			max_ext_n = fls(xsave_mask);
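		/*
		 * Components whose bit is clear in xstate_bv were
		 * saved in their initial configuration; copy the known
		 * initial content over them so the caller sees a fully
		 * populated save area.
		 */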
		for (i = 0; i < max_ext_n; i++) {
			bit = (uint64_t)1 << i;
			if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
				continue;
			bcopy((char *)npx_initialstate +
			    xsave_area_desc[i].offset,
			    sa + xsave_area_desc[i].offset,
			    xsave_area_desc[i].size);
			*xstate_bv |= bit;
		}
	}
#endif
	return (owned);
}
void
npxuserinited(struct thread *td)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	if (PCB_USER_FPU(pcb))
		pcb->pcb_flags |= PCB_NPXINITDONE;
	pcb->pcb_flags |= PCB_NPXUSERINITDONE;
}
#ifdef CPU_ENABLE_SSE
static int
npxsetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
{
	struct xstate_hdr *hdr, *ehdr;
	size_t len, max_len;
	uint64_t bv;

	/* XXXKIB should we clear all extended state in xstate_bv instead ? */
	if (xfpustate == NULL)
		return (0);
	if (!use_xsave)
		return (EOPNOTSUPP);

	len = xfpustate_size;
	if (len < sizeof(struct xstate_hdr))
		return (EINVAL);
	max_len = cpu_max_ext_state_size - sizeof(union savefpu);
	if (len > max_len)
		return (EINVAL);

	ehdr = (struct xstate_hdr *)xfpustate;
	bv = ehdr->xstate_bv;

	/*
	 * Avoid #gp.
	 */
	if (bv & ~xsave_mask)
		return (EINVAL);

	hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

	hdr->xstate_bv = bv;
	bcopy(xfpustate + sizeof(struct xstate_hdr),
	    (char *)(hdr + 1), len - sizeof(struct xstate_hdr));

	return (0);
}
#endif
int
npxsetregs(struct thread *td, union savefpu *addr, char *xfpustate,
    size_t xfpustate_size)
{
	struct pcb *pcb;
#ifdef CPU_ENABLE_SSE
	int error;
#endif

	if (!hw_float)
		return (ENXIO);

	pcb = td->td_pcb;
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
#endif
	critical_enter();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
#ifdef CPU_ENABLE_SSE
		error = npxsetxstate(td, xfpustate, xfpustate_size);
		if (error != 0) {
			critical_exit();
			return (error);
		}
		if (!cpu_fxsr)
#endif
			fnclex();	/* As in npxdrop(). */
		bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
		fpurstor(get_pcb_user_save_td(td));
		critical_exit();
		pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
	} else {
		critical_exit();
#ifdef CPU_ENABLE_SSE
		error = npxsetxstate(td, xfpustate, xfpustate_size);
		if (error != 0)
			return (error);
#endif
		bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
		npxuserinited(td);
	}
	return (0);
}
static void
fpusave(union savefpu *addr)
{

#ifdef CPU_ENABLE_SSE
	if (use_xsave)
		xsave((char *)addr, xsave_mask);
	else if (cpu_fxsr)
		fxsave(addr);
	else
#endif
		fnsave(addr);
}
#ifdef CPU_ENABLE_SSE
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
	static float dummy_variable = 0.0;
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}
#endif /* CPU_ENABLE_SSE */
static void
fpurstor(union savefpu *addr)
{

#ifdef CPU_ENABLE_SSE
	if (use_xsave)
		xrstor((char *)addr, xsave_mask);
	else if (cpu_fxsr)
		fxrstor(addr);
	else
#endif
		frstor(addr);
}
#ifdef DEV_ISA
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id npxisa_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
	{ 0 }
};

static int
npxisa_probe(device_t dev)
{
	int result;

	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
		device_quiet(dev);
	}
	return (result);
}

static int
npxisa_attach(device_t dev)
{

	return (0);
}

static device_method_t npxisa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npxisa_probe),
	DEVMETHOD(device_attach,	npxisa_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),
	{ 0, 0 }
};

static driver_t npxisa_driver = {
	"npxisa",
	npxisa_methods,
	1,			/* no softc */
};

static devclass_t npxisa_devclass;

DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
#endif /* DEV_ISA */
static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");

#define	FPU_KERN_CTX_NPXINITDONE 0x01
#define	FPU_KERN_CTX_DUMMY	 0x02

struct fpu_kern_ctx {
	union savefpu *prev;
	uint32_t flags;
	char hwstate1[];
};

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
	struct fpu_kern_ctx *res;
	size_t sz;

	sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
	    cpu_max_ext_state_size;
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);
	return (res);
}

void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

	/* XXXKIB clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}

static union savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
{
	vm_offset_t p;

	p = (vm_offset_t)&ctx->hwstate1;
	p = roundup2(p, XSAVE_AREA_ALIGN);
	return ((union savefpu *)p);
}
int
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY;
		return (0);
	}
	pcb = td->td_pcb;
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
	ctx->flags = 0;
	if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
	npxexit(td);
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
	pcb->pcb_flags |= PCB_KERNNPX;
	pcb->pcb_flags &= ~PCB_NPXINITDONE;
	return (0);
}

int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
		return (0);
	pcb = td->td_pcb;
	critical_enter();
	if (curthread == PCPU_GET(fpcurthread))
		npxdrop();
	critical_exit();
	pcb->pcb_save = ctx->prev;
	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
		if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		pcb->pcb_flags &= ~PCB_KERNNPX;
	} else {
		if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
	}
	return (0);
}
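
/*
 * A typical consumer sketch (illustrative, not taken from this file;
 * modeled on in-kernel crypto users of the fpu_kern API):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	... run FPU/SSE instructions ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */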
int
fpu_kern_thread(u_int flags)
{

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
	    ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

	curpcb->pcb_flags |= PCB_KERNNPX;
	return (0);
}

int
is_fpu_kern_thread(u_int flags)
{

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	return ((curpcb->pcb_flags & PCB_KERNNPX) != 0);
}
/*
 * FPU save area alloc/free/init utility routines
 */
union savefpu *
fpu_save_area_alloc(void)
{

	return (uma_zalloc(fpu_save_area_zone, 0));
}

void
fpu_save_area_free(union savefpu *fsa)
{

	uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(union savefpu *fsa)
{

	bcopy(npx_initialstate, fsa, cpu_max_ext_state_size);
}