 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 * from: @(#)npx.c	7.2 (Berkeley) 5/12/91
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/signalvar.h>

#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>
/*
 * Floating point support.
 */

#if defined(__GNUCLIKE_ASM) && !defined(lint)

#define	fldcw(cw)	__asm __volatile("fldcw %0" : : "m" (cw))
#define	fnclex()	__asm __volatile("fnclex")
#define	fninit()	__asm __volatile("fninit")
#define	fnstcw(addr)	__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)	__asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define	fxrstor(addr)	__asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)	__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	ldmxcsr(csr)	__asm __volatile("ldmxcsr %0" : : "m" (csr))
#define	stmxcsr(addr)	__asm __volatile("stmxcsr %0" : : "m" (*(addr)))
xrstor(char *addr, uint64_t mask)
	__asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));

xsave(char *addr, uint64_t mask)
	__asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :

#else	/* !(__GNUCLIKE_ASM && !lint) */

void	fldcw(u_short cw);
void	fnstcw(caddr_t addr);
void	fnstsw(caddr_t addr);
void	fxsave(caddr_t addr);
void	fxrstor(caddr_t addr);
void	ldmxcsr(u_int csr);
void	stmxcsr(u_int *csr);
void	xrstor(char *addr, uint64_t mask);
void	xsave(char *addr, uint64_t mask);

#endif	/* __GNUCLIKE_ASM && !lint */
#define	start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	stop_emulating()	clts()
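
/*
 * The two macros above toggle CR0.TS: with TS set, the next FPU/SIMD
 * instruction executed on this CPU raises #NM, which is handled by
 * fpudna() below; clts clears TS again so FPU instructions run normally.
 */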
CTASSERT(sizeof(struct savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);

/*
 * This requirement is to make it easier for asm code to calculate
 * the offset of the FPU save area from the pcb address.  The FPU save
 * area must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);

/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savefpu, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savefpu));

static void fpu_clean_state(void);

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, 1, "Floating point instructions executed in hardware");

int use_xsave;			/* non-static for cpu_switch.S */
uint64_t xsave_mask;		/* the same */
static uma_zone_t fpu_save_area_zone;
static struct savefpu *fpu_initialstate;
struct xsave_area_elm_descr {

		xsave((char *)addr, xsave_mask);
		fxsave((char *)addr);

fpurestore(void *addr)

		xrstor((char *)addr, xsave_mask);
		fxrstor((char *)addr);

fpususpend(void *addr)

fpuresume(void *addr)

		load_xcr(XCR0, xsave_mask);

/*
 * Enable XSAVE if supported and allowed by user.
 * Calculate the xsave_mask.
 */

	uint64_t xsave_mask_user;

	if ((cpu_feature2 & CPUID2_XSAVE) != 0) {
		TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);

	cpuid_count(0xd, 0x0, cp);
	xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	if ((cp[0] & xsave_mask) != xsave_mask)
		panic("CPU0 does not support X87 or SSE: %x", cp[0]);
	xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
	xsave_mask_user = xsave_mask;
	TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
	xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	xsave_mask &= xsave_mask_user;
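	/*
	 * Illustrative note: hw.use_xsave and hw.xsave_mask above are
	 * loader tunables.  For example, setting hw.xsave_mask=0x7 in
	 * loader.conf would restrict the saved state to the x87, SSE and
	 * AVX components; the x87 and SSE bits are forced back on above,
	 * so they cannot be masked out.  The value 0x7 is an example only,
	 * not a recommendation.
	 */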
	if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
		xsave_mask &= ~XFEATURE_AVX512;
	if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
		xsave_mask &= ~XFEATURE_MPX;

	cpuid_count(0xd, 0x1, cp);
	if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
		/*
		 * Patch the XSAVE instruction in the cpu_switch code
		 * to XSAVEOPT.  We assume that the XSAVE encoding uses
		 * a REX byte, and set bit 4 of the r/m byte.
		 */
		ctx_switch_xsave[3] |= 0x10;
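		/*
		 * With the REX prefix the instruction is laid out as
		 * REX 0F AE modrm, so ctx_switch_xsave[3] above is the
		 * modrm byte; OR-ing in 0x10 flips its reg field from
		 * /4 (XSAVE) to /6 (XSAVEOPT).
		 */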
/*
 * Calculate the fpu save area size.
 */

		cpuid_count(0xd, 0x0, cp);
		cpu_max_ext_state_size = cp[1];
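		/*
		 * CPUID leaf 0xD, sub-leaf 0 reports in EBX (cp[1]) the
		 * size in bytes of the XSAVE area required for the feature
		 * set currently enabled in XCR0; that is the value stored
		 * in cpu_max_ext_state_size above.
		 */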
		/*
		 * Reload the cpu_feature2, since we enabled OSXSAVE.
		 */
		cpu_feature2 = cp[2];

		cpu_max_ext_state_size = sizeof(struct savefpu);
/*
 * Initialize the floating point unit.
 */

		load_cr4(rcr4() | CR4_XSAVE);
		load_xcr(XCR0, xsave_mask);

	/*
	 * XCR0 shall be set up before the CPU can report the save area size.
	 */

	/*
	 * It is too early for critical_enter() to work on the AP.
	 */
	saveintr = intr_disable();

	control = __INITIAL_FPUCW__;
	mxcsr = __INITIAL_MXCSR__;

	intr_restore(saveintr);
/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 */
fpuinitstate(void *arg __unused)

	fpu_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
	saveintr = intr_disable();

	fpusave(fpu_initialstate);
	if (fpu_initialstate->sv_env.en_mxcsr_mask)
		cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
		cpu_mxcsr_mask = 0xFFBF;

	/*
	 * The fninit instruction does not modify XMM registers or x87
	 * registers (MM/ST).  The fpusave call above dumped whatever
	 * garbage the registers held after reset into the initial state
	 * image.  Clear the XMM and x87 register file images to make the
	 * startup program state and signal handler XMM/x87 register
	 * content predictable.
	 */
	bzero(fpu_initialstate->sv_fp, sizeof(fpu_initialstate->sv_fp));
	bzero(fpu_initialstate->sv_xmm, sizeof(fpu_initialstate->sv_xmm));
	/*
	 * Create a table describing the layout of the CPU Extended
	 */
		max_ext_n = flsl(xsave_mask);
		xsave_area_desc = malloc(max_ext_n * sizeof(struct
		    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
		xsave_area_desc[0].offset = 0;
		xsave_area_desc[0].size = 160;
		xsave_area_desc[1].offset = 160;
		xsave_area_desc[1].size = 288 - 160;

		for (i = 2; i < max_ext_n; i++) {
			cpuid_count(0xd, i, cp);
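			/*
			 * CPUID leaf 0xD, sub-leaf i describes extended
			 * state component i: EAX (cp[0]) is its size in
			 * bytes and EBX (cp[1]) its offset from the start
			 * of the XSAVE area.
			 */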
			xsave_area_desc[i].offset = cp[1];
			xsave_area_desc[i].size = cp[0];

	fpu_save_area_zone = uma_zcreate("FPU_save_area",
	    cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
	    XSAVE_AREA_ALIGN - 1, 0);

	intr_restore(saveintr);

SYSINIT(fpuinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, fpuinitstate, NULL);
/*
 * Free coprocessor (if we have it).
 */
fpuexit(struct thread *td)

	if (curthread == PCPU_GET(fpcurthread)) {
		fpusave(curpcb->pcb_save);
		PCPU_SET(fpcurthread, NULL);

	return (_MC_FPFMT_XMM);
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 *
 * The mechanism has a static table with 128 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *   1  Invalid operation (FP_X_INV)
 *     1c  Operand of unsupported format
 *   2  QNaN operand (not an exception, irrelevant here)
 *   3  Any other invalid-operation not mentioned above or zero divide
 *        (FP_X_INV, FP_X_DZ)
 *   4  Denormal operand (FP_X_DNML)
 *   5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *   6  Inexact result (FP_X_IMP)
 */
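
/*
 * A purely illustrative example of the lookup performed in fputrap_x87()
 * below (the values are invented for this comment): if a process unmasks
 * only the zero-divide exception, the low six bits of the control word
 * are 0x3b; a divide by zero then sets ZE (0x04) in the status word, and
 *
 *	status & ((~control & 0x3f) | 0x40) = 0x04 & 0x44 = 0x04,
 *
 * so fpetable[4] == FPE_FLTDIV is delivered as the si_code.
 */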
static char fpetable[128] = {
	FPE_FLTINV,	/* 1 - INV */
	FPE_FLTUND,	/* 2 - DNML */
	FPE_FLTINV,	/* 3 - INV | DNML */
	FPE_FLTDIV,	/* 4 - DZ */
	FPE_FLTINV,	/* 5 - INV | DZ */
	FPE_FLTDIV,	/* 6 - DNML | DZ */
	FPE_FLTINV,	/* 7 - INV | DNML | DZ */
	FPE_FLTOVF,	/* 8 - OFL */
	FPE_FLTINV,	/* 9 - INV | OFL */
	FPE_FLTUND,	/* A - DNML | OFL */
	FPE_FLTINV,	/* B - INV | DNML | OFL */
	FPE_FLTDIV,	/* C - DZ | OFL */
	FPE_FLTINV,	/* D - INV | DZ | OFL */
	FPE_FLTDIV,	/* E - DNML | DZ | OFL */
	FPE_FLTINV,	/* F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
/*
 * Read the FP status and control words, then generate the si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX here.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.
 * Usermode code that understands the FPU hardware well enough to
 * enable the exceptions can also handle clearing the exception state
 * in the handler.  The only consequence of not clearing the exceptions
 * is the rethrow of SIGFPE on return from the signal handler and
 * re-execution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */
	struct savefpu *pcb_save;
	u_short control, status;

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory.  Fetch the relevant parts of the state from
	 */
	if (PCPU_GET(fpcurthread) != curthread) {
		pcb_save = curpcb->pcb_save;
		control = pcb_save->sv_env.en_cw;
		status = pcb_save->sv_env.en_sw;

	return (fpetable[status & ((~control & 0x3f) | 0x40)]);

	if (PCPU_GET(fpcurthread) != curthread)
		mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;

	return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
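	/*
	 * The lookup above relies on the MXCSR layout: bits 0-5 are the
	 * SSE exception flags and bits 7-12 the corresponding mask bits,
	 * so (~mxcsr >> 7) lines the unmasked-exception bits up with the
	 * flag bits before indexing fpetable[].
	 */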
/*
 * Device Not Available (DNA, #NM) exception handler.
 *
 * It would be better to switch FP context here (if curthread !=
 * fpcurthread) and not necessarily for every context switch, but it
 * is too hard to access foreign pcb's.
 */

	/*
	 * This handler is entered with interrupts enabled, so context
	 * switches may occur before critical_enter() is executed.  If
	 * a context switch occurs, then when we regain control, our
	 * state will have been completely restored.  The CPU may
	 * change underneath us, but the only part of our context that
	 * lives in the CPU is CR0.TS and that will be "restored" by
	 * setting it on the new CPU.
	 */

	KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
	    ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
	if (PCPU_GET(fpcurthread) == curthread) {
		printf("fpudna: fpcurthread == curthread\n");
	if (PCPU_GET(fpcurthread) != NULL) {
		panic("fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
		    PCPU_GET(fpcurthread), PCPU_GET(fpcurthread)->td_tid,
		    curthread, curthread->td_tid);
	/*
	 * Record new context early in case fpurestore() causes a trap.
	 */
	PCPU_SET(fpcurthread, curthread);

	if ((curpcb->pcb_flags & PCB_FPUINITDONE) == 0) {
		/*
		 * This is the first time this thread has used the FPU or
		 * the PCB doesn't contain a clean FPU state.  Explicitly
		 * load an initial state.
		 *
		 * We prefer to restore the state from the actual save
		 * area in the PCB instead of directly loading from
		 * fpu_initialstate, to ignite the XSAVEOPT
		 */
		bcopy(fpu_initialstate, curpcb->pcb_save,
		    cpu_max_ext_state_size);
		fpurestore(curpcb->pcb_save);
		if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
			fldcw(curpcb->pcb_initial_fpucw);
		if (PCB_USER_FPU(curpcb))
			set_pcb_flags(curpcb,
			    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
			set_pcb_flags(curpcb, PCB_FPUINITDONE);
		fpurestore(curpcb->pcb_save);
/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 */
fpugetregs(struct thread *td)

	uint64_t *xstate_bv, bit;
	int max_ext_n, i, owned;

	if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
		bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
		    cpu_max_ext_state_size);
		get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
		    pcb->pcb_initial_fpucw;
		return (_MC_FPOWNED_PCB);

	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		fpusave(get_pcb_user_save_pcb(pcb));
		owned = _MC_FPOWNED_FPU;
		owned = _MC_FPOWNED_PCB;
	/*
	 * Handle partially saved state.
	 */
	sa = (char *)get_pcb_user_save_pcb(pcb);
	xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
	    offsetof(struct xstate_hdr, xstate_bv));
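	/*
	 * Components whose bit is clear in xstate_bv were left in their
	 * initial configuration by XSAVE/XSAVEOPT, so their memory image
	 * may be stale; the loop below copies the initial state for those
	 * components so that userland sees a well-defined image.
	 */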
	max_ext_n = flsl(xsave_mask);
	for (i = 0; i < max_ext_n; i++) {
		if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
		bcopy((char *)fpu_initialstate +
		    xsave_area_desc[i].offset,
		    sa + xsave_area_desc[i].offset,
		    xsave_area_desc[i].size);

fpuuserinited(struct thread *td)

	if (PCB_USER_FPU(pcb))
		    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
		set_pcb_flags(pcb, PCB_FPUINITDONE);
fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
	struct xstate_hdr *hdr, *ehdr;

	/* XXXKIB should we clear all extended state in xstate_bv instead? */
	if (xfpustate == NULL)

	len = xfpustate_size;
	if (len < sizeof(struct xstate_hdr))
	max_len = cpu_max_ext_state_size - sizeof(struct savefpu);

	ehdr = (struct xstate_hdr *)xfpustate;
	bv = ehdr->xstate_bv;

	if (bv & ~xsave_mask)

	hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

	bcopy(xfpustate + sizeof(struct xstate_hdr),
	    (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
/*
 * Set the state of the FPU.
 */
fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
    size_t xfpustate_size)

	addr->sv_env.en_mxcsr &= cpu_mxcsr_mask;

	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		error = fpusetxstate(td, xfpustate, xfpustate_size);
		bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
		fpurestore(get_pcb_user_save_td(td));
		set_pcb_flags(pcb, PCB_FPUINITDONE | PCB_USERFPUINITDONE);

		error = fpusetxstate(td, xfpustate, xfpustate_size);
		bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
fpu_clean_state(void)
	static float dummy_variable = 0.0;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 */
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
/*
 * This really sucks.  We want the acpi version only, but it requires
 * the isa_if.h file in order to get the definitions.
 */
#include <isa/isavar.h>

/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id fpupnp_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
fpupnp_probe(device_t dev)

	result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);

fpupnp_attach(device_t dev)

static device_method_t fpupnp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fpupnp_probe),
	DEVMETHOD(device_attach,	fpupnp_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

static driver_t fpupnp_driver = {

static devclass_t fpupnp_devclass;

DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, fpupnp_devclass, 0, 0);
static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");
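
/*
 * Sketch of the typical calling sequence for the fpu_kern_*() API defined
 * below, as kernel code that wants to use FPU/SIMD registers might use it
 * (illustrative only; error handling omitted):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	... FPU/SIMD instructions ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */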
#define	FPU_KERN_CTX_FPUINITDONE 0x01
#define	FPU_KERN_CTX_DUMMY	 0x02	/* avoided save for the kern thread */
#define	FPU_KERN_CTX_INUSE	 0x04

struct fpu_kern_ctx {
	struct savefpu *prev;

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
	struct fpu_kern_ctx *res;

	sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
	    cpu_max_ext_state_size;
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);

fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)

	KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
	/* XXXKIB clear the memory? */
	free(ctx, M_FPUKERN_CTX);

static struct savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)

	p = (vm_offset_t)&ctx->hwstate1;
	p = roundup2(p, XSAVE_AREA_ALIGN);
	return ((struct savefpu *)p);
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		if (curthread == PCPU_GET(fpcurthread)) {
			fpusave(curpcb->pcb_save);
			PCPU_SET(fpcurthread, NULL);
			KASSERT(PCPU_GET(fpcurthread) == NULL,
			    ("invalid fpcurthread"));

		/*
		 * This breaks the XSAVEOPT tracker, but the PCB_FPUNOSAVE
		 * state is never supposed to need to save the FPU context
		 * at all.
		 */
		fpurestore(fpu_initialstate);
		set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;

	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
	ctx->flags = FPU_KERN_CTX_INUSE;
	if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
	set_pcb_flags(pcb, PCB_KERNFPU);
	clear_pcb_flags(pcb, PCB_FPUINITDONE);
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)

	if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
		CRITICAL_ASSERT(td);

		clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE);

		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("leaving not inuse ctx"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
		if (curthread == PCPU_GET(fpcurthread))
		pcb->pcb_save = ctx->prev;

	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
		if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
			set_pcb_flags(pcb, PCB_FPUINITDONE);
			clear_pcb_flags(pcb, PCB_KERNFPU);
			clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);

		if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
			set_pcb_flags(pcb, PCB_FPUINITDONE);
			clear_pcb_flags(pcb, PCB_FPUINITDONE);
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
fpu_kern_thread(u_int flags)

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
	    ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

	set_pcb_flags(curpcb, PCB_KERNFPU);

is_fpu_kern_thread(u_int flags)

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
	return ((curpcb->pcb_flags & PCB_KERNFPU) != 0);
/*
 * FPU save area alloc/free/init utility routines
 */
fpu_save_area_alloc(void)

	return (uma_zalloc(fpu_save_area_zone, 0));

fpu_save_area_free(struct savefpu *fsa)

	uma_zfree(fpu_save_area_zone, fsa);

fpu_save_area_reset(struct savefpu *fsa)

	bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);