/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.inc"		/* generated structure/field offsets (PCB_*, TD_*, ...) */
#include "opt_sched.h"
/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/
#if defined(SCHED_ULE) && defined(SMP)
#define	SETLK	xchgq		/* the thread-lock hand-off must be atomic on SMP */
#else
#define	SETLK	movq
#endif
/*
 * cpu_throw()
 *
 * This is the second half of cpu_switch(). It is used when the current
 * thread is either a dummy or slated to die, and we no longer care
 * about its state.  This is only a slight optimization and is probably
 * not worth it anymore.  Note that we need to clear the pm_active bits so
 * we do need the old proc if it still exists.
 * %rdi = oldtd
 * %rsi = newtd
 */
ENTRY(cpu_throw)
	movq	%rsi,%r12
	movq	%rsi,%rdi
	callq	pmap_activate_sw
	jmp	sw1
END(cpu_throw)
/*
 * cpu_switch(old, new, mtx)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * %rdi = oldtd
 * %rsi = newtd
 * %rdx = mtx
 */
ENTRY(cpu_switch)
	/* Switch to new thread.  First, save context. */
	movq	TD_PCB(%rdi),%r8
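	/*
	 * Only the callee-saved registers and the return address need to be
	 * saved here: cpu_switch() is an ordinary function call, so the
	 * caller has already given up everything else per the amd64 ABI.
	 */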
	movq	(%rsp),%rax			/* Hardware registers */
	movq	%r15,PCB_R15(%r8)
	movq	%r14,PCB_R14(%r8)
	movq	%r13,PCB_R13(%r8)
	movq	%r12,PCB_R12(%r8)
	movq	%rbp,PCB_RBP(%r8)
	movq	%rsp,PCB_RSP(%r8)
	movq	%rbx,PCB_RBX(%r8)
	movq	%rax,PCB_RIP(%r8)
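	/*
	 * If userspace can rewrite the FS/GS base directly (the FSGSBASE
	 * instructions), the values cached in the pcb may be stale, so the
	 * current values are re-read from the CPU here and a full iret is
	 * forced on the way back to user mode.  Kernel threads have no user
	 * segment bases and skip this work.
	 */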
	testl	$PCB_FULL_IRET,PCB_FLAGS(%r8)
	orl	$PCB_FULL_IRET,PCB_FLAGS(%r8)
	testl	$TDP_KTHREAD,TD_PFLAGS(%rdi)
	testb	$CPUID_STDEXT_FSGSBASE,cpu_stdext_feature(%rip)
	movq	%rax,PCB_FSBASE(%r8)
	movl	$MSR_KGSBASE,%ecx		/* Read user gs base */
	movq	%rax,PCB_GSBASE(%r8)
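	/*
	 * Hardware debug registers are only saved for threads that actually
	 * use them (PCB_DBREGS); the common case falls straight through to
	 * the FPU save below.
	 */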
	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	store_dr			/* static predict not taken */
done_store_dr:

	/* have we used fp, and need a save? */
	cmpq	%rdi,PCPU(FPCURTHREAD)
	movq	PCB_SAVEFPU(%r8),%r8
	cmpl	$0,use_xsave(%rip)
	movq	%rdx,%rcx			/* the mask load overwrites %rdx; preserve mtx */
	movl	xsave_mask,%eax
	movl	xsave_mask+4,%edx
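	/*
	 * %edx:%eax carry the requested-feature bitmap for xsave: only the
	 * state components enabled in xsave_mask are written to the save
	 * area at PCB_SAVEFPU.
	 */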
	.globl	ctx_switch_xsave
ctx_switch_xsave:
	/* This is patched to xsaveopt if supported, see fpuinit_bsp1() */
	xsave	(%r8)
	movq	%rcx,%rdx			/* restore mtx */
	/* Save is done.  Now fire up new thread.  Leave old vmspace. */
	movq	%rsi,%r12			/* newtd */
	movq	%rdi,%r13			/* oldtd */
	movq	%rdx,%r15			/* mtx */
	movq	%rsi,%rdi
	callq	pmap_activate_sw		/* switch to the new thread's pmap */
	SETLK	%r15,TD_LOCK(%r13)		/* Release the old thread */
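	/*
	 * With SCHED_ULE on SMP, SETLK is an atomic xchgq: publishing the
	 * mutex in TD_LOCK releases the old thread so another CPU may pick
	 * it up as soon as its context is fully saved.
	 */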
sw1:
	movq	TD_PCB(%r12),%r8
#if defined(SCHED_ULE) && defined(SMP)
	/* Wait for the new thread to become unblocked */
	movq	$blocked_lock, %rdx
1:
	movq	TD_LOCK(%r12),%rcx
	cmpq	%rcx, %rdx
	pause
	je	1b
#endif
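	/*
	 * A thread being handed to another CPU is temporarily "locked" by
	 * blocked_lock; spinning until TD_LOCK changes guarantees the old
	 * CPU has finished saving the thread's context before we load it.
	 */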
	/*
	 * At this point, we've switched address spaces and are ready
	 * to load up the rest of the next context.
	 */

	/* Skip loading LDT and user fsbase/gsbase for kthreads */
	testl	$TDP_KTHREAD,TD_PFLAGS(%r12)
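	/*
	 * A non-zero p_md.md_ldt means the process installed a private LDT
	 * via sysarch(); its descriptor must be copied into this CPU's GDT
	 * and reloaded, which is what do_ldt does.
	 */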
	movq	TD_PROC(%r12),%rcx
	cmpq	$0, P_MD+MD_LDT(%rcx)
	jne	do_ldt
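	/*
	 * The pcb's 32-bit fs/gs base values are written into the byte
	 * fields of the per-CPU %fs/%gs descriptors (FS32P/GS32P point into
	 * this CPU's GDT) so that 32-bit compat processes reloading those
	 * selectors get the right base.
	 */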
	/* Restore fs base in GDT */
	movl	PCB_FSBASE(%r8),%eax
	movq	PCPU(FS32P),%rdx

	/* Restore gs base in GDT */
	movl	PCB_GSBASE(%r8),%eax
	movq	PCPU(GS32P),%rdx
	/* Do we need to reload tss ? */
	movq	PCPU(TSSP),%rax
	movq	PCB_TSSP(%r8),%rdx
	testq	%rdx,%rdx
	cmovzq	PCPU(COMMONTSSP),%rdx
	cmpq	%rax,%rdx
	jne	do_tss
done_tss:
	movq	%r8,PCPU(CURPCB)
	movq	PCPU(PTI_RSP0),%rax
	movq	%rax,TSS_RSP0(%rdx)
	movq	%r12,PCPU(CURTHREAD)		/* into next thread */
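	/*
	 * TSS_RSP0 is the stack pointer the CPU loads on a user-to-kernel
	 * transition.  With page-table isolation it points at the per-CPU
	 * trampoline stack (PTI_RSP0) rather than at the thread's kernel
	 * stack.
	 */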
	/* Test if debug registers should be restored. */
	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	load_dr				/* static predict not taken */
done_load_dr:
	/* Restore context. */
	movq	PCB_R15(%r8),%r15
	movq	PCB_R14(%r8),%r14
	movq	PCB_R13(%r8),%r13
	movq	PCB_R12(%r8),%r12
	movq	PCB_RBP(%r8),%rbp
	movq	PCB_RSP(%r8),%rsp
	movq	PCB_RBX(%r8),%rbx
	movq	PCB_RIP(%r8),%rax
	movq	%rax,(%rsp)

	movq	PCPU(CURTHREAD),%rdi
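	/*
	 * %rsp now belongs to the new thread and its saved %rip sits on top
	 * of the stack, so the eventual ret resumes the new thread inside
	 * whatever kernel code it originally called cpu_switch() from.
	 */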
/*
 * We order these strangely for several reasons.
 * 1: I wanted to use static branch prediction hints
 * 2: Most athlon64/opteron cpus don't have them.  They define
 *    a forward branch as 'predict not taken'.  Intel cores have
 *    the 'rep' prefix to invert this.
 * So, to make it work on both forms of cpu we do the detour.
 * We use jumps rather than call in order to avoid the stack.
 */
store_dr:
	movq	%dr7,%rax			/* yes, do the save */
	movq	%dr0,%r15
	movq	%dr1,%r14
	movq	%dr2,%r13
	movq	%dr3,%r12
	movq	%dr6,%r11
	movq	%r15,PCB_DR0(%r8)
	movq	%r14,PCB_DR1(%r8)
	movq	%r13,PCB_DR2(%r8)
	movq	%r12,PCB_DR3(%r8)
	movq	%r11,PCB_DR6(%r8)
	movq	%rax,PCB_DR7(%r8)
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	jmp	done_store_dr
load_dr:
	movq	%dr7,%rax
	movq	PCB_DR0(%r8),%r15
	movq	PCB_DR1(%r8),%r14
	movq	PCB_DR2(%r8),%r13
	movq	PCB_DR3(%r8),%r12
	movq	PCB_DR6(%r8),%r11
	movq	PCB_DR7(%r8),%rcx
	movq	%r15,%dr0
	movq	%r14,%dr1
	/* Preserve reserved bits in %dr7 */
	andq	$0x0000fc00,%rax
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%r13,%dr2
	movq	%r12,%dr3
	movq	%r11,%dr6
	movq	%rax,%dr7
	jmp	done_load_dr
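	/*
	 * The reserved bits (mask 0x0000fc00) of the live %dr7 are preserved
	 * and combined with the breakpoint enables and conditions saved in
	 * the pcb before %dr7 is rewritten.
	 */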
do_tss:	movq	%rdx,PCPU(TSSP)
	movq	PCPU(TSS),%rax
	movb	$0x89,5(%rax)			/* unset busy */
	movl	$TSSSEL,%eax
	ltr	%ax
	jmp	done_tss
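	/*
	 * A TSS descriptor whose type is marked busy cannot be loaded again,
	 * so the type byte is reset to "available 64-bit TSS" (0x89) before
	 * the task register is reloaded.
	 */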
do_ldt:	movq	PCPU(LDT),%rax
	movq	P_MD+MD_LDT_SD(%rcx),%rdx
	movq	%rdx,(%rax)
	movq	P_MD+MD_LDT_SD+8(%rcx),%rdx
	movq	%rdx,8(%rax)
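	/*
	 * The process's 16-byte LDT descriptor (p_md.md_ldt_sd) is copied
	 * into this CPU's GDT slot; the LDT selector is then reloaded so the
	 * private LDT takes effect.
	 */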
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* Save caller's return address. */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rdi)

	movq	%rbx,PCB_RBX(%rdi)
	movq	%rsp,PCB_RSP(%rdi)
	movq	%rbp,PCB_RBP(%rdi)
	movq	%r12,PCB_R12(%rdi)
	movq	%r13,PCB_R13(%rdi)
	movq	%r14,PCB_R14(%rdi)
	movq	%r15,PCB_R15(%rdi)
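	/*
	 * Beyond the callee-saved registers, savectx() also captures control
	 * registers, debug registers, descriptor tables and the segment-base
	 * and fast-syscall MSRs: it is used for suspend/resume and kernel
	 * dumps rather than for ordinary context switches.
	 */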
	movq	%cr0,%rax
	movq	%rax,PCB_CR0(%rdi)
	movq	%cr2,%rax
	movq	%rax,PCB_CR2(%rdi)
	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rdi)
	movq	%cr4,%rax
	movq	%rax,PCB_CR4(%rdi)

	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdi)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdi)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdi)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdi)
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdi)
	movq	%dr7,%rax
	movq	%rax,PCB_DR7(%rdi)
	movl	$MSR_FSBASE,%ecx
	rdmsr
	movl	%eax,PCB_FSBASE(%rdi)
	movl	%edx,PCB_FSBASE+4(%rdi)
	movl	$MSR_GSBASE,%ecx
	rdmsr
	movl	%eax,PCB_GSBASE(%rdi)
	movl	%edx,PCB_GSBASE+4(%rdi)
	movl	$MSR_KGSBASE,%ecx
	rdmsr
	movl	%eax,PCB_KGSBASE(%rdi)
	movl	%edx,PCB_KGSBASE+4(%rdi)
	movl	$MSR_EFER,%ecx
	rdmsr
	movl	%eax,PCB_EFER(%rdi)
	movl	%edx,PCB_EFER+4(%rdi)
	movl	$MSR_STAR,%ecx
	rdmsr
	movl	%eax,PCB_STAR(%rdi)
	movl	%edx,PCB_STAR+4(%rdi)
	movl	$MSR_LSTAR,%ecx
	rdmsr
	movl	%eax,PCB_LSTAR(%rdi)
	movl	%edx,PCB_LSTAR+4(%rdi)
	movl	$MSR_CSTAR,%ecx
	rdmsr
	movl	%eax,PCB_CSTAR(%rdi)
	movl	%edx,PCB_CSTAR+4(%rdi)
	movl	$MSR_SF_MASK,%ecx
	rdmsr
	movl	%eax,PCB_SFMASK(%rdi)
	movl	%edx,PCB_SFMASK+4(%rdi)

	sgdt	PCB_GDT(%rdi)
	sidt	PCB_IDT(%rdi)
	sldt	PCB_LDT(%rdi)
	str	PCB_TR(%rdi)

	movl	$1,%eax
	ret
END(savectx)
/*
 * resumectx(pcb)
 * Resuming processor state from pcb.
 */
ENTRY(resumectx)
	/* Switch to KPML4phys. */
	movq	KPML4phys,%rax
	movq	%rax,%cr3
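	/*
	 * resumectx() is the counterpart of savectx(): it runs when a
	 * suspended processor wakes up and rebuilds paging, segments, MSRs,
	 * descriptor tables, control and debug state from the pcb before
	 * returning through the %rip saved by savectx().
	 */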
	/* Force kernel segment registers. */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%ss
	movl	$KUF32SEL,%eax
	movl	%eax,%fs
	movl	$KUG32SEL,%eax
	movl	%eax,%gs
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%rdi),%eax
	movl	4 + PCB_FSBASE(%rdi),%edx
	wrmsr
	movl	$MSR_GSBASE,%ecx
	movl	PCB_GSBASE(%rdi),%eax
	movl	4 + PCB_GSBASE(%rdi),%edx
	wrmsr
	movl	$MSR_KGSBASE,%ecx
	movl	PCB_KGSBASE(%rdi),%eax
	movl	4 + PCB_KGSBASE(%rdi),%edx
	wrmsr

	/* Restore EFER one more time. */
	movl	$MSR_EFER,%ecx
	movl	PCB_EFER(%rdi),%eax
	movl	4 + PCB_EFER(%rdi),%edx
	wrmsr

	/* Restore fast syscall stuff. */
	movl	$MSR_STAR,%ecx
	movl	PCB_STAR(%rdi),%eax
	movl	4 + PCB_STAR(%rdi),%edx
	wrmsr
	movl	$MSR_LSTAR,%ecx
	movl	PCB_LSTAR(%rdi),%eax
	movl	4 + PCB_LSTAR(%rdi),%edx
	wrmsr
	movl	$MSR_CSTAR,%ecx
	movl	PCB_CSTAR(%rdi),%eax
	movl	4 + PCB_CSTAR(%rdi),%edx
	wrmsr
	movl	$MSR_SF_MASK,%ecx
	movl	PCB_SFMASK(%rdi),%eax
	movl	4 + PCB_SFMASK(%rdi),%edx
	wrmsr
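	/*
	 * The STAR/LSTAR/CSTAR/SF_MASK values restored above configure the
	 * segment selectors, 64-bit and 32-bit-compat entry points and the
	 * %rflags mask used by the syscall instruction.
	 */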
	/* Restore CR0, CR2, CR4 and CR3. */
	movq	PCB_CR0(%rdi),%rax
	movq	%rax,%cr0
	movq	PCB_CR2(%rdi),%rax
	movq	%rax,%cr2
	movq	PCB_CR4(%rdi),%rax
	movq	%rax,%cr4
	movq	PCB_CR3(%rdi),%rax
	movq	%rax,%cr3
	/* Restore descriptor tables. */
	lidt	PCB_IDT(%rdi)
	lldt	PCB_LDT(%rdi)

#define	SDT_SYSTSS	9
#define	SDT_SYSBSY	11

	/* Clear "task busy" bit and reload TR. */
	movq	PCPU(TSS),%rax
	andb	$(~SDT_SYSBSY | SDT_SYSTSS),5(%rax)
	movw	PCB_TR(%rdi),%ax
	ltr	%ax

#undef	SDT_SYSTSS
#undef	SDT_SYSBSY
	/* Restore debug registers. */
	movq	PCB_DR0(%rdi),%rax
	movq	%rax,%dr0
	movq	PCB_DR1(%rdi),%rax
	movq	%rax,%dr1
	movq	PCB_DR2(%rdi),%rax
	movq	%rax,%dr2
	movq	PCB_DR3(%rdi),%rax
	movq	%rax,%dr3
	movq	PCB_DR6(%rdi),%rax
	movq	%rax,%dr6
	movq	PCB_DR7(%rdi),%rax
	movq	%rax,%dr7
	/* Restore other callee saved registers. */
	movq	PCB_R15(%rdi),%r15
	movq	PCB_R14(%rdi),%r14
	movq	PCB_R13(%rdi),%r13
	movq	PCB_R12(%rdi),%r12
	movq	PCB_RBP(%rdi),%rbp
	movq	PCB_RSP(%rdi),%rsp
	movq	PCB_RBX(%rdi),%rbx

	/* Restore return address. */
	movq	PCB_RIP(%rdi),%rax
	movq	%rax,(%rsp)

	xorl	%eax,%eax
	ret
END(resumectx)