2 * Copyright (c) 1990 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * FreeBSD i386 context-switch code (swtch.s), AT&T syntax.
 * NOTE(review): this is a sampled extract.  The leading integer on each
 * line is a residual original-source line number; the numbering gaps show
 * that many lines are missing (ENTRY()/END() markers, local labels, the
 * #else/#endif of the conditional below, etc.).  Code left byte-identical.
 */
35 #include "opt_sched.h"
37 #include <machine/asmacros.h>
/*
 * BLOCK_SPIN(reg): on SMP with the ULE scheduler, hand off the thread by
 * installing blocked_lock into TD_LOCK(reg) via cmpxchg; the retry/spin
 * loop body is not visible in this extract -- TODO confirm against the
 * full source.  The second, argument-less #define is presumably the empty
 * UP/non-ULE variant from the missing #else branch.
 */
41 #if defined(SMP) && defined(SCHED_ULE)
43 #define BLOCK_SPIN(reg) \
44 movl $blocked_lock,%eax ; \
47 cmpxchgl %eax,TD_LOCK(reg) ; \
54 #define BLOCK_SPIN(reg)
57 /*****************************************************************************/
59 /*****************************************************************************/
/*
 * cpu_throw(old, new) -- switch away from a dying/dummy thread without
 * saving its state.  NOTE(review): the ENTRY(cpu_throw) label, the branch
 * taken when the old thread is NULL, and several intervening instructions
 * are missing from this extract.
 */
66 * This is the second half of cpu_switch(). It is used when the current
67 * thread is either a dummy or slated to die, and we no longer care
68 * about its state. This is only a slight optimization and is probably
69 * not worth it anymore. Note that we need to clear the pm_active bits so
70 * we do need the old proc if it still exists.
/* %esi = this CPU's id; used below as the bit index into pm_active. */
76 movl PCPU(CPUID), %esi
77 movl 4(%esp),%ecx /* Old thread */
/* A conditional jump presumably followed this test -- not in extract. */
78 testl %ecx,%ecx /* no thread? */
80 /* release bit from old pm_active */
81 movl PCPU(CURPMAP), %ebx
/* Atomically clear this CPU's bit in the old pmap's active-CPU mask. */
85 btrl %esi, PM_ACTIVE(%ebx) /* clear old */
87 movl 8(%esp),%ecx /* New thread */
88 movl TD_PCB(%ecx),%edx
89 /* set bit in new pm_active */
90 movl TD_PROC(%ecx),%eax
/* %ebx = new thread's vmspace; the load of its pmap into CURPMAP below
 * presumably involves an offset add that is missing from this extract. */
91 movl P_VMSPACE(%eax), %ebx
93 movl %ebx, PCPU(CURPMAP)
97 btsl %esi, PM_ACTIVE(%ebx) /* set new */
/*
 * cpu_switch(old, new, newlock): save the outgoing thread's context into
 * its PCB, then load the incoming thread's context.  NOTE(review): the
 * ENTRY(cpu_switch) label and the load of the old thread into %ecx are
 * missing from this extract.
 */
102 * cpu_switch(old, new)
104 * Save the current thread state, then select the next thread to run
105 * and load its state.
113 /* Switch to new thread. First, save context. */
117 testl %ecx,%ecx /* no thread? */
118 jz badsw2 /* no, panic */
/* %edx = old thread's PCB; all saves below store into it. */
121 movl TD_PCB(%ecx),%edx
/* (%esp) holds our return address -- saved as the resume %eip. */
123 movl (%esp),%eax /* Hardware registers */
124 movl %eax,PCB_EIP(%edx)
/* Only callee-saved registers need saving across the C call boundary. */
125 movl %ebx,PCB_EBX(%edx)
126 movl %esp,PCB_ESP(%edx)
127 movl %ebp,PCB_EBP(%edx)
128 movl %esi,PCB_ESI(%edx)
129 movl %edi,PCB_EDI(%edx)
131 /* Test if debug registers should be saved. */
132 testl $PCB_DBREGS,PCB_FLAGS(%edx)
133 jz 1f /* no, skip over */
134 movl %dr7,%eax /* yes, do the save */
135 movl %eax,PCB_DR7(%edx)
/* Mask %dr7 down to its reserved bits so no watchpoint stays armed while
 * the thread is off-CPU. */
136 andl $0x0000fc00, %eax /* disable all watchpoints */
/* NOTE(review): the "movl %drN,%eax" load preceding each of the stores
 * below (and the write of the masked value back into %dr7) are missing
 * from this extract -- each store presumably saves the matching %drN. */
139 movl %eax,PCB_DR6(%edx)
141 movl %eax,PCB_DR3(%edx)
143 movl %eax,PCB_DR2(%edx)
145 movl %eax,PCB_DR1(%edx)
147 movl %eax,PCB_DR0(%edx)
150 /* have we used fp, and need a save? */
/* Only the thread that owns the FPU (FPCURTHREAD) has live FP state. The
 * conditional jump that follows this compare is missing from the extract. */
151 cmpl %ecx,PCPU(FPCURTHREAD)
153 pushl PCB_SAVEFPU(%edx) /* h/w bugs make saving complicated */
154 call npxsave /* do it in a big C function */
158 /* Save is done. Now fire up new thread. */
160 movl 8(%esp),%ecx /* New thread */
161 movl 12(%esp),%esi /* New lock */
163 testl %ecx,%ecx /* no thread? */
164 jz badsw3 /* no, panic */
/* %edx now points at the NEW thread's PCB. */
166 movl TD_PCB(%ecx),%edx
168 /* Switchout td_lock */
/* NOTE(review): %esi is reloaded with the CPU id here, clobbering the
 * "new lock" loaded above; the intervening instructions that consume or
 * stash the lock pointer are missing from this extract. */
170 movl PCPU(CPUID),%esi
171 SETOP %eax,TD_LOCK(%edi)
173 /* Release bit from old pmap->pm_active */
174 movl PCPU(CURPMAP), %ebx
/* Atomically drop this CPU from the old pmap's active set. */
178 btrl %esi, PM_ACTIVE(%ebx) /* clear old */
180 /* Set bit in new pmap->pm_active */
181 movl TD_PROC(%ecx),%eax /* newproc */
182 movl P_VMSPACE(%eax), %ebx
184 movl %ebx, PCPU(CURPMAP)
188 btsl %esi, PM_ACTIVE(%ebx) /* set new */
/* Publish the new lock value, releasing the old thread to other CPUs. */
192 SETOP %esi,TD_LOCK(%edi) /* Switchout td_lock */
196 * At this point, we have managed thread locks and are ready
197 * to load up the rest of the next context.
200 /* Load a pointer to the thread kernel stack into PCPU. */
201 leal -VM86_STACK_SPACE(%edx), %eax /* leave space for vm86 */
202 movl %eax, PCPU(KESP0)
/* Threads with a PCB extension carry their own TSS (e.g. for vm86);
 * point its ring-0 stack at this CPU's trampoline stack and load it. */
204 cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
205 je 1f /* If not, use the default */
206 movl $1, PCPU(PRIVATE_TSS) /* mark use of private tss */
207 movl PCB_EXT(%edx), %edi /* new tss descriptor */
208 movl PCPU(TRAMPSTK), %ebx
209 movl %ebx, PCB_EXT_TSS+TSS_ESP0(%edi)
210 jmp 2f /* Load it up */
/* NOTE(review): the "1:" label this path falls into is missing from the
 * extract. */
213 * Use the common default TSS instead of our own.
214 * Stack pointer in the common TSS points to the trampoline stack
215 * already and should be not changed.
217 * Test this CPU's flag to see if this CPU was using a private TSS.
219 cmpl $0, PCPU(PRIVATE_TSS) /* Already using the common? */
220 je 3f /* if so, skip reloading */
221 movl $0, PCPU(PRIVATE_TSS)
222 PCPU_ADDR(COMMON_TSSD, %edi)
224 /* Move correct tss descriptor into GDT slot, then reload tr. */
225 movl PCPU(TSS_GDT), %ebx /* entry in GDT */
/* The descriptor copy into the GDT slot and the ltr instruction that
 * follow are missing from this extract. */
230 movl $GPROC0_SEL*8, %esi /* GSEL(GPROC0_SEL, SEL_KPL) */
234 /* Copy the %fs and %gs selectors into this pcpu gdt */
235 leal PCB_FSD(%edx), %esi
236 movl PCPU(FSGS_GDT), %edi
/* Each descriptor is 8 bytes; the stores of %eax into the per-CPU GDT
 * (and the second dword of each descriptor) are missing from the extract. */
237 movl 0(%esi), %eax /* %fs selector */
241 movl 8(%esi), %eax /* %gs selector, comes straight after */
246 /* Restore context. */
/* Mirror image of the save sequence: reload callee-saved registers and
 * the stack from the incoming thread's PCB (%edx). After the %esp load
 * we are running on the new thread's kernel stack. */
247 movl PCB_EBX(%edx),%ebx
248 movl PCB_ESP(%edx),%esp
249 movl PCB_EBP(%edx),%ebp
250 movl PCB_ESI(%edx),%esi
251 movl PCB_EDI(%edx),%edi
/* %eax = saved %eip; presumably pushed/jumped to later -- the consuming
 * instruction is missing from this extract. */
252 movl PCB_EIP(%edx),%eax
255 movl %edx, PCPU(CURPCB)
256 movl %ecx, PCPU(CURTHREAD) /* into next thread */
259 * Determine the LDT to use and load it if is the default one and
260 * that is not the current one.
262 movl TD_PROC(%ecx),%eax
/* Process has a private LDT if p_md.md_ldt is non-NULL; the branch taken
 * on this compare is missing from the extract. */
263 cmpl $0,P_MD+MD_LDT(%eax)
265 movl _default_ldt,%eax
/* Skip the (slow) lldt when the default LDT is already loaded. */
266 cmpl PCPU(CURRENTLDT),%eax
269 movl %eax,PCPU(CURRENTLDT)
272 /* Load the LDT when it is not the default one. */
273 pushl %edx /* Preserve pointer to pcb. */
274 addl $P_MD,%eax /* Pointer to mdproc is arg. */
/* The call to set_user_ldt() and the pop restoring %edx are missing from
 * this extract. */
277 * Holding dt_lock prevents context switches, so dt_lock cannot
278 * be held now and set_user_ldt() will not deadlock acquiring it.
285 /* This must be done after loading the user LDT. */
/* cpu_switch_load_gs is exported so the %gs fault handler can recognize
 * and recover from a bad user %gs selector loaded here -- TODO confirm;
 * the mov into %gs itself is missing from the extract. */
286 .globl cpu_switch_load_gs
290 /* Test if debug registers should be restored. */
/* The jz skipping this section when PCB_DBREGS is clear is missing. */
291 testl $PCB_DBREGS,PCB_FLAGS(%edx)
295 * Restore debug registers. The special code for dr7 is to
296 * preserve the current values of its reserved bits.
/* NOTE(review): the "movl %eax,%drN" store following each load below is
 * missing from this extract. */
298 movl PCB_DR6(%edx),%eax
300 movl PCB_DR3(%edx),%eax
302 movl PCB_DR2(%edx),%eax
304 movl PCB_DR1(%edx),%eax
306 movl PCB_DR0(%edx),%eax
/* Merge: keep saved %dr7 control bits, preserve live reserved bits
 * (mask 0x0000fc00) from the current %dr7 value in %ecx. */
309 andl $0x0000fc00,%eax
310 movl PCB_DR7(%edx),%ecx
311 andl $~0x0000fc00,%ecx
/* Panic messages for the badsw1/2/3 paths (NULL thread arguments). */
322 sw0_1: .asciz "cpu_throw: no newthread supplied"
328 sw0_2: .asciz "cpu_switch: no curthread supplied"
334 sw0_3: .asciz "cpu_switch: no newthread supplied"
/*
 * savectx(pcb): snapshot the current CPU state into the pcb pointed to
 * by the first argument (in %ecx here).  NOTE(review): the ENTRY label,
 * the argument load, and the "movl %crN,%eax" / "movl %drN,%eax" reads
 * that feed each store below are missing from this extract.
 */
340 * Update pcb, saving current processor state.
346 /* Save caller's return address. Child won't execute this routine. */
348 movl %eax,PCB_EIP(%ecx)
351 movl %eax,PCB_CR3(%ecx)
/* Callee-saved general registers. */
353 movl %ebx,PCB_EBX(%ecx)
354 movl %esp,PCB_ESP(%ecx)
355 movl %ebp,PCB_EBP(%ecx)
356 movl %esi,PCB_ESI(%ecx)
357 movl %edi,PCB_EDI(%ecx)
/* Control registers (each preceded by a missing %crN read). */
361 movl %eax,PCB_CR0(%ecx)
363 movl %eax,PCB_CR2(%ecx)
365 movl %eax,PCB_CR4(%ecx)
/* Debug registers (each preceded by a missing %drN read). */
368 movl %eax,PCB_DR0(%ecx)
370 movl %eax,PCB_DR1(%ecx)
372 movl %eax,PCB_DR2(%ecx)
374 movl %eax,PCB_DR3(%ecx)
376 movl %eax,PCB_DR6(%ecx)
378 movl %eax,PCB_DR7(%ecx)
/*
 * resumectx(pcb): inverse of savectx -- reload full CPU state from the
 * pcb in %ecx (fastcall).  Used on resume from suspend.  NOTE(review):
 * the ENTRY label and the "movl %eax,<reg>" / lgdt / lidt / lldt / ltr
 * instructions consuming each load below are missing from this extract.
 */
395 * resumectx(pcb) __fastcall
396 * Resuming processor state from pcb.
402 /* Restore segment registers */
/* movzwl zero-extends each saved 16-bit selector into %eax; the segment
 * register writes themselves are not visible here. */
403 movzwl PCB_DS(%ecx),%eax
405 movzwl PCB_ES(%ecx),%eax
407 movzwl PCB_FS(%ecx),%eax
409 movzwl PCB_GS(%ecx),%eax
411 movzwl PCB_SS(%ecx),%eax
414 /* Restore CR2, CR4, CR3 and CR0 */
415 movl PCB_CR2(%ecx),%eax
417 movl PCB_CR4(%ecx),%eax
419 movl PCB_CR3(%ecx),%eax
421 movl PCB_CR0(%ecx),%eax
426 /* Restore descriptor tables */
/* TSS descriptor type codes (Intel SDM): 9 = available 32-bit TSS,
 * 11 = busy 32-bit TSS. */
430 #define SDT_SYS386TSS 9
431 #define SDT_SYS386BSY 11
432 /* Clear "task busy" bit and reload TR */
/* Byte 5 of the GDT entry holds the descriptor type; clearing the busy
 * bit is required before ltr, which would fault on a busy TSS. */
433 movl PCPU(TSS_GDT),%eax
434 andb $(~SDT_SYS386BSY | SDT_SYS386TSS),5(%eax)
435 movzwl PCB_TR(%ecx),%eax
440 /* Restore debug registers */
/* Each load below presumably feeds a missing "movl %eax,%drN" write. */
441 movl PCB_DR0(%ecx),%eax
443 movl PCB_DR1(%ecx),%eax
445 movl PCB_DR2(%ecx),%eax
447 movl PCB_DR3(%ecx),%eax
449 movl PCB_DR6(%ecx),%eax
451 movl PCB_DR7(%ecx),%eax
454 /* Restore other registers */
455 movl PCB_EDI(%ecx),%edi
456 movl PCB_ESI(%ecx),%esi
457 movl PCB_EBP(%ecx),%ebp
458 movl PCB_ESP(%ecx),%esp
459 movl PCB_EBX(%ecx),%ebx
461 /* reload code selector by turning return into intersegmental return */