2 * Copyright (c) 1990 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include "opt_sched.h"
37 #include <machine/asmacros.h>
/*
 * BLOCK_SPIN(reg): with SMP + SCHED_ULE, spin until the thread whose
 * pointer is in `reg` has its td_lock released from the transient
 * `blocked_lock` value (cmpxchgl against TD_LOCK(reg) is visible below).
 * NOTE(review): this extract is fragmented — the macro body's remaining
 * continuation lines and the matching #else/#endif are missing; the
 * second, empty definition below is presumably the non-SMP/non-ULE
 * variant — confirm against the full source file.
 */
41 #if defined(SMP) && defined(SCHED_ULE)
43 #define BLOCK_SPIN(reg) \
44 movl $blocked_lock,%eax ; \
47 cmpxchgl %eax,TD_LOCK(reg) ; \
54 #define BLOCK_SPIN(reg)
/*
 * Tail of cpu_throw() — switch away from the current (dying or dummy)
 * thread without saving its state, then activate the new thread's pmap.
 * NOTE(review): this extract is fragmented; the ENTRY(cpu_throw) label
 * and several instructions between the visible lines are missing from
 * this view — consult the full swtch.s before relying on the exact
 * instruction sequence.
 */
57 /*****************************************************************************/
59 /*****************************************************************************/
66 * This is the second half of cpu_switch(). It is used when the current
67 * thread is either a dummy or slated to die, and we no longer care
68 * about its state. This is only a slight optimization and is probably
69 * not worth it anymore. Note that we need to clear the pm_active bits so
70 * we do need the old proc if it still exists.
76 movl PCPU(CPUID), %esi /* %esi = this CPU's id, the pm_active bit index */
77 /* release bit from old pm_active */
78 movl PCPU(CURPMAP), %ebx /* %ebx = outgoing pmap */
82 btrl %esi, PM_ACTIVE(%ebx) /* clear old */
83 movl 8(%esp),%ecx /* New thread */
84 movl TD_PCB(%ecx),%edx /* %edx = new thread's pcb */
85 /* set bit in new pm_active */
86 movl TD_PROC(%ecx),%eax
87 movl P_VMSPACE(%eax), %ebx /* %ebx = new proc's vmspace (pmap offset lines missing here) */
89 movl %ebx, PCPU(CURPMAP) /* remember new pmap per-CPU */
93 btsl %esi, PM_ACTIVE(%ebx) /* set new */
/*
 * cpu_switch(old, new, newlock) — save the outgoing thread's context into
 * its pcb, hand off td_lock, switch pmap/TSS/segment state, and restore
 * the incoming thread's context.  Arguments are taken from the stack
 * (4/8/12(%esp): old thread, new thread, new lock).
 * NOTE(review): this extract is fragmented — the ENTRY(cpu_switch) label,
 * the local labels targeted by the 1f/2f/3f branches, and a number of
 * instructions between visible lines are missing from this view.  Do not
 * treat the visible sequence as contiguous; verify against full swtch.s.
 */
98 * cpu_switch(old, new)
100 * Save the current thread state, then select the next thread to run
101 * and load its state.
109 /* Switch to new thread. First, save context. */
113 testl %ecx,%ecx /* no thread? */
114 jz badsw2 /* no, panic */
117 movl TD_PCB(%ecx),%edx /* %edx = old thread's pcb */
119 movl (%esp),%eax /* Hardware registers */
120 movl %eax,PCB_EIP(%edx) /* saved return address becomes resume EIP */
121 movl %ebx,PCB_EBX(%edx)
122 movl %esp,PCB_ESP(%edx)
123 movl %ebp,PCB_EBP(%edx)
124 movl %esi,PCB_ESI(%edx)
125 movl %edi,PCB_EDI(%edx)
127 /* Test if debug registers should be saved. */
128 testl $PCB_DBREGS,PCB_FLAGS(%edx)
129 jz 1f /* no, skip over */
130 movl %dr7,%eax /* yes, do the save */
131 movl %eax,PCB_DR7(%edx)
132 andl $0x0000fc00, %eax /* disable all watchpoints */
/*
 * NOTE(review): the loads of %dr6..%dr0 into %eax between the following
 * stores are not visible in this extract — each store presumably follows
 * a `movl %drN,%eax`; confirm against the full source.
 */
135 movl %eax,PCB_DR6(%edx)
137 movl %eax,PCB_DR3(%edx)
139 movl %eax,PCB_DR2(%edx)
141 movl %eax,PCB_DR1(%edx)
143 movl %eax,PCB_DR0(%edx)
146 /* have we used fp, and need a save? */
147 cmpl %ecx,PCPU(FPCURTHREAD) /* only save if we own the FPU state */
149 pushl PCB_SAVEFPU(%edx) /* h/w bugs make saving complicated */
150 call npxsave /* do it in a big C function */
154 /* Save is done. Now fire up new thread. */
156 movl 8(%esp),%ecx /* New thread */
157 movl 12(%esp),%esi /* New lock */
159 testl %ecx,%ecx /* no thread? */
160 jz badsw3 /* no, panic */
162 movl TD_PCB(%ecx),%edx /* %edx = new thread's pcb */
164 /* Switchout td_lock */
166 movl PCPU(CPUID),%esi /* %esi reused: CPU id for pm_active bit ops */
167 SETOP %eax,TD_LOCK(%edi) /* release old thread's lock to scheduler */
169 /* Release bit from old pmap->pm_active */
170 movl PCPU(CURPMAP), %ebx
174 btrl %esi, PM_ACTIVE(%ebx) /* clear old */
176 /* Set bit in new pmap->pm_active */
177 movl TD_PROC(%ecx),%eax /* newproc */
178 movl P_VMSPACE(%eax), %ebx
180 movl %ebx, PCPU(CURPMAP)
184 btsl %esi, PM_ACTIVE(%ebx) /* set new */
188 * At this point, we have managed thread locks and are ready
189 * to load up the rest of the next context.
192 /* Load a pointer to the thread kernel stack into PCPU. */
193 leal -VM86_STACK_SPACE(%edx), %eax /* leave space for vm86 */
194 movl %eax, PCPU(KESP0)
196 cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
197 je 1f /* If not, use the default */
198 movl $1, PCPU(PRIVATE_TSS) /* mark use of private tss */
199 movl PCB_EXT(%edx), %edi /* new tss descriptor */
200 movl PCPU(TRAMPSTK), %ebx
201 movl %ebx, PCB_EXT_TSS+TSS_ESP0(%edi) /* ring-0 stack in the private TSS */
202 jmp 2f /* Load it up */
205 * Use the common default TSS instead of our own.
206 * Stack pointer in the common TSS points to the trampoline stack
207 * already and should be not changed.
209 * Test this CPU's flag to see if this CPU was using a private TSS.
211 cmpl $0, PCPU(PRIVATE_TSS) /* Already using the common? */
212 je 3f /* if so, skip reloading */
213 movl $0, PCPU(PRIVATE_TSS)
214 PCPU_ADDR(COMMON_TSSD, %edi)
216 /* Move correct tss descriptor into GDT slot, then reload tr. */
217 movl PCPU(TSS_GDT), %ebx /* entry in GDT */
/* NOTE(review): descriptor copy and the `ltr` reload are missing here. */
222 movl $GPROC0_SEL*8, %esi /* GSEL(GPROC0_SEL, SEL_KPL) */
226 /* Copy the %fs and %gs selectors into this pcpu gdt */
227 leal PCB_FSD(%edx), %esi /* source: fs/gs descriptors saved in pcb */
228 movl PCPU(FSGS_GDT), %edi /* destination: this CPU's GDT slots */
229 movl 0(%esi), %eax /* %fs selector */
233 movl 8(%esi), %eax /* %gs selector, comes straight after */
238 /* Restore context. */
239 movl PCB_EBX(%edx),%ebx
240 movl PCB_ESP(%edx),%esp
241 movl PCB_EBP(%edx),%ebp
242 movl PCB_ESI(%edx),%esi
243 movl PCB_EDI(%edx),%edi
244 movl PCB_EIP(%edx),%eax /* resume address (presumably pushed and `ret` to it; push not visible) */
247 movl %edx, PCPU(CURPCB)
248 movl %ecx, PCPU(CURTHREAD) /* into next thread */
251 * Determine the LDT to use and load it if is the default one and
252 * that is not the current one.
254 movl TD_PROC(%ecx),%eax
255 cmpl $0,P_MD+MD_LDT(%eax) /* does the new proc have a custom LDT? */
257 movl _default_ldt,%eax
258 cmpl PCPU(CURRENTLDT),%eax /* skip lldt if already current (reload not visible) */
261 movl %eax,PCPU(CURRENTLDT)
264 /* Load the LDT when it is not the default one. */
265 pushl %edx /* Preserve pointer to pcb. */
266 addl $P_MD,%eax /* Pointer to mdproc is arg. */
269 * Holding dt_lock prevents context switches, so dt_lock cannot
270 * be held now and set_user_ldt() will not deadlock acquiring it.
277 /* This must be done after loading the user LDT. */
278 .globl cpu_switch_load_gs
283 pushl PCPU(CURTHREAD)
288 /* Test if debug registers should be restored. */
289 testl $PCB_DBREGS,PCB_FLAGS(%edx)
293 * Restore debug registers. The special code for dr7 is to
294 * preserve the current values of its reserved bits.
/*
 * NOTE(review): the `movl %eax,%drN` stores paired with these loads are
 * not visible in this extract.
 */
296 movl PCB_DR6(%edx),%eax
298 movl PCB_DR3(%edx),%eax
300 movl PCB_DR2(%edx),%eax
302 movl PCB_DR1(%edx),%eax
304 movl PCB_DR0(%edx),%eax
307 andl $0x0000fc00,%eax /* keep only DR7 reserved bits from current value */
308 movl PCB_DR7(%edx),%ecx
309 andl $~0x0000fc00,%ecx /* drop reserved bits from saved DR7 before merging */
/*
 * Panic message strings for the badsw* error paths (the badsw1..badsw3
 * code and the pushl/call panic sequences are not visible in this
 * extract).  Referenced when cpu_throw/cpu_switch receive a NULL thread.
 */
320 sw0_1: .asciz "cpu_throw: no newthread supplied"
326 sw0_2: .asciz "cpu_switch: no curthread supplied"
332 sw0_3: .asciz "cpu_switch: no newthread supplied"
/*
 * savectx(pcb) — snapshot the current processor state (general registers,
 * control registers, debug registers) into the pcb addressed by %ecx.
 * NOTE(review): this extract is fragmented — the ENTRY(savectx) label and
 * the `movl %crN,%eax` / `movl %drN,%eax` loads that precede each store
 * below are missing from this view; each PCB_CRn/PCB_DRn store presumably
 * follows the matching load — confirm against the full source.
 */
338 * Update pcb, saving current processor state.
344 /* Save caller's return address. Child won't execute this routine. */
346 movl %eax,PCB_EIP(%ecx) /* resume at caller's return address */
349 movl %eax,PCB_CR3(%ecx)
351 movl %ebx,PCB_EBX(%ecx)
352 movl %esp,PCB_ESP(%ecx)
353 movl %ebp,PCB_EBP(%ecx)
354 movl %esi,PCB_ESI(%ecx)
355 movl %edi,PCB_EDI(%ecx)
359 movl %eax,PCB_CR0(%ecx)
361 movl %eax,PCB_CR2(%ecx)
363 movl %eax,PCB_CR4(%ecx)
366 movl %eax,PCB_DR0(%ecx)
368 movl %eax,PCB_DR1(%ecx)
370 movl %eax,PCB_DR2(%ecx)
372 movl %eax,PCB_DR3(%ecx)
374 movl %eax,PCB_DR6(%ecx)
376 movl %eax,PCB_DR7(%ecx)
/*
 * resumectx(pcb) __fastcall — restore processor state from the pcb in
 * %ecx (used on resume from suspend).  Restores segment registers,
 * control registers, descriptor tables, TR, debug registers, and finally
 * the general registers.
 * NOTE(review): this extract is fragmented and also TRUNCATED — the
 * ENTRY(resumectx) label, the `mov %ax,%seg` / `movl %eax,%crN` /
 * `movl %eax,%drN` instructions paired with the loads below, the
 * lgdt/lidt/lldt/ltr reloads, and the tail of the function (after the
 * intersegmental-return comment) are not visible here.
 */
393 * resumectx(pcb) __fastcall
394 * Resuming processor state from pcb.
400 /* Restore segment registers */
401 movzwl PCB_DS(%ecx),%eax
403 movzwl PCB_ES(%ecx),%eax
405 movzwl PCB_FS(%ecx),%eax
407 movzwl PCB_GS(%ecx),%eax
409 movzwl PCB_SS(%ecx),%eax
412 /* Restore CR2, CR4, CR3 and CR0 */
413 movl PCB_CR2(%ecx),%eax
415 movl PCB_CR4(%ecx),%eax
417 movl PCB_CR3(%ecx),%eax
419 movl PCB_CR0(%ecx),%eax
424 /* Restore descriptor tables */
428 #define SDT_SYS386TSS 9
429 #define SDT_SYS386BSY 11
430 /* Clear "task busy" bit and reload TR */
431 movl PCPU(TSS_GDT),%eax /* %eax = this CPU's TSS descriptor in GDT */
432 andb $(~SDT_SYS386BSY | SDT_SYS386TSS),5(%eax) /* busy->available so ltr won't fault */
433 movzwl PCB_TR(%ecx),%eax /* saved task register selector (ltr not visible) */
438 /* Restore debug registers */
439 movl PCB_DR0(%ecx),%eax
441 movl PCB_DR1(%ecx),%eax
443 movl PCB_DR2(%ecx),%eax
445 movl PCB_DR3(%ecx),%eax
447 movl PCB_DR6(%ecx),%eax
449 movl PCB_DR7(%ecx),%eax
452 /* Restore other registers */
453 movl PCB_EDI(%ecx),%edi
454 movl PCB_ESI(%ecx),%esi
455 movl PCB_EBP(%ecx),%ebp
456 movl PCB_ESP(%ecx),%esp
457 movl PCB_EBX(%ecx),%ebx
459 /* reload code selector by turning return into intersegmental return */