2 * Copyright (c) 1990 The Regents of the University of California.
5 * This code is derived from software contributed to Berkeley by
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include "opt_sched.h"
37 #include <machine/asmacros.h>
/*
 * NOTE(review): this extract is missing lines (the original line numbers in
 * the margin jump), so the spin-loop branch and the #else of the conditional
 * below are not visible here — consult the full file before editing.
 */
41 #if defined(SMP) && defined(SCHED_ULE)
/*
 * BLOCK_SPIN(reg): wait for the thread lock at TD_LOCK(reg) to be released,
 * then claim it by installing $blocked_lock with a cmpxchg.  Only needed on
 * SMP kernels running the ULE scheduler, where another CPU may still hold
 * the outgoing thread's lock during migration.  (The lock prefix and the
 * retry jump presumably sit on the lines elided from this extract — confirm.)
 */
43 #define BLOCK_SPIN(reg) \
44 movl $blocked_lock,%eax ; \
47 cmpxchgl %eax,TD_LOCK(reg) ; \
54 #define BLOCK_SPIN(reg)
/* The second definition above is the empty UP / non-ULE variant (its #else
 * guard is on an elided line). */
57 /*****************************************************************************/
59 /*****************************************************************************/
/*
 * Tail of cpu_throw(old, new): switch away from a thread whose register
 * state no longer matters.  NOTE(review): the ENTRY() label, the jumps for
 * the null-thread/null-pmap cases, and the %cr3 reload are on lines elided
 * from this extract; original line numbers in the margin jump.
 */
66 * This is the second half of cpu_switch(). It is used when the current
67 * thread is either a dummy or slated to die, and we no longer care
68 * about its state. This is only a slight optimization and is probably
69 * not worth it anymore. Note that we need to clear the pm_active bits so
70 * we do need the old proc if it still exists.
/* %esi = this CPU's id; used below as the bit index into pm_active. */
76 movl PCPU(CPUID), %esi
77 movl 4(%esp),%ecx /* Old thread */
78 testl %ecx,%ecx /* no thread? */
/* (conditional jump for the no-old-thread case is on an elided line) */
80 /* release bit from old pm_active */
81 movl PCPU(CURPMAP), %ebx
/* Atomically clear this CPU's bit in the old pmap's active-CPU mask. */
85 btrl %esi, PM_ACTIVE(%ebx) /* clear old */
87 movl 8(%esp),%ecx /* New thread */
/* %edx = new thread's pcb; used by the (elided) state-load code below. */
88 movl TD_PCB(%ecx),%edx
89 /* set bit in new pm_active */
90 movl TD_PROC(%ecx),%eax
/*
 * NOTE(review): %ebx gets the new proc's vmspace here; the adjustment to
 * the embedded pmap (vmspace offset add) appears to be on an elided line —
 * confirm against the full source before relying on %ebx's exact value.
 */
91 movl P_VMSPACE(%eax), %ebx
/* Publish the new pmap as this CPU's current one, then mark it active. */
93 movl %ebx, PCPU(CURPMAP)
97 btsl %esi, PM_ACTIVE(%ebx) /* set new */
102 * cpu_switch(old, new)
/* NOTE(review): the switch-in half below also reads a third argument at
 * 12(%esp) (the "new lock"), so the real signature is presumably
 * cpu_switch(old, new, mtx) — confirm against the C prototype. */
104 * Save the current thread state, then select the next thread to run
105 * and load its state.
113 /* Switch to new thread. First, save context. */
/* NOTE(review): the load of the old thread into %ecx (from 4(%esp) or
 * PCPU(CURTHREAD)) is on an elided line; %ecx must hold it here. */
117 testl %ecx,%ecx /* no thread? */
118 jz badsw2 /* no, panic */
/* %edx = old thread's pcb: the save area for everything below. */
121 movl TD_PCB(%ecx),%edx
/* Return address on top of the stack becomes the resume %eip. */
123 movl (%esp),%eax /* Hardware registers */
124 movl %eax,PCB_EIP(%edx)
/* Save the callee-saved registers; caller-saved ones need no preservation
 * across a switch. */
125 movl %ebx,PCB_EBX(%edx)
126 movl %esp,PCB_ESP(%edx)
127 movl %ebp,PCB_EBP(%edx)
128 movl %esi,PCB_ESI(%edx)
129 movl %edi,PCB_EDI(%edx)
131 /* Test if debug registers should be saved. */
132 testl $PCB_DBREGS,PCB_FLAGS(%edx)
133 jz 1f /* no, skip over */
134 movl %dr7,%eax /* yes, do the save */
135 movl %eax,PCB_DR7(%edx)
136 andl $0x0000fc00, %eax /* disable all watchpoints */
/* NOTE(review): the writeback of %eax to %dr7 and the %dr6..%dr0 -> %eax
 * loads between the stores below are on elided lines; each store presumably
 * follows a "movl %drN,%eax" — confirm against the full source. */
139 movl %eax,PCB_DR6(%edx)
141 movl %eax,PCB_DR3(%edx)
143 movl %eax,PCB_DR2(%edx)
145 movl %eax,PCB_DR1(%edx)
147 movl %eax,PCB_DR0(%edx)
150 /* have we used fp, and need a save? */
/* Only the FPU owner needs its state flushed; the skip branch is elided. */
151 cmpl %ecx,PCPU(FPCURTHREAD)
153 pushl PCB_SAVEFPU(%edx) /* h/w bugs make saving complicated */
154 call npxsave /* do it in a big C function */
/* (stack cleanup after the call is on an elided line) */
158 /* Save is done. Now fire up new thread. */
160 movl 8(%esp),%ecx /* New thread */
161 movl 12(%esp),%esi /* New lock */
163 testl %ecx,%ecx /* no thread? */
164 jz badsw3 /* no, panic */
/* %edx = new thread's pcb for the remainder of the switch-in. */
166 movl TD_PCB(%ecx),%edx
168 /* Switchout td_lock */
/* NOTE(review): lines between here and SETOP are elided; presumably the
 * new lock (%esi) moves into %eax and the old thread into %edi before the
 * handoff below — confirm.  %esi is then reused for the CPU id. */
170 movl PCPU(CPUID),%esi
/* Hand the old thread's td_lock over (SETOP is the SMP/UP store macro). */
171 SETOP %eax,TD_LOCK(%edi)
173 /* Release bit from old pmap->pm_active */
174 movl PCPU(CURPMAP), %ebx
178 btrl %esi, PM_ACTIVE(%ebx) /* clear old */
180 /* Set bit in new pmap->pm_active */
181 movl TD_PROC(%ecx),%eax /* newproc */
/* NOTE(review): as in cpu_throw, the vmspace->pmap offset adjustment is on
 * an elided line — confirm %ebx points at the pmap, not the vmspace. */
182 movl P_VMSPACE(%eax), %ebx
184 movl %ebx, PCPU(CURPMAP)
188 btsl %esi, PM_ACTIVE(%ebx) /* set new */
/* (the %cr3 reload that activates the new address space is elided) */
192 * At this point, we have managed thread locks and are ready
193 * to load up the rest of the next context.
196 /* Load a pointer to the thread kernel stack into PCPU. */
197 leal -VM86_STACK_SPACE(%edx), %eax /* leave space for vm86 */
198 movl %eax, PCPU(KESP0)
/* Threads with a pcb extension (e.g. ones using their own TSS) get a
 * private TSS descriptor; everyone else shares the common one. */
200 cmpl $0, PCB_EXT(%edx) /* has pcb extension? */
201 je 1f /* If not, use the default */
202 movl $1, PCPU(PRIVATE_TSS) /* mark use of private tss */
203 movl PCB_EXT(%edx), %edi /* new tss descriptor */
/* Point the private TSS's ring-0 stack at the trampoline stack. */
204 movl PCPU(TRAMPSTK), %ebx
205 movl %ebx, PCB_EXT_TSS+TSS_ESP0(%edi)
206 jmp 2f /* Load it up */
209 * Use the common default TSS instead of our own.
210 * Stack pointer in the common TSS points to the trampoline stack
211 * already and should be not changed.
213 * Test this CPU's flag to see if this CPU was using a private TSS.
215 cmpl $0, PCPU(PRIVATE_TSS) /* Already using the common? */
216 je 3f /* if so, skip reloading */
217 movl $0, PCPU(PRIVATE_TSS)
218 PCPU_ADDR(COMMON_TSSD, %edi)
220 /* Move correct tss descriptor into GDT slot, then reload tr. */
221 movl PCPU(TSS_GDT), %ebx /* entry in GDT */
/* NOTE(review): the descriptor copy into the GDT slot and the ltr %si are
 * on elided lines following the selector load below — confirm. */
226 movl $GPROC0_SEL*8, %esi /* GSEL(GPROC0_SEL, SEL_KPL) */
230 /* Copy the %fs and %gs selectors into this pcpu gdt */
231 leal PCB_FSD(%edx), %esi
232 movl PCPU(FSGS_GDT), %edi
/* Each descriptor is 8 bytes; the stores of %eax into the gdt slots are on
 * elided lines between these two loads. */
233 movl 0(%esi), %eax /* %fs selector */
237 movl 8(%esi), %eax /* %gs selector, comes straight after */
242 /* Restore context. */
/* Reload the callee-saved registers from the new pcb.  After the %esp load
 * we are running on the new thread's stack. */
243 movl PCB_EBX(%edx),%ebx
244 movl PCB_ESP(%edx),%esp
245 movl PCB_EBP(%edx),%ebp
246 movl PCB_ESI(%edx),%esi
247 movl PCB_EDI(%edx),%edi
/* %eax = saved %eip; presumably pushed as the return address on an elided
 * line so the final ret resumes the new thread — confirm. */
248 movl PCB_EIP(%edx),%eax
251 movl %edx, PCPU(CURPCB)
252 movl %ecx, PCPU(CURTHREAD) /* into next thread */
255 * Determine the LDT to use and load it if is the default one and
256 * that is not the current one.
258 movl TD_PROC(%ecx),%eax
/* Processes with a private LDT (P_MD+MD_LDT != 0) take the slow path via
 * set_user_ldt(); the branch itself is on an elided line. */
259 cmpl $0,P_MD+MD_LDT(%eax)
261 movl _default_ldt,%eax
/* Skip the lldt when the default LDT is already loaded (branch elided). */
262 cmpl PCPU(CURRENTLDT),%eax
265 movl %eax,PCPU(CURRENTLDT)
268 /* Load the LDT when it is not the default one. */
269 pushl %edx /* Preserve pointer to pcb. */
270 addl $P_MD,%eax /* Pointer to mdproc is arg. */
/* (push of the arg, call set_user_ldt, and pop of %edx are elided) */
273 * Holding dt_lock prevents context switches, so dt_lock cannot
274 * be held now and set_user_ldt() will not deadlock acquiring it.
281 /* This must be done after loading the user LDT. */
/* cpu_switch_load_gs is exported so the %gs fault handler can recognise
 * and recover from a bad user %gs value loaded here (mov to %gs elided). */
282 .globl cpu_switch_load_gs
287 pushl PCPU(CURTHREAD)
/* (the call made with curthread as argument is on an elided line) */
292 /* Test if debug registers should be restored. */
293 testl $PCB_DBREGS,PCB_FLAGS(%edx)
297 * Restore debug registers. The special code for dr7 is to
298 * preserve the current values of its reserved bits.
/* NOTE(review): the %eax -> %drN writebacks between the loads below are on
 * elided lines; each load presumably pairs with "movl %eax,%drN". */
300 movl PCB_DR6(%edx),%eax
302 movl PCB_DR3(%edx),%eax
304 movl PCB_DR2(%edx),%eax
306 movl PCB_DR1(%edx),%eax
308 movl PCB_DR0(%edx),%eax
/* Merge the saved dr7 control bits into the live reserved bits:
 * keep bits 10-15 of the current dr7, take the rest from the pcb. */
311 andl $0x0000fc00,%eax
312 movl PCB_DR7(%edx),%ecx
313 andl $~0x0000fc00,%ecx
/* Panic messages for the badsw1/badsw2/badsw3 error paths (the panic call
 * sequences themselves are on elided lines). */
324 sw0_1: .asciz "cpu_throw: no newthread supplied"
330 sw0_2: .asciz "cpu_switch: no curthread supplied"
336 sw0_3: .asciz "cpu_switch: no newthread supplied"
/*
 * savectx(pcb): snapshot the full processor state (registers, control
 * registers, debug registers) into the pcb at %ecx, for fork/suspend.
 * NOTE(review): the ENTRY() label, the pcb load into %ecx, and the
 * register -> %eax moves between the stores are on elided lines.
 */
342 * Update pcb, saving current processor state.
348 /* Save caller's return address. Child won't execute this routine. */
350 movl %eax,PCB_EIP(%ecx)
/* %cr3 (page directory base) is saved so resume can restore the address
 * space; the movl %cr3,%eax is on an elided line. */
353 movl %eax,PCB_CR3(%ecx)
/* Callee-saved register set, same layout cpu_switch uses. */
355 movl %ebx,PCB_EBX(%ecx)
356 movl %esp,PCB_ESP(%ecx)
357 movl %ebp,PCB_EBP(%ecx)
358 movl %esi,PCB_ESI(%ecx)
359 movl %edi,PCB_EDI(%ecx)
/* Control registers; each store presumably follows a "movl %crN,%eax" on
 * an elided line — confirm against the full source. */
363 movl %eax,PCB_CR0(%ecx)
365 movl %eax,PCB_CR2(%ecx)
367 movl %eax,PCB_CR4(%ecx)
/* Debug registers, likewise loaded into %eax on elided lines. */
370 movl %eax,PCB_DR0(%ecx)
372 movl %eax,PCB_DR1(%ecx)
374 movl %eax,PCB_DR2(%ecx)
376 movl %eax,PCB_DR3(%ecx)
378 movl %eax,PCB_DR6(%ecx)
380 movl %eax,PCB_DR7(%ecx)
397 * resumectx(pcb) __fastcall
398 * Resuming processor state from pcb.
/* __fastcall: the pcb pointer arrives in %ecx.  NOTE(review): the ENTRY()
 * label and the mov-to-segment-register / mov-to-%crN / mov-to-%drN
 * instructions between the %eax loads below are on elided lines. */
404 /* Restore segment registers */
/* movzwl: selectors are 16-bit in the pcb but moved via a 32-bit reg. */
405 movzwl PCB_DS(%ecx),%eax
407 movzwl PCB_ES(%ecx),%eax
409 movzwl PCB_FS(%ecx),%eax
411 movzwl PCB_GS(%ecx),%eax
413 movzwl PCB_SS(%ecx),%eax
416 /* Restore CR2, CR4, CR3 and CR0 */
/* Order matters: CR3 (page tables) must be live before CR0 re-enables
 * paging-related bits; each load pairs with an elided mov to %crN. */
417 movl PCB_CR2(%ecx),%eax
419 movl PCB_CR4(%ecx),%eax
421 movl PCB_CR3(%ecx),%eax
423 movl PCB_CR0(%ecx),%eax
428 /* Restore descriptor tables */
/* (lgdt/lidt/lldt instructions are on elided lines) */
432 #define SDT_SYS386TSS 9
433 #define SDT_SYS386BSY 11
434 /* Clear "task busy" bit and reload TR */
/* ltr faults on a busy TSS descriptor, so byte 5 (the type field) of the
 * GDT entry is rewritten from SYS386BSY back to SYS386TSS first. */
435 movl PCPU(TSS_GDT),%eax
436 andb $(~SDT_SYS386BSY | SDT_SYS386TSS),5(%eax)
437 movzwl PCB_TR(%ecx),%eax
/* (the ltr itself is on an elided line) */
442 /* Restore debug registers */
/* Each load pairs with an elided "movl %eax,%drN". */
443 movl PCB_DR0(%ecx),%eax
445 movl PCB_DR1(%ecx),%eax
447 movl PCB_DR2(%ecx),%eax
449 movl PCB_DR3(%ecx),%eax
451 movl PCB_DR6(%ecx),%eax
453 movl PCB_DR7(%ecx),%eax
456 /* Restore other registers */
457 movl PCB_EDI(%ecx),%edi
458 movl PCB_ESI(%ecx),%esi
459 movl PCB_EBP(%ecx),%ebp
/* After this %esp load we run on the saved stack. */
460 movl PCB_ESP(%ecx),%esp
461 movl PCB_EBX(%ecx),%ebx
463 /* reload code selector by turning return into intersegmental return */