1 /* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
4 * Copyright 2003 Wasabi Systems, Inc.
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
38 * Copyright (c) 1994-1998 Mark Brinicombe.
39 * Copyright (c) 1994 Brini.
40 * All rights reserved.
42 * This code is derived from software written for Brini by Mark Brinicombe
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. All advertising materials mentioning features or use of this software
53 * must display the following acknowledgement:
54 * This product includes software developed by Brini.
55 * 4. The name of the company nor the name of the author may be used to
56 * endorse or promote products derived from this software without specific
57 * prior written permission.
59 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
60 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
61 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
63 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
64 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
65 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * RiscBSD kernel project
75 * cpu switching functions
82 #include "opt_sched.h"
84 #include <machine/asm.h>
85 #include <machine/asmacros.h>
86 #include <machine/armreg.h>
87 #include <machine/sysreg.h>
88 #include <machine/vfp.h>
90 __FBSDID("$FreeBSD$");
/*
 * GET_PCPU(tmp, tmp2): leave a pointer to the current CPU's pcpu
 * structure in tmp2 (callers below use it as "r8 = current pcpu").
 * Two #define variants appear back to back; in the full file they are
 * selected by preprocessor conditionals (SMP vs. UP) whose #if/#else/
 * #endif lines are NOT visible in this extract.  The first variant
 * reads the MPIDR register to derive a per-CPU index into __pcpu[];
 * the second loads the single UP pcpu pointer directly.
 * NOTE(review): intervening lines are missing here (the numeric
 * prefixes jump 94->96->98->102) — confirm against the full source
 * before modifying; do not insert lines between the backslash
 * continuations.
 */
93 #define GET_PCPU(tmp, tmp2) \
94 mrc CP15_MPIDR(tmp); \
96 ldr tmp2, .Lcurpcpu+4; \
98 ldr tmp2, .Lcurpcpu; \
102 #define GET_PCPU(tmp, tmp2) \
107 .fpu vfp /* allow VFP instructions */
/*
 * Literal pool entries: the .Lcurpcpu / .Lblocked_lock labels that
 * name these words are among the lines missing from this extract.
 */
111 .word _C_LABEL(__pcpu)
114 .word _C_LABEL(blocked_lock)
/*
 * cpu_context_switch(ttb)
 *
 * In:  r0 = new translation table base (TTBR0 value) — see the
 *      "switch to new TTB" store below.
 * Uses r1, r2 as scratch; reads r8 (current pcpu, established by the
 * caller via GET_PCPU) for the branch-predictor-hardening kind.
 *
 * Switches TTBR0 via the kernel pmap's table as an intermediate step
 * and invalidates non-global TLB entries and the branch predictor.
 * NOTE(review): this extract is missing interior lines (barriers such
 * as dsb/isb, the opening comment delimiters, and likely the final
 * return) — the numeric prefixes jump 128->130->132->134 etc.; consult
 * the full file before any code change.
 */
116 ENTRY(cpu_context_switch)
119 * We can directly switch between translation tables only when the
120 * size of the mapping for any given virtual address is the same
121 * in the old and new translation tables.
122 * Thus, we must switch to kernel pmap translation table as
123 * intermediate mapping because all sizes of these mappings are same
124 * (or unmapped). The same is true for switch from kernel pmap
125 * translation table to new pmap one.
127 mov r2, #(CPU_ASID_KERNEL)
128 ldr r1, =(_C_LABEL(pmap_kern_ttb))
130 mcr CP15_TTBR0(r1) /* switch to kernel TTB */
132 mcr CP15_TLBIASID(r2) /* flush not global TLBs */
134 mcr CP15_TTBR0(r0) /* switch to new TTB */
137 * We must flush not global TLBs again because PT2MAP mapping
140 mcr CP15_TLBIASID(r2) /* flush not global TLBs */
142 * Flush entire Branch Target Cache because of the branch predictor
143 * is not architecturally invisible. See ARM Architecture Reference
144 * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
145 * predictors and Requirements for branch predictor maintenance
146 * operations sections.
149 * Additionally, to mitigate mistrained branch predictor attack
150 * we must invalidate it on affected CPUs. Unfortunately, BPIALL
151 * is effectively NOP on Cortex-A15 so it needs special treatment.
/* r8 = current pcpu — presumably set up by the caller; TODO confirm. */
153 ldr r0, [r8, #PC_BP_HARDEN_KIND]
154 cmp r0, #PCPU_BP_HARDEN_KIND_ICIALLU
155 mcrne CP15_BPIALL /* Flush entire Branch Target Cache */
156 mcreq CP15_ICIALLU /* This is the only way how to flush */
157 /* Branch Target Cache on Cortex-A15. */
160 END(cpu_context_switch)
163 * cpu_throw(oldtd, newtd)
165 * Remove current thread state, then select the next thread to run
166 * and load its state.
/*
 * In:  r0 = oldtd (may be NULL — see the "old thread?" test),
 *      r1 = newtd (NULL panics via badsw1).
 * NOTE(review): the ENTRY(cpu_throw) marker, the numeric local labels
 * (1:, 2:), the bitmap load/clear/set instructions around the pm_active
 * updates, and the matching #endif for the VFP guard are among the
 * lines missing from this extract (numeric prefixes jump e.g.
 * 200->215, 261->274) — verify against the full file.
 */
171 mov r10, r0 /* r10 = oldtd */
172 mov r11, r1 /* r11 = newtd */
174 #ifdef VFP /* This thread is dying, disable */
175 bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
177 GET_PCPU(r8, r9) /* r8 = current pcpu */
178 ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */
180 cmp r10, #0 /* old thread? */
181 beq 2f /* no, skip */
183 /* Remove this CPU from the active list. */
184 ldr r5, [r8, #PC_CURPMAP]
186 add r5, r0 /* r5 = old pm_active */
188 /* Compute position and mask. */
/* Two variants follow; the full file selects by bitmap word size. */
192 add r5, r0 /* r5 = position in old pm_active */
195 lsl r2, r0 /* r2 = mask */
198 lsl r2, r4 /* r2 = mask */
200 /* Clear cpu from old active list. */
/* Local label 2: (skip target from above) is missing from this extract. */
215 cmp r11, #0 /* new thread? */
216 beq badsw1 /* no, panic */
218 ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */
221 * Registers at this point
222 * r4 = current cpu id
228 /* MMU switch to new thread. */
229 ldr r0, [r7, #(PCB_PAGEDIR)]
231 cmp r0, #0 /* new thread? */
232 beq badsw4 /* no, panic */
234 bl _C_LABEL(cpu_context_switch)
237 * Set new PMAP as current one.
238 * Insert cpu to new active list.
241 ldr r6, [r11, #(TD_PROC)] /* newtd->proc */
242 ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */
243 add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
244 str r6, [r8, #PC_CURPMAP] /* store to curpmap */
247 add r6, r0 /* r6 = new pm_active */
249 /* compute position and mask */
253 add r6, r0 /* r6 = position in new pm_active */
256 lsl r2, r0 /* r2 = mask */
259 lsl r2, r4 /* r2 = mask */
261 /* Set cpu to new active list. */
274 * Registers at this point.
278 * They must match the ones in sw1 position !!!
281 b sw1 /* share new thread init with cpu_switch() */
285 * cpu_switch(oldtd, newtd, lock)
287 * Save the current thread state, then select the next thread to run
288 * and load its state.
291 * r2 = lock (new lock for old thread)
/*
 * In:  r0 = oldtd (NULL panics via badsw2), r1 = newtd (NULL panics
 *      via badsw3), r2 = mutex to hand the old thread.
 * NOTE(review): the ENTRY(cpu_switch) marker, the sw0:/sw1: labels,
 * the blocked_lock spin loop body, the final bx/ret and END() are
 * among the lines missing from this extract (numeric prefixes jump
 * e.g. 385->397, 421->432) — verify against the full file before
 * changing code.
 */
294 /* Interrupts are disabled. */
296 cmp r0, #0 /* old thread? */
297 beq badsw2 /* no, panic */
299 /* Save all the registers in the old thread's pcb. */
300 ldr r3, [r0, #(TD_PCB)]
/* Callee context lives at the start of the pcb: r4-r12, sp, lr, pc. */
302 stmia r3, {r4-r12, sp, lr, pc}
303 mrc CP15_TPIDRURW(r4)
304 str r4, [r3, #(PCB_TPIDRURW - PCB_R4)]
307 cmp r1, #0 /* new thread? */
308 beq badsw3 /* no, panic */
311 * Save arguments. Note that we can now use r0-r14 until
312 * it is time to restore them for the new thread. However,
313 * some registers are not safe over function call.
315 mov r9, r2 /* r9 = lock */
316 mov r10, r0 /* r10 = oldtd */
317 mov r11, r1 /* r11 = newtd */
319 GET_PCPU(r8, r3) /* r8 = current PCPU */
320 ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */
/* Save outgoing thread's VFP state (guard lines not in this extract). */
325 ldr r3, [r10, #(TD_PCB)]
328 bl _C_LABEL(vfp_save_state)
332 * MMU switch. If we're switching to a thread with the same
333 * address space as the outgoing one, we can skip the MMU switch.
335 mrc CP15_TTBR0(r1) /* r1 = old TTB */
336 ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
337 cmp r0, r1 /* Switching to the TTB? */
338 beq sw0 /* same TTB, skip */
341 cmp r0, #0 /* new thread? */
342 beq badsw4 /* no, panic */
345 bl cpu_context_switch /* new TTB as argument */
348 * Registers at this point
357 * Set new PMAP as current one.
358 * Update active list on PMAPs.
360 ldr r6, [r11, #TD_PROC] /* newtd->proc */
361 ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */
362 add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
364 ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */
365 str r6, [r8, #PC_CURPMAP] /* and save new one */
368 add r5, r0 /* r5 = old pm_active */
369 add r6, r0 /* r6 = new pm_active */
371 /* Compute position and mask. */
372 ldr r4, [r8, #PC_CPUID]
/* Two bitmap-width variants follow, as in cpu_throw above. */
376 add r5, r0 /* r5 = position in old pm_active */
377 add r6, r0 /* r6 = position in new pm_active */
380 lsl r2, r0 /* r2 = mask */
383 lsl r2, r4 /* r2 = mask */
385 /* Clear cpu from old active list. */
397 /* Set cpu to new active list. */
412 * Registers at this point
/* sw0: label (same-TTB fast path target) is missing from this extract. */
420 /* Change the old thread lock. */
421 add r5, r10, #TD_LOCK
/* sw1: label (shared with cpu_throw's "b sw1") is missing here. */
432 * Registers at this point
438 #if defined(SMP) && defined(SCHED_ULE)
440 * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
441 * QQQ: What does it mean in reality and why is it done?
/* Spin until newtd->td_lock is no longer blocked_lock (loop body missing). */
443 ldr r6, =blocked_lock
445 ldr r3, [r11, #TD_LOCK] /* atomic write regular read */
450 /* We have a new curthread now so make a note it */
451 str r11, [r8, #PC_CURTHREAD]
452 mcr CP15_TPIDRPRW(r11)
454 /* store pcb in per cpu structure */
455 str r7, [r8, #PC_CURPCB]
458 * Restore all saved registers and return. Note that some saved
459 * registers can be changed when either cpu_fork(), cpu_copy_thread(),
460 * cpu_fork_kthread_handler(), or makectx() was called.
462 * The value of TPIDRURW is also written into TPIDRURO, as
463 * userspace still uses TPIDRURO, modifying it through
464 * sysarch(ARM_SET_TP, addr).
466 ldr r3, [r7, #PCB_TPIDRURW]
467 mcr CP15_TPIDRURW(r3) /* write tls thread reg 2 */
468 mcr CP15_TPIDRURO(r3) /* write tls thread reg 3 */
/* Load pc from the new pcb: returns into the new thread's context.
 * NOTE(review): the instruction that points r3 at the new pcb before
 * this ldmia is missing from this extract (prefix jumps 468->470). */
470 ldmia r3, {r4-r12, sp, pc}
/*
 * Panic stubs for the badsw1..badsw4 failure branches above.
 * NOTE(review): the badswN: labels and the "bl panic" (and any
 * trailing trap) lines are missing from this extract — only the
 * argument loads and the message strings survive.  The string
 * literals are runtime data and must not be edited.
 */
474 ldr r0, =sw1_panic_str
480 ldr r0, =sw2_panic_str
486 ldr r0, =sw3_panic_str
492 ldr r0, =sw4_panic_str
/* Panic message strings (labels sw1_panic_str etc. missing here). */
498 .asciz "cpu_throw: no newthread supplied.\n"
500 .asciz "cpu_switch: no curthread supplied.\n"
502 .asciz "cpu_switch: no newthread supplied.\n"
504 .asciz "cpu_switch: new pagedir is NULL.\n"