2 * Copyright (c) 2014 Andrew Turner
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <machine/asm.h>
29 #include <machine/armreg.h>
30 __FBSDID("$FreeBSD$");
37 * This is limited to 28 instructions as it's placed in the exception vector
38 * slot that is 32 instructions long. We need one for the branch, and three
/*
 * save_registers_head: build the kernel trapframe on exception entry.
 * Allocates TF_SIZE + 16 bytes on the kernel stack, spills all general
 * purpose registers x0-x29 into the TF_X array, and records the
 * exception return state (ELR/SPSR) in the frame.
 *
 * NOTE(review): this is a sampled excerpt -- several original lines
 * (including the .endm and the mrs reads that fill x10-x12) are not
 * visible here.
 */
41 .macro save_registers_head el
/* Reserve the trapframe plus a 16-byte slot for the fp/lr pair below. */
46 sub sp, sp, #(TF_SIZE + 16)
/* Save the old frame pointer and link register above the trapframe. */
47 stp x29, lr, [sp, #(TF_SIZE)]
/* Spill x0-x29 in descending pairs into the TF_X register array. */
48 stp x28, x29, [sp, #(TF_X + 28 * 8)]
49 stp x26, x27, [sp, #(TF_X + 26 * 8)]
50 stp x24, x25, [sp, #(TF_X + 24 * 8)]
51 stp x22, x23, [sp, #(TF_X + 22 * 8)]
52 stp x20, x21, [sp, #(TF_X + 20 * 8)]
53 stp x18, x19, [sp, #(TF_X + 18 * 8)]
54 stp x16, x17, [sp, #(TF_X + 16 * 8)]
55 stp x14, x15, [sp, #(TF_X + 14 * 8)]
56 stp x12, x13, [sp, #(TF_X + 12 * 8)]
57 stp x10, x11, [sp, #(TF_X + 10 * 8)]
58 stp x8, x9, [sp, #(TF_X + 8 * 8)]
59 stp x6, x7, [sp, #(TF_X + 6 * 8)]
60 stp x4, x5, [sp, #(TF_X + 4 * 8)]
61 stp x2, x3, [sp, #(TF_X + 2 * 8)]
62 stp x0, x1, [sp, #(TF_X + 0 * 8)]
/*
 * NOTE(review): x10/x11/x12 presumably hold ELR_ELx/SPSR_ELx/ESR_ELx
 * read by mrs instructions on lines not visible in this excerpt --
 * TODO confirm against the full source.
 */
69 str x10, [sp, #(TF_ELR)]
/* SPSR and ESR are adjacent 32-bit fields, stored as one w-pair. */
70 stp w11, w12, [sp, #(TF_SPSR)]
/*
 * NOTE(review): x18 here appears to hold the pre-trap sp/tpidr value
 * being saved into TF_SP -- the setup is on lines not shown; confirm.
 */
71 stp x18, lr, [sp, #(TF_SP)]
/* Point the frame pointer at the saved fp/lr pair for stack unwinding. */
73 add x29, sp, #(TF_SIZE)
/*
 * save_registers: per-exception-level fixups after the trapframe is
 * built: stack-smashing-protector canary (optional), the SSBD
 * speculation workaround, and unmasking of debug/SError exceptions.
 *
 * NOTE(review): sampled excerpt -- the #endif/.endm and several
 * intervening lines (including the branch between the EL0 and EL1
 * daifclr variants) are not visible here. x18 is the per-CPU data
 * pointer (see the PC_CURTHREAD loads).
 */
76 .macro save_registers el
78 #if defined(PERTHREAD_SSP)
79 /* Load the SSP canary to sp_el0 */
80 ldr x1, [x18, #(PC_CURTHREAD)]
81 add x1, x1, #(TD_MD_CANARY)
85 /* Apply the SSBD (CVE-2018-3639) workaround if needed */
/* PC_SSBD holds a pointer to the workaround routine, if any. */
86 ldr x1, [x18, #PC_SSBD]
92 ldr x0, [x18, #(PC_CURTHREAD)]
95 /* Unmask debug and SError exceptions */
/* EL0 path: clear both the D (debug) and A (SError) mask bits. */
96 msr daifclr, #(DAIF_D | DAIF_A)
99 * Unmask debug and SError exceptions.
100 * For EL1, debug exceptions are conditionally unmasked in
/* EL1 path: only SError is unconditionally unmasked here. */
103 msr daifclr, #(DAIF_A)
/*
 * restore_registers: unwind the trapframe built by save_registers_head
 * and prepare for eret. Masks all exceptions first so the per-CPU
 * pointer (x18) and return state cannot be clobbered by a nested
 * interrupt while being restored.
 *
 * NOTE(review): sampled excerpt -- the msr writes that consume
 * x10/x11 (ELR/SPSR) and the EL0-vs-EL1 conditionals are on lines not
 * visible here; the .endm is also not shown.
 */
107 .macro restore_registers el
109 * Mask all exceptions, x18 may change in the interrupt exception
112 msr daifset, #(DAIF_ALL)
114 ldr x0, [x18, #PC_CURTHREAD]
118 /* Remove the SSBD (CVE-2018-3639) workaround if needed */
119 ldr x1, [x18, #PC_SSBD]
/* Reload the saved stack pointer value and lr from the frame. */
125 ldp x18, lr, [sp, #(TF_SP)]
/* x10/x11 = saved ELR and SPSR; written back to ELR_ELx/SPSR_ELx
 * by msr instructions not visible in this excerpt -- TODO confirm. */
126 ldp x10, x11, [sp, #(TF_ELR)]
/* Restore the caller-saved registers for both EL0 and EL1 returns. */
132 ldp x0, x1, [sp, #(TF_X + 0 * 8)]
133 ldp x2, x3, [sp, #(TF_X + 2 * 8)]
134 ldp x4, x5, [sp, #(TF_X + 4 * 8)]
135 ldp x6, x7, [sp, #(TF_X + 6 * 8)]
136 ldp x8, x9, [sp, #(TF_X + 8 * 8)]
137 ldp x10, x11, [sp, #(TF_X + 10 * 8)]
138 ldp x12, x13, [sp, #(TF_X + 12 * 8)]
139 ldp x14, x15, [sp, #(TF_X + 14 * 8)]
140 ldp x16, x17, [sp, #(TF_X + 16 * 8)]
143 * We only restore the callee saved registers when returning to
144 * userland as they may have been updated by a system call or signal.
/* Userland-return path: full x18-x29 restore. */
146 ldp x18, x19, [sp, #(TF_X + 18 * 8)]
147 ldp x20, x21, [sp, #(TF_X + 20 * 8)]
148 ldp x22, x23, [sp, #(TF_X + 22 * 8)]
149 ldp x24, x25, [sp, #(TF_X + 24 * 8)]
150 ldp x26, x27, [sp, #(TF_X + 26 * 8)]
151 ldp x28, x29, [sp, #(TF_X + 28 * 8)]
/* Kernel-return path: only the frame pointer needs restoring. */
153 ldr x29, [sp, #(TF_X + 29 * 8)]
/* Drop the trapframe and the fp/lr slot allocated on entry. */
156 add sp, sp, #(TF_SIZE + 16)
/*
 * AST (asynchronous system trap) check before returning to userland:
 * with interrupts masked, test curthread's TDF_ASTPENDING and
 * TDF_NEEDRESCHED flags and, if set, call ast() and loop.
 *
 * NOTE(review): sampled excerpt -- the enclosing .macro header, the
 * branch/call instructions, and the loop labels are not visible here.
 */
165 /* Make sure the IRQs are enabled before calling ast() */
169 * Mask interrupts while checking the ast pending flag
/* Masking prevents a race: a new AST posted after the check would
 * otherwise be missed until the next exception. */
171 msr daifset, #(DAIF_INTR)
173 /* Read the current thread flags */
174 ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */
175 ldr x2, [x1, #TD_FLAGS]
177 /* Check if we have either bits set */
/* The >> 8 matches a shifted test of the flag word -- the companion
 * test instruction is on a line not visible here; confirm. */
178 mov x3, #((TDF_ASTPENDING|TDF_NEEDRESCHED) >> 8)
183 /* Restore interrupts */
190 /* Re-check for new ast scheduled */
/*
 * Synchronous exception taken from EL1 with the EL1h (kernel) stack.
 * NOTE(review): the save/restore macro invocations and the call into
 * the C handler are on lines not visible in this excerpt.
 */
195 ENTRY(handle_el1h_sync)
/* x18 = per-CPU data; fetch curthread for the C handler. */
197 ldr x0, [x18, #PC_CURTHREAD]
202 END(handle_el1h_sync)
/*
 * IRQ taken from EL1 (kernel). Body and END() are on lines not
 * visible in this excerpt.
 */
204 ENTRY(handle_el1h_irq)
/*
 * Synchronous exception taken from EL0 (userland): system calls,
 * faults, etc. The comment below explains why far_el1 must be read
 * before any kernel memory access. Body is partially elided in this
 * excerpt (save_registers, the far_el1 read, and the C call are not
 * visible).
 */
212 ENTRY(handle_el0_sync)
214 * Read the fault address early. The current thread structure may
215 * be transiently unmapped if it is part of a memory range being
216 * promoted or demoted to/from a superpage. As this involves a
217 * break-before-make sequence there is a short period of time where
218 * an access will raise an exception. If this happens the fault
219 * address will be changed to the kernel address so a later read of
220 * far_el1 will give the wrong value.
222 * The earliest memory access that could trigger a fault is in a
223 * function called by the save_registers macro so this is the latest
224 * we can read the userspace value.
228 ldr x0, [x18, #PC_CURTHREAD]
/* Publish the trapframe pointer (x1, set on an elided line --
 * presumably the sp-based frame address; confirm) in curthread. */
230 str x1, [x0, #TD_FRAME]
/*
 * IRQ taken from EL0 (userland). Body and END() are on lines not
 * visible in this excerpt.
 */
238 ENTRY(handle_el0_irq)
/*
 * Catch-all for exception vectors that should never fire (e.g. FIQ,
 * EL1t entries). Calls unhandled_exception; the loop label suggests
 * the call never returns by design.
 */
254 ENTRY(handle_empty_exception)
257 1: bl unhandled_exception
259 END(handle_empty_exception)
/*
 * vector: expand one exception-vector slot that saves state with
 * save_registers_head and dispatches to handle_\name.
 * The vempty macro (line 272 fragment below) routes unused slots to
 * the empty_exception handler.
 * NOTE(review): sampled excerpt -- the .endm directives, the branch
 * to the handler, and the vempty macro header are not visible here.
 */
261 .macro vector name, el
263 save_registers_head \el
267 /* Break instruction to ensure we aren't executing code here. */
272 vector empty_exception \el
/*
 * The AArch64 exception vector table (VBAR_EL1 target): four groups of
 * four slots -- current EL with SP0 (EL1t), current EL with SPx (EL1h),
 * lower EL using AArch64, lower EL using AArch32. Unused slots (FIQ,
 * all EL1t entries) go to the empty handler via vempty.
 * NOTE(review): the .align directive required for the 2 KiB table
 * alignment is on a line not visible in this excerpt.
 */
276 .globl exception_vectors
/* Group 1: current EL, SP_EL0 (EL1t) -- never used by FreeBSD. */
278 vempty 1 /* Synchronous EL1t */
279 vempty 1 /* IRQ EL1t */
280 vempty 1 /* FIQ EL1t */
281 vempty 1 /* Error EL1t */
/* Group 2: current EL, SP_ELx (EL1h) -- kernel-mode exceptions. */
283 vector el1h_sync 1 /* Synchronous EL1h */
284 vector el1h_irq 1 /* IRQ EL1h */
285 vempty 1 /* FIQ EL1h */
286 vector serror 1 /* Error EL1h */
/* Group 3: lower EL, AArch64 -- 64-bit userland. */
288 vector el0_sync 0 /* Synchronous 64-bit EL0 */
289 vector el0_irq 0 /* IRQ 64-bit EL0 */
290 vempty 0 /* FIQ 64-bit EL0 */
291 vector serror 0 /* Error 64-bit EL0 */
/* Group 4: lower EL, AArch32 -- 32-bit userland shares the handlers. */
293 vector el0_sync 0 /* Synchronous 32-bit EL0 */
294 vector el0_irq 0 /* IRQ 32-bit EL0 */
295 vempty 0 /* FIQ 32-bit EL0 */
296 vector serror 0 /* Error 32-bit EL0 */