//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//

#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"
.macro SAVE_REGISTERS
subq $240, %rsp
CFI_DEF_CFA_OFFSET(248)
movupd %xmm0, 216(%rsp)
movupd %xmm1, 200(%rsp)
movupd %xmm2, 184(%rsp)
movupd %xmm3, 168(%rsp)
movupd %xmm4, 152(%rsp)
movupd %xmm5, 136(%rsp)
movupd %xmm6, 120(%rsp)
movupd %xmm7, 104(%rsp)
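// The argument-passing GPRs are stashed below the XMM area so a handler
// call cannot clobber the instrumented function's arguments. The exact
// slot offsets here are an assumption consistent with the 240-byte frame
// reserved above.
movq %rdi, 96(%rsp)
movq %rax, 88(%rsp)
movq %rdx, 80(%rsp)
movq %rsi, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
.endm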
.macro RESTORE_REGISTERS
movupd 216(%rsp), %xmm0
movupd 200(%rsp), %xmm1
movupd 184(%rsp), %xmm2
movupd 168(%rsp), %xmm3
movupd 152(%rsp), %xmm4
movupd 136(%rsp), %xmm5
movupd 120(%rsp), %xmm6
movupd 104(%rsp), %xmm7
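// Mirror of the (assumed) GPR stashes in SAVE_REGISTERS, followed by
// releasing the frame reserved there.
movq 96(%rsp), %rdi
movq 88(%rsp), %rax
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
movq 64(%rsp), %rcx
movq 56(%rsp), %r8
movq 48(%rsp), %r9
addq $240, %rsp
CFI_DEF_CFA_OFFSET(8)
.endm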
.macro ALIGNED_CALL_RAX
// Call the logging handler, after aligning the stack to a 16-byte boundary.
// The approach we're taking here uses additional stack space to stash the
// stack pointer twice before aligning it to 16 bytes. If the stack was
// 8-byte aligned, it will become 16-byte aligned -- when restoring the
// pointer, we can always look 8 bytes above the current position to find
// either of the values we stashed in the first place.
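// A minimal body implementing the trick described above (a reconstruction;
// the exact upstream sequence may differ). Both pushes leave a copy of the
// original %rsp on the stack, so after the alignment one copy always sits
// 8 bytes above the new stack pointer.
pushq %rsp
pushq (%rsp)
andq $-0x10, %rsp
callq *%rax
movq 8(%rsp), %rsp
.endm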
#if !defined(__APPLE__)
.section .text
.file "xray_trampoline_x86.S"
#else
.section __TEXT,__text
#endif
//===----------------------------------------------------------------------===//

.globl ASM_SYMBOL(__xray_FunctionEntry)
ASM_HIDDEN(__xray_FunctionEntry)
ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
// This load has to be atomic; it's concurrent with __xray_patch().
// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax

// The patched function prologue puts its xray_instr_map index into %r10d.
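// Sketch of the dispatch that follows (label name and exact sequence are
// assumptions): skip the handler when none is installed, otherwise pass the
// function ID and XRayEntryType::ENTRY (0) as the two handler arguments.
//   testq %rax, %rax
//   je .LentryCleanup        // hypothetical cleanup label
//   movl %r10d, %edi         // function ID -> first argument
//   xorl %esi, %esi          // XRayEntryType::ENTRY -> second argument
//   ALIGNED_CALL_RAX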
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionEntry)
//===----------------------------------------------------------------------===//

.globl ASM_SYMBOL(__xray_FunctionExit)
ASM_HIDDEN(__xray_FunctionExit)
ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
// Save the important registers first. Since we're assuming that this
// function is only jumped into, we only preserve the registers used
// for returning.
subq $56, %rsp
CFI_DEF_CFA_OFFSET(64)
movupd %xmm0, 32(%rsp)
movupd %xmm1, 16(%rsp)
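// Note: per the SysV x86-64 ABI, %rax/%rdx carry the integer return values
// and %xmm0/%xmm1 the floating-point ones; these are the only registers
// that must survive a handler call made on the return path.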
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
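// Sketch of the guarded call (assumed): as in the entry trampoline, but
// passing XRayEntryType::EXIT (1) as the second argument.
//   testq %rax, %rax
//   je .LexitCleanup         // hypothetical cleanup label
//   movl %r10d, %edi
//   movl $1, %esi
//   ALIGNED_CALL_RAX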
// Restore the important registers.
movupd 32(%rsp), %xmm0
movupd 16(%rsp), %xmm1
addq $56, %rsp
CFI_DEF_CFA_OFFSET(8)
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionExit)
//===----------------------------------------------------------------------===//

.globl ASM_SYMBOL(__xray_FunctionTailExit)
ASM_HIDDEN(__xray_FunctionTailExit)
ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
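// Sketch of the guarded call (assumed): tail exits report
// XRayEntryType::TAIL (2) as the second argument so the runtime can
// distinguish them from ordinary exits.
//   testq %rax, %rax
//   je .LtailCleanup         // hypothetical cleanup label
//   movl %r10d, %edi
//   movl $2, %esi
//   ALIGNED_CALL_RAX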
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionTailExit)
//===----------------------------------------------------------------------===//

.globl ASM_SYMBOL(__xray_ArgLoggerEntry)
ASM_HIDDEN(__xray_ArgLoggerEntry)
ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
// Again, these function pointer loads must be atomic; MOV is fine.
movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax

// If the [arg1 logging handler] is not set, defer to no-arg logging.
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
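// The guards around these two loads are expected to look like this
// (labels are hypothetical):
//   testq %rax, %rax         // after the first load
//   jne .Larg1entryLog       // arg-aware handler installed, use it
//   testq %rax, %rax         // after the fallback load
//   je .Larg1entryFail       // neither handler installed, skip the call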
// The first argument will become the third.
movq %rdi, %rdx
// XRayEntryType::LOG_ARGS_ENTRY becomes the second.
movl $0x3, %esi
// The 32-bit function ID becomes the first.
movl %r10d, %edi
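// Net effect (sketch): after these moves, the guarded call
//   ALIGNED_CALL_RAX
// invokes handler(FuncId, XRayEntryType::LOG_ARGS_ENTRY, Arg1), matching
// the void(int32_t, XRayEntryType, uint64_t) shape used for arg1 handlers.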
# LLVM-MCA-END
ASM_SIZE(__xray_ArgLoggerEntry)
//===----------------------------------------------------------------------===//

.global ASM_SYMBOL(__xray_CustomEvent)
ASM_HIDDEN(__xray_CustomEvent)
ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
// We take two arguments to this trampoline, which should be in %rdi and
// %rsi already.
movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
testq %rax, %rax
je .LcustomEventCleanup
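// With a handler installed, the arguments are already in place, so the
// guarded call reduces to (reconstruction; exact upstream code may differ):
ALIGNED_CALL_RAX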
.LcustomEventCleanup:
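// Cleanup path (assumed to mirror the other trampolines): undo the
// register saves and return to the patched function.
RESTORE_REGISTERS
retq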
# LLVM-MCA-END
ASM_SIZE(__xray_CustomEvent)
//===----------------------------------------------------------------------===//

.global ASM_SYMBOL(__xray_TypedEvent)
ASM_HIDDEN(__xray_TypedEvent)
ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
// We pass three arguments to this trampoline, which should be in %rdi,
// %rsi, and %rdx without our intervention.
movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
testq %rax, %rax
je .LtypedEventCleanup
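// As with custom events, the guarded call and the cleanup path for the
// label referenced above are expected to mirror the custom-event
// trampoline (reconstruction; exact upstream code may differ):
ALIGNED_CALL_RAX

.LtypedEventCleanup:
RESTORE_REGISTERS
retq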
# LLVM-MCA-END
ASM_SIZE(__xray_TypedEvent)
//===----------------------------------------------------------------------===//

NO_EXEC_STACK_DIRECTIVE