//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//

#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"

.macro SAVE_REGISTERS
    subq $240, %rsp
    CFI_DEF_CFA_OFFSET(248)
    movq %rbp, 232(%rsp)
    movupd %xmm0, 216(%rsp)
    movupd %xmm1, 200(%rsp)
    movupd %xmm2, 184(%rsp)
    movupd %xmm3, 168(%rsp)
    movupd %xmm4, 152(%rsp)
    movupd %xmm5, 136(%rsp)
    movupd %xmm6, 120(%rsp)
    movupd %xmm7, 104(%rsp)
    movq %rdi, 96(%rsp)
    movq %rax, 88(%rsp)
    movq %rdx, 80(%rsp)
    movq %rsi, 72(%rsp)
    movq %rcx, 64(%rsp)
    movq %r8, 56(%rsp)
    movq %r9, 48(%rsp)
.endm
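
// Annotation (not in the original source): a sketch of the 240-byte frame
// that SAVE_REGISTERS builds, offsets relative to %rsp after the subq:
//
//   232..239   %rbp
//   104..231   %xmm0..%xmm7 (16 bytes each, %xmm0 highest)
//    48..103   %rdi, %rax, %rdx, %rsi, %rcx, %r8, %r9
//     0..47    unused slack
//
// CFI_DEF_CFA_OFFSET(248) is the 240 bytes above plus the caller's pushed
// return address.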

.macro RESTORE_REGISTERS
    movq 232(%rsp), %rbp
    movupd 216(%rsp), %xmm0
    movupd 200(%rsp), %xmm1
    movupd 184(%rsp), %xmm2
    movupd 168(%rsp), %xmm3
    movupd 152(%rsp), %xmm4
    movupd 136(%rsp), %xmm5
    movupd 120(%rsp), %xmm6
    movupd 104(%rsp), %xmm7
    movq 96(%rsp), %rdi
    movq 88(%rsp), %rax
    movq 80(%rsp), %rdx
    movq 72(%rsp), %rsi
    movq 64(%rsp), %rcx
    movq 56(%rsp), %r8
    movq 48(%rsp), %r9
    addq $240, %rsp
    CFI_DEF_CFA_OFFSET(8)
.endm

.macro ALIGNED_CALL_RAX
    // Call the logging handler, after aligning the stack to a 16-byte boundary.
    // The approach we're taking here uses additional stack space to stash the
    // stack pointer twice before aligning the pointer to 16 bytes. If the stack
    // was 8-byte aligned, it will become 16-byte aligned -- when restoring the
    // pointer, we can always look -8 bytes from the current position to get
    // either of the values we've stashed in the first place.
    pushq %rsp
    pushq (%rsp)
    andq $-0x10, %rsp
    callq *%rax
    movq 8(%rsp), %rsp
.endm
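
// Annotation (not in the original source): a worked example of the alignment
// dance above. Suppose %rsp == 0x1028 on entry, i.e. 8-byte but not 16-byte
// aligned:
//
//   pushq %rsp         // %rsp = 0x1020; [0x1020] = 0x1028
//   pushq (%rsp)       // %rsp = 0x1018; [0x1018] = 0x1028
//   andq $-0x10, %rsp  // %rsp = 0x1010, 16-byte aligned for the call
//   callq *%rax
//   movq 8(%rsp), %rsp // reloads 0x1028 from [0x1018]
//
// Stashing the old %rsp twice guarantees that, whether or not the andq
// actually moved the pointer, a saved copy always sits at 8(%rsp) once the
// call returns.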

    .text
#if !defined(__APPLE__)
    .section .text
    .file "xray_trampoline_x86.S"
#else
    .section __TEXT,__text
#endif

//===----------------------------------------------------------------------===//

    .globl ASM_SYMBOL(__xray_FunctionEntry)
    .align 16, 0x90
    ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
    CFI_STARTPROC
    SAVE_REGISTERS

    // This load has to be atomic, as it is concurrent with __xray_patch().
    // On x86/amd64, a simple (type-aligned) MOV instruction is enough.
    movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
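
    // Annotation (not in the original source): the C++ side publishes this
    // pointer with an atomic store (__xray_set_handler() writes
    // __xray::XRayPatchedFunction), so an aligned 8-byte MOV reads either the
    // old or the new handler, never a torn value.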

    testq %rax, %rax
    je .Ltmp0

    // The patched function prologue puts its xray_instr_map index into %r10d.
    movl %r10d, %edi
    xor %esi, %esi               // XRayEntryType::ENTRY == 0
    ALIGNED_CALL_RAX

.Ltmp0:
    RESTORE_REGISTERS
    retq
# LLVM-MCA-END
    ASM_SIZE(__xray_FunctionEntry)
    CFI_ENDPROC
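
// Annotation (not in the original source): the installed handler has the C++
// signature `void handler(int32_t FuncId, XRayEntryType Entry)`; %edi carries
// the function ID and %esi the entry type.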

//===----------------------------------------------------------------------===//

    .globl ASM_SYMBOL(__xray_FunctionExit)
    .align 16, 0x90
    ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
    CFI_STARTPROC
    // Save the important registers first. Since we're assuming that this
    // function is only jumped into, we only preserve the registers for
    // returning.
    subq $56, %rsp
    CFI_DEF_CFA_OFFSET(64)
    movq %rbp, 48(%rsp)
    movupd %xmm0, 32(%rsp)
    movupd %xmm1, 16(%rsp)
    movq %rax, 8(%rsp)
    movq %rdx, 0(%rsp)
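
    // Annotation (not in the original source): under the SysV x86-64 ABI,
    // %rax/%rdx and %xmm0/%xmm1 are the registers that can carry return
    // values, which is why an exit trampoline only needs to preserve these
    // plus %rbp.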

    movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
    testq %rax, %rax
    je .Ltmp2

    movl %r10d, %edi
    movl $1, %esi                // XRayEntryType::EXIT
    ALIGNED_CALL_RAX

.Ltmp2:
    // Restore the important registers.
    movq 48(%rsp), %rbp
    movupd 32(%rsp), %xmm0
    movupd 16(%rsp), %xmm1
    movq 8(%rsp), %rax
    movq 0(%rsp), %rdx
    addq $56, %rsp
    CFI_DEF_CFA_OFFSET(8)
    retq
# LLVM-MCA-END
    ASM_SIZE(__xray_FunctionExit)
    CFI_ENDPROC

//===----------------------------------------------------------------------===//

    .globl ASM_SYMBOL(__xray_FunctionTailExit)
    .align 16, 0x90
    ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
    CFI_STARTPROC
    SAVE_REGISTERS

    movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
    testq %rax, %rax
    je .Ltmp4

    movl %r10d, %edi
    movl $2, %esi                // XRayEntryType::TAIL
    ALIGNED_CALL_RAX

.Ltmp4:
    RESTORE_REGISTERS
    retq
# LLVM-MCA-END
    ASM_SIZE(__xray_FunctionTailExit)
    CFI_ENDPROC
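
// Annotation (not in the original source): a tail call leaves the function
// without ever executing a paired exit sled, so this trampoline reports a
// distinct TAIL event; tools can then close the caller's span even though the
// callee's own ENTRY/EXIT events happen afterwards.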

//===----------------------------------------------------------------------===//

    .globl ASM_SYMBOL(__xray_ArgLoggerEntry)
    .align 16, 0x90
    ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
    CFI_STARTPROC
    SAVE_REGISTERS

    // Again, these function pointer loads must be atomic; MOV is fine.
    movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
    testq %rax, %rax
    jne .Larg1entryLog

    // If the arg1 logging handler is not set, defer to the no-argument
    // logging handler instead.
    movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
    testq %rax, %rax
    je .Larg1entryFail

.Larg1entryLog:

    // The first argument will become the third.
    movq %rdi, %rdx

    // XRayEntryType::LOG_ARGS_ENTRY goes into the second.
    movl $0x3, %esi

    // The 32-bit function ID becomes the first.
    movl %r10d, %edi

    ALIGNED_CALL_RAX

.Larg1entryFail:
    RESTORE_REGISTERS
    retq
# LLVM-MCA-END
    ASM_SIZE(__xray_ArgLoggerEntry)
    CFI_ENDPROC
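
// Annotation (not in the original source): after the shuffle above the call
// behaves like `handler(FuncId, XRayEntryType::LOG_ARGS_ENTRY, Arg1)` --
// %edi = function ID, %esi = 3, %rdx = the instrumented function's original
// first argument.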

//===----------------------------------------------------------------------===//

    .global ASM_SYMBOL(__xray_CustomEvent)
    .align 16, 0x90
    ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
    CFI_STARTPROC
    SAVE_REGISTERS

    // We take two arguments to this trampoline, which should be in rdi and rsi
    // already.
    movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
    testq %rax, %rax
    je .LcustomEventCleanup

    ALIGNED_CALL_RAX

.LcustomEventCleanup:
    RESTORE_REGISTERS
    retq
# LLVM-MCA-END
    ASM_SIZE(__xray_CustomEvent)
    CFI_ENDPROC
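
// Annotation (not in the original source): the two untouched arguments are
// the ones __xray_customevent(buffer, size) was called with, so the handler
// registered via __xray_set_customevent_handler() receives the event buffer
// in %rdi and its size in %rsi.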

//===----------------------------------------------------------------------===//

    .global ASM_SYMBOL(__xray_TypedEvent)
    .align 16, 0x90
    ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
    CFI_STARTPROC
    SAVE_REGISTERS

    // We pass three arguments to this trampoline, which should be in rdi, rsi
    // and rdx without our intervention.
    movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
    testq %rax, %rax
    je .LtypedEventCleanup

    ALIGNED_CALL_RAX

.LtypedEventCleanup:
    RESTORE_REGISTERS
    retq
# LLVM-MCA-END
    ASM_SIZE(__xray_TypedEvent)
    CFI_ENDPROC
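
// Annotation (not in the original source, and the exact handler signature has
// varied across releases): the handler registered via
// __xray_set_typedevent_handler() receives the event type in %rdi, the event
// buffer in %rsi, and the buffer size in %rdx.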

//===----------------------------------------------------------------------===//

NO_EXEC_STACK_DIRECTIVE