//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//

#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"
.macro SAVE_REGISTERS
  subq $240, %rsp
  CFI_DEF_CFA_OFFSET(248)
  // The CFA is now %rsp + 248: the 240 bytes reserved above plus the
  // 8-byte return address pushed by our caller.
  movupd %xmm0, 216(%rsp)
  movupd %xmm1, 200(%rsp)
  movupd %xmm2, 184(%rsp)
  movupd %xmm3, 168(%rsp)
  movupd %xmm4, 152(%rsp)
  movupd %xmm5, 136(%rsp)
  movupd %xmm6, 120(%rsp)
  movupd %xmm7, 104(%rsp)
.macro RESTORE_REGISTERS
  movupd 216(%rsp), %xmm0
  movupd 200(%rsp), %xmm1
  movupd 184(%rsp), %xmm2
  movupd 168(%rsp), %xmm3
  movupd 152(%rsp), %xmm4
  movupd 136(%rsp), %xmm5
  movupd 120(%rsp), %xmm6
  movupd 104(%rsp), %xmm7
.macro ALIGNED_CALL_RAX
  // Call the logging handler, after aligning the stack to a 16-byte boundary.
  // The approach we're taking here uses additional stack space to stash the
  // stack pointer twice before aligning it to 16 bytes. If the stack was
  // 8-byte aligned, it will become 16-byte aligned -- when restoring the
  // pointer, we can always look -8 bytes from the current position to get
  // either of the values we've stashed in the first place.
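  // The body itself is elided in this excerpt; what follows is a minimal
  // sketch of the approach described above, assuming %r11 is free to use
  // as scratch at every call site:
  movq %rsp, %r11       // stash the unaligned stack pointer
  subq $16, %rsp        // make room for two 8-byte copies of it
  andq $-16, %rsp       // force 16-byte alignment
  movq %r11, (%rsp)     // first stash
  movq %r11, 8(%rsp)    // second stash
  callq *%rax
  movq 8(%rsp), %rsp    // either stash restores the original pointer
.endm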
#if !defined(__APPLE__)
  .section .text
#else
  .section __TEXT,__text
#endif
  .file "xray_trampoline_x86.S"
//===----------------------------------------------------------------------===//

  .globl ASM_SYMBOL(__xray_FunctionEntry)
  ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
  // This load has to be atomic; it's concurrent with __xray_patch().
  // On x86/amd64, a simple (type-aligned) MOV instruction is enough.
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
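  // %rax now holds the installed handler, a pointer of C++ type
  //   void (*)(int32_t FuncId, XRayEntryType Kind);
  // an (elided) testq/je pair skips the call below when it is null.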
  // The patched function prologue puts its xray_instr_map index into %r10d.
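  // A sketch of the (elided) call sequence that follows; the sled patched
  // into the instrumented function is, roughly,
  //   mov $<function_id>, %r10d ; callq __xray_FunctionEntry
  // and the trampoline forwards that ID to the handler:
  //   movl %r10d, %edi    // function ID becomes the first argument
  //   xor  %esi, %esi     // XRayEntryType::ENTRY == 0, the second
  //   ALIGNED_CALL_RAX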
  ASM_SIZE(__xray_FunctionEntry)
//===----------------------------------------------------------------------===//

  .globl ASM_SYMBOL(__xray_FunctionExit)
  ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
  // Save the important registers first. Since we're assuming that this
  // function is only jumped into, we only preserve the registers used
  // for returning.
  subq $56, %rsp
  CFI_DEF_CFA_OFFSET(64)
  movupd %xmm0, 32(%rsp)
  movupd %xmm1, 16(%rsp)
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
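  // The (elided) tail mirrors __xray_FunctionEntry: test %rax for null and,
  // when a handler is installed, roughly:
  //   movl %r10d, %edi    // function ID
  //   movl $1, %esi       // XRayEntryType::EXIT == 1
  //   ALIGNED_CALL_RAX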
  // Restore the important registers.
  movupd 32(%rsp), %xmm0
  movupd 16(%rsp), %xmm1
  addq $56, %rsp
  CFI_DEF_CFA_OFFSET(8)
  retq
  ASM_SIZE(__xray_FunctionExit)

//===----------------------------------------------------------------------===//
  .globl ASM_SYMBOL(__xray_FunctionTailExit)
  ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
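  // The (elided) body follows the same pattern, with the TAIL exit type:
  //   movl %r10d, %edi    // function ID
  //   movl $2, %esi       // XRayEntryType::TAIL == 2
  //   ALIGNED_CALL_RAX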
  ASM_SIZE(__xray_FunctionTailExit)

//===----------------------------------------------------------------------===//
  .globl ASM_SYMBOL(__xray_ArgLoggerEntry)
  ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
  // Again, these function pointer loads must be atomic; MOV is fine.
  movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
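  // An (elided) testq/jne pair jumps past the fallback below when this
  // handler is installed; its C++ type, for reference:
  //   void (*)(int32_t FuncId, XRayEntryType Kind, uint64_t Arg1);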
  // If the [arg1 logging handler] is not set, defer to no-arg logging.
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
  // First argument will become the third
  movq %rdi, %rdx
  // XRayEntryType::LOG_ARGS_ENTRY into the second
  mov $0x3, %esi
  // 32-bit function ID becomes the first
  movl %r10d, %edi
  ASM_SIZE(__xray_ArgLoggerEntry)

//===----------------------------------------------------------------------===//
  .global ASM_SYMBOL(__xray_CustomEvent)
  ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
  // We take two arguments to this trampoline, which should be in rdi and rsi
  // already.
  movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
  testq %rax, %rax
  je .LcustomEventCleanup
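  // A handler is installed: call it with both arguments still in place.
  // Its C++ type, for reference:
  //   void (*)(void *EventData, std::size_t EventSize);
  ALIGNED_CALL_RAX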
.LcustomEventCleanup:
  RESTORE_REGISTERS
  retq
  ASM_SIZE(__xray_CustomEvent)
//===----------------------------------------------------------------------===//

  .global ASM_SYMBOL(__xray_TypedEvent)
  ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
  // We pass three arguments to this trampoline, which should be in rdi, rsi
  // and rdx without our intervention.
  movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
  testq %rax, %rax
  je .LtypedEventCleanup
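  // A handler is installed: call it with all three arguments untouched.
  // Its C++ type, for reference:
  //   void (*)(std::uint16_t EventType, const void *Event, std::size_t EventSize);
  ALIGNED_CALL_RAX

.LtypedEventCleanup:
  RESTORE_REGISTERS
  retq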
  ASM_SIZE(__xray_TypedEvent)

//===----------------------------------------------------------------------===//

NO_EXEC_STACK_DIRECTIVE