//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//
#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"

// NOTE(review): the lines below are the xmm argument-register spill portion
// of a register-save macro; the opening `.macro` line is not visible in this
// chunk — confirm against the full file. CFI_DEF_CFA_OFFSET(248) declares
// the canonical frame address 248 bytes above %rsp at this point.
// movupd is used because the spill slots are not guaranteed 16-byte aligned.
  CFI_DEF_CFA_OFFSET(248)
  movupd %xmm0, 216(%rsp)
  movupd %xmm1, 200(%rsp)
  movupd %xmm2, 184(%rsp)
  movupd %xmm3, 168(%rsp)
  movupd %xmm4, 152(%rsp)
  movupd %xmm5, 136(%rsp)
  movupd %xmm6, 120(%rsp)
  movupd %xmm7, 104(%rsp)
// Reload the xmm argument registers from the stack slots used by the
// matching save sequence above; offsets mirror the stores exactly
// (xmm0 at the highest offset, xmm7 at the lowest).
// NOTE(review): no `.endm` is visible in this chunk; the rest of the macro
// body and its terminator presumably follow in the full file — confirm.
.macro RESTORE_REGISTERS
  movupd 216(%rsp), %xmm0
  movupd 200(%rsp), %xmm1
  movupd 184(%rsp), %xmm2
  movupd 168(%rsp), %xmm3
  movupd 152(%rsp), %xmm4
  movupd 136(%rsp), %xmm5
  movupd 120(%rsp), %xmm6
  movupd 104(%rsp), %xmm7
.macro ALIGNED_CALL_RAX
  // Call the logging handler, after aligning the stack to a 16-byte boundary.
  // The approach we're taking here uses additional stack space to stash the
  // stack pointer twice before aligning the pointer to 16-bytes. If the stack
  // was 8-byte aligned, it will become 16-byte aligned -- when restoring the
  // pointer, we can always look -8 bytes from the current position to get
  // either of the values we've stashed in the first place.
  // NOTE(review): the macro's instruction body (and its `.endm`) is not
  // visible in this chunk — confirm against the full file.
// Pick the text section per object format: ELF/COFF targets use `.section
// .text` and can carry a `.file` directive; Mach-O (Apple) uses the
// segment,section pair `__TEXT,__text`.
// NOTE(review): the original chunk placed `.section __TEXT,__text` (the
// Mach-O form) under !defined(__APPLE__) with no #else/#endif in sight —
// restored the conventional balanced split here; confirm against the full
// file in case the #else arm was merely elided by sampling.
#if !defined(__APPLE__)
  .section .text
  .file "xray_trampoline_x86.S"
#else
  .section __TEXT,__text
#endif
//===----------------------------------------------------------------------===//

// Function-entry trampoline: patched function prologues jump here.
// NOTE(review): the register save/restore and handler-dispatch instructions
// are not visible in this chunk — only the handler load survives sampling.
  .globl ASM_SYMBOL(__xray_FunctionEntry)
  ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
// This load has to be atomic, it's concurrent with __xray_patch().
// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
// The patched function prologue puts its xray_instr_map index into %r10d.
  ASM_SIZE(__xray_FunctionEntry)
//===----------------------------------------------------------------------===//

// Function-exit trampoline.
// NOTE(review): several instructions between the visible lines are elided
// in this chunk (flag test, handler call, GPR saves); confirm against the
// full file. The comment below is also truncated mid-sentence by sampling.
  .globl ASM_SYMBOL(__xray_FunctionExit)
  ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
// Save the important registers first. Since we're assuming that this
// function is only jumped into, we only preserve the registers for
  CFI_DEF_CFA_OFFSET(64)
  movupd %xmm0, 32(%rsp)
  movupd %xmm1, 16(%rsp)

// Atomic handler load, concurrent with __xray_patch().
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax

// Restore the important registers.
  movupd 32(%rsp), %xmm0
  movupd 16(%rsp), %xmm1
  CFI_DEF_CFA_OFFSET(8)
  ASM_SIZE(__xray_FunctionExit)
//===----------------------------------------------------------------------===//

// Tail-exit trampoline.
// NOTE(review): only the atomic handler load is visible in this chunk; the
// surrounding save/dispatch/restore instructions were elided by sampling.
  .globl ASM_SYMBOL(__xray_FunctionTailExit)
  ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
  ASM_SIZE(__xray_FunctionTailExit)
//===----------------------------------------------------------------------===//

// Argument-logging entry trampoline.
// NOTE(review): the flag tests, argument-shuffling moves, and handler call
// are not visible in this chunk — the remaining comments describe them.
  .globl ASM_SYMBOL(__xray_ArgLoggerEntry)
  ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
// Again, these function pointer loads must be atomic; MOV is fine.
  movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax

// If [arg1 logging handler] not set, defer to no-arg logging.
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax

// First argument will become the third
// XRayEntryType::LOG_ARGS_ENTRY into the second
// 32-bit function ID becomes the first
  ASM_SIZE(__xray_ArgLoggerEntry)
//===----------------------------------------------------------------------===//

// Custom-event trampoline.
// NOTE(review): the flag-setting test before the `je` and the handler call
// between the branch and the cleanup label are not visible in this chunk.
  .global ASM_SYMBOL(__xray_CustomEvent)
  ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
// We take two arguments to this trampoline, which should be in rdi and rsi
// already. If no handler is installed, fall through to cleanup.
  movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
  je .LcustomEventCleanup

.LcustomEventCleanup:
  ASM_SIZE(__xray_CustomEvent)
//===----------------------------------------------------------------------===//

// Typed-event trampoline.
// NOTE(review): the flag-setting test before the `je`, the handler call,
// and the `.LtypedEventCleanup` label it targets are not visible in this
// chunk — confirm against the full file.
  .global ASM_SYMBOL(__xray_TypedEvent)
  ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
// We pass three arguments to this trampoline, which should be in rdi, rsi
// and rdx without our intervention.
  movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
  je .LtypedEventCleanup

  ASM_SIZE(__xray_TypedEvent)
//===----------------------------------------------------------------------===//

// Mark the stack non-executable on targets that require the directive
// (macro presumably provided by the included ../builtins/assembly.h —
// confirm).
NO_EXEC_STACK_DIRECTIVE