//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/aarch64.
//
//===----------------------------------------------------------------------===//
14 #define SYSCALL(name) __NR_ ## name
16 static uptr __internal_syscall(u64 nr) {
17 register u64 x8 asm("x8") = nr;
18 register u64 x0 asm("x0");
25 #define __internal_syscall0(n) \
26 (__internal_syscall)(n)
28 static uptr __internal_syscall(u64 nr, u64 arg1) {
29 register u64 x8 asm("x8") = nr;
30 register u64 x0 asm("x0") = arg1;
37 #define __internal_syscall1(n, a1) \
38 (__internal_syscall)(n, (u64)(a1))
40 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
41 register u64 x8 asm("x8") = nr;
42 register u64 x0 asm("x0") = arg1;
43 register u64 x1 asm("x1") = arg2;
46 : "r"(x8), "0"(x0), "r"(x1)
50 #define __internal_syscall2(n, a1, a2) \
51 (__internal_syscall)(n, (u64)(a1), (long)(a2))
53 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
54 register u64 x8 asm("x8") = nr;
55 register u64 x0 asm("x0") = arg1;
56 register u64 x1 asm("x1") = arg2;
57 register u64 x2 asm("x2") = arg3;
60 : "r"(x8), "0"(x0), "r"(x1), "r"(x2)
64 #define __internal_syscall3(n, a1, a2, a3) \
65 (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
67 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
69 register u64 x8 asm("x8") = nr;
70 register u64 x0 asm("x0") = arg1;
71 register u64 x1 asm("x1") = arg2;
72 register u64 x2 asm("x2") = arg3;
73 register u64 x3 asm("x3") = arg4;
76 : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)
80 #define __internal_syscall4(n, a1, a2, a3, a4) \
81 (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
83 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
84 u64 arg4, long arg5) {
85 register u64 x8 asm("x8") = nr;
86 register u64 x0 asm("x0") = arg1;
87 register u64 x1 asm("x1") = arg2;
88 register u64 x2 asm("x2") = arg3;
89 register u64 x3 asm("x3") = arg4;
90 register u64 x4 asm("x4") = arg5;
93 : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
97 #define __internal_syscall5(n, a1, a2, a3, a4, a5) \
98 (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
101 static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
102 u64 arg4, long arg5, long arg6) {
103 register u64 x8 asm("x8") = nr;
104 register u64 x0 asm("x0") = arg1;
105 register u64 x1 asm("x1") = arg2;
106 register u64 x2 asm("x2") = arg3;
107 register u64 x3 asm("x3") = arg4;
108 register u64 x4 asm("x4") = arg5;
109 register u64 x5 asm("x5") = arg6;
112 : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
116 #define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
117 (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
118 (u64)(a5), (long)(a6))
// Argument-counting dispatch: __SYSCALL_NARGS(...) expands to the number of
// variadic arguments minus one (the first argument is the syscall number).
// Pasting that count onto "__internal_syscall" selects the matching
// __internal_syscallN macro above.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
// Extra indirection so the arguments of ## are fully expanded before pasting.
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

// Public entry point: internal_syscall(nr, args...) dispatches on arg count.
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
// Helper function used to avoid clobbering errno.
131 bool internal_iserror(uptr retval, int *rverrno) {
132 if (retval >= (uptr)-4095) {