1 // RUN: %clang_cc1 -emit-llvm -o %t %s
2 // RUN: not grep __builtin %t
3 // RUN: %clang_cc1 %s -emit-llvm -o - -triple x86_64-apple-darwin | FileCheck %s
5 int printf(const char *, ...);
7 void p(char *str, int x) {
8 printf("%s: %d\n", str, x);
10 void q(char *str, double x) {
11 printf("%s: %f\n", str, x);
13 void r(char *str, void *ptr) {
14 printf("%s: %p\n", str, ptr);
// Convenience macros: each stringizes the builtin's name and argument list
// (so the report label shows exactly what was invoked), calls the builtin,
// and routes the result through the matching reporting helper:
//   P -> p() for builtins yielding an int,
//   Q -> q() for builtins yielding a double,
//   R -> r() for builtins yielding a pointer,
//   V -> p() for void-returning builtins, using the comma operator to
//        sequence the call and supply a dummy 0 as the printed value.
// Token pasting (__builtin_##n) forms the real builtin identifier, which the
// "not grep __builtin" RUN line relies on: every use must be lowered by CodeGen.
21 #define P(n,args) p(#n #args, __builtin_##n args)
22 #define Q(n,args) q(#n #args, __builtin_##n args)
23 #define R(n,args) r(#n #args, __builtin_##n args)
24 #define V(n,args) p(#n #args, (__builtin_##n args, 0))
25 P(types_compatible_p, (int, float));
26 P(choose_expr, (0, 10, 20));
27 P(constant_p, (sizeof(10)));
28 P(expect, (N == 12, 0));
31 V(prefetch, (&N, 1, 0));
42 P(fpclassify, (0, 1, 2, 3, 4, 1.0));
43 P(fpclassify, (0, 1, 2, 3, 4, 1.0f));
44 P(fpclassify, (0, 1, 2, 3, 4, 1.0l));
55 P(isgreater, (1., 2.));
56 P(isgreaterequal, (1., 2.));
58 P(islessequal, (1., 2.));
59 P(islessgreater, (1., 2.));
60 P(isunordered, (1., 2.));
66 // Bitwise & Numeric Functions
90 int a, b, n = random(); // Avoid optimizing out.
91 char s0[10], s1[] = "Hello";
94 V(strncat, (s0, s1, n));
95 V(strchr, (s0, s1[0]));
96 V(strrchr, (s0, s1[0]));
98 V(strncpy, (s0, s1, n));
100 // Object size checking
101 V(__memset_chk, (s0, 0, sizeof s0, n));
102 V(__memcpy_chk, (s0, s1, sizeof s0, n));
103 V(__memmove_chk, (s0, s1, sizeof s0, n));
104 V(__mempcpy_chk, (s0, s1, sizeof s0, n));
105 V(__strncpy_chk, (s0, s1, sizeof s0, n));
106 V(__strcpy_chk, (s0, s1, n));
108 V(__strcat_chk, (s0, s1, n));
109 P(object_size, (s0, 0));
110 P(object_size, (s0, 1));
111 P(object_size, (s0, 2));
112 P(object_size, (s0, 3));
120 // CHECK: @llvm.bitreverse.i8
121 // CHECK: @llvm.bitreverse.i16
122 // CHECK: @llvm.bitreverse.i32
123 // CHECK: @llvm.bitreverse.i64
125 P(bitreverse16, (N));
126 P(bitreverse32, (N));
127 P(bitreverse64, (N));
130 // V(clear_cache, (&N, &N+1));
132 R(extract_return_addr, (&N));
143 __builtin_strcat(0, 0);
146 // CHECK-LABEL: define void @bar(
152 // LLVM's hex representation of float constants is really unfortunate;
153 // basically it does a float-to-double "conversion" and then prints the
154 // hex form of that. That gives us weird artifacts like exponents
155 // that aren't numerically similar to the original exponent and
156 // significand bit-patterns that are offset by three bits (because
157 // the exponent was expanded from 8 bits to 11).
159 // 0xAE98 == 1010111010011000
160 // 0x15D3 == 1010111010011
162 f = __builtin_huge_valf(); // CHECK: float 0x7FF0000000000000
163 d = __builtin_huge_val(); // CHECK: double 0x7FF0000000000000
164 ld = __builtin_huge_vall(); // CHECK: x86_fp80 0xK7FFF8000000000000000
165 f = __builtin_nanf(""); // CHECK: float 0x7FF8000000000000
166 d = __builtin_nan(""); // CHECK: double 0x7FF8000000000000
167 ld = __builtin_nanl(""); // CHECK: x86_fp80 0xK7FFFC000000000000000
168 f = __builtin_nanf("0xAE98"); // CHECK: float 0x7FF815D300000000
169 d = __builtin_nan("0xAE98"); // CHECK: double 0x7FF800000000AE98
170 ld = __builtin_nanl("0xAE98"); // CHECK: x86_fp80 0xK7FFFC00000000000AE98
171 f = __builtin_nansf(""); // CHECK: float 0x7FF4000000000000
172 d = __builtin_nans(""); // CHECK: double 0x7FF4000000000000
173 ld = __builtin_nansl(""); // CHECK: x86_fp80 0xK7FFFA000000000000000
174 f = __builtin_nansf("0xAE98"); // CHECK: float 0x7FF015D300000000
175 d = __builtin_nans("0xAE98"); // CHECK: double 0x7FF000000000AE98
176 ld = __builtin_nansl("0xAE98");// CHECK: x86_fp80 0xK7FFF800000000000AE98
181 // CHECK-LABEL: define void @test_conditional_bzero
182 void test_conditional_bzero() {
184 int _sz = 20, len = 20;
187 ? __builtin_bzero(dst, len)
189 : __builtin_bzero(dst, len));
190 // CHECK: call void @llvm.memset
191 // CHECK: call void @llvm.memset
195 // CHECK-LABEL: define void @test_float_builtins
196 void test_float_builtins(float F, double D, long double LD) {
198 res = __builtin_isinf(F);
199 // CHECK: call float @llvm.fabs.f32(float
200 // CHECK: fcmp oeq float {{.*}}, 0x7FF0000000000000
202 res = __builtin_isinf(D);
203 // CHECK: call double @llvm.fabs.f64(double
204 // CHECK: fcmp oeq double {{.*}}, 0x7FF0000000000000
206 res = __builtin_isinf(LD);
207 // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80
208 // CHECK: fcmp oeq x86_fp80 {{.*}}, 0xK7FFF8000000000000000
210 res = __builtin_isinf_sign(F);
211 // CHECK: %[[ABS:.*]] = call float @llvm.fabs.f32(float %[[ARG:.*]])
212 // CHECK: %[[ISINF:.*]] = fcmp oeq float %[[ABS]], 0x7FF0000000000000
213 // CHECK: %[[BITCAST:.*]] = bitcast float %[[ARG]] to i32
214 // CHECK: %[[ISNEG:.*]] = icmp slt i32 %[[BITCAST]], 0
215 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
216 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
218 res = __builtin_isinf_sign(D);
219 // CHECK: %[[ABS:.*]] = call double @llvm.fabs.f64(double %[[ARG:.*]])
220 // CHECK: %[[ISINF:.*]] = fcmp oeq double %[[ABS]], 0x7FF0000000000000
221 // CHECK: %[[BITCAST:.*]] = bitcast double %[[ARG]] to i64
222 // CHECK: %[[ISNEG:.*]] = icmp slt i64 %[[BITCAST]], 0
223 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
224 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
226 res = __builtin_isinf_sign(LD);
227 // CHECK: %[[ABS:.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 %[[ARG:.*]])
228 // CHECK: %[[ISINF:.*]] = fcmp oeq x86_fp80 %[[ABS]], 0xK7FFF8000000000000000
229 // CHECK: %[[BITCAST:.*]] = bitcast x86_fp80 %[[ARG]] to i80
230 // CHECK: %[[ISNEG:.*]] = icmp slt i80 %[[BITCAST]], 0
231 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
232 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
234 res = __builtin_isfinite(F);
235 // CHECK: call float @llvm.fabs.f32(float
236 // CHECK: fcmp one float {{.*}}, 0x7FF0000000000000
239 // CHECK: call double @llvm.fabs.f64(double
240 // CHECK: fcmp one double {{.*}}, 0x7FF0000000000000
242 res = __builtin_isnormal(F);
243 // CHECK: fcmp oeq float
244 // CHECK: call float @llvm.fabs.f32(float
245 // CHECK: fcmp ult float {{.*}}, 0x7FF0000000000000
246 // CHECK: fcmp uge float {{.*}}, 0x3810000000000000
251 // CHECK-LABEL: define void @test_float_builtin_ops
252 void test_float_builtin_ops(float F, double D, long double LD) {
254 volatile double resd;
255 volatile long double resld;
257 resf = __builtin_fmodf(F,F);
260 resd = __builtin_fmod(D,D);
261 // CHECK: frem double
263 resld = __builtin_fmodl(LD,LD);
264 // CHECK: frem x86_fp80
266 resf = __builtin_fabsf(F);
267 resd = __builtin_fabs(D);
268 resld = __builtin_fabsl(LD);
269 // CHECK: call float @llvm.fabs.f32(float
270 // CHECK: call double @llvm.fabs.f64(double
271 // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80
273 resf = __builtin_canonicalizef(F);
274 resd = __builtin_canonicalize(D);
275 resld = __builtin_canonicalizel(LD);
276 // CHECK: call float @llvm.canonicalize.f32(float
277 // CHECK: call double @llvm.canonicalize.f64(double
278 // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80
280 resf = __builtin_fminf(F, F);
281 // CHECK: call float @llvm.minnum.f32
283 resd = __builtin_fmin(D, D);
284 // CHECK: call double @llvm.minnum.f64
286 resld = __builtin_fminl(LD, LD);
287 // CHECK: call x86_fp80 @llvm.minnum.f80
289 resf = __builtin_fmaxf(F, F);
290 // CHECK: call float @llvm.maxnum.f32
292 resd = __builtin_fmax(D, D);
293 // CHECK: call double @llvm.maxnum.f64
295 resld = __builtin_fmaxl(LD, LD);
296 // CHECK: call x86_fp80 @llvm.maxnum.f80
298 resf = __builtin_fabsf(F);
299 // CHECK: call float @llvm.fabs.f32
301 resd = __builtin_fabs(D);
302 // CHECK: call double @llvm.fabs.f64
304 resld = __builtin_fabsl(LD);
305 // CHECK: call x86_fp80 @llvm.fabs.f80
307 resf = __builtin_copysignf(F, F);
308 // CHECK: call float @llvm.copysign.f32
310 resd = __builtin_copysign(D, D);
311 // CHECK: call double @llvm.copysign.f64
313 resld = __builtin_copysignl(LD, LD);
314 // CHECK: call x86_fp80 @llvm.copysign.f80
317 resf = __builtin_ceilf(F);
318 // CHECK: call float @llvm.ceil.f32
320 resd = __builtin_ceil(D);
321 // CHECK: call double @llvm.ceil.f64
323 resld = __builtin_ceill(LD);
324 // CHECK: call x86_fp80 @llvm.ceil.f80
326 resf = __builtin_floorf(F);
327 // CHECK: call float @llvm.floor.f32
329 resd = __builtin_floor(D);
330 // CHECK: call double @llvm.floor.f64
332 resld = __builtin_floorl(LD);
333 // CHECK: call x86_fp80 @llvm.floor.f80
335 resf = __builtin_sqrtf(F);
336 // CHECK: call float @llvm.sqrt.f32(
338 resd = __builtin_sqrt(D);
339 // CHECK: call double @llvm.sqrt.f64(
341 resld = __builtin_sqrtl(LD);
342 // CHECK: call x86_fp80 @llvm.sqrt.f80
344 resf = __builtin_truncf(F);
345 // CHECK: call float @llvm.trunc.f32
347 resd = __builtin_trunc(D);
348 // CHECK: call double @llvm.trunc.f64
350 resld = __builtin_truncl(LD);
351 // CHECK: call x86_fp80 @llvm.trunc.f80
353 resf = __builtin_rintf(F);
354 // CHECK: call float @llvm.rint.f32
356 resd = __builtin_rint(D);
357 // CHECK: call double @llvm.rint.f64
359 resld = __builtin_rintl(LD);
360 // CHECK: call x86_fp80 @llvm.rint.f80
362 resf = __builtin_nearbyintf(F);
363 // CHECK: call float @llvm.nearbyint.f32
365 resd = __builtin_nearbyint(D);
366 // CHECK: call double @llvm.nearbyint.f64
368 resld = __builtin_nearbyintl(LD);
369 // CHECK: call x86_fp80 @llvm.nearbyint.f80
371 resf = __builtin_roundf(F);
372 // CHECK: call float @llvm.round.f32
374 resd = __builtin_round(D);
375 // CHECK: call double @llvm.round.f64
377 resld = __builtin_roundl(LD);
378 // CHECK: call x86_fp80 @llvm.round.f80
382 // __builtin_longjmp isn't supported on all platforms, so only test it on X86.
385 // CHECK-LABEL: define void @test_builtin_longjmp
386 void test_builtin_longjmp(void **buffer) {
387 // CHECK: [[BITCAST:%.*]] = bitcast
388 // CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(i8* [[BITCAST]])
389 __builtin_longjmp(buffer, 1);
390 // CHECK-NEXT: unreachable
395 // CHECK-LABEL: define i64 @test_builtin_readcyclecounter
396 long long test_builtin_readcyclecounter() {
397 // CHECK: call i64 @llvm.readcyclecounter()
398 return __builtin_readcyclecounter();
401 /// __builtin_launder should be a NOP in C since there are no vtables.
402 // CHECK-LABEL: define void @test_builtin_launder
403 void test_builtin_launder(int *p) {
404 // CHECK: [[TMP:%.*]] = load i32*,
405 // CHECK-NOT: @llvm.launder
406 // CHECK: store i32* [[TMP]],
407 int *d = __builtin_launder(p);
410 // Behavior of __builtin_os_log differs between platforms, so only test on X86
413 // CHECK-LABEL: define void @test_builtin_os_log
414 // CHECK: (i8* %[[BUF:.*]], i32 %[[I:.*]], i8* %[[DATA:.*]])
415 void test_builtin_os_log(void *buf, int i, const char *data) {
417 // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
418 // CHECK: %[[I_ADDR:.*]] = alloca i32, align 4
419 // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
420 // CHECK: %[[LEN:.*]] = alloca i32, align 4
421 // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
422 // CHECK: store i32 %[[I]], i32* %[[I_ADDR]], align 4
423 // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
425 // CHECK: store volatile i32 34, i32* %[[LEN]]
426 len = __builtin_os_log_format_buffer_size("%d %{public}s %{private}.16P", i, data, data);
428 // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]]
429 // CHECK: %[[V2:.*]] = load i32, i32* %[[I_ADDR]]
430 // CHECK: %[[V3:.*]] = load i8*, i8** %[[DATA_ADDR]]
431 // CHECK: %[[V4:.*]] = ptrtoint i8* %[[V3]] to i64
432 // CHECK: %[[V5:.*]] = load i8*, i8** %[[DATA_ADDR]]
433 // CHECK: %[[V6:.*]] = ptrtoint i8* %[[V5]] to i64
434 // CHECK: call void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49(i8* %[[V1]], i32 %[[V2]], i64 %[[V4]], i32 16, i64 %[[V6]])
435 __builtin_os_log_format(buf, "%d %{public}s %{private}.16P", i, data, data);
437 // privacy annotations aren't recognized when they are preceded or followed
438 // by non-whitespace characters.
440 // CHECK: call void @__os_log_helper_1_2_1_8_32(
441 __builtin_os_log_format(buf, "%{xyz public}s", data);
443 // CHECK: call void @__os_log_helper_1_2_1_8_32(
444 __builtin_os_log_format(buf, "%{ public xyz}s", data);
446 // CHECK: call void @__os_log_helper_1_2_1_8_32(
447 __builtin_os_log_format(buf, "%{ public1}s", data);
449 // Privacy annotations do not have to be in the first comma-delimited string.
451 // CHECK: call void @__os_log_helper_1_2_1_8_34(
452 __builtin_os_log_format(buf, "%{ xyz, public }s", "abc");
454 // CHECK: call void @__os_log_helper_1_3_1_8_33(
455 __builtin_os_log_format(buf, "%{ xyz, private }s", "abc");
457 // CHECK: call void @__os_log_helper_1_3_1_8_37(
458 __builtin_os_log_format(buf, "%{ xyz, sensitive }s", "abc");
460 // The strictest privacy annotation in the string wins.
462 // CHECK: call void @__os_log_helper_1_3_1_8_33(
463 __builtin_os_log_format(buf, "%{ private, public, private, public}s", "abc");
465 // CHECK: call void @__os_log_helper_1_3_1_8_37(
466 __builtin_os_log_format(buf, "%{ private, sensitive, private, public}s",
469 // CHECK: store volatile i32 22, i32* %[[LEN]], align 4
470 len = __builtin_os_log_format_buffer_size("%{mask.xyz}s", "abc");
472 // CHECK: call void @__os_log_helper_1_2_2_8_112_8_34(i8* {{.*}}, i64 8026488
473 __builtin_os_log_format(buf, "%{mask.xyz, public}s", "abc");
475 // CHECK: call void @__os_log_helper_1_3_2_8_112_4_1(i8* {{.*}}, i64 8026488
476 __builtin_os_log_format(buf, "%{ mask.xyz, private }d", 11);
478 // Mask type is silently ignored.
479 // CHECK: call void @__os_log_helper_1_2_1_8_32(
480 __builtin_os_log_format(buf, "%{ mask. xyz }s", "abc");
482 // CHECK: call void @__os_log_helper_1_2_1_8_32(
483 __builtin_os_log_format(buf, "%{ mask.xy z }s", "abc");
486 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_3_4_4_0_8_34_4_17_8_49
487 // CHECK: (i8* %[[BUFFER:.*]], i32 %[[ARG0:.*]], i64 %[[ARG1:.*]], i32 %[[ARG2:.*]], i64 %[[ARG3:.*]])
489 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
490 // CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
491 // CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
492 // CHECK: %[[ARG2_ADDR:.*]] = alloca i32, align 4
493 // CHECK: %[[ARG3_ADDR:.*]] = alloca i64, align 8
494 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
495 // CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
496 // CHECK: store i64 %[[ARG1]], i64* %[[ARG1_ADDR]], align 8
497 // CHECK: store i32 %[[ARG2]], i32* %[[ARG2_ADDR]], align 4
498 // CHECK: store i64 %[[ARG3]], i64* %[[ARG3_ADDR]], align 8
499 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
500 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
501 // CHECK: store i8 3, i8* %[[SUMMARY]], align 1
502 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
503 // CHECK: store i8 4, i8* %[[NUMARGS]], align 1
504 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
505 // CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
506 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
507 // CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
508 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
509 // CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
510 // CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
511 // CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
512 // CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 8
513 // CHECK: store i8 34, i8* %[[ARGDESCRIPTOR1]], align 1
514 // CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 9
515 // CHECK: store i8 8, i8* %[[ARGSIZE2]], align 1
516 // CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 10
517 // CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i64*
518 // CHECK: %[[V1:.*]] = load i64, i64* %[[ARG1_ADDR]], align 8
519 // CHECK: store i64 %[[V1]], i64* %[[ARGDATACAST4]], align 1
520 // CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, i8* %[[BUF]], i64 18
521 // CHECK: store i8 17, i8* %[[ARGDESCRIPTOR5]], align 1
522 // CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, i8* %[[BUF]], i64 19
523 // CHECK: store i8 4, i8* %[[ARGSIZE6]], align 1
524 // CHECK: %[[ARGDATA7:.*]] = getelementptr i8, i8* %[[BUF]], i64 20
525 // CHECK: %[[ARGDATACAST8:.*]] = bitcast i8* %[[ARGDATA7]] to i32*
526 // CHECK: %[[V2:.*]] = load i32, i32* %[[ARG2_ADDR]], align 4
527 // CHECK: store i32 %[[V2]], i32* %[[ARGDATACAST8]], align 1
528 // CHECK: %[[ARGDESCRIPTOR9:.*]] = getelementptr i8, i8* %[[BUF]], i64 24
529 // CHECK: store i8 49, i8* %[[ARGDESCRIPTOR9]], align 1
530 // CHECK: %[[ARGSIZE10:.*]] = getelementptr i8, i8* %[[BUF]], i64 25
531 // CHECK: store i8 8, i8* %[[ARGSIZE10]], align 1
532 // CHECK: %[[ARGDATA11:.*]] = getelementptr i8, i8* %[[BUF]], i64 26
533 // CHECK: %[[ARGDATACAST12:.*]] = bitcast i8* %[[ARGDATA11]] to i64*
534 // CHECK: %[[V3:.*]] = load i64, i64* %[[ARG3_ADDR]], align 8
535 // CHECK: store i64 %[[V3]], i64* %[[ARGDATACAST12]], align 1
537 // CHECK-LABEL: define void @test_builtin_os_log_wide
538 // CHECK: (i8* %[[BUF:.*]], i8* %[[DATA:.*]], i32* %[[STR:.*]])
540 void test_builtin_os_log_wide(void *buf, const char *data, wchar_t *str) {
543 // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
544 // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
545 // CHECK: %[[STR_ADDR:.*]] = alloca i32*, align 8
546 // CHECK: %[[LEN:.*]] = alloca i32, align 4
547 // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
548 // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
549 // CHECK: store i32* %[[STR]], i32** %[[STR_ADDR]], align 8
551 // CHECK: store volatile i32 12, i32* %[[LEN]], align 4
552 len = __builtin_os_log_format_buffer_size("%S", str);
554 // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
555 // CHECK: %[[V2:.*]] = load i32*, i32** %[[STR_ADDR]], align 8
556 // CHECK: %[[V3:.*]] = ptrtoint i32* %[[V2]] to i64
557 // CHECK: call void @__os_log_helper_1_2_1_8_80(i8* %[[V1]], i64 %[[V3]])
559 __builtin_os_log_format(buf, "%S", str);
562 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_8_80
563 // CHECK: (i8* %[[BUFFER:.*]], i64 %[[ARG0:.*]])
565 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
566 // CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
567 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
568 // CHECK: store i64 %[[ARG0]], i64* %[[ARG0_ADDR]], align 8
569 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
570 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
571 // CHECK: store i8 2, i8* %[[SUMMARY]], align 1
572 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
573 // CHECK: store i8 1, i8* %[[NUMARGS]], align 1
574 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
575 // CHECK: store i8 80, i8* %[[ARGDESCRIPTOR]], align 1
576 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
577 // CHECK: store i8 8, i8* %[[ARGSIZE]], align 1
578 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
579 // CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i64*
580 // CHECK: %[[V0:.*]] = load i64, i64* %[[ARG0_ADDR]], align 8
581 // CHECK: store i64 %[[V0]], i64* %[[ARGDATACAST]], align 1
583 // CHECK-LABEL: define void @test_builtin_os_log_precision_width
584 // CHECK: (i8* %[[BUF:.*]], i8* %[[DATA:.*]], i32 %[[PRECISION:.*]], i32 %[[WIDTH:.*]])
585 void test_builtin_os_log_precision_width(void *buf, const char *data,
586 int precision, int width) {
588 // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
589 // CHECK: %[[DATA_ADDR:.*]] = alloca i8*, align 8
590 // CHECK: %[[PRECISION_ADDR:.*]] = alloca i32, align 4
591 // CHECK: %[[WIDTH_ADDR:.*]] = alloca i32, align 4
592 // CHECK: %[[LEN:.*]] = alloca i32, align 4
593 // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
594 // CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8
595 // CHECK: store i32 %[[PRECISION]], i32* %[[PRECISION_ADDR]], align 4
596 // CHECK: store i32 %[[WIDTH]], i32* %[[WIDTH_ADDR]], align 4
598 // CHECK: store volatile i32 24, i32* %[[LEN]], align 4
599 len = __builtin_os_log_format_buffer_size("Hello %*.*s World", precision, width, data);
601 // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
602 // CHECK: %[[V2:.*]] = load i32, i32* %[[PRECISION_ADDR]], align 4
603 // CHECK: %[[V3:.*]] = load i32, i32* %[[WIDTH_ADDR]], align 4
604 // CHECK: %[[V4:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8
605 // CHECK: %[[V5:.*]] = ptrtoint i8* %[[V4]] to i64
606 // CHECK: call void @__os_log_helper_1_2_3_4_0_4_16_8_32(i8* %[[V1]], i32 %[[V2]], i32 %[[V3]], i64 %[[V5]])
607 __builtin_os_log_format(buf, "Hello %*.*s World", precision, width, data);
610 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_3_4_0_4_16_8_32
611 // CHECK: (i8* %[[BUFFER:.*]], i32 %[[ARG0:.*]], i32 %[[ARG1:.*]], i64 %[[ARG2:.*]])
613 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
614 // CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
615 // CHECK: %[[ARG1_ADDR:.*]] = alloca i32, align 4
616 // CHECK: %[[ARG2_ADDR:.*]] = alloca i64, align 8
617 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
618 // CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
619 // CHECK: store i32 %[[ARG1]], i32* %[[ARG1_ADDR]], align 4
620 // CHECK: store i64 %[[ARG2]], i64* %[[ARG2_ADDR]], align 8
621 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
622 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
623 // CHECK: store i8 2, i8* %[[SUMMARY]], align 1
624 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
625 // CHECK: store i8 3, i8* %[[NUMARGS]], align 1
626 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
627 // CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
628 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
629 // CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
630 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
631 // CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
632 // CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
633 // CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
634 // CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 8
635 // CHECK: store i8 16, i8* %[[ARGDESCRIPTOR1]], align 1
636 // CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 9
637 // CHECK: store i8 4, i8* %[[ARGSIZE2]], align 1
638 // CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 10
639 // CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i32*
640 // CHECK: %[[V1:.*]] = load i32, i32* %[[ARG1_ADDR]], align 4
641 // CHECK: store i32 %[[V1]], i32* %[[ARGDATACAST4]], align 1
642 // CHECK: %[[ARGDESCRIPTOR5:.*]] = getelementptr i8, i8* %[[BUF]], i64 14
643 // CHECK: store i8 32, i8* %[[ARGDESCRIPTOR5]], align 1
644 // CHECK: %[[ARGSIZE6:.*]] = getelementptr i8, i8* %[[BUF]], i64 15
645 // CHECK: store i8 8, i8* %[[ARGSIZE6]], align 1
646 // CHECK: %[[ARGDATA7:.*]] = getelementptr i8, i8* %[[BUF]], i64 16
647 // CHECK: %[[ARGDATACAST8:.*]] = bitcast i8* %[[ARGDATA7]] to i64*
648 // CHECK: %[[V2:.*]] = load i64, i64* %[[ARG2_ADDR]], align 8
649 // CHECK: store i64 %[[V2]], i64* %[[ARGDATACAST8]], align 1
651 // CHECK-LABEL: define void @test_builtin_os_log_invalid
652 // CHECK: (i8* %[[BUF:.*]], i32 %[[DATA:.*]])
653 void test_builtin_os_log_invalid(void *buf, int data) {
655 // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
656 // CHECK: %[[DATA_ADDR:.*]] = alloca i32, align 4
657 // CHECK: %[[LEN:.*]] = alloca i32, align 4
658 // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
659 // CHECK: store i32 %[[DATA]], i32* %[[DATA_ADDR]], align 4
661 // CHECK: store volatile i32 8, i32* %[[LEN]], align 4
662 len = __builtin_os_log_format_buffer_size("invalid specifier %: %d even a trailing one%", data);
664 // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
665 // CHECK: %[[V2:.*]] = load i32, i32* %[[DATA_ADDR]], align 4
666 // CHECK: call void @__os_log_helper_1_0_1_4_0(i8* %[[V1]], i32 %[[V2]])
668 __builtin_os_log_format(buf, "invalid specifier %: %d even a trailing one%", data);
671 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_4_0
672 // CHECK: (i8* %[[BUFFER:.*]], i32 %[[ARG0:.*]])
674 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
675 // CHECK: %[[ARG0_ADDR:.*]] = alloca i32, align 4
676 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
677 // CHECK: store i32 %[[ARG0]], i32* %[[ARG0_ADDR]], align 4
678 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
679 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
680 // CHECK: store i8 0, i8* %[[SUMMARY]], align 1
681 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
682 // CHECK: store i8 1, i8* %[[NUMARGS]], align 1
683 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
684 // CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
685 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
686 // CHECK: store i8 4, i8* %[[ARGSIZE]], align 1
687 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
688 // CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i32*
689 // CHECK: %[[V0:.*]] = load i32, i32* %[[ARG0_ADDR]], align 4
690 // CHECK: store i32 %[[V0]], i32* %[[ARGDATACAST]], align 1
692 // CHECK-LABEL: define void @test_builtin_os_log_percent
693 // CHECK: (i8* %[[BUF:.*]], i8* %[[DATA1:.*]], i8* %[[DATA2:.*]])
694 // Check that the %% which does not consume any argument is correctly handled
695 void test_builtin_os_log_percent(void *buf, const char *data1, const char *data2) {
697 // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
698 // CHECK: %[[DATA1_ADDR:.*]] = alloca i8*, align 8
699 // CHECK: %[[DATA2_ADDR:.*]] = alloca i8*, align 8
700 // CHECK: %[[LEN:.*]] = alloca i32, align 4
701 // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
702 // CHECK: store i8* %[[DATA1]], i8** %[[DATA1_ADDR]], align 8
703 // CHECK: store i8* %[[DATA2]], i8** %[[DATA2_ADDR]], align 8
704 // CHECK: store volatile i32 22, i32* %[[LEN]], align 4
706 len = __builtin_os_log_format_buffer_size("%s %% %s", data1, data2);
708 // CHECK: %[[V1:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
709 // CHECK: %[[V2:.*]] = load i8*, i8** %[[DATA1_ADDR]], align 8
710 // CHECK: %[[V3:.*]] = ptrtoint i8* %[[V2]] to i64
711 // CHECK: %[[V4:.*]] = load i8*, i8** %[[DATA2_ADDR]], align 8
712 // CHECK: %[[V5:.*]] = ptrtoint i8* %[[V4]] to i64
713 // CHECK: call void @__os_log_helper_1_2_2_8_32_8_32(i8* %[[V1]], i64 %[[V3]], i64 %[[V5]])
715 __builtin_os_log_format(buf, "%s %% %s", data1, data2);
718 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_2_8_32_8_32
719 // CHECK: (i8* %[[BUFFER:.*]], i64 %[[ARG0:.*]], i64 %[[ARG1:.*]])
721 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
722 // CHECK: %[[ARG0_ADDR:.*]] = alloca i64, align 8
723 // CHECK: %[[ARG1_ADDR:.*]] = alloca i64, align 8
724 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
725 // CHECK: store i64 %[[ARG0]], i64* %[[ARG0_ADDR]], align 8
726 // CHECK: store i64 %[[ARG1]], i64* %[[ARG1_ADDR]], align 8
727 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
728 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
729 // CHECK: store i8 2, i8* %[[SUMMARY]], align 1
730 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
731 // CHECK: store i8 2, i8* %[[NUMARGS]], align 1
732 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
733 // CHECK: store i8 32, i8* %[[ARGDESCRIPTOR]], align 1
734 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
735 // CHECK: store i8 8, i8* %[[ARGSIZE]], align 1
736 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
737 // CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i64*
738 // CHECK: %[[V0:.*]] = load i64, i64* %[[ARG0_ADDR]], align 8
739 // CHECK: store i64 %[[V0]], i64* %[[ARGDATACAST]], align 1
740 // CHECK: %[[ARGDESCRIPTOR1:.*]] = getelementptr i8, i8* %[[BUF]], i64 12
741 // CHECK: store i8 32, i8* %[[ARGDESCRIPTOR1]], align 1
742 // CHECK: %[[ARGSIZE2:.*]] = getelementptr i8, i8* %[[BUF]], i64 13
743 // CHECK: store i8 8, i8* %[[ARGSIZE2]], align 1
744 // CHECK: %[[ARGDATA3:.*]] = getelementptr i8, i8* %[[BUF]], i64 14
745 // CHECK: %[[ARGDATACAST4:.*]] = bitcast i8* %[[ARGDATA3]] to i64*
746 // CHECK: %[[V1:.*]] = load i64, i64* %[[ARG1_ADDR]], align 8
747 // CHECK: store i64 %[[V1]], i64* %[[ARGDATACAST4]], align 1
749 // Check that the following two functions call the same helper function.
751 // CHECK-LABEL: define void @test_builtin_os_log_merge_helper0
752 // CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
753 void test_builtin_os_log_merge_helper0(void *buf, int i, double d) {
754 __builtin_os_log_format(buf, "%d %f", i, d);
757 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_2_4_0_8_0(
759 // CHECK-LABEL: define void @test_builtin_os_log_merge_helper1
760 // CHECK: call void @__os_log_helper_1_0_2_4_0_8_0(
761 void test_builtin_os_log_merge_helper1(void *buf, unsigned u, long long ll) {
762 __builtin_os_log_format(buf, "%u %lld", u, ll);
765 // Check that this function doesn't write past the end of array 'buf'.
767 // CHECK-LABEL: define void @test_builtin_os_log_errno
768 void test_builtin_os_log_errno() {
769 // CHECK-NOT: @stacksave
770 // CHECK: %[[BUF:.*]] = alloca [4 x i8], align 1
771 // CHECK: %[[DECAY:.*]] = getelementptr inbounds [4 x i8], [4 x i8]* %[[BUF]], i32 0, i32 0
772 // CHECK: call void @__os_log_helper_1_2_1_0_96(i8* %[[DECAY]])
773 // CHECK-NOT: @stackrestore
775 char buf[__builtin_os_log_format_buffer_size("%m")];
776 __builtin_os_log_format(buf, "%m");
779 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_2_1_0_96
780 // CHECK: (i8* %[[BUFFER:.*]])
782 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
783 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
784 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
785 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
786 // CHECK: store i8 2, i8* %[[SUMMARY]], align 1
787 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
788 // CHECK: store i8 1, i8* %[[NUMARGS]], align 1
789 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
790 // CHECK: store i8 96, i8* %[[ARGDESCRIPTOR]], align 1
791 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
792 // CHECK: store i8 0, i8* %[[ARGSIZE]], align 1
793 // CHECK-NEXT: ret void
795 // CHECK-LABEL: define void @test_builtin_os_log_long_double
796 // CHECK: (i8* %[[BUF:.*]], x86_fp80 %[[LD:.*]])
797 void test_builtin_os_log_long_double(void *buf, long double ld) {
798 // CHECK: %[[BUF_ADDR:.*]] = alloca i8*, align 8
799 // CHECK: %[[LD_ADDR:.*]] = alloca x86_fp80, align 16
800 // CHECK: %[[COERCE:.*]] = alloca i128, align 16
801 // CHECK: store i8* %[[BUF]], i8** %[[BUF_ADDR]], align 8
802 // CHECK: store x86_fp80 %[[LD]], x86_fp80* %[[LD_ADDR]], align 16
803 // CHECK: %[[V0:.*]] = load i8*, i8** %[[BUF_ADDR]], align 8
804 // CHECK: %[[V1:.*]] = load x86_fp80, x86_fp80* %[[LD_ADDR]], align 16
805 // CHECK: %[[V2:.*]] = bitcast x86_fp80 %[[V1]] to i80
806 // CHECK: %[[V3:.*]] = zext i80 %[[V2]] to i128
807 // CHECK: store i128 %[[V3]], i128* %[[COERCE]], align 16
808 // CHECK: %[[V4:.*]] = bitcast i128* %[[COERCE]] to { i64, i64 }*
809 // CHECK: %[[V5:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V4]], i32 0, i32 0
810 // CHECK: %[[V6:.*]] = load i64, i64* %[[V5]], align 16
811 // CHECK: %[[V7:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V4]], i32 0, i32 1
812 // CHECK: %[[V8:.*]] = load i64, i64* %[[V7]], align 8
813 // CHECK: call void @__os_log_helper_1_0_1_16_0(i8* %[[V0]], i64 %[[V6]], i64 %[[V8]])
815 __builtin_os_log_format(buf, "%Lf", ld);
818 // CHECK-LABEL: define linkonce_odr hidden void @__os_log_helper_1_0_1_16_0
819 // CHECK: (i8* %[[BUFFER:.*]], i64 %[[ARG0_COERCE0:.*]], i64 %[[ARG0_COERCE1:.*]])
821 // CHECK: %[[ARG0:.*]] = alloca i128, align 16
822 // CHECK: %[[BUFFER_ADDR:.*]] = alloca i8*, align 8
823 // CHECK: %[[ARG0_ADDR:.*]] = alloca i128, align 16
824 // CHECK: %[[V0:.*]] = bitcast i128* %[[ARG0]] to { i64, i64 }*
825 // CHECK: %[[V1:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V0]], i32 0, i32 0
826 // CHECK: store i64 %[[ARG0_COERCE0]], i64* %[[V1]], align 16
827 // CHECK: %[[V2:.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* %[[V0]], i32 0, i32 1
828 // CHECK: store i64 %[[ARG0_COERCE1]], i64* %[[V2]], align 8
829 // CHECK: %[[ARG01:.*]] = load i128, i128* %[[ARG0]], align 16
830 // CHECK: store i8* %[[BUFFER]], i8** %[[BUFFER_ADDR]], align 8
831 // CHECK: store i128 %[[ARG01]], i128* %[[ARG0_ADDR]], align 16
832 // CHECK: %[[BUF:.*]] = load i8*, i8** %[[BUFFER_ADDR]], align 8
833 // CHECK: %[[SUMMARY:.*]] = getelementptr i8, i8* %[[BUF]], i64 0
834 // CHECK: store i8 0, i8* %[[SUMMARY]], align 1
835 // CHECK: %[[NUMARGS:.*]] = getelementptr i8, i8* %[[BUF]], i64 1
836 // CHECK: store i8 1, i8* %[[NUMARGS]], align 1
837 // CHECK: %[[ARGDESCRIPTOR:.*]] = getelementptr i8, i8* %[[BUF]], i64 2
838 // CHECK: store i8 0, i8* %[[ARGDESCRIPTOR]], align 1
839 // CHECK: %[[ARGSIZE:.*]] = getelementptr i8, i8* %[[BUF]], i64 3
840 // CHECK: store i8 16, i8* %[[ARGSIZE]], align 1
841 // CHECK: %[[ARGDATA:.*]] = getelementptr i8, i8* %[[BUF]], i64 4
842 // CHECK: %[[ARGDATACAST:.*]] = bitcast i8* %[[ARGDATA]] to i128*
843 // CHECK: %[[V3:.*]] = load i128, i128* %[[ARG0_ADDR]], align 16
844 // CHECK: store i128 %[[V3]], i128* %[[ARGDATACAST]], align 1