1 // RUN: %clang_cc1 -emit-llvm -o %t %s
2 // RUN: not grep __builtin %t
3 // RUN: %clang_cc1 %s -emit-llvm -o - -triple x86_64-darwin-apple | FileCheck %s
5 int printf(const char *, ...);
7 void p(char *str, int x) {
8 printf("%s: %d\n", str, x);
10 void q(char *str, double x) {
11 printf("%s: %f\n", str, x);
13 void r(char *str, void *ptr) {
14 printf("%s: %p\n", str, ptr);
// Test-harness macros: each expands a builtin call and prints its stringized
// name/arguments together with the result — P routes an int result through
// p(), Q a double through q(), R a pointer through r().  V evaluates the
// builtin purely for its side effects and prints 0 via the comma expression
// (for builtins whose value is void or not under test).
21 #define P(n,args) p(#n #args, __builtin_##n args)
22 #define Q(n,args) q(#n #args, __builtin_##n args)
23 #define R(n,args) r(#n #args, __builtin_##n args)
24 #define V(n,args) p(#n #args, (__builtin_##n args, 0))
25 P(types_compatible_p, (int, float));
26 P(choose_expr, (0, 10, 20));
27 P(constant_p, (sizeof(10)));
28 P(expect, (N == 12, 0));
31 V(prefetch, (&N, 1, 0));
42 P(fpclassify, (0, 1, 2, 3, 4, 1.0));
43 P(fpclassify, (0, 1, 2, 3, 4, 1.0f));
44 P(fpclassify, (0, 1, 2, 3, 4, 1.0l));
55 P(isgreater, (1., 2.));
56 P(isgreaterequal, (1., 2.));
58 P(islessequal, (1., 2.));
59 P(islessgreater, (1., 2.));
60 P(isunordered, (1., 2.));
66 // Bitwise & Numeric Functions
90 int a, b, n = random(); // Avoid optimizing out.
91 char s0[10], s1[] = "Hello";
94 V(strncat, (s0, s1, n));
95 V(strchr, (s0, s1[0]));
96 V(strrchr, (s0, s1[0]));
98 V(strncpy, (s0, s1, n));
100 // Object size checking
101 V(__memset_chk, (s0, 0, sizeof s0, n));
102 V(__memcpy_chk, (s0, s1, sizeof s0, n));
103 V(__memmove_chk, (s0, s1, sizeof s0, n));
104 V(__mempcpy_chk, (s0, s1, sizeof s0, n));
105 V(__strncpy_chk, (s0, s1, sizeof s0, n));
106 V(__strcpy_chk, (s0, s1, n));
108 V(__strcat_chk, (s0, s1, n));
109 P(object_size, (s0, 0));
110 P(object_size, (s0, 1));
111 P(object_size, (s0, 2));
112 P(object_size, (s0, 3));
120 // CHECK: @llvm.bitreverse.i8
121 // CHECK: @llvm.bitreverse.i16
122 // CHECK: @llvm.bitreverse.i32
123 // CHECK: @llvm.bitreverse.i64
125 P(bitreverse16, (N));
126 P(bitreverse32, (N));
127 P(bitreverse64, (N));
130 // V(clear_cache, (&N, &N+1));
132 R(extract_return_addr, (&N));
141 __builtin_strcat(0, 0);
144 // CHECK-LABEL: define void @bar(
150 // LLVM's hex representation of float constants is really unfortunate;
151 // basically it does a float-to-double "conversion" and then prints the
152 // hex form of that. That gives us weird artifacts like exponents
153 // that aren't numerically similar to the original exponent and
154 // significand bit-patterns that are offset by three bits (because
155 // the exponent was expanded from 8 bits to 11).
157 // 0xAE98 == 1010111010011000
158 // 0x15D3 == 1010111010011
160 f = __builtin_huge_valf(); // CHECK: float 0x7FF0000000000000
161 d = __builtin_huge_val(); // CHECK: double 0x7FF0000000000000
162 ld = __builtin_huge_vall(); // CHECK: x86_fp80 0xK7FFF8000000000000000
163 f = __builtin_nanf(""); // CHECK: float 0x7FF8000000000000
164 d = __builtin_nan(""); // CHECK: double 0x7FF8000000000000
165 ld = __builtin_nanl(""); // CHECK: x86_fp80 0xK7FFFC000000000000000
166 f = __builtin_nanf("0xAE98"); // CHECK: float 0x7FF815D300000000
167 d = __builtin_nan("0xAE98"); // CHECK: double 0x7FF800000000AE98
168 ld = __builtin_nanl("0xAE98"); // CHECK: x86_fp80 0xK7FFFC00000000000AE98
169 f = __builtin_nansf(""); // CHECK: float 0x7FF4000000000000
170 d = __builtin_nans(""); // CHECK: double 0x7FF4000000000000
171 ld = __builtin_nansl(""); // CHECK: x86_fp80 0xK7FFFA000000000000000
172 f = __builtin_nansf("0xAE98"); // CHECK: float 0x7FF015D300000000
173 d = __builtin_nans("0xAE98"); // CHECK: double 0x7FF000000000AE98
174 ld = __builtin_nansl("0xAE98");// CHECK: x86_fp80 0xK7FFF800000000000AE98
180 // CHECK-LABEL: define void @test_float_builtins
181 void test_float_builtins(float F, double D, long double LD) {
183 res = __builtin_isinf(F);
184 // CHECK: call float @llvm.fabs.f32(float
185 // CHECK: fcmp oeq float {{.*}}, 0x7FF0000000000000
187 res = __builtin_isinf(D);
188 // CHECK: call double @llvm.fabs.f64(double
189 // CHECK: fcmp oeq double {{.*}}, 0x7FF0000000000000
191 res = __builtin_isinf(LD);
192 // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80
193 // CHECK: fcmp oeq x86_fp80 {{.*}}, 0xK7FFF8000000000000000
195 res = __builtin_isinf_sign(F);
196 // CHECK: %[[ABS:.*]] = call float @llvm.fabs.f32(float %[[ARG:.*]])
197 // CHECK: %[[ISINF:.*]] = fcmp oeq float %[[ABS]], 0x7FF0000000000000
198 // CHECK: %[[BITCAST:.*]] = bitcast float %[[ARG]] to i32
199 // CHECK: %[[ISNEG:.*]] = icmp slt i32 %[[BITCAST]], 0
200 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
201 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
203 res = __builtin_isinf_sign(D);
204 // CHECK: %[[ABS:.*]] = call double @llvm.fabs.f64(double %[[ARG:.*]])
205 // CHECK: %[[ISINF:.*]] = fcmp oeq double %[[ABS]], 0x7FF0000000000000
206 // CHECK: %[[BITCAST:.*]] = bitcast double %[[ARG]] to i64
207 // CHECK: %[[ISNEG:.*]] = icmp slt i64 %[[BITCAST]], 0
208 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
209 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
211 res = __builtin_isinf_sign(LD);
212 // CHECK: %[[ABS:.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 %[[ARG:.*]])
213 // CHECK: %[[ISINF:.*]] = fcmp oeq x86_fp80 %[[ABS]], 0xK7FFF8000000000000000
214 // CHECK: %[[BITCAST:.*]] = bitcast x86_fp80 %[[ARG]] to i80
215 // CHECK: %[[ISNEG:.*]] = icmp slt i80 %[[BITCAST]], 0
216 // CHECK: %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
217 // CHECK: select i1 %[[ISINF]], i32 %[[SIGN]], i32 0
219 res = __builtin_isfinite(F);
220 // CHECK: call float @llvm.fabs.f32(float
221 // CHECK: fcmp one float {{.*}}, 0x7FF0000000000000
224 // CHECK: call double @llvm.fabs.f64(double
225 // CHECK: fcmp one double {{.*}}, 0x7FF0000000000000
227 res = __builtin_isnormal(F);
228 // CHECK: fcmp oeq float
229 // CHECK: call float @llvm.fabs.f32(float
230 // CHECK: fcmp ult float {{.*}}, 0x7FF0000000000000
231 // CHECK: fcmp uge float {{.*}}, 0x3810000000000000
236 // CHECK-LABEL: define void @test_float_builtin_ops
237 void test_float_builtin_ops(float F, double D, long double LD) {
239 volatile double resd;
240 volatile long double resld;
242 resf = __builtin_fmodf(F,F);
245 resd = __builtin_fmod(D,D);
246 // CHECK: frem double
248 resld = __builtin_fmodl(LD,LD);
249 // CHECK: frem x86_fp80
251 resf = __builtin_fabsf(F);
252 resd = __builtin_fabs(D);
253 resld = __builtin_fabsl(LD);
254 // CHECK: call float @llvm.fabs.f32(float
255 // CHECK: call double @llvm.fabs.f64(double
256 // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80
258 resf = __builtin_canonicalizef(F);
259 resd = __builtin_canonicalize(D);
260 resld = __builtin_canonicalizel(LD);
261 // CHECK: call float @llvm.canonicalize.f32(float
262 // CHECK: call double @llvm.canonicalize.f64(double
263 // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80
265 resf = __builtin_fminf(F, F);
266 // CHECK: call float @llvm.minnum.f32
268 resd = __builtin_fmin(D, D);
269 // CHECK: call double @llvm.minnum.f64
271 resld = __builtin_fminl(LD, LD);
272 // CHECK: call x86_fp80 @llvm.minnum.f80
274 resf = __builtin_fmaxf(F, F);
275 // CHECK: call float @llvm.maxnum.f32
277 resd = __builtin_fmax(D, D);
278 // CHECK: call double @llvm.maxnum.f64
280 resld = __builtin_fmaxl(LD, LD);
281 // CHECK: call x86_fp80 @llvm.maxnum.f80
283 resf = __builtin_fabsf(F);
284 // CHECK: call float @llvm.fabs.f32
286 resd = __builtin_fabs(D);
287 // CHECK: call double @llvm.fabs.f64
289 resld = __builtin_fabsl(LD);
290 // CHECK: call x86_fp80 @llvm.fabs.f80
292 resf = __builtin_copysignf(F, F);
293 // CHECK: call float @llvm.copysign.f32
295 resd = __builtin_copysign(D, D);
296 // CHECK: call double @llvm.copysign.f64
298 resld = __builtin_copysignl(LD, LD);
299 // CHECK: call x86_fp80 @llvm.copysign.f80
302 resf = __builtin_ceilf(F);
303 // CHECK: call float @llvm.ceil.f32
305 resd = __builtin_ceil(D);
306 // CHECK: call double @llvm.ceil.f64
308 resld = __builtin_ceill(LD);
309 // CHECK: call x86_fp80 @llvm.ceil.f80
311 resf = __builtin_floorf(F);
312 // CHECK: call float @llvm.floor.f32
314 resd = __builtin_floor(D);
315 // CHECK: call double @llvm.floor.f64
317 resld = __builtin_floorl(LD);
318 // CHECK: call x86_fp80 @llvm.floor.f80
320 resf = __builtin_truncf(F);
321 // CHECK: call float @llvm.trunc.f32
323 resd = __builtin_trunc(D);
324 // CHECK: call double @llvm.trunc.f64
326 resld = __builtin_truncl(LD);
327 // CHECK: call x86_fp80 @llvm.trunc.f80
329 resf = __builtin_rintf(F);
330 // CHECK: call float @llvm.rint.f32
332 resd = __builtin_rint(D);
333 // CHECK: call double @llvm.rint.f64
335 resld = __builtin_rintl(LD);
336 // CHECK: call x86_fp80 @llvm.rint.f80
338 resf = __builtin_nearbyintf(F);
339 // CHECK: call float @llvm.nearbyint.f32
341 resd = __builtin_nearbyint(D);
342 // CHECK: call double @llvm.nearbyint.f64
344 resld = __builtin_nearbyintl(LD);
345 // CHECK: call x86_fp80 @llvm.nearbyint.f80
347 resf = __builtin_roundf(F);
348 // CHECK: call float @llvm.round.f32
350 resd = __builtin_round(D);
351 // CHECK: call double @llvm.round.f64
353 resld = __builtin_roundl(LD);
354 // CHECK: call x86_fp80 @llvm.round.f80
358 // __builtin_longjmp isn't supported on all platforms, so only test it on X86.
361 // CHECK-LABEL: define void @test_builtin_longjmp
362 void test_builtin_longjmp(void **buffer) {
363 // CHECK: [[BITCAST:%.*]] = bitcast
364 // CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(i8* [[BITCAST]])
365 __builtin_longjmp(buffer, 1);
366 // CHECK-NEXT: unreachable
371 // CHECK-LABEL: define i64 @test_builtin_readcyclecounter
372 long long test_builtin_readcyclecounter() {
373 // CHECK: call i64 @llvm.readcyclecounter()
374 return __builtin_readcyclecounter();
377 // Behavior of __builtin_os_log differs between platforms, so only test on X86
380 // CHECK-LABEL: define void @test_builtin_os_log
381 // CHECK: (i8* [[BUF:%.*]], i32 [[I:%.*]], i8* [[DATA:%.*]])
382 void test_builtin_os_log(void *buf, int i, const char *data) {
384 // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
385 // CHECK: store i32 [[I]], i32* [[I_ADDR:%.*]], align 4
386 // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
388 // CHECK: store volatile i32 34
389 len = __builtin_os_log_format_buffer_size("%d %{public}s %{private}.16P", i, data, data);
391 // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
392 // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
393 // CHECK: store i8 3, i8* [[SUMMARY]]
394 // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
395 // CHECK: store i8 4, i8* [[NUM_ARGS]]
397 // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
398 // CHECK: store i8 0, i8* [[ARG1_DESC]]
399 // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
400 // CHECK: store i8 4, i8* [[ARG1_SIZE]]
401 // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
402 // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
403 // CHECK: [[I2:%.*]] = load i32, i32* [[I_ADDR]]
404 // CHECK: store i32 [[I2]], i32* [[ARG1_INT]]
406 // CHECK: [[ARG2_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 8
407 // CHECK: store i8 34, i8* [[ARG2_DESC]]
408 // CHECK: [[ARG2_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 9
409 // CHECK: store i8 8, i8* [[ARG2_SIZE]]
410 // CHECK: [[ARG2:%.*]] = getelementptr i8, i8* [[BUF2]], i64 10
411 // CHECK: [[ARG2_PTR:%.*]] = bitcast i8* [[ARG2]] to i8**
412 // CHECK: [[DATA2:%.*]] = load i8*, i8** [[DATA_ADDR]]
413 // CHECK: store i8* [[DATA2]], i8** [[ARG2_PTR]]
415 // CHECK: [[ARG3_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 18
416 // CHECK: store i8 17, i8* [[ARG3_DESC]]
417 // CHECK: [[ARG3_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 19
418 // CHECK: store i8 4, i8* [[ARG3_SIZE]]
419 // CHECK: [[ARG3:%.*]] = getelementptr i8, i8* [[BUF2]], i64 20
420 // CHECK: [[ARG3_INT:%.*]] = bitcast i8* [[ARG3]] to i32*
421 // CHECK: store i32 16, i32* [[ARG3_INT]]
423 // CHECK: [[ARG4_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 24
424 // CHECK: store i8 49, i8* [[ARG4_DESC]]
425 // CHECK: [[ARG4_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 25
426 // CHECK: store i8 8, i8* [[ARG4_SIZE]]
427 // CHECK: [[ARG4:%.*]] = getelementptr i8, i8* [[BUF2]], i64 26
428 // CHECK: [[ARG4_PTR:%.*]] = bitcast i8* [[ARG4]] to i8**
429 // CHECK: [[DATA3:%.*]] = load i8*, i8** [[DATA_ADDR]]
430 // CHECK: store i8* [[DATA3]], i8** [[ARG4_PTR]]
432 __builtin_os_log_format(buf, "%d %{public}s %{private}.16P", i, data, data);
435 // CHECK-LABEL: define void @test_builtin_os_log_errno
436 // CHECK: (i8* [[BUF:%.*]], i8* [[DATA:%.*]])
437 void test_builtin_os_log_errno(void *buf, const char *data) {
439 // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
440 // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
442 // CHECK: store volatile i32 2
443 len = __builtin_os_log_format_buffer_size("%S");
445 // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
446 // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
447 // CHECK: store i8 2, i8* [[SUMMARY]]
448 // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
449 // CHECK: store i8 1, i8* [[NUM_ARGS]]
451 // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
452 // CHECK: store i8 96, i8* [[ARG1_DESC]]
453 // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
454 // CHECK: store i8 0, i8* [[ARG1_SIZE]]
455 // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
456 // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
457 // CHECK: store i32 0, i32* [[ARG1_INT]]
459 __builtin_os_log_format(buf, "%m");
462 // CHECK-LABEL: define void @test_builtin_os_log_wide
463 // CHECK: (i8* [[BUF:%.*]], i8* [[DATA:%.*]], i32* [[STR:%.*]])
465 void test_builtin_os_log_wide(void *buf, const char *data, wchar_t *str) {
467 // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
468 // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
469 // CHECK: store i32* [[STR]], i32** [[STR_ADDR:%.*]],
471 // CHECK: store volatile i32 12
472 len = __builtin_os_log_format_buffer_size("%S", str);
474 // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
475 // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
476 // CHECK: store i8 2, i8* [[SUMMARY]]
477 // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
478 // CHECK: store i8 1, i8* [[NUM_ARGS]]
480 // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
481 // CHECK: store i8 80, i8* [[ARG1_DESC]]
482 // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
483 // CHECK: store i8 8, i8* [[ARG1_SIZE]]
484 // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
485 // CHECK: [[ARG1_PTR:%.*]] = bitcast i8* [[ARG1]] to i32**
486 // CHECK: [[STR2:%.*]] = load i32*, i32** [[STR_ADDR]]
487 // CHECK: store i32* [[STR2]], i32** [[ARG1_PTR]]
489 __builtin_os_log_format(buf, "%S", str);
492 // CHECK-LABEL: define void @test_builtin_os_log_precision_width
493 // CHECK: (i8* [[BUF:%.*]], i8* [[DATA:%.*]], i32 [[PRECISION:%.*]], i32 [[WIDTH:%.*]])
494 void test_builtin_os_log_precision_width(void *buf, const char *data,
495 int precision, int width) {
497 // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
498 // CHECK: store i8* [[DATA]], i8** [[DATA_ADDR:%.*]], align 8
499 // CHECK: store i32 [[PRECISION]], i32* [[PRECISION_ADDR:%.*]], align 4
500 // CHECK: store i32 [[WIDTH]], i32* [[WIDTH_ADDR:%.*]], align 4
502 // CHECK: store volatile i32 24,
503 len = __builtin_os_log_format_buffer_size("Hello %*.*s World", precision, width, data);
505 // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
506 // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
507 // CHECK: store i8 2, i8* [[SUMMARY]]
508 // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
509 // CHECK: store i8 3, i8* [[NUM_ARGS]]
511 // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
512 // CHECK: store i8 0, i8* [[ARG1_DESC]]
513 // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
514 // CHECK: store i8 4, i8* [[ARG1_SIZE]]
515 // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
516 // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
517 // CHECK: [[ARG1_VAL:%.*]] = load i32, i32* [[PRECISION_ADDR]]
518 // CHECK: store i32 [[ARG1_VAL]], i32* [[ARG1_INT]]
520 // CHECK: [[ARG2_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 8
521 // CHECK: store i8 16, i8* [[ARG2_DESC]]
522 // CHECK: [[ARG2_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 9
523 // CHECK: store i8 4, i8* [[ARG2_SIZE]]
524 // CHECK: [[ARG2:%.*]] = getelementptr i8, i8* [[BUF2]], i64 10
525 // CHECK: [[ARG2_INT:%.*]] = bitcast i8* [[ARG2]] to i32*
526 // CHECK: [[ARG2_VAL:%.*]] = load i32, i32* [[WIDTH_ADDR]]
527 // CHECK: store i32 [[ARG2_VAL]], i32* [[ARG2_INT]]
529 // CHECK: [[ARG3_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 14
530 // CHECK: store i8 32, i8* [[ARG3_DESC]]
531 // CHECK: [[ARG3_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 15
532 // CHECK: store i8 8, i8* [[ARG3_SIZE]]
533 // CHECK: [[ARG3:%.*]] = getelementptr i8, i8* [[BUF2]], i64 16
534 // CHECK: [[ARG3_PTR:%.*]] = bitcast i8* [[ARG3]] to i8**
535 // CHECK: [[DATA2:%.*]] = load i8*, i8** [[DATA_ADDR]]
536 // CHECK: store i8* [[DATA2]], i8** [[ARG3_PTR]]
538 __builtin_os_log_format(buf, "Hello %*.*s World", precision, width, data);
541 // CHECK-LABEL: define void @test_builtin_os_log_invalid
542 // CHECK: (i8* [[BUF:%.*]], i32 [[DATA:%.*]])
543 void test_builtin_os_log_invalid(void *buf, int data) {
545 // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
546 // CHECK: store i32 [[DATA]], i32* [[DATA_ADDR:%.*]]
548 // CHECK: store volatile i32 8,
549 len = __builtin_os_log_format_buffer_size("invalid specifier %: %d even a trailing one%", data);
551 // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
552 // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
553 // CHECK: store i8 0, i8* [[SUMMARY]]
554 // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
555 // CHECK: store i8 1, i8* [[NUM_ARGS]]
557 // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
558 // CHECK: store i8 0, i8* [[ARG1_DESC]]
559 // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
560 // CHECK: store i8 4, i8* [[ARG1_SIZE]]
561 // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
562 // CHECK: [[ARG1_INT:%.*]] = bitcast i8* [[ARG1]] to i32*
563 // CHECK: [[ARG1_VAL:%.*]] = load i32, i32* [[DATA_ADDR]]
564 // CHECK: store i32 [[ARG1_VAL]], i32* [[ARG1_INT]]
566 __builtin_os_log_format(buf, "invalid specifier %: %d even a trailing one%", data);
569 // CHECK-LABEL: define void @test_builtin_os_log_percent
570 // CHECK: (i8* [[BUF:%.*]], i8* [[DATA1:%.*]], i8* [[DATA2:%.*]])
571 // Check that "%%", which does not consume any argument, is handled correctly.
572 void test_builtin_os_log_percent(void *buf, const char *data1, const char *data2) {
574 // CHECK: store i8* [[BUF]], i8** [[BUF_ADDR:%.*]], align 8
575 // CHECK: store i8* [[DATA1]], i8** [[DATA1_ADDR:%.*]], align 8
576 // CHECK: store i8* [[DATA2]], i8** [[DATA2_ADDR:%.*]], align 8
577 // CHECK: store volatile i32 22
578 len = __builtin_os_log_format_buffer_size("%s %% %s", data1, data2);
580 // CHECK: [[BUF2:%.*]] = load i8*, i8** [[BUF_ADDR]]
581 // CHECK: [[SUMMARY:%.*]] = getelementptr i8, i8* [[BUF2]], i64 0
582 // CHECK: store i8 2, i8* [[SUMMARY]]
583 // CHECK: [[NUM_ARGS:%.*]] = getelementptr i8, i8* [[BUF2]], i64 1
584 // CHECK: store i8 2, i8* [[NUM_ARGS]]
586 // CHECK: [[ARG1_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 2
587 // CHECK: store i8 32, i8* [[ARG1_DESC]]
588 // CHECK: [[ARG1_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 3
589 // CHECK: store i8 8, i8* [[ARG1_SIZE]]
590 // CHECK: [[ARG1:%.*]] = getelementptr i8, i8* [[BUF2]], i64 4
591 // CHECK: [[ARG1_PTR:%.*]] = bitcast i8* [[ARG1]] to i8**
592 // CHECK: [[DATA1:%.*]] = load i8*, i8** [[DATA1_ADDR]]
593 // CHECK: store i8* [[DATA1]], i8** [[ARG1_PTR]]
595 // CHECK: [[ARG2_DESC:%.*]] = getelementptr i8, i8* [[BUF2]], i64 12
596 // CHECK: store i8 32, i8* [[ARG2_DESC]]
597 // CHECK: [[ARG2_SIZE:%.*]] = getelementptr i8, i8* [[BUF2]], i64 13
598 // CHECK: store i8 8, i8* [[ARG2_SIZE]]
599 // CHECK: [[ARG2:%.*]] = getelementptr i8, i8* [[BUF2]], i64 14
600 // CHECK: [[ARG2_PTR:%.*]] = bitcast i8* [[ARG2]] to i8**
601 // CHECK: [[DATA2:%.*]] = load i8*, i8** [[DATA2_ADDR]]
602 // CHECK: store i8* [[DATA2]], i8** [[ARG2_PTR]]
603 __builtin_os_log_format(buf, "%s %% %s", data1, data2);