// Test CodeGen for Security Check Overflow Builtins.

// RUN: %clang_cc1 -triple "i686-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple "x86_64-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple "x86_64-mingw32" -emit-llvm -x c %s -o - | FileCheck %s

// These globals and the handler are intentionally never defined: the test only
// checks the emitted LLVM IR (-emit-llvm | FileCheck); nothing is ever linked.
extern unsigned UnsignedErrorCode;
extern unsigned long UnsignedLongErrorCode;
extern unsigned long long UnsignedLongLongErrorCode;
extern int IntErrorCode;
extern long LongErrorCode;
extern long long LongLongErrorCode;
void overflowed(void);
// unsigned + unsigned -> unsigned maps directly to llvm.uadd.with.overflow.
unsigned test_add_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define i32 @test_add_overflow_uint_uint_uint
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}
// int + int -> int maps directly to llvm.sadd.with.overflow.
int test_add_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define i32 @test_add_overflow_int_int_int
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}
// unsigned - unsigned -> unsigned maps directly to llvm.usub.with.overflow.
unsigned test_sub_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define i32 @test_sub_overflow_uint_uint_uint
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  unsigned r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}
// int - int -> int maps directly to llvm.ssub.with.overflow.
int test_sub_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define i32 @test_sub_overflow_int_int_int
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  int r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}
// unsigned * unsigned -> unsigned maps directly to llvm.umul.with.overflow.
unsigned test_mul_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define i32 @test_mul_overflow_uint_uint_uint
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  unsigned r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}
// int * int -> int maps directly to llvm.smul.with.overflow.
int test_mul_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define i32 @test_mul_overflow_int_int_int
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}
// Mixed unsigned/int operands, int result: both operands are widened to i33
// (wide enough for either range), added signed, then the result is
// range-checked when truncated back to i32.
int test_add_overflow_uint_int_int(unsigned x, int y) {
  // CHECK-LABEL: define i32 @test_add_overflow_uint_int_int
  // CHECK: [[XE:%.+]] = zext i32 %{{.+}} to i33
  // CHECK: [[YE:%.+]] = sext i32 %{{.+}} to i33
  // CHECK: [[S:%.+]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 [[XE]], i33 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i33, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i33, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i33 [[Q]] to i32
  // CHECK: [[QTE:%.+]] = sext i32 [[QT]] to i33
  // CHECK: [[C2:%.+]] = icmp ne i33 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: store i32 [[QT]], i32*
  // CHECK: br i1 [[C3]]
  extern void overflowed(void); // re-declares the file-scope handler
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}
119 _Bool test_add_overflow_uint_uint_bool(unsigned x, unsigned y) {
120 // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_uint_uint_bool
122 // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
123 // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
124 // CHECK-DAG: [[C1:%.+]] = extractvalue { i32, i1 } [[S]], 1
125 // CHECK: [[QT:%.+]] = trunc i32 [[Q]] to i1
126 // CHECK: [[QTE:%.+]] = zext i1 [[QT]] to i32
127 // CHECK: [[C2:%.+]] = icmp ne i32 [[Q]], [[QTE]]
128 // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
129 // CHECK: [[QT2:%.+]] = zext i1 [[QT]] to i8
130 // CHECK: store i8 [[QT2]], i8*
131 // CHECK: br i1 [[C3]]
133 if (__builtin_add_overflow(x, y, &r))
// _Bool operands with unsigned result: bools are zero-extended to i32 first,
// so a plain i32 unsigned add suffices (no truncation check needed).
unsigned test_add_overflow_bool_bool_uint(_Bool x, _Bool y) {
  // CHECK-LABEL: define i32 @test_add_overflow_bool_bool_uint
  // CHECK: [[XE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[YE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[XE]], i32 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}
153 _Bool test_add_overflow_bool_bool_bool(_Bool x, _Bool y) {
154 // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_bool_bool_bool
155 // CHECK: [[S:%.+]] = call { i1, i1 } @llvm.uadd.with.overflow.i1(i1 %{{.+}}, i1 %{{.+}})
156 // CHECK-DAG: [[Q:%.+]] = extractvalue { i1, i1 } [[S]], 0
157 // CHECK-DAG: [[C:%.+]] = extractvalue { i1, i1 } [[S]], 1
158 // CHECK: [[QT2:%.+]] = zext i1 [[Q]] to i8
159 // CHECK: store i8 [[QT2]], i8*
160 // CHECK: br i1 [[C]]
162 if (__builtin_add_overflow(x, y, &r))
// Volatile result pointer: the overflow result must be stored with a
// volatile store.
int test_add_overflow_volatile(int x, int y) {
  // CHECK-LABEL: define i32 @test_add_overflow_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store volatile i32 [[Q]], i32*
  // CHECK: br i1 [[C]]
  extern void overflowed(void); // re-declares the file-scope handler
  volatile int result;
  if (__builtin_add_overflow(x, y, &result))
    overflowed();
  return result;
}
// Typed builtin: __builtin_uadd_overflow on unsigned int.
unsigned test_uadd_overflow(unsigned x, unsigned y) {
  // CHECK: @test_uadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  extern unsigned UnsignedErrorCode; // re-declares the file-scope global
  unsigned result;
  if (__builtin_uadd_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}
// Typed builtin: __builtin_uaddl_overflow. [[UL]] captures the target's
// `long` width (i32 on i686/mingw64, i64 on x86_64-linux).
unsigned long test_uaddl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_uaddl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  extern unsigned long UnsignedLongErrorCode; // re-declares the file-scope global
  unsigned long result;
  if (__builtin_uaddl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}
// Typed builtin: __builtin_uaddll_overflow on unsigned long long (always i64).
unsigned long long test_uaddll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_uaddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  extern unsigned long long UnsignedLongLongErrorCode; // re-declares the file-scope global
  unsigned long long result;
  if (__builtin_uaddll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}
// Typed builtin: __builtin_usub_overflow on unsigned int.
unsigned test_usub_overflow(unsigned x, unsigned y) {
  // CHECK: @test_usub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  extern unsigned UnsignedErrorCode; // re-declares the file-scope global
  unsigned result;
  if (__builtin_usub_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}
// Typed builtin: __builtin_usubl_overflow; [[UL]] tracks the target `long` width.
unsigned long test_usubl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_usubl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  extern unsigned long UnsignedLongErrorCode; // re-declares the file-scope global
  unsigned long result;
  if (__builtin_usubl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}
// Typed builtin: __builtin_usubll_overflow on unsigned long long (always i64).
unsigned long long test_usubll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_usubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  extern unsigned long long UnsignedLongLongErrorCode; // re-declares the file-scope global
  unsigned long long result;
  if (__builtin_usubll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}
// Typed builtin: __builtin_umul_overflow on unsigned int.
unsigned test_umul_overflow(unsigned x, unsigned y) {
  // CHECK: @test_umul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  extern unsigned UnsignedErrorCode; // re-declares the file-scope global
  unsigned result;
  if (__builtin_umul_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}
// Typed builtin: __builtin_umull_overflow; [[UL]] tracks the target `long` width.
unsigned long test_umull_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_umull_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.umul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  extern unsigned long UnsignedLongErrorCode; // re-declares the file-scope global
  unsigned long result;
  if (__builtin_umull_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}
// Typed builtin: __builtin_umulll_overflow on unsigned long long (always i64).
unsigned long long test_umulll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_umulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  extern unsigned long long UnsignedLongLongErrorCode; // re-declares the file-scope global
  unsigned long long result;
  if (__builtin_umulll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}
// Typed builtin: __builtin_sadd_overflow on int.
int test_sadd_overflow(int x, int y) {
  // CHECK: @test_sadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  extern int IntErrorCode; // re-declares the file-scope global
  int result;
  if (__builtin_sadd_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}
// Typed builtin: __builtin_saddl_overflow; [[UL]] tracks the target `long` width.
long test_saddl_overflow(long x, long y) {
  // CHECK: @test_saddl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.sadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  extern long LongErrorCode; // re-declares the file-scope global
  long result;
  if (__builtin_saddl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}
// Typed builtin: __builtin_saddll_overflow on long long (always i64).
long long test_saddll_overflow(long long x, long long y) {
  // CHECK: @test_saddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  extern long long LongLongErrorCode; // re-declares the file-scope global
  long long result;
  if (__builtin_saddll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}
// Typed builtin: __builtin_ssub_overflow on int.
int test_ssub_overflow(int x, int y) {
  // CHECK: @test_ssub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  extern int IntErrorCode; // re-declares the file-scope global
  int result;
  if (__builtin_ssub_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}
// Typed builtin: __builtin_ssubl_overflow; [[UL]] tracks the target `long` width.
long test_ssubl_overflow(long x, long y) {
  // CHECK: @test_ssubl_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.ssub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  extern long LongErrorCode; // re-declares the file-scope global
  long result;
  if (__builtin_ssubl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}
// Typed builtin: __builtin_ssubll_overflow on long long (always i64).
long long test_ssubll_overflow(long long x, long long y) {
  // CHECK: @test_ssubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  extern long long LongLongErrorCode; // re-declares the file-scope global
  long long result;
  if (__builtin_ssubll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}
// Typed builtin: __builtin_smul_overflow on int.
int test_smul_overflow(int x, int y) {
  // CHECK: @test_smul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  extern int IntErrorCode; // re-declares the file-scope global
  int result;
  if (__builtin_smul_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}
// Typed builtin: __builtin_smull_overflow; [[UL]] tracks the target `long` width.
long test_smull_overflow(long x, long y) {
  // CHECK: @test_smull_overflow([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.smul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  extern long LongErrorCode; // re-declares the file-scope global
  long result;
  if (__builtin_smull_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}
// Typed builtin: __builtin_smulll_overflow on long long (always i64).
long long test_smulll_overflow(long long x, long long y) {
  // CHECK: @test_smulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  extern long long LongLongErrorCode; // re-declares the file-scope global
  long long result;
  if (__builtin_smulll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}
// Mixed-sign multiply with a signed result: lowered via |x| * y using
// llvm.umul.with.overflow, then compared against INT_MAX (+1 when the
// product must be negative) and negated back if needed.
int test_mixed_sign_mull_overflow(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[IsNegZext:%.*]] = zext i1 [[IsNeg]] to i32
  // CHECK-NEXT: [[MaxResult:%.*]] = add i32 2147483647, [[IsNegZext]]
  // CHECK-NEXT: [[SignedOFlow:%.*]] = icmp ugt i32 [[UnsignedResult]], [[MaxResult]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[SignedOFlow]]
  // CHECK-NEXT: [[NegativeResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegativeResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], i32* %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]
  extern long LongErrorCode; // re-declares the file-scope global
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}
// Mixed-sign multiply with an unsigned result: any nonzero product of a
// negative operand underflows, so the check is (IsNeg && result != 0).
int test_mixed_sign_mull_overflow_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_unsigned
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[NotNull:%.*]] = icmp ne i32 [[UnsignedResult]], 0
  // CHECK-NEXT: [[Underflow:%.*]] = and i1 [[IsNeg]], [[NotNull]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[Underflow]]
  // CHECK-NEXT: store i32 [[UnsignedResult]], i32* %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]
  extern long LongErrorCode; // re-declares the file-scope global
  unsigned result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}
// Same as test_mixed_sign_mull_overflow but with the operands swapped
// (unsigned first); the lowering must be identical.
int test_mixed_sign_mull_overflow_swapped(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_swapped
  // CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
  // CHECK: add i32 2147483647
  extern long LongErrorCode; // re-declares the file-scope global
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongErrorCode;
  return result;
}
// 64-bit variant of the mixed-sign multiply with a signed result.
long long test_mixed_sign_mulll_overflow(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  extern long long LongLongErrorCode; // re-declares the file-scope global
  long long result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}
// 64-bit mixed-sign multiply with operands swapped; same lowering expected.
long long test_mixed_sign_mulll_overflow_swapped(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_swapped
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  extern long long LongLongErrorCode; // re-declares the file-scope global
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}
// 64-bit mixed-sign multiply truncated into a signed int result: the limit
// becomes INT_MAX (2147483647) rather than the i64 maximum.
long long test_mixed_sign_mulll_overflow_trunc_signed(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_signed
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 2147483647
  extern long long LongLongErrorCode; // re-declares the file-scope global
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}
// 64-bit mixed-sign multiply truncated into an unsigned int result: the
// underflow check is combined with an explicit > UINT_MAX truncation check.
long long test_mixed_sign_mulll_overflow_trunc_unsigned(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_unsigned
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: [[NON_ZERO:%.*]] = icmp ne i64 [[UNSIGNED_RESULT:%.*]], 0
  // CHECK-NEXT: [[UNDERFLOW:%.*]] = and i1 {{.*}}, [[NON_ZERO]]
  // CHECK-NEXT: [[OVERFLOW_PRE_TRUNC:%.*]] = or i1 {{.*}}, [[UNDERFLOW]]
  // CHECK-NEXT: [[TRUNC_OVERFLOW:%.*]] = icmp ugt i64 [[UNSIGNED_RESULT]], 4294967295
  // CHECK-NEXT: [[OVERFLOW:%.*]] = or i1 [[OVERFLOW_PRE_TRUNC]], [[TRUNC_OVERFLOW]]
  // CHECK-NEXT: trunc i64 [[UNSIGNED_RESULT]] to i32
  extern long long LongLongErrorCode; // re-declares the file-scope global
  unsigned result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}
// 32-bit mixed-sign operands widened into a 64-bit signed result: both fit in
// i64, so a plain llvm.smul.with.overflow.i64 suffices.
long long test_mixed_sign_mul_overflow_extend_signed(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_signed
  // CHECK: call { i64, i1 } @llvm.smul.with.overflow.i64
  extern long long LongLongErrorCode; // re-declares the file-scope global
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}
452 long long test_mixed_sign_mul_overflow_extend_unsigned(int x, unsigned y) {
453 // CHECK: @test_mixed_sign_mul_overflow_extend_unsigned
454 // CHECK: call { i65, i1 } @llvm.smul.with.overflow.i65
455 unsigned long long result;
456 if (__builtin_mul_overflow(y, x, &result))
457 return LongLongErrorCode;