1 // RUN: %clang_cc1 -triple i386-unknown-unknown %s -emit-llvm -o - | FileCheck %s
5 // Extremely basic VLA test
14 return sizeof(int[n]);
34 // Make sure we emit sizes correctly in some obscure cases
40 // CHECK: define void @f_8403108
41 void f_8403108(unsigned x) {
42 // CHECK: call i8* @llvm.stacksave()
45 // CHECK: call i8* @llvm.stacksave()
49 // CHECK: call void @llvm.stackrestore(i8*
51 // CHECK: call void @llvm.stackrestore(i8*
// Callee taking a VLA-typed parameter: 'data' is a pointer to int[width],
// where 'width' is the preceding parameter. Used below to exercise call-site
// conversions (null, integer, and pointer arguments) to a VLA pointer type.
// The trailing expected-note is a clang -verify marker; do not edit it.
void function(short width, int data[][width]) {} // expected-note {{passing argument to parameter 'data' here}}
59 // CHECK: call void @function(i16 signext 1, i32* null)
61 // CHECK: call void @function(i16 signext 1, i32* inttoptr
62 function(1, 0xbadbeef); // expected-warning {{incompatible integer to pointer conversion passing}}
63 // CHECK: call void @function(i16 signext 1, i32* {{.*}})
// Callee with a doubly-variable VLA parameter: 'data' is a pointer to
// int[width][width]. Checks that a multi-dimensional VLA parameter type
// lowers to the same i32* argument at the call site.
void function1(short width, int data[][width][width]) {}
70 // CHECK: call void @function1(i16 signext 1, i32* {{.*}})
72 // CHECK: call void @function(i16 signext 1, i32* {{.*}})
81 char b[1][n+3]; /* Variable length array. */
82 // CHECK: [[tmp_1:%.*]] = load i32* @GLOB, align 4
83 // CHECK-NEXT: add nsw i32 [[tmp_1]], 1
84 __typeof__(b[GLOB++]) c;
88 // http://llvm.org/PR8567
89 // CHECK: define double @test_PR8567
90 double test_PR8567(int n, double (*p)[n][5]) {
91 // CHECK: [[NV:%.*]] = alloca i32, align 4
92 // CHECK-NEXT: [[PV:%.*]] = alloca [5 x double]*, align 4
95 // CHECK-NEXT: [[N:%.*]] = load i32* [[NV]], align 4
96 // CHECK-NEXT: [[P:%.*]] = load [5 x double]** [[PV]], align 4
97 // CHECK-NEXT: [[T0:%.*]] = mul nsw i32 1, [[N]]
98 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [5 x double]* [[P]], i32 [[T0]]
99 // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [5 x double]* [[T1]], i32 2
100 // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [5 x double]* [[T2]], i32 0, i32 3
101 // CHECK-NEXT: [[T4:%.*]] = load double* [[T3]]
102 // CHECK-NEXT: ret double [[T4]]
106 int test4(unsigned n, char (*p)[n][n+1][6]) {
107 // CHECK: define i32 @test4(
108 // CHECK: [[N:%.*]] = alloca i32, align 4
109 // CHECK-NEXT: [[P:%.*]] = alloca [6 x i8]*, align 4
110 // CHECK-NEXT: [[P2:%.*]] = alloca [6 x i8]*, align 4
111 // CHECK-NEXT: store i32
112 // CHECK-NEXT: store [6 x i8]*
115 // CHECK-NEXT: [[DIM0:%.*]] = load i32* [[N]], align 4
116 // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
117 // CHECK-NEXT: [[DIM1:%.*]] = add i32 [[T0]], 1
119 // __typeof. FIXME: does this really need to be loaded?
120 // CHECK-NEXT: load [6 x i8]** [[P]]
122 // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P]], align 4
123 // CHECK-NEXT: [[T1:%.*]] = load i32* [[N]], align 4
124 // CHECK-NEXT: [[T2:%.*]] = udiv i32 [[T1]], 2
125 // CHECK-NEXT: [[T3:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
126 // CHECK-NEXT: [[T4:%.*]] = mul nsw i32 [[T2]], [[T3]]
127 // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [6 x i8]* [[T0]], i32 [[T4]]
128 // CHECK-NEXT: [[T6:%.*]] = load i32* [[N]], align 4
129 // CHECK-NEXT: [[T7:%.*]] = udiv i32 [[T6]], 4
130 // CHECK-NEXT: [[T8:%.*]] = sub i32 0, [[T7]]
131 // CHECK-NEXT: [[T9:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
132 // CHECK-NEXT: [[T10:%.*]] = mul nsw i32 [[T8]], [[T9]]
133 // CHECK-NEXT: [[T11:%.*]] = getelementptr inbounds [6 x i8]* [[T5]], i32 [[T10]]
134 // CHECK-NEXT: store [6 x i8]* [[T11]], [6 x i8]** [[P2]], align 4
135 __typeof(p) p2 = (p + n/2) - n/4;
137 // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P2]], align 4
138 // CHECK-NEXT: [[T1:%.*]] = load [6 x i8]** [[P]], align 4
139 // CHECK-NEXT: [[T2:%.*]] = ptrtoint [6 x i8]* [[T0]] to i32
140 // CHECK-NEXT: [[T3:%.*]] = ptrtoint [6 x i8]* [[T1]] to i32
141 // CHECK-NEXT: [[T4:%.*]] = sub i32 [[T2]], [[T3]]
142 // CHECK-NEXT: [[T5:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
143 // CHECK-NEXT: [[T6:%.*]] = mul nuw i32 6, [[T5]]
144 // CHECK-NEXT: [[T7:%.*]] = sdiv exact i32 [[T4]], [[T6]]
145 // CHECK-NEXT: ret i32 [[T7]]