1 // RUN: %clang_cc1 -triple i386-unknown-unknown %s -emit-llvm -o - | FileCheck %s
5 // Extremely basic VLA test
14 return sizeof(int[n]);
34 // Make sure we emit sizes correctly in some obscure cases
40 // CHECK: define void @f_8403108
41 void f_8403108(unsigned x) {
42 // CHECK: call i8* @llvm.stacksave()
45 // CHECK: call i8* @llvm.stacksave()
49 // CHECK: call void @llvm.stackrestore(i8*
51 // CHECK: call void @llvm.stackrestore(i8*
// Callee with a variably modified parameter type (int[][width]); the calls
// below exercise how clang lowers arguments of VLA pointer type. The
// trailing expected-note is a clang -verify anchor paired with the
// expected-warning at the call site — do not edit its text.
void function(short width, int data[][width]) {} // expected-note {{passing argument to parameter 'data' here}}
59 // CHECK: call void @function(i16 signext 1, i32* null)
61 // CHECK: call void @function(i16 signext 1, i32* inttoptr
62 function(1, 0xbadbeef); // expected-warning {{incompatible integer to pointer conversion passing}}
63 // CHECK: call void @function(i16 signext 1, i32* {{.*}})
// Like 'function' above, but the parameter is doubly variably modified
// (int[][width][width]) to check codegen for calls with multi-dimensional
// VLA array arguments.
void function1(short width, int data[][width][width]) {}
70 // CHECK: call void @function1(i16 signext 1, i32* {{.*}})
72 // CHECK: call void @function(i16 signext 1, i32* {{.*}})
81 char b[1][n+3]; /* Variable length array. */
82 // CHECK: [[tmp_1:%.*]] = load i32* @GLOB, align 4
83 // CHECK-NEXT: add nsw i32 [[tmp_1]], 1
84 __typeof__(b[GLOB++]) c;
88 // http://llvm.org/PR8567
89 // CHECK: define double @test_PR8567
90 double test_PR8567(int n, double (*p)[n][5]) {
91 // CHECK: [[NV:%.*]] = alloca i32, align 4
92 // CHECK-NEXT: [[PV:%.*]] = alloca [5 x double]*, align 4
95 // CHECK-NEXT: [[N:%.*]] = load i32* [[NV]], align 4
96 // CHECK-NEXT: [[P:%.*]] = load [5 x double]** [[PV]], align 4
97 // CHECK-NEXT: [[T0:%.*]] = mul nsw i32 1, [[N]]
98 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [5 x double]* [[P]], i32 [[T0]]
99 // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [5 x double]* [[T1]], i32 2
100 // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [5 x double]* [[T2]], i32 0, i32 3
101 // CHECK-NEXT: [[T4:%.*]] = load double* [[T3]]
102 // CHECK-NEXT: ret double [[T4]]
106 int test4(unsigned n, char (*p)[n][n+1][6]) {
107 // CHECK: define i32 @test4(
108 // CHECK: [[N:%.*]] = alloca i32, align 4
109 // CHECK-NEXT: [[P:%.*]] = alloca [6 x i8]*, align 4
110 // CHECK-NEXT: [[P2:%.*]] = alloca [6 x i8]*, align 4
111 // CHECK-NEXT: store i32
112 // CHECK-NEXT: store [6 x i8]*
115 // CHECK-NEXT: [[DIM0:%.*]] = load i32* [[N]], align 4
116 // CHECK-NEXT: [[T0:%.*]] = load i32* [[N]], align 4
117 // CHECK-NEXT: [[DIM1:%.*]] = add i32 [[T0]], 1
119 // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P]], align 4
120 // CHECK-NEXT: [[T1:%.*]] = load i32* [[N]], align 4
121 // CHECK-NEXT: [[T2:%.*]] = udiv i32 [[T1]], 2
122 // CHECK-NEXT: [[T3:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
123 // CHECK-NEXT: [[T4:%.*]] = mul nsw i32 [[T2]], [[T3]]
124 // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [6 x i8]* [[T0]], i32 [[T4]]
125 // CHECK-NEXT: [[T6:%.*]] = load i32* [[N]], align 4
126 // CHECK-NEXT: [[T7:%.*]] = udiv i32 [[T6]], 4
127 // CHECK-NEXT: [[T8:%.*]] = sub i32 0, [[T7]]
128 // CHECK-NEXT: [[T9:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
129 // CHECK-NEXT: [[T10:%.*]] = mul nsw i32 [[T8]], [[T9]]
130 // CHECK-NEXT: [[T11:%.*]] = getelementptr inbounds [6 x i8]* [[T5]], i32 [[T10]]
131 // CHECK-NEXT: store [6 x i8]* [[T11]], [6 x i8]** [[P2]], align 4
132 __typeof(p) p2 = (p + n/2) - n/4;
134 // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]** [[P2]], align 4
135 // CHECK-NEXT: [[T1:%.*]] = load [6 x i8]** [[P]], align 4
136 // CHECK-NEXT: [[T2:%.*]] = ptrtoint [6 x i8]* [[T0]] to i32
137 // CHECK-NEXT: [[T3:%.*]] = ptrtoint [6 x i8]* [[T1]] to i32
138 // CHECK-NEXT: [[T4:%.*]] = sub i32 [[T2]], [[T3]]
139 // CHECK-NEXT: [[T5:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
140 // CHECK-NEXT: [[T6:%.*]] = mul nuw i32 6, [[T5]]
141 // CHECK-NEXT: [[T7:%.*]] = sdiv exact i32 [[T4]], [[T6]]
142 // CHECK-NEXT: ret i32 [[T7]]
149 // CHECK: define void @test5(
151 // CHECK: [[A:%.*]] = alloca [5 x i32], align 4
152 // CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
153 // CHECK-NEXT: [[CL:%.*]] = alloca i32*, align 4
154 // CHECK-NEXT: store i32 0, i32* [[I]], align 4
156 (typeof(++i, (int (*)[i])a)){&a} += 0;
157 // CHECK-NEXT: [[Z:%.*]] = load i32* [[I]], align 4
158 // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[Z]], 1
159 // CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4
160 // CHECK-NEXT: [[O:%.*]] = load i32* [[I]], align 4
161 // CHECK-NEXT: [[AR:%.*]] = getelementptr inbounds [5 x i32]* [[A]], i32 0, i32 0
162 // CHECK-NEXT: [[T:%.*]] = bitcast [5 x i32]* [[A]] to i32*
163 // CHECK-NEXT: store i32* [[T]], i32** [[CL]]
164 // CHECK-NEXT: [[TH:%.*]] = load i32** [[CL]]
165 // CHECK-NEXT: [[VLAIX:%.*]] = mul nsw i32 0, [[O]]
166 // CHECK-NEXT: [[ADDPTR:%.*]] = getelementptr inbounds i32* [[TH]], i32 [[VLAIX]]
167 // CHECK-NEXT: store i32* [[ADDPTR]], i32** [[CL]]
172 // CHECK: define void @test6(
173 int n = 20, **a, i=0;
174 // CHECK: [[N:%.*]] = alloca i32, align 4
175 // CHECK-NEXT: [[A:%.*]] = alloca i32**, align 4
176 // CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
177 (int (**)[i]){&a}[0][1][5] = 0;
178 // CHECK-NEXT: [[CL:%.*]] = alloca i32**, align 4
179 // CHECK-NEXT: store i32 20, i32* [[N]], align 4
180 // CHECK-NEXT: store i32 0, i32* [[I]], align 4
181 // CHECK-NEXT: [[Z:%.*]] = load i32* [[I]], align 4
182 // CHECK-NEXT: [[O:%.*]] = bitcast i32*** [[A]] to i32**
183 // CHECK-NEXT: store i32** [[O]], i32*** [[CL]]
184 // CHECK-NEXT: [[T:%.*]] = load i32*** [[CL]]
185 // CHECK-NEXT: [[IX:%.*]] = getelementptr inbounds i32** [[T]], i32 0
186 // CHECK-NEXT: [[TH:%.*]] = load i32** [[IX]], align 4
187 // CHECK-NEXT: [[F:%.*]] = mul nsw i32 1, [[Z]]
188 // CHECK-NEXT: [[IX1:%.*]] = getelementptr inbounds i32* [[TH]], i32 [[F]]
189 // CHECK-NEXT: [[IX2:%.*]] = getelementptr inbounds i32* [[IX1]], i32 5
190 // CHECK-NEXT: store i32 0, i32* [[IX2]], align 4