; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32

; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
; for each increment.
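;
; A rough C equivalent of the IR below (an illustrative sketch, not part
; of the original test):
;
;   int simple(int *a, int *b, int x) {
;     int s = 0;
;     do {
;       s += a[0] + a[x] + a[2*x] + a[3*x];
;       a += 4*x;                     /* single chained IV increment */
;     } while (a != b);
;     return s;
;   }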
;
; X64-LABEL: simple:
; %x * 4
; X64: leaq (,%{{[^,]+}},4),
; no other address computation in the preheader
; X64-NEXT: xorl
; X64-NEXT: .align
; X64: %loop
; no complex address modes
; X64-NOT: (%{{[^)]+}},%{{[^)]+}},
;
; X32-LABEL: simple:
; no expensive address computation in the preheader
; X32-NOT: imul
; X32: %loop
; no complex address modes
; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @user is not currently chained because the IV is live across memory ops.
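;
; Roughly, as an illustrative C sketch (not part of the original test),
; the extra store through the IV is what keeps it live:
;
;   int user(int *a, int *b, int x) {
;     int s = 0;
;     do {
;       s += a[0] + a[x] + a[2*x] + a[3*x];
;       a[0] = s;       /* IV user: %iv stays live across the loads */
;       a += 4*x;
;     } while (a != b);
;     return s;
;   }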
;
; X64-LABEL: user:
; X64: shlq $4
; X64: lea
; X64: lea
; X64: lea
; complex address modes
; X64: (%{{[^)]+}},%{{[^)]+}},
;
; X32-LABEL: user:
; expensive address computation in the preheader
; X32: shll $4
; X32: lea
; complex address modes
; X32: (%{{[^)]+}},%{{[^)]+}},
define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  store i32 %s4, i32* %iv
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @extrastride is a slightly more interesting case of a single
; complete chain with multiple strides. The test case IR is what LSR
; used to do, and exactly what we don't want to do. LSR's new IV
; chaining feature should now undo the damage.
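;
; A rough C equivalent of the loop (illustrative sketch, not part of the
; original test):
;
;   void extrastride(char *main, int s, int *res, int x, int y, int z) {
;     for (int i = 0; i < z; ++i) {
;       *res = *(int*)main + *(int*)(main + s) + *(int*)(main + 2*s)
;            + *(int*)(main + 3*s) + *(int*)(main + 4*s);
;       main += 5*s + x;          /* one chained IV, stride s*5+x */
;       res += y;
;     }
;   }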
;
; X64-LABEL: extrastride:
; We currently don't handle this on X64 because the sexts cause
; strange increment expressions like this:
; IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
;
; X32-LABEL: extrastride:
; no spills in the preheader
; X32-NOT: mov{{.*}}(%esp){{$}}
; X32: %for.body{{$}}
; no complex address modes
; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %cmp8 = icmp eq i32 %z, 0
  br i1 %cmp8, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
  %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
  %add.ptr2.sum = add i32 %x, %main_stride ; s + x
  %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
  %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
  %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
  %0 = bitcast i8* %main.addr.011 to i32*
  %1 = load i32* %0, align 4
  %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
  %2 = bitcast i8* %add.ptr to i32*
  %3 = load i32* %2, align 4
  %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
  %4 = bitcast i8* %add.ptr1 to i32*
  %5 = load i32* %4, align 4
  %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
  %6 = bitcast i8* %add.ptr2 to i32*
  %7 = load i32* %6, align 4
  %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
  %8 = bitcast i8* %add.ptr3 to i32*
  %9 = load i32* %8, align 4
  %add = add i32 %3, %1
  %add4 = add i32 %add, %5
  %add5 = add i32 %add4, %7
  %add6 = add i32 %add5, %9
  store i32 %add6, i32* %res.addr.09, align 4
  %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
  %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
  %inc = add i32 %i.010, 1
  %cmp = icmp eq i32 %inc, %z
  br i1 %cmp, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; @foldedidx is an unrolled variant of this loop:
;  for (unsigned long i = 0; i < len; i += s) {
;    c[i] = a[i] + b[i];
;  }
; where 's' can be folded into the addressing mode.
; Consequently, we should *not* form any chains.
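;
; Illustrative note (not from the original test): the unrolled constant
; offsets fold into x86 base+index+displacement addressing, e.g.
; "movzbl -3(%rdi,%rcx), %eax" on x86-64, so chaining the pointers would
; only cost extra registers.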
;
; X64-LABEL: foldedidx:
; X64: movzbl -3(
;
; X32-LABEL: foldedidx:
; X32: movzbl 400(
define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
  %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
  %0 = load i8* %arrayidx, align 1
  %conv5 = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
  %1 = load i8* %arrayidx1, align 1
  %conv26 = zext i8 %1 to i32
  %add = add nsw i32 %conv26, %conv5
  %conv3 = trunc i32 %add to i8
  %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
  store i8 %conv3, i8* %arrayidx4, align 1
  %inc1 = or i32 %i.07, 1
  %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
  %2 = load i8* %arrayidx.1, align 1
  %conv5.1 = zext i8 %2 to i32
  %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
  %3 = load i8* %arrayidx1.1, align 1
  %conv26.1 = zext i8 %3 to i32
  %add.1 = add nsw i32 %conv26.1, %conv5.1
  %conv3.1 = trunc i32 %add.1 to i8
  %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
  store i8 %conv3.1, i8* %arrayidx4.1, align 1
  %inc.12 = or i32 %i.07, 2
  %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
  %4 = load i8* %arrayidx.2, align 1
  %conv5.2 = zext i8 %4 to i32
  %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
  %5 = load i8* %arrayidx1.2, align 1
  %conv26.2 = zext i8 %5 to i32
  %add.2 = add nsw i32 %conv26.2, %conv5.2
  %conv3.2 = trunc i32 %add.2 to i8
  %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
  store i8 %conv3.2, i8* %arrayidx4.2, align 1
  %inc.23 = or i32 %i.07, 3
  %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
  %6 = load i8* %arrayidx.3, align 1
  %conv5.3 = zext i8 %6 to i32
  %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
  %7 = load i8* %arrayidx1.3, align 1
  %conv26.3 = zext i8 %7 to i32
  %add.3 = add nsw i32 %conv26.3, %conv5.3
  %conv3.3 = trunc i32 %add.3 to i8
  %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
  store i8 %conv3.3, i8* %arrayidx4.3, align 1
  %inc.3 = add nsw i32 %i.07, 4
  %exitcond.3 = icmp eq i32 %inc.3, 400
  br i1 %exitcond.3, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; @multioper tests instructions with multiple IV user operands. We
; should be able to chain them independent of each other.
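;
; As a rough C sketch (illustrative only, not part of the original test):
;
;   void multioper(int *a, int n) {
;     int *p = a, i = 0;
;     do {
;       p[0] = i; p[1] = i + 1; p[2] = i + 2; p[3] = i + 3;
;       p += 4; i += 4;
;     } while (i < n);
;   }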
;
; X64-LABEL: multioper:
; X64: %for.body
; X64: movl %{{.*}},4)
; X64-NEXT: leal 1(
; X64-NEXT: movl %{{.*}},4)
; X64-NEXT: leal 2(
; X64-NEXT: movl %{{.*}},4)
; X64-NEXT: leal 3(
; X64-NEXT: movl %{{.*}},4)
;
; X32-LABEL: multioper:
; X32: %for.body
; X32: movl %{{.*}},4)
; X32-NEXT: leal 1(
; X32-NEXT: movl %{{.*}},4)
; X32-NEXT: leal 2(
; X32-NEXT: movl %{{.*}},4)
; X32-NEXT: leal 3(
; X32-NEXT: movl %{{.*}},4)
define void @multioper(i32* %a, i32 %n) nounwind {
entry:
  br label %for.body

for.body:
  %p = phi i32* [ %p.next, %for.body ], [ %a, %entry ]
  %i = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
  store i32 %i, i32* %p, align 4
  %inc1 = add nsw i32 %i, 1
  %add.ptr.i1 = getelementptr inbounds i32* %p, i32 1
  store i32 %inc1, i32* %add.ptr.i1, align 4
  %inc2 = add nsw i32 %i, 2
  %add.ptr.i2 = getelementptr inbounds i32* %p, i32 2
  store i32 %inc2, i32* %add.ptr.i2, align 4
  %inc3 = add nsw i32 %i, 3
  %add.ptr.i3 = getelementptr inbounds i32* %p, i32 3
  store i32 %inc3, i32* %add.ptr.i3, align 4
  %p.next = getelementptr inbounds i32* %p, i32 4
  %inc4 = add nsw i32 %i, 4
  %cmp = icmp slt i32 %inc4, %n
  br i1 %cmp, label %for.body, label %exit

exit:
  ret void
}

; @testCmpZero has an ICmpZero LSR use that should not be hidden from
; LSR. Profitable chains should have more than one nonzero increment
; anyway.
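;
; Illustrative note (not from the original test): LSR's ICmpZero use kind
; lets it rewrite the exit test as a counter decremented to zero, e.g.
; "decl %ecx; jne" instead of a pointer comparison.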
;
; X32-LABEL: testCmpZero:
; X32: %for.body82.us
; X32: dec
; X32: jne
define void @testCmpZero(i8* %src, i8* %dst, i32 %srcidx, i32 %dstidx, i32 %len) nounwind ssp {
entry:
  %dest0 = getelementptr inbounds i8* %src, i32 %srcidx
  %source0 = getelementptr inbounds i8* %dst, i32 %dstidx
  %add.ptr79.us.sum = add i32 %srcidx, %len
  %lftr.limit = getelementptr i8* %src, i32 %add.ptr79.us.sum
  br label %for.body82.us

for.body82.us:
  %dest = phi i8* [ %dest0, %entry ], [ %incdec.ptr91.us, %for.body82.us ]
  %source = phi i8* [ %source0, %entry ], [ %add.ptr83.us, %for.body82.us ]
  %0 = bitcast i8* %source to i32*
  %1 = load i32* %0, align 4
  %trunc = trunc i32 %1 to i8
  %add.ptr83.us = getelementptr inbounds i8* %source, i32 4
  %incdec.ptr91.us = getelementptr inbounds i8* %dest, i32 1
  store i8 %trunc, i8* %dest, align 1
  %exitcond = icmp eq i8* %incdec.ptr91.us, %lftr.limit
  br i1 %exitcond, label %return, label %for.body82.us

return:
  ret void
}