; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 -addr-sink-using-gep=1 %s -o - | FileCheck %s -check-prefix=A9

; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
; for each use.
;
; no expensive address computation in the preheader
; no complex address modes
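;
; In the chained form below each address is computed from its
; predecessor (%iv1 = %iv + %x, %iv2 = %iv1 + %x, ...), so only %iv
; stays live around the loop and every update is a plain add, rather
; than keeping {x, 2*x, 3*x} live or using reg+reg*scale addressing
; for each load.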
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @user is not currently chained because the IV is live across memory ops.
;
; stride multiples computed in the preheader
; complex address modes
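;
; The loop below matches @simple except that the chain head %iv is also
; a store address ("store i32 %s4, i32* %iv"), keeping it live past the
; later increments; LSR is expected to leave the IV unchained, hoist the
; stride multiples into the preheader, and let the memory ops fold
; reg+reg address modes instead.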
define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  store i32 %s4, i32* %iv
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @extrastride is a slightly more interesting case of a single
; complete chain with multiple strides. The test case IR is what LSR
; used to do, and exactly what we don't want to do. LSR's new IV
; chaining feature should now undo the damage.
;
; only one stride multiple in the preheader
; A9-NOT: {{str r|lsl}}
; no complex address modes or reloads
; A9-NOT: {{ldr .*\[sp\]|lsl}}
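;
; Each iteration below loads %main + {0, s, 2*s, 3*s, 4*s} and then
; steps the base to %main + 5*s + %x, so one chain with increments
; {s, s, s, s, s+x} covers every access and only a single stride
; multiple needs to be set up in the preheader.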
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %cmp8 = icmp eq i32 %z, 0
  br i1 %cmp8, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %add.ptr.sum = shl i32 %main_stride, 1          ; s*2
  %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
  %add.ptr2.sum = add i32 %x, %main_stride        ; s + x
  %add.ptr4.sum = shl i32 %main_stride, 2         ; s*4
  %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
  %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
  %0 = bitcast i8* %main.addr.011 to i32*
  %1 = load i32* %0, align 4
  %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
  %2 = bitcast i8* %add.ptr to i32*
  %3 = load i32* %2, align 4
  %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
  %4 = bitcast i8* %add.ptr1 to i32*
  %5 = load i32* %4, align 4
  %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
  %6 = bitcast i8* %add.ptr2 to i32*
  %7 = load i32* %6, align 4
  %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
  %8 = bitcast i8* %add.ptr3 to i32*
  %9 = load i32* %8, align 4
  %add = add i32 %3, %1
  %add4 = add i32 %add, %5
  %add5 = add i32 %add4, %7
  %add6 = add i32 %add5, %9
  store i32 %add6, i32* %res.addr.09, align 4
  %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
  %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
  %inc = add i32 %i.010, 1
  %cmp = icmp eq i32 %inc, %z
  br i1 %cmp, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; @foldedidx is an unrolled variant of this loop:
; for (unsigned long i = 0; i < len; i += s) {
;   c[i] = a[i] + b[i];
; }
; where 's' can be folded into the addressing mode.
; Consequently, we should *not* form any chains.
;
; A9: ldrb{{(.w)?}} {{r[0-9]|lr}}, [{{r[0-9]|lr}}, #3]
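;
; Each unrolled step touches a[i], a[i+1], a[i+2], a[i+3] (likewise %b
; and %c), and the small constant offsets fold directly into Thumb2
; immediate addressing, e.g. the "ldrb r0, [r1, #3]" shape checked
; above (register names illustrative only); a chain here would only
; add IV updates without saving anything.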
define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
  %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
  %0 = load i8* %arrayidx, align 1
  %conv5 = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
  %1 = load i8* %arrayidx1, align 1
  %conv26 = zext i8 %1 to i32
  %add = add nsw i32 %conv26, %conv5
  %conv3 = trunc i32 %add to i8
  %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
  store i8 %conv3, i8* %arrayidx4, align 1
  %inc1 = or i32 %i.07, 1
  %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
  %2 = load i8* %arrayidx.1, align 1
  %conv5.1 = zext i8 %2 to i32
  %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
  %3 = load i8* %arrayidx1.1, align 1
  %conv26.1 = zext i8 %3 to i32
  %add.1 = add nsw i32 %conv26.1, %conv5.1
  %conv3.1 = trunc i32 %add.1 to i8
  %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
  store i8 %conv3.1, i8* %arrayidx4.1, align 1
  %inc.12 = or i32 %i.07, 2
  %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
  %4 = load i8* %arrayidx.2, align 1
  %conv5.2 = zext i8 %4 to i32
  %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
  %5 = load i8* %arrayidx1.2, align 1
  %conv26.2 = zext i8 %5 to i32
  %add.2 = add nsw i32 %conv26.2, %conv5.2
  %conv3.2 = trunc i32 %add.2 to i8
  %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
  store i8 %conv3.2, i8* %arrayidx4.2, align 1
  %inc.23 = or i32 %i.07, 3
  %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
  %6 = load i8* %arrayidx.3, align 1
  %conv5.3 = zext i8 %6 to i32
  %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
  %7 = load i8* %arrayidx1.3, align 1
  %conv26.3 = zext i8 %7 to i32
  %add.3 = add nsw i32 %conv26.3, %conv5.3
  %conv3.3 = trunc i32 %add.3 to i8
  %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
  store i8 %conv3.3, i8* %arrayidx4.3, align 1
  %inc.3 = add nsw i32 %i.07, 4
  %exitcond.3 = icmp eq i32 %inc.3, 400
  br i1 %exitcond.3, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; @testNeon is an important example of the need for IV chains.
;
; Currently we have three extra add.w's that keep the store address
; live past the next increment because ISEL is unfortunately undoing
; the store chain. ISEL also fails to convert all but one of the stores to
; post-increment addressing. However, the loads should use
; post-increment addressing, no add's or add.w's beyond the three
; mentioned. Most importantly, there should be no spills or reloads!
;
; A9-NOT: {{ldr|str|adds|add r}}
; A9: vst1.8 {{.*}} [r{{[0-9]+}}]!
; A9-NOT: {{ldr|str|adds|add r}}
; A9: vst1.8 {{.*}} [r{{[0-9]+}}]!
; A9-NOT: {{ldr|str|adds|add r}}
; A9: vst1.8 {{.*}} [r{{[0-9]+}}]!
; A9-NOT: {{ldr|str|adds|add r}}
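;
; The loop below loads eight rows at %ref_data + {0, s, ..., 7*s},
; pairs them into <2 x i64> vectors, and stores four <16 x i8> sums;
; chaining the loads lets each vld1 post-increment its base by the row
; stride instead of keeping seven stride multiples live at once.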
define hidden void @testNeon(i8* %ref_data, i32 %ref_stride, i32 %limit, <16 x i8>* nocapture %data) nounwind optsize {
  %1 = icmp sgt i32 %limit, 0
  br i1 %1, label %.lr.ph, label %45

.lr.ph:                                           ; preds = %0
  %2 = shl nsw i32 %ref_stride, 1                 ; s*2
  %3 = mul nsw i32 %ref_stride, 3                 ; s*3
  %4 = shl nsw i32 %ref_stride, 2                 ; s*4
  %5 = mul nsw i32 %ref_stride, 5                 ; s*5
  %6 = mul nsw i32 %ref_stride, 6                 ; s*6
  %7 = mul nsw i32 %ref_stride, 7                 ; s*7
  %8 = shl nsw i32 %ref_stride, 3                 ; s*8
  %9 = mul i32 %ref_stride, 8                     ; assumed: per-iteration base bump over the eight rows read below
  %10 = mul i32 %limit, -64
  br label %11

; <label>:11                                      ; preds = %11, %.lr.ph
  %.05 = phi i8* [ %ref_data, %.lr.ph ], [ %42, %11 ]
  %counter.04 = phi i32 [ 0, %.lr.ph ], [ %44, %11 ]
  %result.03 = phi <16 x i8> [ zeroinitializer, %.lr.ph ], [ %41, %11 ]
  %.012 = phi <16 x i8>* [ %data, %.lr.ph ], [ %43, %11 ]
  %12 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %.05, i32 1) nounwind
  %13 = getelementptr inbounds i8* %.05, i32 %ref_stride
  %14 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %13, i32 1) nounwind
  %15 = shufflevector <1 x i64> %12, <1 x i64> %14, <2 x i32> <i32 0, i32 1>
  %16 = bitcast <2 x i64> %15 to <16 x i8>
  %17 = getelementptr inbounds <16 x i8>* %.012, i32 1
  store <16 x i8> %16, <16 x i8>* %.012, align 4
  %18 = getelementptr inbounds i8* %.05, i32 %2
  %19 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %18, i32 1) nounwind
  %20 = getelementptr inbounds i8* %.05, i32 %3
  %21 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %20, i32 1) nounwind
  %22 = shufflevector <1 x i64> %19, <1 x i64> %21, <2 x i32> <i32 0, i32 1>
  %23 = bitcast <2 x i64> %22 to <16 x i8>
  %24 = getelementptr inbounds <16 x i8>* %.012, i32 2
  store <16 x i8> %23, <16 x i8>* %17, align 4
  %25 = getelementptr inbounds i8* %.05, i32 %4
  %26 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %25, i32 1) nounwind
  %27 = getelementptr inbounds i8* %.05, i32 %5
  %28 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %27, i32 1) nounwind
  %29 = shufflevector <1 x i64> %26, <1 x i64> %28, <2 x i32> <i32 0, i32 1>
  %30 = bitcast <2 x i64> %29 to <16 x i8>
  %31 = getelementptr inbounds <16 x i8>* %.012, i32 3
  store <16 x i8> %30, <16 x i8>* %24, align 4
  %32 = getelementptr inbounds i8* %.05, i32 %6
  %33 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %32, i32 1) nounwind
  %34 = getelementptr inbounds i8* %.05, i32 %7
  %35 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %34, i32 1) nounwind
  %36 = shufflevector <1 x i64> %33, <1 x i64> %35, <2 x i32> <i32 0, i32 1>
  %37 = bitcast <2 x i64> %36 to <16 x i8>
  store <16 x i8> %37, <16 x i8>* %31, align 4
  %38 = add <16 x i8> %16, %23
  %39 = add <16 x i8> %38, %30
  %40 = add <16 x i8> %39, %37
  %41 = add <16 x i8> %result.03, %40
  %42 = getelementptr i8* %.05, i32 %9
  %43 = getelementptr inbounds <16 x i8>* %.012, i32 -64
  %44 = add nsw i32 %counter.04, 1
  %exitcond = icmp eq i32 %44, %limit
  br i1 %exitcond, label %._crit_edge, label %11

._crit_edge:                                      ; preds = %11
  %scevgep = getelementptr <16 x i8>* %data, i32 %10
  br label %45

; <label>:45                                      ; preds = %._crit_edge, %0
  %result.0.lcssa = phi <16 x i8> [ %41, %._crit_edge ], [ zeroinitializer, %0 ]
  %.01.lcssa = phi <16 x i8>* [ %scevgep, %._crit_edge ], [ %data, %0 ]
  store <16 x i8> %result.0.lcssa, <16 x i8>* %.01.lcssa, align 4
  ret void
}

declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32) nounwind readonly

; Handle chains in which the same offset is used for both loads and
; stores to the same array.
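;
; The loop below loads from %src.addr + {-4*s, ..., 3*s}, stores the
; halving-add results back to %src.addr + {-3*s, ..., 2*s}, and then
; advances the base by 8 bytes; a single chained base register with a
; shared increment (the [[BASE]]/[[INC]] pair checked below) can serve
; both the loads and the stores.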
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE:[r[0-9]+]]], [[INC:r[0-9]]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vld1.8 {d{{[0-9]+}}}, [[BASE]], {{r[0-9]}}
; A9: vst1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[BASE]], [[INC]]
; A9: vst1.8 {d{{[0-9]+}}}, [[BASE]]
define void @testReuse(i8* %src, i32 %stride) nounwind ssp {
entry:
  %mul = shl nsw i32 %stride, 2                   ; s*4
  %idx.neg = sub i32 0, %mul                      ; -s*4
  %mul1 = mul nsw i32 %stride, 3                  ; s*3
  %idx.neg2 = sub i32 0, %mul1                    ; -s*3
  %mul5 = shl nsw i32 %stride, 1                  ; s*2
  %idx.neg6 = sub i32 0, %mul5                    ; -s*2
  %idx.neg10 = sub i32 0, %stride                 ; -s
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.0110 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %src.addr = phi i8* [ %src, %entry ], [ %add.ptr45, %for.body ]
  %add.ptr = getelementptr inbounds i8* %src.addr, i32 %idx.neg
  %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr, i32 1)
  %add.ptr3 = getelementptr inbounds i8* %src.addr, i32 %idx.neg2
  %vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr3, i32 1)
  %add.ptr7 = getelementptr inbounds i8* %src.addr, i32 %idx.neg6
  %vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr7, i32 1)
  %add.ptr11 = getelementptr inbounds i8* %src.addr, i32 %idx.neg10
  %vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr11, i32 1)
  %vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %src.addr, i32 1)
  %add.ptr17 = getelementptr inbounds i8* %src.addr, i32 %stride
  %vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr17, i32 1)
  %add.ptr20 = getelementptr inbounds i8* %src.addr, i32 %mul5
  %vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr20, i32 1)
  %add.ptr23 = getelementptr inbounds i8* %src.addr, i32 %mul1
  %vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr23, i32 1)
  %vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
  %vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
  %vadd3 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld3, <8 x i8> %vld4) nounwind
  %vadd4 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld4, <8 x i8> %vld5) nounwind
  %vadd5 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld5, <8 x i8> %vld6) nounwind
  %vadd6 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld6, <8 x i8> %vld7) nounwind
  tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr3, <8 x i8> %vadd1, i32 1)
  tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr7, <8 x i8> %vadd2, i32 1)
  tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr11, <8 x i8> %vadd3, i32 1)
  tail call void @llvm.arm.neon.vst1.v8i8(i8* %src.addr, <8 x i8> %vadd4, i32 1)
  tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr17, <8 x i8> %vadd5, i32 1)
  tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr20, <8 x i8> %vadd6, i32 1)
  %inc = add nsw i32 %i.0110, 1
  %add.ptr45 = getelementptr inbounds i8* %src.addr, i32 8
  %exitcond = icmp eq i32 %inc, 4
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32) nounwind readonly

declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind

declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone