; RUN-disabled: llc < %s -march=x86-64 -mcpu=core2 -pre-RA-sched=source -enable-misched \
; RUN-disabled:   -misched-topdown -verify-machineinstrs \
; RUN-disabled:   | FileCheck %s -check-prefix=TOPDOWN
; RUN-disabled: llc < %s -march=x86-64 -mcpu=core2 -pre-RA-sched=source -enable-misched \
; RUN-disabled:   -misched=ilpmin -verify-machineinstrs \
; RUN-disabled:   | FileCheck %s -check-prefix=ILPMIN
; RUN-disabled: llc < %s -march=x86-64 -mcpu=core2 -pre-RA-sched=source -enable-misched \
; RUN-disabled:   -misched=ilpmax -verify-machineinstrs \
; RUN-disabled:   | FileCheck %s -check-prefix=ILPMAX
;
; Verify that the MI scheduler minimizes register pressure for a
; uniform set of bottom-up subtrees (unrolled matrix multiply).
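;
; For reference, a minimal C sketch of the kernel this IR corresponds to
; (illustrative only; the restrict qualifiers mirror the noalias arguments,
; but the exact original source is an assumption):
;
;   void mmult(int m1[restrict 4][4], int m2[restrict 4][4],
;              int m3[restrict 4][4]) {
;     for (int i = 0; i < 4; i++)
;       /* the j and k loops below are fully unrolled in the IR */
;       for (int j = 0; j < 4; j++)
;         m3[i][j] = m1[i][0] * m2[0][j] + m1[i][1] * m2[1][j]
;                  + m1[i][2] * m2[2][j] + m1[i][3] * m2[3][j];
;   }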
;
; For current top-down heuristics, ensure that some folded imulls have
; been reordered with the stores. This tests the scheduler's cheap
; alias analysis ability (that doesn't require any AliasAnalysis pass).
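; (The reordering is possible here because %m1, %m2, and %m3 are all
; noalias arguments, so even a dependence check that runs without an
; AliasAnalysis pass can tell the stores to %m3 apart from the loads
; feeding the imulls.)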
;
; TOPDOWN: movl %{{.*}}, (
; TOPDOWN: imull {{[0-9]*}}(
; TOPDOWN: movl %{{.*}}, 4(
; TOPDOWN: imull {{[0-9]*}}(
; TOPDOWN: movl %{{.*}}, 8(
; TOPDOWN: movl %{{.*}}, 12(
;
; For -misched=ilpmin, verify that each expression subtree is
; scheduled independently, and that the imull/adds are interleaved.
;
; ILPMIN: movl %{{.*}}, (
; ILPMIN: imull
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: movl %{{.*}}, 4(
; ILPMIN: imull
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: movl %{{.*}}, 8(
; ILPMIN: imull
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: imull
; ILPMIN: addl
; ILPMIN: movl %{{.*}}, 12(
;
; For -misched=ilpmax, verify that each expression subtree is
; scheduled independently, and that the imull/adds are clustered.
;
; ILPMAX: movl %{{.*}}, (
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: addl
; ILPMAX: addl
; ILPMAX: addl
; ILPMAX: movl %{{.*}}, 4(
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: addl
; ILPMAX: addl
; ILPMAX: addl
; ILPMAX: movl %{{.*}}, 8(
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: imull
; ILPMAX: addl
; ILPMAX: addl
; ILPMAX: addl
; ILPMAX: movl %{{.*}}, 12(
;
define void @mmult([4 x i32]* noalias nocapture %m1, [4 x i32]* noalias nocapture %m2,
                   [4 x i32]* noalias nocapture %m3) nounwind uwtable ssp {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx8 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 0
  %tmp = load i32* %arrayidx8, align 4
  %arrayidx12 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 0
  %tmp1 = load i32* %arrayidx12, align 4
  %arrayidx8.1 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 1
  %tmp2 = load i32* %arrayidx8.1, align 4
  %arrayidx12.1 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 0
  %tmp3 = load i32* %arrayidx12.1, align 4
  %arrayidx8.2 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 2
  %tmp4 = load i32* %arrayidx8.2, align 4
  %arrayidx12.2 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 0
  %tmp5 = load i32* %arrayidx12.2, align 4
  %arrayidx8.3 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 3
  %tmp6 = load i32* %arrayidx8.3, align 4
  %arrayidx12.3 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 0
  %tmp8 = load i32* %arrayidx8, align 4
  %arrayidx12.137 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 1
  %tmp9 = load i32* %arrayidx12.137, align 4
  %tmp10 = load i32* %arrayidx8.1, align 4
  %arrayidx12.1.1 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 1
  %tmp11 = load i32* %arrayidx12.1.1, align 4
  %tmp12 = load i32* %arrayidx8.2, align 4
  %arrayidx12.2.1 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 1
  %tmp13 = load i32* %arrayidx12.2.1, align 4
  %tmp14 = load i32* %arrayidx8.3, align 4
  %arrayidx12.3.1 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 1
  %tmp15 = load i32* %arrayidx12.3.1, align 4
  %tmp16 = load i32* %arrayidx8, align 4
  %arrayidx12.239 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 2
  %tmp17 = load i32* %arrayidx12.239, align 4
  %tmp18 = load i32* %arrayidx8.1, align 4
  %arrayidx12.1.2 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 2
  %tmp19 = load i32* %arrayidx12.1.2, align 4
  %tmp20 = load i32* %arrayidx8.2, align 4
  %arrayidx12.2.2 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 2
  %tmp21 = load i32* %arrayidx12.2.2, align 4
  %tmp22 = load i32* %arrayidx8.3, align 4
  %arrayidx12.3.2 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 2
  %tmp23 = load i32* %arrayidx12.3.2, align 4
  %tmp24 = load i32* %arrayidx8, align 4
  %arrayidx12.341 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 3
  %tmp25 = load i32* %arrayidx12.341, align 4
  %tmp26 = load i32* %arrayidx8.1, align 4
  %arrayidx12.1.3 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 3
  %tmp27 = load i32* %arrayidx12.1.3, align 4
  %tmp28 = load i32* %arrayidx8.2, align 4
  %arrayidx12.2.3 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 3
  %tmp29 = load i32* %arrayidx12.2.3, align 4
  %tmp30 = load i32* %arrayidx8.3, align 4
  %arrayidx12.3.3 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 3
  %tmp31 = load i32* %arrayidx12.3.3, align 4
  %tmp7 = load i32* %arrayidx12.3, align 4
  %mul = mul nsw i32 %tmp1, %tmp
  %mul.1 = mul nsw i32 %tmp3, %tmp2
  %mul.2 = mul nsw i32 %tmp5, %tmp4
  %mul.3 = mul nsw i32 %tmp7, %tmp6
  %mul.138 = mul nsw i32 %tmp9, %tmp8
  %mul.1.1 = mul nsw i32 %tmp11, %tmp10
  %mul.2.1 = mul nsw i32 %tmp13, %tmp12
  %mul.3.1 = mul nsw i32 %tmp15, %tmp14
  %mul.240 = mul nsw i32 %tmp17, %tmp16
  %mul.1.2 = mul nsw i32 %tmp19, %tmp18
  %mul.2.2 = mul nsw i32 %tmp21, %tmp20
  %mul.3.2 = mul nsw i32 %tmp23, %tmp22
  %mul.342 = mul nsw i32 %tmp25, %tmp24
  %mul.1.3 = mul nsw i32 %tmp27, %tmp26
  %mul.2.3 = mul nsw i32 %tmp29, %tmp28
  %mul.3.3 = mul nsw i32 %tmp31, %tmp30
  %add.1 = add nsw i32 %mul.1, %mul
  %add.2 = add nsw i32 %mul.2, %add.1
  %add.3 = add nsw i32 %mul.3, %add.2
  %add.1.1 = add nsw i32 %mul.1.1, %mul.138
  %add.2.1 = add nsw i32 %mul.2.1, %add.1.1
  %add.3.1 = add nsw i32 %mul.3.1, %add.2.1
  %add.1.2 = add nsw i32 %mul.1.2, %mul.240
  %add.2.2 = add nsw i32 %mul.2.2, %add.1.2
  %add.3.2 = add nsw i32 %mul.3.2, %add.2.2
  %add.1.3 = add nsw i32 %mul.1.3, %mul.342
  %add.2.3 = add nsw i32 %mul.2.3, %add.1.3
  %add.3.3 = add nsw i32 %mul.3.3, %add.2.3
  %arrayidx16 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 0
  store i32 %add.3, i32* %arrayidx16, align 4
  %arrayidx16.1 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 1
  store i32 %add.3.1, i32* %arrayidx16.1, align 4
  %arrayidx16.2 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 2
  store i32 %add.3.2, i32* %arrayidx16.2, align 4
  %arrayidx16.3 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 3
  store i32 %add.3.3, i32* %arrayidx16.3, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 4
  br i1 %exitcond, label %for.end, label %for.body
for.end:                                          ; preds = %for.body
  ret void
}