; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s

; Vector integer compares lower to NEON cm* instructions; the i1 result
; vector is sign-extended, which folds into the all-ones/all-zeros lane
; pattern the cm* instructions produce.

define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp eq <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp eq <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp eq <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp eq <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp eq <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp eq <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp eq <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

; There is no NE compare instruction: NE lowers to CMEQ followed by a NOT
; (movi all-ones + eor).

define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp sgt <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp sgt <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp sgt <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp sgt <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp sgt <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp sgt <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp sgt <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp slt <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp slt <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp slt <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp slt <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp slt <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp slt <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LT implemented as GT, so check reversed operands.
;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp slt <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp sge <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp sge <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp sge <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp sge <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp sge <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp sge <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp sge <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp sle <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp sle <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp sle <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp sle <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp sle <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp sle <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LE implemented as GE, so check reversed operands.
;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp sle <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ugt <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ugt <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp ugt <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp ugt <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp ugt <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp ugt <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp ugt <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp ult <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp ult <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp ult <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp ult <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp ult <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp ult <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LO implemented as HI, so check reversed operands.
;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp ult <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp uge <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp uge <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp uge <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp uge <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp uge <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp uge <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp uge <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp ule <8 x i8> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp ule <16 x i8> %A, %B
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
  %tmp3 = icmp ule <4 x i16> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
  %tmp3 = icmp ule <8 x i16> %A, %B
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
  %tmp3 = icmp ule <2 x i32> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
  %tmp3 = icmp ule <4 x i32> %A, %B
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
  %tmp3 = icmp ule <2 x i64> %A, %B
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

; (A & B) != 0 lowers to CMTST.

define <8 x i8> @cmtst8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = and <8 x i8> %A, %B
  %tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
  %tmp5 = sext <8 x i1> %tmp4 to <8 x i8>
  ret <8 x i8> %tmp5
}

define <16 x i8> @cmtst16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: cmtst {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = and <16 x i8> %A, %B
  %tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer
  %tmp5 = sext <16 x i1> %tmp4 to <16 x i8>
  ret <16 x i8> %tmp5
}

define <4 x i16> @cmtst4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: cmtst {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = and <4 x i16> %A, %B
  %tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer
  %tmp5 = sext <4 x i1> %tmp4 to <4 x i16>
  ret <4 x i16> %tmp5
}

define <8 x i16> @cmtst8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: cmtst {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = and <8 x i16> %A, %B
  %tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer
  %tmp5 = sext <8 x i1> %tmp4 to <8 x i16>
  ret <8 x i16> %tmp5
}

define <2 x i32> @cmtst2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: cmtst {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = and <2 x i32> %A, %B
  %tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer
  %tmp5 = sext <2 x i1> %tmp4 to <2 x i32>
  ret <2 x i32> %tmp5
}

define <4 x i32> @cmtst4xi32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: cmtst {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = and <4 x i32> %A, %B
  %tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer
  %tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
  ret <4 x i32> %tmp5
}

define <2 x i64> @cmtst2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: cmtst {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = and <2 x i64> %A, %B
  %tmp4 = icmp ne <2 x i64> %tmp3, zeroinitializer
  %tmp5 = sext <2 x i1> %tmp4 to <2 x i64>
  ret <2 x i64> %tmp5
}

; Comparisons against zero use the immediate-zero forms of the cm*
; instructions.

define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
  %tmp3 = icmp eq <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
  %tmp3 = icmp eq <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
  %tmp3 = icmp eq <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
  %tmp3 = icmp eq <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
  %tmp3 = icmp eq <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
  %tmp3 = icmp eq <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
  %tmp3 = icmp eq <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
  %tmp3 = icmp sge <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
  %tmp3 = icmp sge <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
  %tmp3 = icmp sge <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
  %tmp3 = icmp sge <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
  %tmp3 = icmp sge <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
  %tmp3 = icmp sge <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
  %tmp3 = icmp sge <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
  %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
  %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
  %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
  %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
  %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
  %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
  %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
  %tmp3 = icmp sle <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
  %tmp3 = icmp sle <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
  %tmp3 = icmp sle <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
  %tmp3 = icmp sle <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
  %tmp3 = icmp sle <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
  %tmp3 = icmp sle <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
  %tmp3 = icmp sle <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
  %tmp3 = icmp slt <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
  %tmp3 = icmp slt <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
  %tmp3 = icmp slt <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
  %tmp3 = icmp slt <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
  %tmp3 = icmp slt <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
  %tmp3 = icmp slt <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
  %tmp3 = icmp slt <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ne <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ne <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

; Unsigned compares against zero have no immediate form: a zero register is
; materialized with movi first.

define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp uge <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp uge <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp uge <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp uge <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp uge <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp uge <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp uge <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = icmp ugt <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %tmp3 = icmp ugt <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %tmp3 = icmp ugt <4 x i16> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %tmp3 = icmp ugt <8 x i16> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.8b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %tmp3 = icmp ugt <2 x i32> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %tmp3 = icmp ugt <4 x i32> %A, zeroinitializer
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
;CHECK: movi {{v[0-9]+}}.16b, #0x0
;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %tmp3 = icmp ugt <2 x i64> %A, zeroinitializer
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
  ret <2 x i64> %tmp4
}

define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: movi v1.8b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
  %tmp3 = icmp ule <8 x i8> %A, zeroinitializer
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
; Using registers other than v0, v1 are possible, but would be odd.
; LS implemented as HS, so check reversed operands.
;CHECK: movi v1.16b, #0x0
;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
  %tmp3 = icmp ule <16 x i8> %A, zeroinitializer
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}
1064 define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
1065 ; Using registers other than v0, v1 are possible, but would be odd.
1066 ; LS implemented as HS, so check reversed operands.
1067 ;CHECK: movi v1.8b, #0x0
1068 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
1069 %tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
1070 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
1074 define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
1075 ; Using registers other than v0, v1 are possible, but would be odd.
1076 ; LS implemented as HS, so check reversed operands.
1077 ;CHECK: movi v1.16b, #0x0
1078 ;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
1079 %tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
1080 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
1084 define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
1085 ; Using registers other than v0, v1 are possible, but would be odd.
1086 ; LS implemented as HS, so check reversed operands.
1087 ;CHECK: movi v1.8b, #0x0
1088 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
1089 %tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
1090 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1094 define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
1095 ; Using registers other than v0, v1 are possible, but would be odd.
1096 ; LS implemented as HS, so check reversed operands.
1097 ;CHECK: movi v1.16b, #0x0
1098 ;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
1099 %tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
1100 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1104 define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
1105 ; Using registers other than v0, v1 are possible, but would be odd.
1106 ; LS implemented as HS, so check reversed operands.
1107 ;CHECK: movi v1.16b, #0x0
1108 ;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
1109 %tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
1110 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1114 define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
1115 ; Using registers other than v0, v1 are possible, but would be odd.
1116 ; LO implemented as HI, so check reversed operands.
1117 ;CHECK: movi v1.8b, #0x0
1118 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b
1119 %tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
1120 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
1124 define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
1125 ; Using registers other than v0, v1 are possible, but would be odd.
1126 ; LO implemented as HI, so check reversed operands.
1127 ;CHECK: movi v1.16b, #0x0
1128 ;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
1129 %tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
1130 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
1134 define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
1135 ; Using registers other than v0, v1 are possible, but would be odd.
1136 ; LO implemented as HI, so check reversed operands.
1137 ;CHECK: movi v1.8b, #0x0
1138 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
1139 %tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
1140 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
1144 define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
1145 ; Using registers other than v0, v1 are possible, but would be odd.
1146 ; LO implemented as HI, so check reversed operands.
1147 ;CHECK: movi v1.16b, #0x0
1148 ;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
1149 %tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
1150 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
1154 define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
1155 ; Using registers other than v0, v1 are possible, but would be odd.
1156 ; LO implemented as HI, so check reversed operands.
1157 ;CHECK: movi v1.8b, #0x0
1158 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
1159 %tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
1160 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1164 define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
1165 ; Using registers other than v0, v1 are possible, but would be odd.
1166 ; LO implemented as HI, so check reversed operands.
1167 ;CHECK: movi v1.16b, #0x0
1168 ;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
1169 %tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
1170 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1174 define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
1175 ; Using registers other than v0, v1 are possible, but would be odd.
1176 ; LO implemented as HI, so check reversed operands.
1177 ;CHECK: movi v1.16b, #0x0
1178 ;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
1179 %tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
1180 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1185 define <2 x i32> @fcmoeq2xfloat(<2 x float> %A, <2 x float> %B) {
1186 ;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
1187 %tmp3 = fcmp oeq <2 x float> %A, %B
1188 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1192 define <4 x i32> @fcmoeq4xfloat(<4 x float> %A, <4 x float> %B) {
1193 ;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
1194 %tmp3 = fcmp oeq <4 x float> %A, %B
1195 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1198 define <2 x i64> @fcmoeq2xdouble(<2 x double> %A, <2 x double> %B) {
1199 ;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
1200 %tmp3 = fcmp oeq <2 x double> %A, %B
1201 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1205 define <2 x i32> @fcmoge2xfloat(<2 x float> %A, <2 x float> %B) {
1206 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
1207 %tmp3 = fcmp oge <2 x float> %A, %B
1208 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1212 define <4 x i32> @fcmoge4xfloat(<4 x float> %A, <4 x float> %B) {
1213 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
1214 %tmp3 = fcmp oge <4 x float> %A, %B
1215 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1218 define <2 x i64> @fcmoge2xdouble(<2 x double> %A, <2 x double> %B) {
1219 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
1220 %tmp3 = fcmp oge <2 x double> %A, %B
1221 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1225 define <2 x i32> @fcmogt2xfloat(<2 x float> %A, <2 x float> %B) {
1226 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
1227 %tmp3 = fcmp ogt <2 x float> %A, %B
1228 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1232 define <4 x i32> @fcmogt4xfloat(<4 x float> %A, <4 x float> %B) {
1233 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
1234 %tmp3 = fcmp ogt <4 x float> %A, %B
1235 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1238 define <2 x i64> @fcmogt2xdouble(<2 x double> %A, <2 x double> %B) {
1239 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
1240 %tmp3 = fcmp ogt <2 x double> %A, %B
1241 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1245 define <2 x i32> @fcmole2xfloat(<2 x float> %A, <2 x float> %B) {
1246 ; Using registers other than v0, v1 are possible, but would be odd.
1247 ; OLE implemented as OGE, so check reversed operands.
1248 ;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
1249 %tmp3 = fcmp ole <2 x float> %A, %B
1250 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1254 define <4 x i32> @fcmole4xfloat(<4 x float> %A, <4 x float> %B) {
1255 ; Using registers other than v0, v1 are possible, but would be odd.
1256 ; OLE implemented as OGE, so check reversed operands.
1257 ;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
1258 %tmp3 = fcmp ole <4 x float> %A, %B
1259 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1262 define <2 x i64> @fcmole2xdouble(<2 x double> %A, <2 x double> %B) {
1263 ; Using registers other than v0, v1 are possible, but would be odd.
1264 ; OLE implemented as OGE, so check reversed operands.
1265 ;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
1266 %tmp3 = fcmp ole <2 x double> %A, %B
1267 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1271 define <2 x i32> @fcmolt2xfloat(<2 x float> %A, <2 x float> %B) {
1272 ; Using registers other than v0, v1 are possible, but would be odd.
1273 ; OLT implemented as OGT, so check reversed operands.
1274 ;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1275 %tmp3 = fcmp olt <2 x float> %A, %B
1276 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1280 define <4 x i32> @fcmolt4xfloat(<4 x float> %A, <4 x float> %B) {
1281 ; Using registers other than v0, v1 are possible, but would be odd.
1282 ; OLT implemented as OGT, so check reversed operands.
1283 ;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1284 %tmp3 = fcmp olt <4 x float> %A, %B
1285 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1288 define <2 x i64> @fcmolt2xdouble(<2 x double> %A, <2 x double> %B) {
1289 ; Using registers other than v0, v1 are possible, but would be odd.
1290 ; OLT implemented as OGT, so check reversed operands.
1291 ;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1292 %tmp3 = fcmp olt <2 x double> %A, %B
1293 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1297 define <2 x i32> @fcmone2xfloat(<2 x float> %A, <2 x float> %B) {
1298 ; Using registers other than v0, v1 are possible, but would be odd.
1299 ; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
1300 ;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
1301 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1302 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1303 %tmp3 = fcmp one <2 x float> %A, %B
1304 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1308 define <4 x i32> @fcmone4xfloat(<4 x float> %A, <4 x float> %B) {
1309 ; Using registers other than v0, v1 are possible, but would be odd.
1310 ; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
1311 ;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
1312 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1313 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1314 %tmp3 = fcmp one <4 x float> %A, %B
1315 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1318 define <2 x i64> @fcmone2xdouble(<2 x double> %A, <2 x double> %B) {
1319 ; Using registers other than v0, v1 are possible, but would be odd.
1320 ; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
1321 ;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
1322 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1323 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1324 ; Reversed operands for the OLT half are verified by the CHECK lines above.
1325 %tmp3 = fcmp one <2 x double> %A, %B
1326 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1331 define <2 x i32> @fcmord2xfloat(<2 x float> %A, <2 x float> %B) {
1332 ; Using registers other than v0, v1 are possible, but would be odd.
1333 ; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
1334 ;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
1335 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1336 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1337 %tmp3 = fcmp ord <2 x float> %A, %B
1338 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1343 define <4 x i32> @fcmord4xfloat(<4 x float> %A, <4 x float> %B) {
1344 ; Using registers other than v0, v1 are possible, but would be odd.
1345 ; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
1346 ;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
1347 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1348 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1349 %tmp3 = fcmp ord <4 x float> %A, %B
1350 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1354 define <2 x i64> @fcmord2xdouble(<2 x double> %A, <2 x double> %B) {
1355 ; Using registers other than v0, v1 are possible, but would be odd.
1356 ; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
1357 ;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
1358 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1359 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1360 %tmp3 = fcmp ord <2 x double> %A, %B
1361 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1366 define <2 x i32> @fcmuno2xfloat(<2 x float> %A, <2 x float> %B) {
1367 ; Using registers other than v0, v1 are possible, but would be odd.
1368 ; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
1369 ;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
1370 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1371 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1372 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1373 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1374 %tmp3 = fcmp uno <2 x float> %A, %B
1375 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1379 define <4 x i32> @fcmuno4xfloat(<4 x float> %A, <4 x float> %B) {
1380 ; Using registers other than v0, v1 are possible, but would be odd.
1381 ; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
1382 ;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
1383 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1384 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1385 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1386 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1387 %tmp3 = fcmp uno <4 x float> %A, %B
1388 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1392 define <2 x i64> @fcmuno2xdouble(<2 x double> %A, <2 x double> %B) {
1393 ; Using registers other than v0, v1 are possible, but would be odd.
1394 ; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
1395 ;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
1396 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1397 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1398 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1399 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1400 %tmp3 = fcmp uno <2 x double> %A, %B
1401 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1405 define <2 x i32> @fcmueq2xfloat(<2 x float> %A, <2 x float> %B) {
1406 ; Using registers other than v0, v1 are possible, but would be odd.
1407 ; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
1408 ;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
1409 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1410 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1411 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1412 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1413 %tmp3 = fcmp ueq <2 x float> %A, %B
1414 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1418 define <4 x i32> @fcmueq4xfloat(<4 x float> %A, <4 x float> %B) {
1419 ; Using registers other than v0, v1 are possible, but would be odd.
1420 ; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
1421 ;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
1422 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1423 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1424 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1425 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1426 %tmp3 = fcmp ueq <4 x float> %A, %B
1427 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1431 define <2 x i64> @fcmueq2xdouble(<2 x double> %A, <2 x double> %B) {
1432 ; Using registers other than v0, v1 are possible, but would be odd.
1433 ; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
1434 ;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
1435 ;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1436 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1437 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1438 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1439 %tmp3 = fcmp ueq <2 x double> %A, %B
1440 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1444 define <2 x i32> @fcmuge2xfloat(<2 x float> %A, <2 x float> %B) {
1445 ; Using registers other than v0, v1 are possible, but would be odd.
1446 ; UGE = ULE with swapped operands, ULE implemented as !OGT.
1447 ;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
1448 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1449 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1450 %tmp3 = fcmp uge <2 x float> %A, %B
1451 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1455 define <4 x i32> @fcmuge4xfloat(<4 x float> %A, <4 x float> %B) {
1456 ; Using registers other than v0, v1 are possible, but would be odd.
1457 ; UGE = ULE with swapped operands, ULE implemented as !OGT.
1458 ;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
1459 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1460 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1461 %tmp3 = fcmp uge <4 x float> %A, %B
1462 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1466 define <2 x i64> @fcmuge2xdouble(<2 x double> %A, <2 x double> %B) {
1467 ; Using registers other than v0, v1 are possible, but would be odd.
1468 ; UGE = ULE with swapped operands, ULE implemented as !OGT.
1469 ;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
1470 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1471 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1472 %tmp3 = fcmp uge <2 x double> %A, %B
1473 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1477 define <2 x i32> @fcmugt2xfloat(<2 x float> %A, <2 x float> %B) {
1478 ; Using registers other than v0, v1 are possible, but would be odd.
1479 ; UGT = ULT with swapped operands, ULT implemented as !OGE.
1480 ;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
1481 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1482 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1483 %tmp3 = fcmp ugt <2 x float> %A, %B
1484 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1488 define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) {
1489 ; Using registers other than v0, v1 are possible, but would be odd.
1490 ; UGT = ULT with swapped operands, ULT implemented as !OGE.
1491 ;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
1492 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1493 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1494 %tmp3 = fcmp ugt <4 x float> %A, %B
1495 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1498 define <2 x i64> @fcmugt2xdouble(<2 x double> %A, <2 x double> %B) {
1499 ;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
1500 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1501 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1502 %tmp3 = fcmp ugt <2 x double> %A, %B
1503 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1507 define <2 x i32> @fcmule2xfloat(<2 x float> %A, <2 x float> %B) {
1508 ; Using registers other than v0, v1 are possible, but would be odd.
1509 ; ULE implemented as !OGT.
1510 ;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
1511 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1512 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1513 %tmp3 = fcmp ule <2 x float> %A, %B
1514 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1518 define <4 x i32> @fcmule4xfloat(<4 x float> %A, <4 x float> %B) {
1519 ; Using registers other than v0, v1 are possible, but would be odd.
1520 ; ULE implemented as !OGT.
1521 ;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
1522 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1523 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1524 %tmp3 = fcmp ule <4 x float> %A, %B
1525 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1528 define <2 x i64> @fcmule2xdouble(<2 x double> %A, <2 x double> %B) {
1529 ; Using registers other than v0, v1 are possible, but would be odd.
1530 ; ULE implemented as !OGT.
1531 ;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
1532 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1533 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1534 %tmp3 = fcmp ule <2 x double> %A, %B
1535 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1539 define <2 x i32> @fcmult2xfloat(<2 x float> %A, <2 x float> %B) {
1540 ; Using registers other than v0, v1 are possible, but would be odd.
1541 ; ULT implemented as !OGE.
1542 ;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
1543 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1544 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1545 %tmp3 = fcmp ult <2 x float> %A, %B
1546 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1550 define <4 x i32> @fcmult4xfloat(<4 x float> %A, <4 x float> %B) {
1551 ; Using registers other than v0, v1 are possible, but would be odd.
1552 ; ULT implemented as !OGE.
1553 ;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
1554 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1555 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1556 %tmp3 = fcmp ult <4 x float> %A, %B
1557 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1560 define <2 x i64> @fcmult2xdouble(<2 x double> %A, <2 x double> %B) {
1561 ; Using registers other than v0, v1 are possible, but would be odd.
1562 ; ULT implemented as !OGE.
1563 ;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
1564 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1565 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1566 %tmp3 = fcmp ult <2 x double> %A, %B
1567 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1571 define <2 x i32> @fcmune2xfloat(<2 x float> %A, <2 x float> %B) {
1572 ; Using registers other than v0, v1 are possible, but would be odd.
1573 ; UNE = !OEQ.
1574 ;CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s
1575 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1576 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1577 %tmp3 = fcmp une <2 x float> %A, %B
1578 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1582 define <4 x i32> @fcmune4xfloat(<4 x float> %A, <4 x float> %B) {
1583 ; Using registers other than v0, v1 are possible, but would be odd.
1584 ; UNE = !OEQ.
1585 ;CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s
1586 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1587 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1588 %tmp3 = fcmp une <4 x float> %A, %B
1589 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1592 define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
1593 ; Using registers other than v0, v1 are possible, but would be odd.
1594 ; UNE = !OEQ.
1595 ;CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d
1596 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1597 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1598 %tmp3 = fcmp une <2 x double> %A, %B
1599 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1603 define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
1604 ;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1605 %tmp3 = fcmp oeq <2 x float> %A, zeroinitializer
1606 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1610 define <4 x i32> @fcmoeqz4xfloat(<4 x float> %A) {
1611 ;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1612 %tmp3 = fcmp oeq <4 x float> %A, zeroinitializer
1613 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1616 define <2 x i64> @fcmoeqz2xdouble(<2 x double> %A) {
1617 ;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1618 %tmp3 = fcmp oeq <2 x double> %A, zeroinitializer
1619 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1624 define <2 x i32> @fcmogez2xfloat(<2 x float> %A) {
1625 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1626 %tmp3 = fcmp oge <2 x float> %A, zeroinitializer
1627 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1631 define <4 x i32> @fcmogez4xfloat(<4 x float> %A) {
1632 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1633 %tmp3 = fcmp oge <4 x float> %A, zeroinitializer
1634 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1637 define <2 x i64> @fcmogez2xdouble(<2 x double> %A) {
1638 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1639 %tmp3 = fcmp oge <2 x double> %A, zeroinitializer
1640 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1644 define <2 x i32> @fcmogtz2xfloat(<2 x float> %A) {
1645 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1646 %tmp3 = fcmp ogt <2 x float> %A, zeroinitializer
1647 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1651 define <4 x i32> @fcmogtz4xfloat(<4 x float> %A) {
1652 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1653 %tmp3 = fcmp ogt <4 x float> %A, zeroinitializer
1654 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1657 define <2 x i64> @fcmogtz2xdouble(<2 x double> %A) {
1658 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1659 %tmp3 = fcmp ogt <2 x double> %A, zeroinitializer
1660 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1664 define <2 x i32> @fcmoltz2xfloat(<2 x float> %A) {
1665 ;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1666 %tmp3 = fcmp olt <2 x float> %A, zeroinitializer
1667 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1671 define <4 x i32> @fcmoltz4xfloat(<4 x float> %A) {
1672 ;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1673 %tmp3 = fcmp olt <4 x float> %A, zeroinitializer
1674 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1678 define <2 x i64> @fcmoltz2xdouble(<2 x double> %A) {
1679 ;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1680 %tmp3 = fcmp olt <2 x double> %A, zeroinitializer
1681 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1685 define <2 x i32> @fcmolez2xfloat(<2 x float> %A) {
1686 ;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1687 %tmp3 = fcmp ole <2 x float> %A, zeroinitializer
1688 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1692 define <4 x i32> @fcmolez4xfloat(<4 x float> %A) {
1693 ;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1694 %tmp3 = fcmp ole <4 x float> %A, zeroinitializer
1695 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1699 define <2 x i64> @fcmolez2xdouble(<2 x double> %A) {
1700 ;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1701 %tmp3 = fcmp ole <2 x double> %A, zeroinitializer
1702 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1706 define <2 x i32> @fcmonez2xfloat(<2 x float> %A) {
1707 ; ONE with zero = OLT | OGT
1708 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1709 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1710 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1711 %tmp3 = fcmp one <2 x float> %A, zeroinitializer
1712 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1716 define <4 x i32> @fcmonez4xfloat(<4 x float> %A) {
1717 ; ONE with zero = OLT | OGT
1718 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1719 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1720 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1721 %tmp3 = fcmp one <4 x float> %A, zeroinitializer
1722 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1725 define <2 x i64> @fcmonez2xdouble(<2 x double> %A) {
1726 ; ONE with zero = OLT | OGT
1727 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1728 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1729 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1730 %tmp3 = fcmp one <2 x double> %A, zeroinitializer
1731 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1735 define <2 x i32> @fcmordz2xfloat(<2 x float> %A) {
1736 ; ORD with zero = OLT | OGE
1737 ;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1738 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1739 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1740 %tmp3 = fcmp ord <2 x float> %A, zeroinitializer
1741 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1745 define <4 x i32> @fcmordz4xfloat(<4 x float> %A) {
1746 ; ORD with zero = OLT | OGE
1747 ;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1748 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1749 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1750 %tmp3 = fcmp ord <4 x float> %A, zeroinitializer
1751 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1754 define <2 x i64> @fcmordz2xdouble(<2 x double> %A) {
1755 ; ORD with zero = OLT | OGE
1756 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1757 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1758 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1759 %tmp3 = fcmp ord <2 x double> %A, zeroinitializer
1760 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1764 define <2 x i32> @fcmueqz2xfloat(<2 x float> %A) {
1765 ; UEQ with zero = !ONE = !(OLT |OGT)
1766 ;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1767 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1768 ;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1769 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1770 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1771 %tmp3 = fcmp ueq <2 x float> %A, zeroinitializer
1772 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1776 define <4 x i32> @fcmueqz4xfloat(<4 x float> %A) {
1777 ; UEQ with zero = !ONE = !(OLT |OGT)
1778 ;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1779 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1780 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1781 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1782 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1783 %tmp3 = fcmp ueq <4 x float> %A, zeroinitializer
1784 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1788 define <2 x i64> @fcmueqz2xdouble(<2 x double> %A) {
1789 ; UEQ with zero = !ONE = !(OLT |OGT)
1790 ;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1791 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1792 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1793 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1794 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1795 %tmp3 = fcmp ueq <2 x double> %A, zeroinitializer
1796 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1800 define <2 x i32> @fcmugez2xfloat(<2 x float> %A) {
1801 ; UGE with zero = !OLT
1802 ;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1803 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1804 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1805 %tmp3 = fcmp uge <2 x float> %A, zeroinitializer
1806 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1810 define <4 x i32> @fcmugez4xfloat(<4 x float> %A) {
1811 ; UGE with zero = !OLT
1812 ;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1813 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1814 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1815 %tmp3 = fcmp uge <4 x float> %A, zeroinitializer
1816 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1819 define <2 x i64> @fcmugez2xdouble(<2 x double> %A) {
1820 ; UGE with zero = !OLT
1821 ;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1822 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1823 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1824 %tmp3 = fcmp uge <2 x double> %A, zeroinitializer
1825 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
1829 define <2 x i32> @fcmugtz2xfloat(<2 x float> %A) {
1830 ; UGT with zero = !OLE
1831 ;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
1832 ;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
1833 ;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
1834 %tmp3 = fcmp ugt <2 x float> %A, zeroinitializer
1835 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
1839 define <4 x i32> @fcmugtz4xfloat(<4 x float> %A) {
1840 ; UGT with zero = !OLE
1841 ;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
1842 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1843 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1844 %tmp3 = fcmp ugt <4 x float> %A, zeroinitializer
1845 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
; Same UGT-vs-zero lowering, for the 2 x double form (.2d compare).
define <2 x i64> @fcmugtz2xdouble(<2 x double> %A) {
; UGT with zero = !OLE
;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ugt <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered < zero is lowered as NOT(ordered >= zero): FCMGE #0.0 then invert.
define <2 x i32> @fcmultz2xfloat(<2 x float> %A) {
; ULT with zero = !OGE
;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ult <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
; Same ULT-vs-zero lowering as the 2s case, for the 4 x float form.
define <4 x i32> @fcmultz4xfloat(<4 x float> %A) {
; ULT with zero = !OGE
;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
; Same ULT-vs-zero lowering, for the 2 x double form (.2d compare).
define <2 x i64> @fcmultz2xdouble(<2 x double> %A) {
; ULT with zero = !OGE
;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ult <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered <= zero is lowered as NOT(ordered > zero): FCMGT #0.0 then invert.
define <2 x i32> @fcmulez2xfloat(<2 x float> %A) {
; ULE with zero = !OGT
;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp ule <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
; Same ULE-vs-zero lowering as the 2s case, for the 4 x float form.
define <4 x i32> @fcmulez4xfloat(<4 x float> %A) {
; ULE with zero = !OGT
;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
; Same ULE-vs-zero lowering, for the 2 x double form (.2d compare).
define <2 x i64> @fcmulez2xdouble(<2 x double> %A) {
; ULE with zero = !OGT
;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp ule <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered != zero is lowered as NOT(ordered == zero): FCMEQ #0.0 then invert.
define <2 x i32> @fcmunez2xfloat(<2 x float> %A) {
; UNE with zero = !OEQ with zero
;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp une <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
; Same UNE-vs-zero lowering as the 2s case, for the 4 x float form.
define <4 x i32> @fcmunez4xfloat(<4 x float> %A) {
; UNE with zero = !OEQ with zero
;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
; Same UNE-vs-zero lowering, for the 2 x double form (.2d compare).
define <2 x i64> @fcmunez2xdouble(<2 x double> %A) {
; UNE with zero = !OEQ with zero
;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp une <2 x double> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
; Unordered-with-zero (either lane NaN): ORD is materialized as the OR of the
; two complementary ordered compares (FCMGE | FCMLT), then inverted via
; movi #0xff + eor to produce the UNO mask.
define <2 x i32> @fcmunoz2xfloat(<2 x float> %A) {
; UNO with zero = !ORD = !(OLT | OGE)
;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
;CHECK-NEXT: movi {{v[0-9]+}}.8b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
%tmp3 = fcmp uno <2 x float> %A, zeroinitializer
%tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
; Same UNO-vs-zero lowering as the 2s case, for the 4 x float form.
define <4 x i32> @fcmunoz4xfloat(<4 x float> %A) {
; UNO with zero = !ORD = !(OLT | OGE)
;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
%tmp3 = fcmp uno <4 x float> %A, zeroinitializer
%tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
1971 define <2 x i64> @fcmunoz2xdouble(<2 x double> %A) {
1972 ; UNO with zero = !ORD = !(OLT | OGE)
1973 ;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1974 ;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
1975 ;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1976 ;CHECK-NEXT: movi {{v[0-9]+}}.16b, #0xff
1977 ;CHECK-NEXT: eor {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
1978 %tmp3 = fcmp uno <2 x double> %A, zeroinitializer
1979 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>