X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=test%2FCodeGen%2FARM%2Fvrev.ll;h=a20d4b6baf293172cadf0c6cfe0536079ba7e464;hp=51d4f99747fc972053976f37891387234323f76c;hb=b1337aba53a9112aa5f45a54e7fb6f5102071afa;hpb=198d8baafbfdfcf5a5e219602a5d94ed263973b4

diff --git a/test/CodeGen/ARM/vrev.ll b/test/CodeGen/ARM/vrev.ll
index 51d4f99747f..a20d4b6baf2 100644
--- a/test/CodeGen/ARM/vrev.ll
+++ b/test/CodeGen/ARM/vrev.ll
@@ -3,7 +3,7 @@
 define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D8:
 ;CHECK: vrev64.8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <8 x i8> %tmp2
 }
@@ -11,7 +11,7 @@ define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
 define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D16:
 ;CHECK: vrev64.16
-  %tmp1 = load <4 x i16>* %A
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
   %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x i16> %tmp2
 }
@@ -19,7 +19,7 @@ define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
 define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D32:
 ;CHECK: vrev64.32
-  %tmp1 = load <2 x i32>* %A
+  %tmp1 = load <2 x i32>, <2 x i32>* %A
   %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
   ret <2 x i32> %tmp2
 }
@@ -27,7 +27,7 @@ define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
 define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Df:
 ;CHECK: vrev64.32
-  %tmp1 = load <2 x float>* %A
+  %tmp1 = load <2 x float>, <2 x float>* %A
   %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
   ret <2 x float> %tmp2
 }
@@ -35,7 +35,7 @@ define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
 define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Q8:
 ;CHECK: vrev64.8
-  %tmp1 = load <16 x i8>* %A
+  %tmp1 = load <16 x i8>, <16 x i8>* %A
   %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
   ret <16 x i8> %tmp2
 }
@@ -43,7 +43,7 @@ define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
 define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Q16:
 ;CHECK: vrev64.16
-  %tmp1 = load <8 x i16>* %A
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
   %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
   ret <8 x i16> %tmp2
 }
@@ -51,7 +51,7 @@ define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
 define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Q32:
 ;CHECK: vrev64.32
-  %tmp1 = load <4 x i32>* %A
+  %tmp1 = load <4 x i32>, <4 x i32>* %A
   %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   ret <4 x i32> %tmp2
 }
@@ -59,7 +59,7 @@ define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
 define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64Qf:
 ;CHECK: vrev64.32
-  %tmp1 = load <4 x float>* %A
+  %tmp1 = load <4 x float>, <4 x float>* %A
   %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   ret <4 x float> %tmp2
 }
@@ -67,7 +67,7 @@ define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
 define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32D8:
 ;CHECK: vrev32.8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
   ret <8 x i8> %tmp2
 }
@@ -75,7 +75,7 @@ define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
 define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32D16:
 ;CHECK: vrev32.16
-  %tmp1 = load <4 x i16>* %A
+  %tmp1 = load <4 x i16>, <4 x i16>* %A
   %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
   ret <4 x i16> %tmp2
 }
@@ -83,7 +83,7 @@ define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
 define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32Q8:
 ;CHECK: vrev32.8
-  %tmp1 = load <16 x i8>* %A
+  %tmp1 = load <16 x i8>, <16 x i8>* %A
   %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
   ret <16 x i8> %tmp2
 }
@@ -91,7 +91,7 @@ define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
 define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32Q16:
 ;CHECK: vrev32.16
-  %tmp1 = load <8 x i16>* %A
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
   %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   ret <8 x i16> %tmp2
 }
@@ -99,7 +99,7 @@ define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
 define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev16D8:
 ;CHECK: vrev16.8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
   ret <8 x i8> %tmp2
 }
@@ -107,7 +107,7 @@ define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
 define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev16Q8:
 ;CHECK: vrev16.8
-  %tmp1 = load <16 x i8>* %A
+  %tmp1 = load <16 x i8>, <16 x i8>* %A
   %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
   ret <16 x i8> %tmp2
 }
@@ -117,7 +117,7 @@ define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
 define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: test_vrev64D8_undef:
 ;CHECK: vrev64.8
-  %tmp1 = load <8 x i8>* %A
+  %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 undef, i32 undef, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <8 x i8> %tmp2
 }
@@ -125,7 +125,7 @@ define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
 define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: test_vrev32Q16_undef:
 ;CHECK: vrev32.16
-  %tmp1 = load <8 x i16>* %A
+  %tmp1 = load <8 x i16>, <8 x i16>* %A
   %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
   ret <8 x i16> %tmp2
 }
@@ -136,7 +136,7 @@ define void @test_with_vcombine(<4 x float>* %v) nounwind {
 ;CHECK-LABEL: test_with_vcombine:
 ;CHECK-NOT: vext
 ;CHECK: vrev64.32
-  %tmp1 = load <4 x float>* %v, align 16
+  %tmp1 = load <4 x float>, <4 x float>* %v, align 16
   %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
   %tmp3 = extractelement <2 x double> %tmp2, i32 0
   %tmp4 = bitcast double %tmp3 to <2 x float>
@@ -155,7 +155,7 @@ define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst
 ; CHECK: vst1.32
 entry:
   %0 = bitcast <4 x i16>* %source to <8 x i16>*
-  %tmp2 = load <8 x i16>* %0, align 4
+  %tmp2 = load <8 x i16>, <8 x i16>* %0, align 4
   %tmp3 = extractelement <8 x i16> %tmp2, i32 6
   %tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
   %tmp9 = extractelement <8 x i16> %tmp2, i32 5
@@ -171,7 +171,7 @@ define void @float_vrev64(float* nocapture %source, <4 x float>* nocapture %dest
 ; CHECK: vrev64.32
 entry:
   %0 = bitcast float* %source to <4 x float>*
-  %tmp2 = load <4 x float>* %0, align 4
+  %tmp2 = load <4 x float>, <4 x float>* %0, align 4
   %tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
   %arrayidx8 = getelementptr inbounds <4 x float>, <4 x float>* %dest, i32 11
   store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4