From d4b4cf524b8afc342b618254d69f48f214b60093 Mon Sep 17 00:00:00 2001 From: Bob Wilson Date: Fri, 21 Aug 2009 00:01:42 +0000 Subject: [PATCH] Remove Neon intrinsics for VZIP, VUZP, and VTRN. We will represent these as vector shuffles. Temporarily remove the tests for these operations until the new implementation is working. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@79579 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IntrinsicsARM.td | 12 --- lib/Target/ARM/ARMISelDAGToDAG.cpp | 57 -------------- test/CodeGen/ARM/vtrn.ll | 117 ----------------------------- test/CodeGen/ARM/vuzp.ll | 117 ----------------------------- test/CodeGen/ARM/vzip.ll | 117 ----------------------------- 5 files changed, 420 deletions(-) delete mode 100644 test/CodeGen/ARM/vtrn.ll delete mode 100644 test/CodeGen/ARM/vuzp.ll delete mode 100644 test/CodeGen/ARM/vzip.ll diff --git a/include/llvm/IntrinsicsARM.td b/include/llvm/IntrinsicsARM.td index f78b20a4633..e9dbb490b47 100644 --- a/include/llvm/IntrinsicsARM.td +++ b/include/llvm/IntrinsicsARM.td @@ -61,9 +61,6 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". LLVMTruncatedElementVectorType<0>, LLVMTruncatedElementVectorType<0>], [IntrNoMem]>; - class Neon_2Result_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], - [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>; class Neon_CvtFxToFP_Intrinsic : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>; class Neon_CvtFPToFx_Intrinsic @@ -315,15 +312,6 @@ def int_arm_neon_vtbx2 : Neon_Tbl4Arg_Intrinsic; def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic; def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic; -// Vector Transpose. -def int_arm_neon_vtrn : Neon_2Result_Intrinsic; - -// Vector Interleave (vzip). -def int_arm_neon_vzip : Neon_2Result_Intrinsic; - -// Vector Deinterleave (vuzp). -def int_arm_neon_vuzp : Neon_2Result_Intrinsic; - let TargetPrefix = "arm" in { // De-interleaving vector loads from N-element structures. diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 5bf1781ff64..3ef15a119af 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -1415,63 +1415,6 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) { N->getOperand(4), N->getOperand(5), Chain }; return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 8); } - - case ISD::INTRINSIC_WO_CHAIN: { - unsigned IntNo = cast(N->getOperand(0))->getZExtValue(); - EVT VT = N->getValueType(0); - unsigned Opc = 0; - - // Match intrinsics that return multiple values. 
- switch (IntNo) { - default: break; - - case Intrinsic::arm_neon_vtrn: - switch (VT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::v8i8: Opc = ARM::VTRNd8; break; - case MVT::v4i16: Opc = ARM::VTRNd16; break; - case MVT::v2f32: - case MVT::v2i32: Opc = ARM::VTRNd32; break; - case MVT::v16i8: Opc = ARM::VTRNq8; break; - case MVT::v8i16: Opc = ARM::VTRNq16; break; - case MVT::v4f32: - case MVT::v4i32: Opc = ARM::VTRNq32; break; - } - return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1), - N->getOperand(2)); - - case Intrinsic::arm_neon_vuzp: - switch (VT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::v8i8: Opc = ARM::VUZPd8; break; - case MVT::v4i16: Opc = ARM::VUZPd16; break; - case MVT::v2f32: - case MVT::v2i32: Opc = ARM::VUZPd32; break; - case MVT::v16i8: Opc = ARM::VUZPq8; break; - case MVT::v8i16: Opc = ARM::VUZPq16; break; - case MVT::v4f32: - case MVT::v4i32: Opc = ARM::VUZPq32; break; - } - return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1), - N->getOperand(2)); - - case Intrinsic::arm_neon_vzip: - switch (VT.getSimpleVT().SimpleTy) { - default: return NULL; - case MVT::v8i8: Opc = ARM::VZIPd8; break; - case MVT::v4i16: Opc = ARM::VZIPd16; break; - case MVT::v2f32: - case MVT::v2i32: Opc = ARM::VZIPd32; break; - case MVT::v16i8: Opc = ARM::VZIPq8; break; - case MVT::v8i16: Opc = ARM::VZIPq16; break; - case MVT::v4f32: - case MVT::v4i32: Opc = ARM::VZIPq32; break; - } - return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1), - N->getOperand(2)); - } - break; - } } return SelectCode(Op); diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll deleted file mode 100644 index 36a05617055..00000000000 --- a/test/CodeGen/ARM/vtrn.ll +++ /dev/null @@ -1,117 +0,0 @@ -; RUN: llvm-as < %s | llc -march=arm -mattr=+neon | FileCheck %s - -%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> } -%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> } -%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> } -%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> } - -%struct.__builtin_neon_v16qi2 = type { <16 x i8>, <16 x i8> } -%struct.__builtin_neon_v8hi2 = type { <8 x i16>, <8 x i16> } -%struct.__builtin_neon_v4si2 = type { <4 x i32>, <4 x i32> } -%struct.__builtin_neon_v4sf2 = type { <4 x float>, <4 x float> } - -define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK: vtrni8: -;CHECK: vtrn.8 - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = call %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vtrn.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v8qi2 %tmp3, 1 - %tmp6 = add <8 x i8> %tmp4, %tmp5 - ret <8 x i8> %tmp6 -} - -define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK: vtrni16: -;CHECK: vtrn.16 - %tmp1 = load <4 x i16>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = call %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vtrn.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4hi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4hi2 %tmp3, 1 - %tmp6 = add <4 x i16> %tmp4, %tmp5 - ret <4 x i16> %tmp6 -} - -define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK: vtrni32: -;CHECK: vtrn.32 - %tmp1 = load <2 x i32>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = call %struct.__builtin_neon_v2si2 @llvm.arm.neon.vtrn.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v2si2 %tmp3, 0 - 
%tmp5 = extractvalue %struct.__builtin_neon_v2si2 %tmp3, 1 - %tmp6 = add <2 x i32> %tmp4, %tmp5 - ret <2 x i32> %tmp6 -} - -define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind { -;CHECK: vtrnf: -;CHECK: vtrn.32 - %tmp1 = load <2 x float>* %A - %tmp2 = load <2 x float>* %B - %tmp3 = call %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vtrn.v2f32(<2 x float> %tmp1, <2 x float> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v2sf2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v2sf2 %tmp3, 1 - %tmp6 = add <2 x float> %tmp4, %tmp5 - ret <2 x float> %tmp6 -} - -define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK: vtrnQi8: -;CHECK: vtrn.8 - %tmp1 = load <16 x i8>* %A - %tmp2 = load <16 x i8>* %B - %tmp3 = call %struct.__builtin_neon_v16qi2 @llvm.arm.neon.vtrn.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v16qi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v16qi2 %tmp3, 1 - %tmp6 = add <16 x i8> %tmp4, %tmp5 - ret <16 x i8> %tmp6 -} - -define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK: vtrnQi16: -;CHECK: vtrn.16 - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i16>* %B - %tmp3 = call %struct.__builtin_neon_v8hi2 @llvm.arm.neon.vtrn.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v8hi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v8hi2 %tmp3, 1 - %tmp6 = add <8 x i16> %tmp4, %tmp5 - ret <8 x i16> %tmp6 -} - -define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK: vtrnQi32: -;CHECK: vtrn.32 - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i32>* %B - %tmp3 = call %struct.__builtin_neon_v4si2 @llvm.arm.neon.vtrn.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4si2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4si2 %tmp3, 1 - %tmp6 = add <4 x i32> %tmp4, %tmp5 - ret <4 x i32> %tmp6 -} - -define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind { -;CHECK: vtrnQf: -;CHECK: vtrn.32 - %tmp1 = load <4 x float>* %A - %tmp2 = load <4 x float>* %B - %tmp3 = call %struct.__builtin_neon_v4sf2 @llvm.arm.neon.vtrn.v4f32(<4 x float> %tmp1, <4 x float> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4sf2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4sf2 %tmp3, 1 - %tmp6 = add <4 x float> %tmp4, %tmp5 - ret <4 x float> %tmp6 -} - -declare %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vtrn.v8i8(<8 x i8>, <8 x i8>) nounwind readnone -declare %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vtrn.v4i16(<4 x i16>, <4 x i16>) nounwind readnone -declare %struct.__builtin_neon_v2si2 @llvm.arm.neon.vtrn.v2i32(<2 x i32>, <2 x i32>) nounwind readnone -declare %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vtrn.v2f32(<2 x float>, <2 x float>) nounwind readnone - -declare %struct.__builtin_neon_v16qi2 @llvm.arm.neon.vtrn.v16i8(<16 x i8>, <16 x i8>) nounwind readnone -declare %struct.__builtin_neon_v8hi2 @llvm.arm.neon.vtrn.v8i16(<8 x i16>, <8 x i16>) nounwind readnone -declare %struct.__builtin_neon_v4si2 @llvm.arm.neon.vtrn.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -declare %struct.__builtin_neon_v4sf2 @llvm.arm.neon.vtrn.v4f32(<4 x float>, <4 x float>) nounwind readnone diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll deleted file mode 100644 index 883e0722abc..00000000000 --- a/test/CodeGen/ARM/vuzp.ll +++ /dev/null @@ -1,117 +0,0 @@ -; RUN: llvm-as < %s | llc -march=arm -mattr=+neon | FileCheck %s - -%struct.__builtin_neon_v8qi2 = type { <8 x i8>, 
<8 x i8> } -%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> } -%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> } -%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> } - -%struct.__builtin_neon_v16qi2 = type { <16 x i8>, <16 x i8> } -%struct.__builtin_neon_v8hi2 = type { <8 x i16>, <8 x i16> } -%struct.__builtin_neon_v4si2 = type { <4 x i32>, <4 x i32> } -%struct.__builtin_neon_v4sf2 = type { <4 x float>, <4 x float> } - -define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK: vuzpi8: -;CHECK: vuzp.8 - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = call %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vuzp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v8qi2 %tmp3, 1 - %tmp6 = add <8 x i8> %tmp4, %tmp5 - ret <8 x i8> %tmp6 -} - -define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK: vuzpi16: -;CHECK: vuzp.16 - %tmp1 = load <4 x i16>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = call %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vuzp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4hi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4hi2 %tmp3, 1 - %tmp6 = add <4 x i16> %tmp4, %tmp5 - ret <4 x i16> %tmp6 -} - -define <2 x i32> @vuzpi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK: vuzpi32: -;CHECK: vuzp.32 - %tmp1 = load <2 x i32>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = call %struct.__builtin_neon_v2si2 @llvm.arm.neon.vuzp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v2si2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v2si2 %tmp3, 1 - %tmp6 = add <2 x i32> %tmp4, %tmp5 - ret <2 x i32> %tmp6 -} - -define <2 x float> @vuzpf(<2 x float>* %A, <2 x float>* %B) nounwind { -;CHECK: vuzpf: -;CHECK: vuzp.32 - %tmp1 = load <2 x float>* %A - %tmp2 = load <2 x float>* %B - %tmp3 = call %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vuzp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v2sf2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v2sf2 %tmp3, 1 - %tmp6 = add <2 x float> %tmp4, %tmp5 - ret <2 x float> %tmp6 -} - -define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK: vuzpQi8: -;CHECK: vuzp.8 - %tmp1 = load <16 x i8>* %A - %tmp2 = load <16 x i8>* %B - %tmp3 = call %struct.__builtin_neon_v16qi2 @llvm.arm.neon.vuzp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v16qi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v16qi2 %tmp3, 1 - %tmp6 = add <16 x i8> %tmp4, %tmp5 - ret <16 x i8> %tmp6 -} - -define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK: vuzpQi16: -;CHECK: vuzp.16 - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i16>* %B - %tmp3 = call %struct.__builtin_neon_v8hi2 @llvm.arm.neon.vuzp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v8hi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v8hi2 %tmp3, 1 - %tmp6 = add <8 x i16> %tmp4, %tmp5 - ret <8 x i16> %tmp6 -} - -define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK: vuzpQi32: -;CHECK: vuzp.32 - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i32>* %B - %tmp3 = call %struct.__builtin_neon_v4si2 @llvm.arm.neon.vuzp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4si2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4si2 
%tmp3, 1 - %tmp6 = add <4 x i32> %tmp4, %tmp5 - ret <4 x i32> %tmp6 -} - -define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind { -;CHECK: vuzpQf: -;CHECK: vuzp.32 - %tmp1 = load <4 x float>* %A - %tmp2 = load <4 x float>* %B - %tmp3 = call %struct.__builtin_neon_v4sf2 @llvm.arm.neon.vuzp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4sf2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4sf2 %tmp3, 1 - %tmp6 = add <4 x float> %tmp4, %tmp5 - ret <4 x float> %tmp6 -} - -declare %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vuzp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone -declare %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vuzp.v4i16(<4 x i16>, <4 x i16>) nounwind readnone -declare %struct.__builtin_neon_v2si2 @llvm.arm.neon.vuzp.v2i32(<2 x i32>, <2 x i32>) nounwind readnone -declare %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vuzp.v2f32(<2 x float>, <2 x float>) nounwind readnone - -declare %struct.__builtin_neon_v16qi2 @llvm.arm.neon.vuzp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone -declare %struct.__builtin_neon_v8hi2 @llvm.arm.neon.vuzp.v8i16(<8 x i16>, <8 x i16>) nounwind readnone -declare %struct.__builtin_neon_v4si2 @llvm.arm.neon.vuzp.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -declare %struct.__builtin_neon_v4sf2 @llvm.arm.neon.vuzp.v4f32(<4 x float>, <4 x float>) nounwind readnone diff --git a/test/CodeGen/ARM/vzip.ll b/test/CodeGen/ARM/vzip.ll deleted file mode 100644 index 0485b30954b..00000000000 --- a/test/CodeGen/ARM/vzip.ll +++ /dev/null @@ -1,117 +0,0 @@ -; RUN: llvm-as < %s | llc -march=arm -mattr=+neon | FileCheck %s - -%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> } -%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> } -%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> } -%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> } - -%struct.__builtin_neon_v16qi2 = type { <16 x i8>, <16 x i8> } -%struct.__builtin_neon_v8hi2 = type { <8 x i16>, <8 x i16> } -%struct.__builtin_neon_v4si2 = type { <4 x i32>, <4 x i32> } -%struct.__builtin_neon_v4sf2 = type { <4 x float>, <4 x float> } - -define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { -;CHECK: vzipi8: -;CHECK: vzip.8 - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = call %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vzip.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v8qi2 %tmp3, 1 - %tmp6 = add <8 x i8> %tmp4, %tmp5 - ret <8 x i8> %tmp6 -} - -define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { -;CHECK: vzipi16: -;CHECK: vzip.16 - %tmp1 = load <4 x i16>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = call %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vzip.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4hi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4hi2 %tmp3, 1 - %tmp6 = add <4 x i16> %tmp4, %tmp5 - ret <4 x i16> %tmp6 -} - -define <2 x i32> @vzipi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { -;CHECK: vzipi32: -;CHECK: vzip.32 - %tmp1 = load <2 x i32>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = call %struct.__builtin_neon_v2si2 @llvm.arm.neon.vzip.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v2si2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v2si2 %tmp3, 1 - %tmp6 = add <2 x i32> %tmp4, %tmp5 - ret <2 x i32> %tmp6 -} - -define <2 x float> @vzipf(<2 x float>* %A, <2 x float>* %B) nounwind { 
-;CHECK: vzipf: -;CHECK: vzip.32 - %tmp1 = load <2 x float>* %A - %tmp2 = load <2 x float>* %B - %tmp3 = call %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vzip.v2f32(<2 x float> %tmp1, <2 x float> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v2sf2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v2sf2 %tmp3, 1 - %tmp6 = add <2 x float> %tmp4, %tmp5 - ret <2 x float> %tmp6 -} - -define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { -;CHECK: vzipQi8: -;CHECK: vzip.8 - %tmp1 = load <16 x i8>* %A - %tmp2 = load <16 x i8>* %B - %tmp3 = call %struct.__builtin_neon_v16qi2 @llvm.arm.neon.vzip.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v16qi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v16qi2 %tmp3, 1 - %tmp6 = add <16 x i8> %tmp4, %tmp5 - ret <16 x i8> %tmp6 -} - -define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { -;CHECK: vzipQi16: -;CHECK: vzip.16 - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i16>* %B - %tmp3 = call %struct.__builtin_neon_v8hi2 @llvm.arm.neon.vzip.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v8hi2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v8hi2 %tmp3, 1 - %tmp6 = add <8 x i16> %tmp4, %tmp5 - ret <8 x i16> %tmp6 -} - -define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { -;CHECK: vzipQi32: -;CHECK: vzip.32 - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i32>* %B - %tmp3 = call %struct.__builtin_neon_v4si2 @llvm.arm.neon.vzip.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4si2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4si2 %tmp3, 1 - %tmp6 = add <4 x i32> %tmp4, %tmp5 - ret <4 x i32> %tmp6 -} - -define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind { -;CHECK: vzipQf: -;CHECK: vzip.32 - %tmp1 = load <4 x float>* %A - %tmp2 = load <4 x float>* %B - %tmp3 = call %struct.__builtin_neon_v4sf2 @llvm.arm.neon.vzip.v4f32(<4 x float> %tmp1, <4 x float> %tmp2) - %tmp4 = extractvalue %struct.__builtin_neon_v4sf2 %tmp3, 0 - %tmp5 = extractvalue %struct.__builtin_neon_v4sf2 %tmp3, 1 - %tmp6 = add <4 x float> %tmp4, %tmp5 - ret <4 x float> %tmp6 -} - -declare %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vzip.v8i8(<8 x i8>, <8 x i8>) nounwind readnone -declare %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vzip.v4i16(<4 x i16>, <4 x i16>) nounwind readnone -declare %struct.__builtin_neon_v2si2 @llvm.arm.neon.vzip.v2i32(<2 x i32>, <2 x i32>) nounwind readnone -declare %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vzip.v2f32(<2 x float>, <2 x float>) nounwind readnone - -declare %struct.__builtin_neon_v16qi2 @llvm.arm.neon.vzip.v16i8(<16 x i8>, <16 x i8>) nounwind readnone -declare %struct.__builtin_neon_v8hi2 @llvm.arm.neon.vzip.v8i16(<8 x i16>, <8 x i16>) nounwind readnone -declare %struct.__builtin_neon_v4si2 @llvm.arm.neon.vzip.v4i32(<4 x i32>, <4 x i32>) nounwind readnone -declare %struct.__builtin_neon_v4sf2 @llvm.arm.neon.vzip.v4f32(<4 x float>, <4 x float>) nounwind readnone -- 2.34.1
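
Note (not part of the patch above): the commit message says these operations will instead be represented as vector shuffles. As a sketch of what that representation can look like, the LLVM IR below mirrors the shape of the deleted vtrni8/vuzpi8/vzipi8 tests but uses shufflevector in place of the removed intrinsics. The shuffle masks encode the VTRN/VUZP/VZIP lane orderings for <8 x i8> as I read them from the ARM NEON reference; they are illustrative only and are not the implementation this commit's follow-up work actually landed. The syntax matches the 2009-era IR used in the deleted tests (typed pointer loads).

; Sketch: vtrn.8 as two shufflevectors. VTRN transposes adjacent 2x2 element
; matrices, so the first result pairs even lanes of %tmp1 with even lanes of
; %tmp2, and the second result pairs the corresponding odd lanes.
define <8 x i8> @vtrni8_shuffle(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
  %tmp5 = add <8 x i8> %tmp3, %tmp4
  ret <8 x i8> %tmp5
}

; Sketch: vuzp.8 as two shufflevectors. VUZP de-interleaves: the first result
; takes the even-indexed elements of the concatenated operands, the second
; result takes the odd-indexed elements.
define <8 x i8> @vuzpi8_shuffle(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  %tmp5 = add <8 x i8> %tmp3, %tmp4
  ret <8 x i8> %tmp5
}

; Sketch: vzip.8 as two shufflevectors. VZIP interleaves: the first result zips
; the low halves of the two operands, the second result zips the high halves.
define <8 x i8> @vzipi8_shuffle(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  %tmp5 = add <8 x i8> %tmp3, %tmp4
  ret <8 x i8> %tmp5
}

With a representation like this, instruction selection can pattern-match the shuffle masks to emit vtrn.8, vuzp.8, and vzip.8 directly, which is why the intrinsics and their ISel handling can be removed here; the deleted tests are expected to return in shuffle form once that matching is in place.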