// Across-vector (reduction) intrinsics: each collapses all lanes of a
// vector operand into a single scalar result.
def int_aarch64_neon_sminv : Neon_Across_Intrinsic;
def int_aarch64_neon_uminv : Neon_Across_Intrinsic;
def int_aarch64_neon_vaddv : Neon_Across_Intrinsic;
// NOTE(review): the '-'/'+' prefixes below are unapplied diff markers —
// this chunk is a patch fragment, not plain TableGen. The change replaces
// the generic Neon_Across_Intrinsic class for the four floating-point
// reductions with explicit signatures: scalar f32 result from a v4f32
// input, no memory access (IntrNoMem).
-def int_aarch64_neon_vmaxv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vminv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vmaxnmv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vminnmv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vmaxv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vminv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vmaxnmv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_aarch64_neon_vminnmv :
+ Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
// Vector Table Lookup.
def int_aarch64_neon_vtbl1 :
// Reduce all four f32 lanes of a 128-bit vector register into a single
// scalar held in a 32-bit FP register (S register), written as
// "<asmop> $Rd, $Rn.4s".
// NOTE(review): this def sits inside an enclosing multiclass whose header
// (binding u, size, opcode, asmop, opnode) is not visible in this chunk.
// NOTE(review): the '-'/'+' prefixed lines are unapplied diff markers; the
// change retypes the selection-pattern result from v1f32 to plain f32,
// matching the intrinsic signature change elsewhere in this patch.
def _1s4s: NeonI_2VAcross<0b1, u, size, opcode,
(outs FPR32:$Rd), (ins VPR128:$Rn),
asmop # "\t$Rd, $Rn.4s",
- [(set (v1f32 FPR32:$Rd),
- (v1f32 (opnode (v4f32 VPR128:$Rn))))],
+ [(set (f32 FPR32:$Rd),
+ (f32 (opnode (v4f32 VPR128:$Rn))))],
NoItinerary>;
}
; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; NOTE(review): the '-'/'+' prefixed lines below are unapplied diff markers.
; The change drops the <1 x float> wrapper so the four float across-vector
; reductions return a plain scalar float, and removes the now-unneeded
; .v1f32.v4f32 overload suffix from their names.
-declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float>)
+declare float @llvm.aarch64.neon.vminnmv(<4 x float>)
-declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float>)
+declare float @llvm.aarch64.neon.vmaxnmv(<4 x float>)
-declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float>)
+declare float @llvm.aarch64.neon.vminv(<4 x float>)
-declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float>)
+declare float @llvm.aarch64.neon.vmaxv(<4 x float>)
; The integer add-across reduction is untouched by this patch: it still
; returns <1 x i32> and keeps its overloaded name.
declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
; Checks that the f32 max-across reduction lowers to a single FMAXV.
; NOTE(review): the enclosing `define` header for this function is not
; visible in this chunk; only the entry block is shown.
; The diff replaces the <1 x float> call + extractelement pair with a
; direct scalar-float call to the intrinsic.
; CHECK: test_vmaxvq_f32:
; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
- %vmaxv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vmaxv.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vmaxv(<4 x float> %a)
ret float %0
}
; Checks that the f32 min-across reduction lowers to a single FMINV.
; NOTE(review): the enclosing `define` header for this function is not
; visible in this chunk; only the entry block is shown.
; The diff replaces the <1 x float> call + extractelement pair with a
; direct scalar-float call to the intrinsic.
; CHECK: test_vminvq_f32:
; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
- %vminv.i = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vminv.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vminv(<4 x float> %a)
ret float %0
}
; Checks that the f32 maxNum-across reduction lowers to a single FMAXNMV.
; NOTE(review): the enclosing `define` header for this function is not
; visible in this chunk; only the entry block is shown.
; The diff replaces the <1 x float> call + extractelement pair with a
; direct scalar-float call to the intrinsic.
; CHECK: test_vmaxnmvq_f32:
; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
- %vmaxnmv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vmaxnmv.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vmaxnmv(<4 x float> %a)
ret float %0
}
; Checks that the f32 minNum-across reduction lowers to a single FMINNMV.
; NOTE(review): the enclosing `define` header for this function is not
; visible in this chunk; only the entry block is shown.
; The diff replaces the <1 x float> call + extractelement pair with a
; direct scalar-float call to the intrinsic.
; CHECK: test_vminnmvq_f32:
; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
- %vminnmv.i = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float> %a)
- %0 = extractelement <1 x float> %vminnmv.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vminnmv(<4 x float> %a)
ret float %0
}