def int_aarch64_neon_fcvtzu :
Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+// Scalar Floating-point Reciprocal Estimate
+def int_aarch64_neon_vrecpe :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
// Scalar Floating-point Reciprocal Exponent
-def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic;
+def int_aarch64_neon_vrecpx :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+// Scalar Floating-point Reciprocal Square Root Estimate
+def int_aarch64_neon_vrsqrte :
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
class Neon_Cmp_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
Instruction INSTS,
Instruction INSTD> {
- def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
+ def : Pat<(f32 (opnode (f32 FPR32:$Rn))),
(INSTS FPR32:$Rn)>;
- def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
+ def : Pat<(f64 (opnode (f64 FPR64:$Rn))),
(INSTD FPR64:$Rn)>;
}
+class Neon_Scalar2SameMisc_V1_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
+ : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
+ (INSTD FPR64:$Rn)>;
+
class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
: NeonI_Scalar2SameMisc<u, 0b11, opcode,
(outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
(INSTD FPR64:$Rn, imm:$Imm)>;
}
-class Neon_ScalarShiftImm_arm_D_size_patterns<SDPatternOperator opnode,
- Instruction INSTD>
+class Neon_ScalarShiftImm_V1_D_size_patterns<SDPatternOperator opnode,
+ Instruction INSTD>
: Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
(v1i64 (Neon_vdup (i32 shr_imm64:$Imm))))),
(INSTD FPR64:$Rn, imm:$Imm)>;
defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns<sra, SSHRddi>;
+def : Neon_ScalarShiftImm_V1_D_size_patterns<sra, SSHRddi>;
// Scalar Unsigned Shift Right (Immediate)
defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
defm : Neon_ScalarShiftRImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns<srl, USHRddi>;
+def : Neon_ScalarShiftImm_V1_D_size_patterns<srl, USHRddi>;
// Scalar Signed Rounding Shift Right (Immediate)
defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
defm : Neon_ScalarShiftLImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
// Pattern to match llvm.arm.* intrinsic.
-def : Neon_ScalarShiftImm_arm_D_size_patterns<shl, SHLddi>;
+def : Neon_ScalarShiftImm_V1_D_size_patterns<shl, SHLddi>;
// Signed Saturating Shift Left (Immediate)
defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
// Scalar Floating-point Reciprocal Estimate
defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrecpe,
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpe,
FRECPEss, FRECPEdd>;
+def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrecpe,
+ FRECPEdd>;
// Scalar Floating-point Reciprocal Exponent
defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
// Scalar Floating-point Reciprocal Square Root Estimate
defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
-defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrsqrte,
- FRSQRTEss, FRSQRTEdd>;
+defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrsqrte,
+ FRSQRTEss, FRSQRTEdd>;
+def : Neon_Scalar2SameMisc_V1_D_size_patterns<int_arm_neon_vrsqrte,
+ FRSQRTEdd>;
// Scalar Floating-point Round
class Neon_ScalarFloatRound_pattern<SDPatternOperator opnode, Instruction INST>
; CHECK: test_vrecpes_f32
; CHECK: frecpe {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vrecpe.i = insertelement <1 x float> undef, float %a, i32 0
- %vrecpe1.i = tail call <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float> %vrecpe.i)
- %0 = extractelement <1 x float> %vrecpe1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vrecpe.f32(float %a)
ret float %0
}
; CHECK: test_vrecped_f64
; CHECK: frecpe {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %vrecpe.i = insertelement <1 x double> undef, double %a, i32 0
- %vrecpe1.i = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %vrecpe.i)
- %0 = extractelement <1 x double> %vrecpe1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vrecpe.f64(double %a)
ret double %0
}
-declare <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrecpe.f32(float)
+declare double @llvm.aarch64.neon.vrecpe.f64(double)
define float @test_vrecpxs_f32(float %a) {
; CHECK: test_vrecpxs_f32
; CHECK: frecpx {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vrecpx.i = insertelement <1 x float> undef, float %a, i32 0
- %vrecpx1.i = tail call <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float> %vrecpx.i)
- %0 = extractelement <1 x float> %vrecpx1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vrecpx.f32(float %a)
ret float %0
}
; CHECK: test_vrecpxd_f64
; CHECK: frecpx {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %vrecpx.i = insertelement <1 x double> undef, double %a, i32 0
- %vrecpx1.i = tail call <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double> %vrecpx.i)
- %0 = extractelement <1 x double> %vrecpx1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vrecpx.f64(double %a)
ret double %0
}
-declare <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float>)
-declare <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrecpx.f32(float)
+declare double @llvm.aarch64.neon.vrecpx.f64(double)
define float @test_vrsqrtes_f32(float %a) {
; CHECK: test_vrsqrtes_f32
; CHECK: frsqrte {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vrsqrte.i = insertelement <1 x float> undef, float %a, i32 0
- %vrsqrte1.i = tail call <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float> %vrsqrte.i)
- %0 = extractelement <1 x float> %vrsqrte1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vrsqrte.f32(float %a)
ret float %0
}
; CHECK: test_vrsqrted_f64
; CHECK: frsqrte {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %vrsqrte.i = insertelement <1 x double> undef, double %a, i32 0
- %vrsqrte1.i = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %vrsqrte.i)
- %0 = extractelement <1 x double> %vrsqrte1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vrsqrte.f64(double %a)
ret double %0
}
-declare <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrsqrte.f32(float)
+declare double @llvm.aarch64.neon.vrsqrte.f64(double)