From 177cf1e1a3685209ab805f82897902a8d2b61661 Mon Sep 17 00:00:00 2001
From: Elena Demikhovsky
Date: Thu, 31 May 2012 09:20:20 +0000
Subject: [PATCH] Added FMA3 Intel instructions.

I disabled FMA3 autodetection, since the result may differ from the
expected one for some benchmarks.
I added tests for CodeGen and intrinsics.
I did not change llvm.fma.f32/64 - it may be done later.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157737 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../X86/MCTargetDesc/X86MCCodeEmitter.cpp |   8 +-
 lib/Target/X86/X86CodeEmitter.cpp         |   3 +
 lib/Target/X86/X86InstrFMA.td             | 363 ++++++++++++++++--
 lib/Target/X86/X86InstrInfo.cpp           |  70 ++++
 lib/Target/X86/X86InstrInfo.h             |   4 +-
 lib/Target/X86/X86Subtarget.cpp           |  10 +-
 test/CodeGen/X86/fma3-intrinsics.ll       | 132 +++++++
 test/CodeGen/X86/fma3.ll                  |  66 ++++
 8 files changed, 607 insertions(+), 49 deletions(-)
 create mode 100755 test/CodeGen/X86/fma3-intrinsics.ll
 create mode 100755 test/CodeGen/X86/fma3.ll

diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index f79073ff588..12f1961ed80 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -570,7 +570,11 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
   }
 
   // Classify VEX_B, VEX_4V, VEX_R, VEX_X
+  unsigned NumOps = Desc.getNumOperands();
   unsigned CurOp = 0;
+  if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1)
+    ++CurOp;
+
   switch (TSFlags & X86II::FormMask) {
   case X86II::MRMInitReg: llvm_unreachable("FIXME: Remove this!");
   case X86II::MRMDestMem: {
@@ -603,11 +607,11 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
     // FMA4:
     // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
     // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
-    if (X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
+    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp++).getReg()))
       VEX_R = 0x0;
 
     if (HasVEX_4V)
-      VEX_4V = getVEXRegisterEncoding(MI, 1);
+      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
 
     if (X86II::isX86_64ExtendedReg(
                MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))

diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp
index d84329f33a3..3079dfa7cf4 100644
--- a/lib/Target/X86/X86CodeEmitter.cpp
+++ b/lib/Target/X86/X86CodeEmitter.cpp
@@ -933,7 +933,10 @@ void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
   }
 
   // Classify VEX_B, VEX_4V, VEX_R, VEX_X
+  unsigned NumOps = Desc->getNumOperands();
   unsigned CurOp = 0;
+  if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) != -1)
+    ++CurOp;
   switch (TSFlags & X86II::FormMask) {
   case X86II::MRMInitReg:
     // Duplicate register.
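Both emitter changes above apply the same rule: when an instruction's first source operand is tied to its destination, as the FMA3 "$src1 = $dst" constraint arranges, the duplicated MachineInstr operand must be skipped before VEX_R and VEX_4V are classified, or src2 would land in the wrong encoding field. Below is a minimal standalone sketch of that operand-cursor logic, assuming a simplified operand model; InstrModel and firstEncodedOperand are illustrative names, not the real MCInstrDesc API.

#include <cassert>
#include <cstdio>
#include <vector>

// Simplified stand-in for an instruction's operand list. In LLVM the tie
// is queried via Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1.
struct Operand { unsigned Reg; };
struct InstrModel {
  std::vector<Operand> Ops;
  bool Src1TiedToDst; // models the "$src1 = $dst" constraint
};

// Returns the operand index the VEX classifier should start from:
// 0 normally, 1 when the destination duplicates src1 (FMA3).
static unsigned firstEncodedOperand(const InstrModel &MI) {
  unsigned CurOp = 0;
  if (MI.Ops.size() > 1 && MI.Src1TiedToDst)
    ++CurOp; // skip the tied copy of the destination
  return CurOp;
}

int main() {
  // vfmadd231ps xmm1, xmm2, xmm3 -> operands: dst, src1(==dst), src2, src3
  InstrModel MI = {{{1}, {1}, {2}, {3}}, true};
  unsigned CurOp = firstEncodedOperand(MI);
  assert(CurOp == 1 && "dst/src1 pair collapses to one encoded register");
  unsigned VexR  = MI.Ops[CurOp++].Reg; // encoded in ModR/M.reg (VEX_R)
  unsigned Vex4V = MI.Ops[CurOp].Reg;   // encoded in VEX.vvvv (VEX_4V)
  std::printf("VEX_R reg = %u, VEX_4V reg = %u\n", VexR, Vex4V);
  return 0;
}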
diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td
index d57937b2e1b..273666985ca 100644
--- a/lib/Target/X86/X86InstrFMA.td
+++ b/lib/Target/X86/X86InstrFMA.td
@@ -15,83 +15,358 @@
 // FMA3 - Intel 3 operand Fused Multiply-Add instructions
 //===----------------------------------------------------------------------===//
 
+let Constraints = "$src1 = $dst" in {
 multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
   def r : FMA3;
+  let mayLoad = 1 in
   def m : FMA3;
   def rY : FMA3;
+  let mayLoad = 1 in
   def mY : FMA3;
 }
 
+// Intrinsic for 132 pattern
+multiclass fma3p_rm_int<bits<8> opc, string OpcodeStr,
+                        PatFrag MemFrag128, PatFrag MemFrag256,
+                        Intrinsic Int128, Intrinsic Int256> {
+  def r_Int : FMA3;
+  //let mayLoad = 1 in
+  def m_Int : FMA3;
+  def rY_Int : FMA3;
+  //let mayLoad = 1 in
+  def mY_Int : FMA3;
+}
+}
+
 multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
-                       string OpcodeStr, string PackTy> {
-  defm r132 : fma3p_rm;
-  defm r213 : fma3p_rm;
-  defm r231 : fma3p_rm;
+                       string OpcodeStr, string PackTy,
+                       PatFrag MemFrag128, PatFrag MemFrag256,
+                       Intrinsic Int128, Intrinsic Int256> {
+  defm r132 : fma3p_rm_int;
+  defm r132 : fma3p_rm;
+  defm r213 : fma3p_rm;
+  defm r231 : fma3p_rm;
 }
 
 // Fused Multiply-Add
 let ExeDomain = SSEPackedSingle in {
-  defm VFMADDPS    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
-  defm VFMSUBPS    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">;
-  defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">;
-  defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">;
+  defm VFMADDPS    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32, memopv8f32,
+                                 int_x86_fma4_vfmadd_ps, int_x86_fma4_vfmadd_ps_256>;
+  defm VFMSUBPS    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32, memopv8f32,
+                                 int_x86_fma4_vfmsub_ps, int_x86_fma4_vfmsub_ps_256>;
+  defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", memopv4f32, memopv8f32,
+                                 int_x86_fma4_vfmaddsub_ps, int_x86_fma4_vfmaddsub_ps_256>;
+  defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", memopv4f32, memopv8f32,
+                                 int_x86_fma4_vfmsubadd_ps, int_x86_fma4_vfmsubadd_ps_256>;
 }
 let ExeDomain = SSEPackedDouble in {
-  defm VFMADDPD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W;
-  defm VFMSUBPD    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W;
-  defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W;
-  defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W;
+  defm VFMADDPD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64, memopv4f64,
+                                 int_x86_fma4_vfmadd_pd, int_x86_fma4_vfmadd_pd_256>, VEX_W;
+  defm VFMSUBPD    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64, memopv4f64,
+                                 int_x86_fma4_vfmsub_pd, int_x86_fma4_vfmsub_pd_256>, VEX_W;
+  defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", memopv2f64, memopv4f64,
+                                 int_x86_fma4_vfmaddsub_pd, int_x86_fma4_vfmaddsub_pd_256>, VEX_W;
+  defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", memopv2f64, memopv4f64,
+                                 int_x86_fma4_vfmsubadd_pd, int_x86_fma4_vfmsubadd_pd_256>, VEX_W;
 }
 
 // Fused Negative Multiply-Add
 let ExeDomain = SSEPackedSingle in {
-  defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">;
-  defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">;
+  defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32, memopv8f32,
+                               int_x86_fma4_vfnmadd_ps, int_x86_fma4_vfnmadd_ps_256>;
+  defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32, memopv8f32,
+                               int_x86_fma4_vfnmsub_ps, int_x86_fma4_vfnmsub_ps_256>;
 }
 let ExeDomain = SSEPackedDouble in {
-  defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W;
-  defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W;
+  defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64, memopv4f64,
+                               int_x86_fma4_vfnmadd_pd, int_x86_fma4_vfnmadd_pd_256>, VEX_W;
+  defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", memopv2f64, memopv4f64,
+                               int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
 }
 
-multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop> {
-  def r : FMA3;
+let Predicates = [HasFMA3] in {
+
+//------------
+// FP double precision ADD - 256
+//------------
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
+          (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+//------------
+// FP double precision ADD - 128
+//------------
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
+          (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
+          (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
+
+//------------
+// FP double precision SUB - 256
+//------------
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
+          (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
+          (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+//------------
+// FP double precision SUB - 128
+//------------
+
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
+          (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
+          (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
+
+//------------
+// FP double precision FNMADD - 256
+//------------
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
+          (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
+
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
+          (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+//------------
+// FP double precision FNMADD - 128
+//------------
+
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
+          (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
+
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
+          (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
+
+//------------
+// FP single precision ADD - 256
+//------------
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
+          (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+// FMA213: src1 = src2*src1 + src3
+def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
+          (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v8f32 (fadd (fmul (memopv8f32 addr:$src3), VR256:$src2), VR256:$src1)),
+          (VFMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
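The FMA231/FMA213 comments above follow Intel's form numbering: in vfmaddNNNps the three digits name which of operands 1 (always the destination), 2, and 3 serve, in order, as multiplicand, multiplier, and addend. A scalar restatement of the three forms (illustrative only; the real instructions overwrite operand 1 in place):

#include <cstdio>

// FMA3 form semantics; op1 is always the destination register.
float fma132(float op1, float op2, float op3) { return op1 * op3 + op2; }
float fma213(float op1, float op2, float op3) { return op2 * op1 + op3; }
float fma231(float op1, float op2, float op3) { return op2 * op3 + op1; }

int main() {
  float a = 2.0f, b = 3.0f, c = 4.0f;
  // The 231 form matches the accumulator idiom selected by the
  // fadd(fmul(src2, src3), src1) patterns in this file.
  std::printf("132: %g  213: %g  231: %g\n",
              fma132(a, b, c), fma213(a, b, c), fma231(a, b, c));
  return 0;
}

The memory operand is always operand 3, so the three forms exist to let a loaded value play any role: it is a multiplicand in the 132 and 231 forms and the addend in the 213 form, which is why the patterns pick different forms depending on where the memop appears.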
+// FMA213: src1 = src2*src1 + src3
+def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
+          (VFMADDPSr213rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+//------------
+// FP single precision ADD - 128
+//------------
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
+          (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 + src1
+def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
+          (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
+
+//------------
+// FP single precision SUB - 256
+//------------
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v8f32 (fsub (fmul VR256:$src2, (memopv8f32 addr:$src3)), VR256:$src1)),
+          (VFMSUBPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
+          (VFMSUBPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+//------------
+// FP single precision SUB - 128
+//------------
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
+          (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
+
+// FMA231: src1 = src2*src3 - src1
+def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
+          (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
+
+//------------
+// FP single precision FNMADD - 256
+//------------
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, (memopv8f32 addr:$src3)))),
+          (VFNMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
+
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
+          (VFNMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
+
+//------------
+// FP single precision FNMADD - 128
+//------------
+
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
+          (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
+
+// FMA231: src1 = - src2*src3 + src1
+def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
+          (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
+
+} // HasFMA3
+
+//------------------------------
+// SCALAR
+//------------------------------
+
+let Constraints = "$src1 = $dst" in {
+multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
+                    RegisterClass RC> {
+  def r : FMA3;
-  def m : FMA3;
+}
+multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
+                        RegisterClass RC, Intrinsic IntId> {
+  def r_Int : FMA3;
+  def m_Int : FMA3;
+}
+}
+
 multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
-                       string OpcodeStr> {
-  defm SSr132 : fma3s_rm;
-  defm SSr213 : fma3s_rm;
-  defm SSr231 : fma3s_rm;
-  defm SDr132 : fma3s_rm, VEX_W;
-  defm SDr213 : fma3s_rm, VEX_W;
-  defm SDr231 : fma3s_rm, VEX_W;
+                       string OpcodeStr, string PackTy, X86MemOperand MemOp,
+                       RegisterClass RC, Intrinsic IntId> {
+  defm r132     : fma3s_rm;
+  defm r213     : fma3s_rm;
+  defm r231     : fma3s_rm;
+  defm r132_Int : fma3s_rm_int;
 }
 
-defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd">, VEX_LIG;
-defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub">, VEX_LIG;
+defm VFMADDSS : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "ss", f32mem, FR32,
+                            int_x86_fma4_vfmadd_ss>, VEX_LIG;
+defm VFMADDSD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "sd", f64mem, FR64,
+                            int_x86_fma4_vfmadd_sd>, VEX_W, VEX_LIG;
+defm VFMSUBSS : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "ss", f32mem, FR32,
+                            int_x86_fma4_vfmsub_ss>, VEX_LIG;
+defm VFMSUBSD : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "sd", f64mem, FR64,
+                            int_x86_fma4_vfmsub_sd>, VEX_W, VEX_LIG;
+
+defm VFNMADDSS : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "ss", f32mem, FR32,
+                             int_x86_fma4_vfnmadd_ss>, VEX_LIG;
+defm VFNMADDSD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "sd", f64mem, FR64,
+                             int_x86_fma4_vfnmadd_sd>, VEX_W, VEX_LIG;
+defm VFNMSUBSS : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "ss", f32mem, FR32,
+                             int_x86_fma4_vfnmsub_ss>, VEX_LIG;
+defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd", f64mem, FR64,
+                             int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
+
+let Predicates = [HasFMA3], AddedComplexity = 20 in {
+
+//------------
+// FP scalar ADD
+//------------
+
+// FMADD231: src1 = src2*src3 + src1
+def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
+          (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
+
+def : Pat<(f32 (fadd (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
+          (VFMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
+
-defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd">, VEX_LIG;
+def : Pat<(f64 (fadd (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
+          (VFMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
+
+def : Pat<(f64 (fadd (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
+          (VFMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
+
+//------------
+// FP scalar SUB src2*src3 - src1
+//------------
+
+def : Pat<(f32 (fsub (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
+          (VFMSUBSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
+
+def : Pat<(f32 (fsub (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
+          (VFMSUBSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
+
+def : Pat<(f64 (fsub (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
+          (VFMSUBSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
+
+def : Pat<(f64 (fsub (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
+          (VFMSUBSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
+
+//------------
+// FP scalar NADD src1 - src2*src3
+//------------
+
+def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, FR32:$src3))),
+          (VFNMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
+
+def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, (loadf32 addr:$src3)))),
+          (VFNMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
+
+def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, FR64:$src3))),
+          (VFNMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
+
+def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, (loadf64 addr:$src3)))),
+          (VFNMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
+
+} // HasFMA3
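The scalar and packed patterns above cover three of the four FMA sign conventions; fnmsub is reachable here only through the intrinsic forms. As a reference, the conventions the pattern comments encode, written out in plain C++ for the 231 operand order (function names are descriptive, not LLVM APIs):

#include <cassert>

// Sign conventions from the X86InstrFMA.td pattern comments, with
// acc playing the role of src1 (the tied destination).
double fmadd231 (double acc, double a, double b) { return  (a * b) + acc; }
double fmsub231 (double acc, double a, double b) { return  (a * b) - acc; }
double fnmadd231(double acc, double a, double b) { return -(a * b) + acc; }
double fnmsub231(double acc, double a, double b) { return -(a * b) - acc; }

int main() {
  // fnmadd is selected from (fsub src1, (fmul src2, src3)),
  // fmsub from (fsub (fmul src2, src3), src1).
  assert(fnmadd231(10.0, 2.0, 3.0) == 10.0 - 2.0 * 3.0);
  assert(fmsub231(10.0, 2.0, 3.0) == 2.0 * 3.0 - 10.0);
  return 0;
}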
 
 //===----------------------------------------------------------------------===//
 // FMA4 - AMD 4 operand Fused Multiply-Add instructions
 //===----------------------------------------------------------------------===//
@@ -178,6 +453,8 @@ let isCodeGenOnly = 1 in {
 } // isCodeGenOnly = 1
 
+let Predicates = [HasFMA4] in {
+
 defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem, sse_load_f32,
                        int_x86_fma4_vfmadd_ss>;
 defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem, sse_load_f64,
@@ -218,3 +495,5 @@ defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma4_vfmsubadd_ps,
                           int_x86_fma4_vfmsubadd_ps_256, memopv4f32, memopv8f32>;
 defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma4_vfmsubadd_pd,
                           int_x86_fma4_vfmsubadd_pd_256, memopv2f64, memopv4f64>;
+} // HasFMA4
+

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index d8fd9064f3d..7254ddf56cb 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -58,6 +58,7 @@ enum {
   TB_INDEX_0    = 0,
   TB_INDEX_1    = 1,
   TB_INDEX_2    = 2,
+  TB_INDEX_3    = 3,
   TB_INDEX_MASK = 0xff,
 
   // Minimum alignment required for load/store.
@@ -1122,6 +1123,75 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
                   // Index 2, folded load
                   Flags | TB_INDEX_2 | TB_FOLDED_LOAD);
   }
+
+  static const X86OpTblEntry OpTbl3[] = {
+    // FMA foldable instructions
+    { X86::VFMADDSSr231r,   X86::VFMADDSSr231m,   0 },
+    { X86::VFMADDSDr231r,   X86::VFMADDSDr231m,   0 },
+    { X86::VFMADDSSr132r,   X86::VFMADDSSr132m,   0 },
+    { X86::VFMADDSDr132r,   X86::VFMADDSDr132m,   0 },
+
+    { X86::VFMADDPSr231r,   X86::VFMADDPSr231m,   TB_ALIGN_16 },
+    { X86::VFMADDPDr231r,   X86::VFMADDPDr231m,   TB_ALIGN_16 },
+    { X86::VFMADDPSr132r,   X86::VFMADDPSr132m,   TB_ALIGN_16 },
+    { X86::VFMADDPDr132r,   X86::VFMADDPDr132m,   TB_ALIGN_16 },
+    { X86::VFMADDPSr213r,   X86::VFMADDPSr213m,   TB_ALIGN_16 },
+    { X86::VFMADDPDr213r,   X86::VFMADDPDr213m,   TB_ALIGN_16 },
+    { X86::VFMADDPSr231rY,  X86::VFMADDPSr231mY,  TB_ALIGN_32 },
+    { X86::VFMADDPDr231rY,  X86::VFMADDPDr231mY,  TB_ALIGN_32 },
+    { X86::VFMADDPSr132rY,  X86::VFMADDPSr132mY,  TB_ALIGN_32 },
+    { X86::VFMADDPDr132rY,  X86::VFMADDPDr132mY,  TB_ALIGN_32 },
+    { X86::VFMADDPSr213rY,  X86::VFMADDPSr213mY,  TB_ALIGN_32 },
+    { X86::VFMADDPDr213rY,  X86::VFMADDPDr213mY,  TB_ALIGN_32 },
+
+    { X86::VFNMADDSSr231r,  X86::VFNMADDSSr231m,  0 },
+    { X86::VFNMADDSDr231r,  X86::VFNMADDSDr231m,  0 },
+    { X86::VFNMADDSSr132r,  X86::VFNMADDSSr132m,  0 },
+    { X86::VFNMADDSDr132r,  X86::VFNMADDSDr132m,  0 },
+
+    { X86::VFNMADDPSr231r,  X86::VFNMADDPSr231m,  TB_ALIGN_16 },
+    { X86::VFNMADDPDr231r,  X86::VFNMADDPDr231m,  TB_ALIGN_16 },
+    { X86::VFNMADDPSr132r,  X86::VFNMADDPSr132m,  TB_ALIGN_16 },
+    { X86::VFNMADDPDr132r,  X86::VFNMADDPDr132m,  TB_ALIGN_16 },
+    { X86::VFNMADDPSr213r,  X86::VFNMADDPSr213m,  TB_ALIGN_16 },
+    { X86::VFNMADDPDr213r,  X86::VFNMADDPDr213m,  TB_ALIGN_16 },
+    { X86::VFNMADDPSr231rY, X86::VFNMADDPSr231mY, TB_ALIGN_32 },
+    { X86::VFNMADDPDr231rY, X86::VFNMADDPDr231mY, TB_ALIGN_32 },
+    { X86::VFNMADDPSr132rY, X86::VFNMADDPSr132mY, TB_ALIGN_32 },
+    { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_32 },
+    { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_32 },
+    { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_32 },
+
+    { X86::VFMSUBSSr231r,   X86::VFMSUBSSr231m,   0 },
+    { X86::VFMSUBSDr231r,   X86::VFMSUBSDr231m,   0 },
+    { X86::VFMSUBSSr132r,   X86::VFMSUBSSr132m,   0 },
+    { X86::VFMSUBSDr132r,   X86::VFMSUBSDr132m,   0 },
+
+    { X86::VFMSUBPSr231r,   X86::VFMSUBPSr231m,   TB_ALIGN_16 },
+    { X86::VFMSUBPDr231r,   X86::VFMSUBPDr231m,   TB_ALIGN_16 },
+    { X86::VFMSUBPSr132r,   X86::VFMSUBPSr132m,   TB_ALIGN_16 },
+    { X86::VFMSUBPDr132r,   X86::VFMSUBPDr132m,   TB_ALIGN_16 },
+    { X86::VFMSUBPSr213r,   X86::VFMSUBPSr213m,   TB_ALIGN_16 },
+    { X86::VFMSUBPDr213r,   X86::VFMSUBPDr213m,   TB_ALIGN_16 },
+    { X86::VFMSUBPSr231rY,  X86::VFMSUBPSr231mY,  TB_ALIGN_32 },
+    { X86::VFMSUBPDr231rY,  X86::VFMSUBPDr231mY,  TB_ALIGN_32 },
+    { X86::VFMSUBPSr132rY,  X86::VFMSUBPSr132mY,  TB_ALIGN_32 },
+    { X86::VFMSUBPDr132rY,  X86::VFMSUBPDr132mY,  TB_ALIGN_32 },
+    { X86::VFMSUBPSr213rY,  X86::VFMSUBPSr213mY,  TB_ALIGN_32 },
+    { X86::VFMSUBPDr213rY,  X86::VFMSUBPDr213mY,  TB_ALIGN_32 },
+  };
+
+  for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) {
+    unsigned RegOp = OpTbl3[i].RegOp;
+    unsigned MemOp = OpTbl3[i].MemOp;
+    unsigned Flags = OpTbl3[i].Flags;
+    AddTableEntry(RegOp2MemOpTable3, MemOp2RegOpTable,
+                  RegOp, MemOp,
+                  // Index 3, folded load
+                  Flags | TB_INDEX_3 | TB_FOLDED_LOAD);
+  }
+
 }
 
 void
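OpTbl3 above extends the existing load-folding machinery to three-source instructions: each entry pairs a register-form opcode with its memory-form twin plus the alignment the folded operand must have (scalar forms need none, 128-bit packed forms 16 bytes, 256-bit forms 32). A sketch of the lookup this table enables, with a plain std::unordered_map standing in for LLVM's DenseMap, and illustrative flag values and opcode numbers rather than the real X86InstrInfo internals:

#include <cstdint>
#include <cstdio>
#include <unordered_map>

// Illustrative stand-ins for the table flags in X86InstrInfo.cpp.
enum : uint16_t {
  TB_INDEX_3     = 3,       // load folds into operand 3
  TB_FOLDED_LOAD = 1 << 5,
  TB_ALIGN_16    = 16 << 8  // required alignment carried in high bits
};

struct FoldInfo { unsigned MemOpc; uint16_t Flags; };

int main() {
  // Hypothetical opcode numbers standing in for X86::VFMADDPSr231r etc.
  enum { VFMADDPSr231r = 100, VFMADDPSr231m = 101 };

  std::unordered_map<unsigned, FoldInfo> RegOp2MemOpTable3;
  RegOp2MemOpTable3[VFMADDPSr231r] =
      {VFMADDPSr231m, TB_INDEX_3 | TB_FOLDED_LOAD | TB_ALIGN_16};

  // If the third source is produced by a suitably aligned load, the
  // register form can be rewritten to the memory form and the load erased.
  auto It = RegOp2MemOpTable3.find(VFMADDPSr231r);
  if (It != RegOp2MemOpTable3.end())
    std::printf("fold -> opcode %u, flags 0x%x\n",
                It->second.MemOpc, (unsigned)It->second.Flags);
  return 0;
}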
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index b23d7560ec1..856f3be57ce 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -128,7 +128,8 @@ class X86InstrInfo : public X86GenInstrInfo {
   X86TargetMachine &TM;
   const X86RegisterInfo RI;
 
-  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
+  /// RegOp2MemOpTable3Addr, RegOp2MemOpTable2Addr,
+  /// RegOp2MemOpTable0, RegOp2MemOpTable1,
   /// RegOp2MemOpTable2 - Load / store folding opcode maps.
   ///
   typedef DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTableTy;
+  RegOp2MemOpTableTy RegOp2MemOpTable3;

diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
-    if ((ECX >> 12) & 0x1) {
-      HasFMA3 = true;
-      ToggleFeature(X86::FeatureFMA3);
-    }
+    // FMA3 autodetection is switched off until we have a special flag
+    // in the code generator.
+    //if ((ECX >> 12) & 0x1) {
+    //  HasFMA3 = true;
+    //  ToggleFeature(X86::FeatureFMA3);
+    //}
     if (IsIntel && ((ECX >> 22) & 0x1)) {
       HasMOVBE = true;
       ToggleFeature(X86::FeatureMOVBE);
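For reference, the bit the disabled block reads is CPUID leaf 1, ECX bit 12, which reports FMA3 support. A standalone detection sketch using the GCC/Clang <cpuid.h> helper, performing the same ((ECX >> 12) & 0x1) test the patch comments out, just from user code rather than through LLVM's subtarget plumbing:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    std::puts("CPUID leaf 1 unsupported");
    return 1;
  }
  // Leaf 1, ECX bit 12 is the FMA (FMA3) feature flag.
  bool hasFMA3 = (ecx >> 12) & 0x1;
  std::printf("FMA3: %s\n", hasFMA3 ? "yes" : "no");
  return 0;
}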
<4 x float> %res +} +declare <4 x float> @llvm.x86.fma4.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone + +define <4 x float> @test_x86_fmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { + ; CHECK: fmsub132ps + %res = call <4 x float> @llvm.x86.fma4.vfmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) nounwind + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.fma4.vfmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone + +define <4 x float> @test_x86_fnmsub_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { + ; CHECK: fnmsub132ss + %res = call <4 x float> @llvm.x86.fma4.vfnmsub.ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) nounwind + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.fma4.vfnmsub.ss(<4 x float>, <4 x float>, <4 x float>) nounwind readnone + +define <4 x float> @test_x86_fnmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { + ; CHECK: fnmsub132ps + %res = call <4 x float> @llvm.x86.fma4.vfnmsub.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) nounwind + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.fma4.vfnmsub.ps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone + +;;;; + +define <2 x double> @test_x86_fmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fmadd132sd + %res = call <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.fma4.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_x86_fmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fmadd132pd + %res = call <2 x double> @llvm.x86.fma4.vfmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.fma4.vfmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_x86_fnmadd_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fnmadd132sd + %res = call <2 x double> @llvm.x86.fma4.vfnmadd.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.fma4.vfnmadd.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_x86_fnmadd_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fnmadd132pd + %res = call <2 x double> @llvm.x86.fma4.vfnmadd.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.fma4.vfnmadd.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone + + + +define <2 x double> @test_x86_fmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fmsub132sd + %res = call <2 x double> @llvm.x86.fma4.vfmsub.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.fma4.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_x86_fmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fmsub132pd + %res = call <2 x double> @llvm.x86.fma4.vfmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.fma4.vfmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone + +define <2 x double> @test_x86_fnmsub_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { + ; CHECK: fnmsub132sd 
+  %res = call <2 x double> @llvm.x86.fma4.vfnmsub.sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind
+  ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.fma4.vfnmsub.sd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone
+
+define <2 x double> @test_x86_fnmsub_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+  ; CHECK: fnmsub132pd
+  %res = call <2 x double> @llvm.x86.fma4.vfnmsub.pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) nounwind
+  ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.fma4.vfnmsub.pd(<2 x double>, <2 x double>, <2 x double>) nounwind readnone

diff --git a/test/CodeGen/X86/fma3.ll b/test/CodeGen/X86/fma3.ll
new file mode 100755
index 00000000000..34f0ad44508
--- /dev/null
+++ b/test/CodeGen/X86/fma3.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=core-avx2 -mattr=avx2,+fma3 | FileCheck %s
+
+define <4 x float> @test_x86_fmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+  ; CHECK: fmadd231ps {{.*\(%r.*}}, %xmm
+  %x = fmul <4 x float> %a0, %a1
+  %res = fadd <4 x float> %x, %a2
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_x86_fmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+  ; CHECK: fmsub231ps {{.*\(%r.*}}, %xmm
+  %x = fmul <4 x float> %a0, %a1
+  %res = fsub <4 x float> %x, %a2
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_x86_fnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+  ; CHECK: fnmadd231ps {{.*\(%r.*}}, %xmm
+  %x = fmul <4 x float> %a0, %a1
+  %res = fsub <4 x float> %a2, %x
+  ret <4 x float> %res
+}
+
+define <8 x float> @test_x86_fmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+  ; CHECK: vfmadd213ps {{.*\(%r.*}}, %ymm
+  %x = fmul <8 x float> %a0, %a1
+  %res = fadd <8 x float> %x, %a2
+  ret <8 x float> %res
+}
+
+define <4 x double> @test_x86_fmadd_pd_y(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+  ; CHECK: vfmadd231pd {{.*\(%r.*}}, %ymm
+  %x = fmul <4 x double> %a0, %a1
+  %res = fadd <4 x double> %x, %a2
+  ret <4 x double> %res
+}
+
+define <8 x float> @test_x86_fmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+  ; CHECK: fmsub231ps {{.*\(%r.*}}, %ymm
+  %x = fmul <8 x float> %a0, %a1
+  %res = fsub <8 x float> %x, %a2
+  ret <8 x float> %res
+}
+
+define <8 x float> @test_x86_fnmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+  ; CHECK: fnmadd231ps {{.*\(%r.*}}, %ymm
+  %x = fmul <8 x float> %a0, %a1
+  %res = fsub <8 x float> %a2, %x
+  ret <8 x float> %res
+}
+
+define float @test_x86_fnmadd_ss(float %a0, float %a1, float %a2) {
+  ; CHECK: vfnmadd231ss %xmm1, %xmm0, %xmm2
+  %x = fmul float %a0, %a1
+  %res = fsub float %a2, %x
+  ret float %res
+}
+
+define double @test_x86_fnmadd_sd(double %a0, double %a1, double %a2) {
+  ; CHECK: vfnmadd231sd %xmm1, %xmm0, %xmm2
+  %x = fmul double %a0, %a1
+  %res = fsub double %a2, %x
+  ret double %res
+}
-- 
2.34.1