X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrSSE.td;h=54dd872e2bc3dac55781d0f508be35bf7d2eb245;hb=0488db9b99fcfca407e859ef5cccf40dea23de16;hp=327e44ccbd5c08a821759c210799f60500762bea;hpb=d300622ebacde5bffb5b5e58142323e505df9dbe;p=oota-llvm.git

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 327e44ccbd5..54dd872e2bc 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -36,6 +36,9 @@ def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest,
                         [SDNPHasChain, SDNPOutFlag]>;
 def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest,
                         [SDNPHasChain, SDNPOutFlag]>;
+def X86comi_new: SDNode<"X86ISD::COMI_NEW", SDTX86CmpTest,
+                        [SDNPHasChain]>;
+def X86ucomi_new: SDNode<"X86ISD::UCOMI_NEW",SDTX86CmpTest>;
 def X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>;
 def X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>;
 def X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>;
@@ -50,7 +53,7 @@ def IMPLICIT_DEF_VR128 : I<0, Pseudo, (outs VR128:$dst), (ins),
                            Requires<[HasSSE1]>;
 def IMPLICIT_DEF_FR32 : I<0, Pseudo, (outs FR32:$dst), (ins),
                            "#IMPLICIT_DEF $dst",
-                           [(set FR32:$dst, (undef))]>, Requires<[HasSSE2]>;
+                           [(set FR32:$dst, (undef))]>, Requires<[HasSSE1]>;
 def IMPLICIT_DEF_FR64 : I<0, Pseudo, (outs FR64:$dst), (ins),
                            "#IMPLICIT_DEF $dst",
                            [(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;
@@ -131,6 +134,22 @@ def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
 def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
 
+// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
+// 16-byte boundary.
+def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
+    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+           LD->getAddressingMode() == ISD::UNINDEXED &&
+           LD->getAlignment() >= 8;
+  return false;
+}]>;
+
+def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
+def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop64 node:$ptr))>;
+def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
+def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
+def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
+
 def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
 def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
 def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
@@ -247,7 +266,8 @@ def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
 // CMOV* - Used to implement the SSE SELECT DAG operation. Expanded by the
 // scheduler into a branch sequence.
-let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
+// These are expanded by the scheduler.
+let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
   def CMOV_FR32 : I<0, Pseudo,
                     (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                     "#CMOV_FR32 PSEUDO!",
                     [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond))]>;
@@ -271,78 +291,94 @@ let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
"#CMOV_V2I64 PSEUDO!", [(set VR128:$dst, (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>; + + def NEW_CMOV_FR32 : I<0, Pseudo, + (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond), + "#CMOV_FR32 PSEUDO!", + [(set FR32:$dst, (X86cmov_new FR32:$t, FR32:$f, imm:$cond, + EFLAGS))]>; + def NEW_CMOV_FR64 : I<0, Pseudo, + (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond), + "#CMOV_FR64 PSEUDO!", + [(set FR64:$dst, (X86cmov_new FR64:$t, FR64:$f, imm:$cond, + EFLAGS))]>; + def NEW_CMOV_V4F32 : I<0, Pseudo, + (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), + "#CMOV_V4F32 PSEUDO!", + [(set VR128:$dst, + (v4f32 (X86cmov_new VR128:$t, VR128:$f, imm:$cond, + EFLAGS)))]>; + def NEW_CMOV_V2F64 : I<0, Pseudo, + (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), + "#CMOV_V2F64 PSEUDO!", + [(set VR128:$dst, + (v2f64 (X86cmov_new VR128:$t, VR128:$f, imm:$cond, + EFLAGS)))]>; + def NEW_CMOV_V2I64 : I<0, Pseudo, + (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), + "#CMOV_V2I64 PSEUDO!", + [(set VR128:$dst, + (v2i64 (X86cmov_new VR128:$t, VR128:$f, imm:$cond, + EFLAGS)))]>; } //===----------------------------------------------------------------------===// // SSE1 Instructions //===----------------------------------------------------------------------===// -// SSE1 Instruction Templates: -// -// SSI - SSE1 instructions with XS prefix. -// PSI - SSE1 instructions with TB prefix. -// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix. - -class SSI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, XS, Requires<[HasSSE1]>; -class PSI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, TB, Requires<[HasSSE1]>; -class PSIi8 o, Format F, dag outs, dag ins, string asm, - list pattern> - : Ii8, TB, Requires<[HasSSE1]>; - // Move Instructions def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src), - "movss {$src, $dst|$dst, $src}", []>; + "movss\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1, isReMaterializable = 1 in def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (loadf32 addr:$src))]>; def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(store FR32:$src, addr:$dst)]>; // Conversion instructions def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src), - "cvttss2si {$src, $dst|$dst, $src}", + "cvttss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (fp_to_sint FR32:$src))]>; def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src), - "cvttss2si {$src, $dst|$dst, $src}", + "cvttss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>; def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src), - "cvtsi2ss {$src, $dst|$dst, $src}", + "cvtsi2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (sint_to_fp GR32:$src))]>; def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), - "cvtsi2ss {$src, $dst|$dst, $src}", + "cvtsi2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>; // Match intrinsics which expect XMM operand(s). 
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "cvtss2si {$src, $dst|$dst, $src}", + "cvtss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>; def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src), - "cvtss2si {$src, $dst|$dst, $src}", + "cvtss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_cvtss2si (load addr:$src)))]>; // Aliases for intrinsics def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "cvttss2si {$src, $dst|$dst, $src}", + "cvttss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_cvttss2si VR128:$src))]>; def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src), - "cvttss2si {$src, $dst|$dst, $src}", + "cvttss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_cvttss2si(load addr:$src)))]>; let isTwoAddress = 1 in { def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, GR32:$src2), - "cvtsi2ss {$src2, $dst|$dst, $src2}", + "cvtsi2ss\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1, GR32:$src2))]>; def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2), - "cvtsi2ss {$src2, $dst|$dst, $src2}", + "cvtsi2ss\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1, (loadi32 addr:$src2)))]>; } @@ -351,99 +387,135 @@ let isTwoAddress = 1 in { let isTwoAddress = 1 in { def CMPSSrr : SSI<0xC2, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc), - "cmp${cc}ss {$src, $dst|$dst, $src}", []>; + "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>; def CMPSSrm : SSI<0xC2, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc), - "cmp${cc}ss {$src, $dst|$dst, $src}", []>; + "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>; } +let Defs = [EFLAGS] in { def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2), - "ucomiss {$src2, $src1|$src1, $src2}", + "ucomiss\t{$src2, $src1|$src1, $src2}", [(X86cmp FR32:$src1, FR32:$src2)]>; def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2), - "ucomiss {$src2, $src1|$src1, $src2}", + "ucomiss\t{$src2, $src1|$src1, $src2}", [(X86cmp FR32:$src1, (loadf32 addr:$src2))]>; +def NEW_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2), + "ucomiss\t{$src2, $src1|$src1, $src2}", + [(X86cmp_new FR32:$src1, FR32:$src2), (implicit EFLAGS)]>; +def NEW_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2), + "ucomiss\t{$src2, $src1|$src1, $src2}", + [(X86cmp_new FR32:$src1, (loadf32 addr:$src2)), + (implicit EFLAGS)]>; +} // Defs = [EFLAGS] + // Aliases to match intrinsics which expect XMM operand(s). 
let isTwoAddress = 1 in { def Int_CMPSSrr : SSI<0xC2, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc), - "cmp${cc}ss {$src, $dst|$dst, $src}", + "cmp${cc}ss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1, VR128:$src, imm:$cc))]>; def Int_CMPSSrm : SSI<0xC2, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc), - "cmp${cc}ss {$src, $dst|$dst, $src}", + "cmp${cc}ss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1, (load addr:$src), imm:$cc))]>; } +let Defs = [EFLAGS] in { def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "ucomiss {$src2, $src1|$src1, $src2}", + "ucomiss\t{$src2, $src1|$src1, $src2}", [(X86ucomi (v4f32 VR128:$src1), VR128:$src2)]>; def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "ucomiss {$src2, $src1|$src1, $src2}", + "ucomiss\t{$src2, $src1|$src1, $src2}", [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2))]>; def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "comiss {$src2, $src1|$src1, $src2}", + "comiss\t{$src2, $src1|$src1, $src2}", [(X86comi (v4f32 VR128:$src1), VR128:$src2)]>; def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "comiss {$src2, $src1|$src1, $src2}", + "comiss\t{$src2, $src1|$src1, $src2}", [(X86comi (v4f32 VR128:$src1), (load addr:$src2))]>; +def NEW_Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src2), + "ucomiss\t{$src2, $src1|$src1, $src2}", + [(X86ucomi_new (v4f32 VR128:$src1), VR128:$src2), + (implicit EFLAGS)]>; +def NEW_Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), + (ins VR128:$src1, f128mem:$src2), + "ucomiss\t{$src2, $src1|$src1, $src2}", + [(X86ucomi_new (v4f32 VR128:$src1), (load addr:$src2)), + (implicit EFLAGS)]>; + +def NEW_Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src2), + "comiss\t{$src2, $src1|$src1, $src2}", + [(X86comi_new (v4f32 VR128:$src1), VR128:$src2), + (implicit EFLAGS)]>; +def NEW_Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), + (ins VR128:$src1, f128mem:$src2), + "comiss\t{$src2, $src1|$src1, $src2}", + [(X86comi_new (v4f32 VR128:$src1), (load addr:$src2)), + (implicit EFLAGS)]>; +} // Defs = [EFLAGS] + // Aliases of packed SSE1 instructions for scalar use. These all have names that // start with 'Fs'. // Alias instructions that map fld0 to pxor for sse. +let isReMaterializable = 1 in def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), - "pxor $dst, $dst", [(set FR32:$dst, fp32imm0)]>, + "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>, TB, OpSize; // Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are // disregarded. def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src), - "movaps {$src, $dst|$dst, $src}", []>; + "movaps\t{$src, $dst|$dst, $src}", []>; // Alias instruction to load FR32 from f128mem using movaps. Upper bits are // disregarded. +let isLoad = 1 in def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src), - "movaps {$src, $dst|$dst, $src}", + "movaps\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>; // Alias bitwise logical operations using SSE logical ops on packed FP values. 
let isTwoAddress = 1 in { let isCommutable = 1 in { def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2), - "andps {$src2, $dst|$dst, $src2}", + "andps\t{$src2, $dst|$dst, $src2}", [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>; def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2), - "orps {$src2, $dst|$dst, $src2}", + "orps\t{$src2, $dst|$dst, $src2}", [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>; def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2), - "xorps {$src2, $dst|$dst, $src2}", + "xorps\t{$src2, $dst|$dst, $src2}", [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>; } def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2), - "andps {$src2, $dst|$dst, $src2}", + "andps\t{$src2, $dst|$dst, $src2}", [(set FR32:$dst, (X86fand FR32:$src1, (memopfsf32 addr:$src2)))]>; def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2), - "orps {$src2, $dst|$dst, $src2}", + "orps\t{$src2, $dst|$dst, $src2}", [(set FR32:$dst, (X86for FR32:$src1, (memopfsf32 addr:$src2)))]>; def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2), - "xorps {$src2, $dst|$dst, $src2}", + "xorps\t{$src2, $dst|$dst, $src2}", [(set FR32:$dst, (X86fxor FR32:$src1, (memopfsf32 addr:$src2)))]>; def FsANDNPSrr : PSI<0x55, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2), - "andnps {$src2, $dst|$dst, $src2}", []>; + "andnps\t{$src2, $dst|$dst, $src2}", []>; def FsANDNPSrm : PSI<0x55, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2), - "andnps {$src2, $dst|$dst, $src2}", []>; + "andnps\t{$src2, $dst|$dst, $src2}", []>; } /// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms. @@ -462,38 +534,38 @@ multiclass basic_sse1_fp_binop_rm opc, string OpcodeStr, bit Commutable = 0> { // Scalar operation, reg+reg. def SSrr : SSI { let isCommutable = Commutable; } // Scalar operation, reg+mem. def SSrm : SSI; // Vector operation, reg+reg. def PSrr : PSI { let isCommutable = Commutable; } // Vector operation, reg+mem. def PSrm : PSI; // Intrinsic operation, reg+reg. def SSrr_Int : SSI { let isCommutable = Commutable; } // Intrinsic operation, reg+mem. def SSrm_Int : SSI; } @@ -523,51 +595,51 @@ multiclass sse1_fp_binop_rm opc, string OpcodeStr, // Scalar operation, reg+reg. def SSrr : SSI { let isCommutable = Commutable; } // Scalar operation, reg+mem. def SSrm : SSI; // Vector operation, reg+reg. def PSrr : PSI { let isCommutable = Commutable; } // Vector operation, reg+mem. def PSrm : PSI; // Intrinsic operation, reg+reg. def SSrr_Int : SSI { let isCommutable = Commutable; } // Intrinsic operation, reg+mem. def SSrm_Int : SSI; // Vector intrinsic operation, reg+reg. def PSrr_Int : PSI { let isCommutable = Commutable; } // Vector intrinsic operation, reg+mem. 
- def PSrm_Int : PSI; } } @@ -582,44 +654,47 @@ defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin, // Move Instructions def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movaps {$src, $dst|$dst, $src}", []>; + "movaps\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1, isReMaterializable = 1 in def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movaps {$src, $dst|$dst, $src}", + "movaps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>; def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movaps {$src, $dst|$dst, $src}", + "movaps\t{$src, $dst|$dst, $src}", [(alignedstore (v4f32 VR128:$src), addr:$dst)]>; def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movups {$src, $dst|$dst, $src}", []>; + "movups\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1 in def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movups {$src, $dst|$dst, $src}", + "movups\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (loadv4f32 addr:$src))]>; def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movups {$src, $dst|$dst, $src}", + "movups\t{$src, $dst|$dst, $src}", [(store (v4f32 VR128:$src), addr:$dst)]>; // Intrinsic forms of MOVUPS load and store +let isLoad = 1 in def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movups {$src, $dst|$dst, $src}", + "movups\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>; def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movups {$src, $dst|$dst, $src}", + "movups\t{$src, $dst|$dst, $src}", [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>; let isTwoAddress = 1 in { let AddedComplexity = 20 in { def MOVLPSrm : PSI<0x12, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2), - "movlps {$src2, $dst|$dst, $src2}", + "movlps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))), MOVLP_shuffle_mask)))]>; def MOVHPSrm : PSI<0x16, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2), - "movhps {$src2, $dst|$dst, $src2}", + "movhps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))), @@ -628,14 +703,14 @@ let isTwoAddress = 1 in { } // isTwoAddress def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), - "movlps {$src, $dst|$dst, $src}", + "movlps\t{$src, $dst|$dst, $src}", [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)), (iPTR 0))), addr:$dst)]>; // v2f64 extract element 1 is always custom lowered to unpack high to low // and extract element 0 so the non-store version isn't too horrible. 
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), - "movhps {$src, $dst|$dst, $src}", + "movhps\t{$src, $dst|$dst, $src}", [(store (f64 (vector_extract (v2f64 (vector_shuffle (bc_v2f64 (v4f32 VR128:$src)), (undef), @@ -645,13 +720,13 @@ def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), let isTwoAddress = 1 in { let AddedComplexity = 15 in { def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "movlhps {$src2, $dst|$dst, $src2}", + "movlhps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, MOVHP_shuffle_mask)))]>; def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "movhlps {$src2, $dst|$dst, $src2}", + "movhlps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, MOVHLPS_shuffle_mask)))]>; @@ -681,50 +756,50 @@ multiclass sse1_fp_unop_rm opc, string OpcodeStr, bit Commutable = 0> { // Scalar operation, reg. def SSr : SSI { let isCommutable = Commutable; } // Scalar operation, mem. def SSm : SSI; // Vector operation, reg. def PSr : PSI { let isCommutable = Commutable; } // Vector operation, mem. def PSm : PSI; // Intrinsic operation, reg. def SSr_Int : SSI { let isCommutable = Commutable; } // Intrinsic operation, mem. def SSm_Int : SSI; // Vector intrinsic operation, reg def PSr_Int : PSI { let isCommutable = Commutable; } // Vector intrinsic operation, mem - def PSm_Int : PSI; } @@ -744,46 +819,46 @@ let isTwoAddress = 1 in { let isCommutable = 1 in { def ANDPSrr : PSI<0x54, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "andps {$src2, $dst|$dst, $src2}", + "andps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (and VR128:$src1, VR128:$src2)))]>; def ORPSrr : PSI<0x56, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "orps {$src2, $dst|$dst, $src2}", + "orps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (or VR128:$src1, VR128:$src2)))]>; def XORPSrr : PSI<0x57, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "xorps {$src2, $dst|$dst, $src2}", + "xorps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (xor VR128:$src1, VR128:$src2)))]>; } def ANDPSrm : PSI<0x54, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "andps {$src2, $dst|$dst, $src2}", + "andps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)), (memopv2i64 addr:$src2)))]>; def ORPSrm : PSI<0x56, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "orps {$src2, $dst|$dst, $src2}", + "orps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)), (memopv2i64 addr:$src2)))]>; def XORPSrm : PSI<0x57, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "xorps {$src2, $dst|$dst, $src2}", + "xorps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)), (memopv2i64 addr:$src2)))]>; def ANDNPSrr : PSI<0x55, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "andnps {$src2, $dst|$dst, $src2}", + "andnps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))), VR128:$src2)))]>; def ANDNPSrm : PSI<0x55, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2), - "andnps {$src2, $dst|$dst, $src2}", + "andnps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)), (bc_v2i64 (v4i32 immAllOnesV))), @@ -793,12 +868,12 @@ let 
isTwoAddress = 1 in { let isTwoAddress = 1 in { def CMPPSrri : PSIi8<0xC2, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc), - "cmp${cc}ps {$src, $dst|$dst, $src}", + "cmp${cc}ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1, VR128:$src, imm:$cc))]>; def CMPPSrmi : PSIi8<0xC2, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc), - "cmp${cc}ps {$src, $dst|$dst, $src}", + "cmp${cc}ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1, (load addr:$src), imm:$cc))]>; } @@ -809,7 +884,7 @@ let isTwoAddress = 1 in { def SHUFPSrri : PSIi8<0xC6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3), - "shufps {$src3, $src2, $dst|$dst, $src2, $src3}", + "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, @@ -817,63 +892,63 @@ let isTwoAddress = 1 in { def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2, i32i8imm:$src3), - "shufps {$src3, $src2, $dst|$dst, $src2, $src3}", + "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR128:$dst, (v4f32 (vector_shuffle - VR128:$src1, (load addr:$src2), + VR128:$src1, (memopv4f32 addr:$src2), SHUFP_shuffle_mask:$src3)))]>; let AddedComplexity = 10 in { def UNPCKHPSrr : PSI<0x15, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "unpckhps {$src2, $dst|$dst, $src2}", + "unpckhps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; def UNPCKHPSrm : PSI<0x15, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "unpckhps {$src2, $dst|$dst, $src2}", + "unpckhps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle - VR128:$src1, (load addr:$src2), + VR128:$src1, (memopv4f32 addr:$src2), UNPCKH_shuffle_mask)))]>; def UNPCKLPSrr : PSI<0x14, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "unpcklps {$src2, $dst|$dst, $src2}", + "unpcklps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKL_shuffle_mask)))]>; def UNPCKLPSrm : PSI<0x14, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "unpcklps {$src2, $dst|$dst, $src2}", + "unpcklps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle - VR128:$src1, (load addr:$src2), + VR128:$src1, (memopv4f32 addr:$src2), UNPCKL_shuffle_mask)))]>; } // AddedComplexity } // isTwoAddress // Mask creation def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "movmskps {$src, $dst|$dst, $src}", + "movmskps\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>; def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "movmskpd {$src, $dst|$dst, $src}", + "movmskpd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>; // Prefetching loads. // TODO: no intrinsics for these? 
-def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src), "prefetcht0 $src", []>; -def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src), "prefetcht1 $src", []>; -def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src), "prefetcht2 $src", []>; -def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src), "prefetchnta $src", []>; +def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src), "prefetcht0\t$src", []>; +def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src), "prefetcht1\t$src", []>; +def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src), "prefetcht2\t$src", []>; +def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src), "prefetchnta\t$src", []>; // Non-temporal stores def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), - "movntps {$src, $dst|$dst, $src}", + "movntps\t{$src, $dst|$dst, $src}", [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>; // Load, store, and memory fence @@ -881,24 +956,24 @@ def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>; // MXCSR register def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src), - "ldmxcsr $src", [(int_x86_sse_ldmxcsr addr:$src)]>; + "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>; def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst), - "stmxcsr $dst", [(int_x86_sse_stmxcsr addr:$dst)]>; + "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>; // Alias instructions that map zero vector to pxor / xorp* for sse. // FIXME: remove when we can teach regalloc that xor reg, reg is ok. let isReMaterializable = 1 in def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), - "xorps $dst, $dst", + "xorps\t$dst, $dst", [(set VR128:$dst, (v4f32 immAllZerosV))]>; // FR32 to 128-bit vector conversion. def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (scalar_to_vector FR32:$src)))]>; def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>; @@ -908,11 +983,11 @@ def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src), // def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))), // (f32 FR32:$src)>; def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (vector_extract (v4f32 VR128:$src), (iPTR 0)))]>; def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))), addr:$dst)]>; @@ -922,12 +997,12 @@ def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src), let isTwoAddress = 1 in { def MOVLSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, FR32:$src2), - "movss {$src2, $dst|$dst, $src2}", []>; + "movss\t{$src2, $dst|$dst, $src2}", []>; let AddedComplexity = 15 in def MOVLPSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "movss {$src2, $dst|$dst, $src2}", + "movss\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src1, VR128:$src2, MOVL_shuffle_mask)))]>; @@ -937,7 +1012,7 @@ let isTwoAddress = 1 in { // Loading from memory automatically zeroing upper bits. 
let AddedComplexity = 20 in def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src), - "movss {$src, $dst|$dst, $src}", + "movss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV, (v4f32 (scalar_to_vector (loadf32 addr:$src))), MOVL_shuffle_mask)))]>; @@ -947,76 +1022,63 @@ def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src), // SSE2 Instructions //===----------------------------------------------------------------------===// -// SSE2 Instruction Templates: -// -// SDI - SSE2 instructions with XD prefix. -// PDI - SSE2 instructions with TB and OpSize prefixes. -// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes. - -class SDI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, XD, Requires<[HasSSE2]>; -class PDI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, TB, OpSize, Requires<[HasSSE2]>; -class PDIi8 o, Format F, dag outs, dag ins, string asm, - list pattern> - : Ii8, TB, OpSize, Requires<[HasSSE2]>; - // Move Instructions def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src), - "movsd {$src, $dst|$dst, $src}", []>; + "movsd\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1, isReMaterializable = 1 in def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (loadf64 addr:$src))]>; def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(store FR64:$src, addr:$dst)]>; // Conversion instructions def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src), - "cvttsd2si {$src, $dst|$dst, $src}", + "cvttsd2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (fp_to_sint FR64:$src))]>; def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src), - "cvttsd2si {$src, $dst|$dst, $src}", + "cvttsd2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>; def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src), - "cvtsd2ss {$src, $dst|$dst, $src}", + "cvtsd2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (fround FR64:$src))]>; def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), - "cvtsd2ss {$src, $dst|$dst, $src}", + "cvtsd2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (fround (loadf64 addr:$src)))]>; def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src), - "cvtsi2sd {$src, $dst|$dst, $src}", + "cvtsi2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (sint_to_fp GR32:$src))]>; def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src), - "cvtsi2sd {$src, $dst|$dst, $src}", + "cvtsi2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>; // SSE2 instructions with XS prefix def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src), - "cvtss2sd {$src, $dst|$dst, $src}", + "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (fextend FR32:$src))]>, XS, Requires<[HasSSE2]>; def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src), - "cvtss2sd {$src, $dst|$dst, $src}", + "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (extloadf32 addr:$src))]>, XS, Requires<[HasSSE2]>; // Match intrinsics which expect XMM operand(s). 
def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "cvtsd2si {$src, $dst|$dst, $src}", + "cvtsd2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>; def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src), - "cvtsd2si {$src, $dst|$dst, $src}", + "cvtsd2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_cvtsd2si (load addr:$src)))]>; // Aliases for intrinsics def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "cvttsd2si {$src, $dst|$dst, $src}", + "cvttsd2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_cvttsd2si VR128:$src))]>; def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src), - "cvttsd2si {$src, $dst|$dst, $src}", + "cvttsd2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_cvttsd2si (load addr:$src)))]>; @@ -1024,99 +1086,135 @@ def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src), let isTwoAddress = 1 in { def CMPSDrr : SDI<0xC2, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc), - "cmp${cc}sd {$src, $dst|$dst, $src}", []>; + "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>; def CMPSDrm : SDI<0xC2, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc), - "cmp${cc}sd {$src, $dst|$dst, $src}", []>; + "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>; } +let Defs = [EFLAGS] in { def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2), - "ucomisd {$src2, $src1|$src1, $src2}", + "ucomisd\t{$src2, $src1|$src1, $src2}", [(X86cmp FR64:$src1, FR64:$src2)]>; def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2), - "ucomisd {$src2, $src1|$src1, $src2}", + "ucomisd\t{$src2, $src1|$src1, $src2}", [(X86cmp FR64:$src1, (loadf64 addr:$src2))]>; +def NEW_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2), + "ucomisd\t{$src2, $src1|$src1, $src2}", + [(X86cmp_new FR64:$src1, FR64:$src2), (implicit EFLAGS)]>; +def NEW_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2), + "ucomisd\t{$src2, $src1|$src1, $src2}", + [(X86cmp_new FR64:$src1, (loadf64 addr:$src2)), + (implicit EFLAGS)]>; +} + // Aliases to match intrinsics which expect XMM operand(s). 
let isTwoAddress = 1 in { def Int_CMPSDrr : SDI<0xC2, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc), - "cmp${cc}sd {$src, $dst|$dst, $src}", + "cmp${cc}sd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1, VR128:$src, imm:$cc))]>; def Int_CMPSDrm : SDI<0xC2, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc), - "cmp${cc}sd {$src, $dst|$dst, $src}", + "cmp${cc}sd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1, (load addr:$src), imm:$cc))]>; } +let Defs = [EFLAGS] in { def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "ucomisd {$src2, $src1|$src1, $src2}", + "ucomisd\t{$src2, $src1|$src1, $src2}", [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>; def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "ucomisd {$src2, $src1|$src1, $src2}", + "ucomisd\t{$src2, $src1|$src1, $src2}", [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2))]>; def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "comisd {$src2, $src1|$src1, $src2}", + "comisd\t{$src2, $src1|$src1, $src2}", [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>; def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "comisd {$src2, $src1|$src1, $src2}", + "comisd\t{$src2, $src1|$src1, $src2}", [(X86comi (v2f64 VR128:$src1), (load addr:$src2))]>; +def NEW_Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src2), + "ucomisd\t{$src2, $src1|$src1, $src2}", + [(X86ucomi_new (v2f64 VR128:$src1), (v2f64 VR128:$src2)), + (implicit EFLAGS)]>; +def NEW_Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), + (ins VR128:$src1, f128mem:$src2), + "ucomisd\t{$src2, $src1|$src1, $src2}", + [(X86ucomi_new (v2f64 VR128:$src1), (load addr:$src2)), + (implicit EFLAGS)]>; + +def NEW_Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src2), + "comisd\t{$src2, $src1|$src1, $src2}", + [(X86comi_new (v2f64 VR128:$src1), (v2f64 VR128:$src2)), + (implicit EFLAGS)]>; +def NEW_Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), + (ins VR128:$src1, f128mem:$src2), + "comisd\t{$src2, $src1|$src1, $src2}", + [(X86comi_new (v2f64 VR128:$src1), (load addr:$src2)), + (implicit EFLAGS)]>; +} // Defs = EFLAGS] + // Aliases of packed SSE2 instructions for scalar use. These all have names that // start with 'Fs'. // Alias instructions that map fld0 to pxor for sse. +let isReMaterializable = 1 in def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), - "pxor $dst, $dst", [(set FR64:$dst, fpimm0)]>, + "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>, TB, OpSize; // Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are // disregarded. def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src), - "movapd {$src, $dst|$dst, $src}", []>; + "movapd\t{$src, $dst|$dst, $src}", []>; // Alias instruction to load FR64 from f128mem using movapd. Upper bits are // disregarded. +let isLoad = 1 in def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src), - "movapd {$src, $dst|$dst, $src}", + "movapd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>; // Alias bitwise logical operations using SSE logical ops on packed FP values. 
let isTwoAddress = 1 in { let isCommutable = 1 in { def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2), - "andpd {$src2, $dst|$dst, $src2}", + "andpd\t{$src2, $dst|$dst, $src2}", [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>; def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2), - "orpd {$src2, $dst|$dst, $src2}", + "orpd\t{$src2, $dst|$dst, $src2}", [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>; def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2), - "xorpd {$src2, $dst|$dst, $src2}", + "xorpd\t{$src2, $dst|$dst, $src2}", [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>; } def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2), - "andpd {$src2, $dst|$dst, $src2}", + "andpd\t{$src2, $dst|$dst, $src2}", [(set FR64:$dst, (X86fand FR64:$src1, (memopfsf64 addr:$src2)))]>; def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2), - "orpd {$src2, $dst|$dst, $src2}", + "orpd\t{$src2, $dst|$dst, $src2}", [(set FR64:$dst, (X86for FR64:$src1, (memopfsf64 addr:$src2)))]>; def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2), - "xorpd {$src2, $dst|$dst, $src2}", + "xorpd\t{$src2, $dst|$dst, $src2}", [(set FR64:$dst, (X86fxor FR64:$src1, (memopfsf64 addr:$src2)))]>; def FsANDNPDrr : PDI<0x55, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2), - "andnpd {$src2, $dst|$dst, $src2}", []>; + "andnpd\t{$src2, $dst|$dst, $src2}", []>; def FsANDNPDrm : PDI<0x55, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2), - "andnpd {$src2, $dst|$dst, $src2}", []>; + "andnpd\t{$src2, $dst|$dst, $src2}", []>; } /// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms. @@ -1135,38 +1233,38 @@ multiclass basic_sse2_fp_binop_rm opc, string OpcodeStr, bit Commutable = 0> { // Scalar operation, reg+reg. def SDrr : SDI { let isCommutable = Commutable; } // Scalar operation, reg+mem. def SDrm : SDI; // Vector operation, reg+reg. def PDrr : PDI { let isCommutable = Commutable; } // Vector operation, reg+mem. def PDrm : PDI; // Intrinsic operation, reg+reg. def SDrr_Int : SDI { let isCommutable = Commutable; } // Intrinsic operation, reg+mem. def SDrm_Int : SDI; } @@ -1196,51 +1294,51 @@ multiclass sse2_fp_binop_rm opc, string OpcodeStr, // Scalar operation, reg+reg. def SDrr : SDI { let isCommutable = Commutable; } // Scalar operation, reg+mem. def SDrm : SDI; // Vector operation, reg+reg. def PDrr : PDI { let isCommutable = Commutable; } // Vector operation, reg+mem. def PDrm : PDI; // Intrinsic operation, reg+reg. def SDrr_Int : SDI { let isCommutable = Commutable; } // Intrinsic operation, reg+mem. def SDrm_Int : SDI; // Vector intrinsic operation, reg+reg. def PDrr_Int : PDI { let isCommutable = Commutable; } // Vector intrinsic operation, reg+mem. 
- def PDrm_Int : PDI; } } @@ -1255,44 +1353,46 @@ defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin, // Move Instructions def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movapd {$src, $dst|$dst, $src}", []>; + "movapd\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1, isReMaterializable = 1 in def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movapd {$src, $dst|$dst, $src}", + "movapd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>; def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movapd {$src, $dst|$dst, $src}", + "movapd\t{$src, $dst|$dst, $src}", [(alignedstore (v2f64 VR128:$src), addr:$dst)]>; def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movupd {$src, $dst|$dst, $src}", []>; + "movupd\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1 in def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movupd {$src, $dst|$dst, $src}", + "movupd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (loadv2f64 addr:$src))]>; def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movupd {$src, $dst|$dst, $src}", + "movupd\t{$src, $dst|$dst, $src}", [(store (v2f64 VR128:$src), addr:$dst)]>; // Intrinsic forms of MOVUPD load and store def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movupd {$src, $dst|$dst, $src}", + "movupd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>; def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movupd {$src, $dst|$dst, $src}", + "movupd\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>; let isTwoAddress = 1 in { let AddedComplexity = 20 in { def MOVLPDrm : PDI<0x12, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2), - "movlpd {$src2, $dst|$dst, $src2}", + "movlpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, (scalar_to_vector (loadf64 addr:$src2)), MOVLP_shuffle_mask)))]>; def MOVHPDrm : PDI<0x16, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2), - "movhpd {$src2, $dst|$dst, $src2}", + "movhpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, (scalar_to_vector (loadf64 addr:$src2)), @@ -1301,14 +1401,14 @@ let isTwoAddress = 1 in { } // isTwoAddress def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), - "movlpd {$src, $dst|$dst, $src}", + "movlpd\t{$src, $dst|$dst, $src}", [(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))), addr:$dst)]>; // v2f64 extract element 1 is always custom lowered to unpack high to low // and extract element 0 so the non-store version isn't too horrible. 
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), - "movhpd {$src, $dst|$dst, $src}", + "movhpd\t{$src, $dst|$dst, $src}", [(store (f64 (vector_extract (v2f64 (vector_shuffle VR128:$src, (undef), UNPCKH_shuffle_mask)), (iPTR 0))), @@ -1316,79 +1416,79 @@ def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), // SSE2 instructions without OpSize prefix def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtdq2ps {$src, $dst|$dst, $src}", + "cvtdq2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>, TB, Requires<[HasSSE2]>; def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "cvtdq2ps {$src, $dst|$dst, $src}", + "cvtdq2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtdq2ps (bitconvert (memopv2i64 addr:$src))))]>, TB, Requires<[HasSSE2]>; // SSE2 instructions with XS prefix def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtdq2pd {$src, $dst|$dst, $src}", + "cvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>, XS, Requires<[HasSSE2]>; def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), - "cvtdq2pd {$src, $dst|$dst, $src}", + "cvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtdq2pd (bitconvert (memopv2i64 addr:$src))))]>, XS, Requires<[HasSSE2]>; def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtps2dq {$src, $dst|$dst, $src}", + "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>; def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "cvtps2dq {$src, $dst|$dst, $src}", + "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq (load addr:$src)))]>; // SSE2 packed instructions with XS prefix def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvttps2dq {$src, $dst|$dst, $src}", + "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>, XS, Requires<[HasSSE2]>; def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "cvttps2dq {$src, $dst|$dst, $src}", + "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttps2dq (load addr:$src)))]>, XS, Requires<[HasSSE2]>; // SSE2 packed instructions with XD prefix def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtpd2dq {$src, $dst|$dst, $src}", + "cvtpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>, XD, Requires<[HasSSE2]>; def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "cvtpd2dq {$src, $dst|$dst, $src}", + "cvtpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2dq (load addr:$src)))]>, XD, Requires<[HasSSE2]>; def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvttpd2dq {$src, $dst|$dst, $src}", + "cvttpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>; def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "cvttpd2dq {$src, $dst|$dst, $src}", + "cvttpd2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvttpd2dq (load addr:$src)))]>; // SSE2 instructions without OpSize prefix def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtps2pd {$src, $dst|$dst, $src}", + "cvtps2pd\t{$src, $dst|$dst, 
$src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>, TB, Requires<[HasSSE2]>; def Int_CVTPS2PDrm : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins f64mem:$src), - "cvtps2pd {$src, $dst|$dst, $src}", + "cvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd (load addr:$src)))]>, TB, Requires<[HasSSE2]>; def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtpd2ps {$src, $dst|$dst, $src}", + "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>; def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins f128mem:$src), - "cvtpd2ps {$src, $dst|$dst, $src}", + "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps (load addr:$src)))]>; @@ -1397,33 +1497,33 @@ def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins f128mem:$src), let isTwoAddress = 1 in { def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, GR32:$src2), - "cvtsi2sd {$src2, $dst|$dst, $src2}", + "cvtsi2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1, GR32:$src2))]>; def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2), - "cvtsi2sd {$src2, $dst|$dst, $src2}", + "cvtsi2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1, (loadi32 addr:$src2)))]>; def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "cvtsd2ss {$src2, $dst|$dst, $src2}", + "cvtsd2ss\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))]>; def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2), - "cvtsd2ss {$src2, $dst|$dst, $src2}", + "cvtsd2ss\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, (load addr:$src2)))]>; def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "cvtss2sd {$src2, $dst|$dst, $src2}", + "cvtss2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))]>, XS, Requires<[HasSSE2]>; def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2), - "cvtss2sd {$src2, $dst|$dst, $src2}", + "cvtss2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, (load addr:$src2)))]>, XS, Requires<[HasSSE2]>; @@ -1450,50 +1550,50 @@ multiclass sse2_fp_unop_rm opc, string OpcodeStr, bit Commutable = 0> { // Scalar operation, reg. def SDr : SDI { let isCommutable = Commutable; } // Scalar operation, mem. def SDm : SDI; // Vector operation, reg. def PDr : PDI { let isCommutable = Commutable; } // Vector operation, mem. def PDm : PDI; // Intrinsic operation, reg. def SDr_Int : SDI { let isCommutable = Commutable; } // Intrinsic operation, mem. 
def SDm_Int : SDI; // Vector intrinsic operation, reg def PDr_Int : PDI { let isCommutable = Commutable; } // Vector intrinsic operation, mem - def PDm_Int : PDI; } @@ -1508,19 +1608,19 @@ let isTwoAddress = 1 in { let isCommutable = 1 in { def ANDPDrr : PDI<0x54, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "andpd {$src2, $dst|$dst, $src2}", + "andpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (and (bc_v2i64 (v2f64 VR128:$src1)), (bc_v2i64 (v2f64 VR128:$src2))))]>; def ORPDrr : PDI<0x56, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "orpd {$src2, $dst|$dst, $src2}", + "orpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (or (bc_v2i64 (v2f64 VR128:$src1)), (bc_v2i64 (v2f64 VR128:$src2))))]>; def XORPDrr : PDI<0x57, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "xorpd {$src2, $dst|$dst, $src2}", + "xorpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (xor (bc_v2i64 (v2f64 VR128:$src1)), (bc_v2i64 (v2f64 VR128:$src2))))]>; @@ -1528,31 +1628,31 @@ let isTwoAddress = 1 in { def ANDPDrm : PDI<0x54, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "andpd {$src2, $dst|$dst, $src2}", + "andpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (and (bc_v2i64 (v2f64 VR128:$src1)), (memopv2i64 addr:$src2)))]>; def ORPDrm : PDI<0x56, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "orpd {$src2, $dst|$dst, $src2}", + "orpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (or (bc_v2i64 (v2f64 VR128:$src1)), (memopv2i64 addr:$src2)))]>; def XORPDrm : PDI<0x57, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "xorpd {$src2, $dst|$dst, $src2}", + "xorpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (xor (bc_v2i64 (v2f64 VR128:$src1)), (memopv2i64 addr:$src2)))]>; def ANDNPDrr : PDI<0x55, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "andnpd {$src2, $dst|$dst, $src2}", + "andnpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))), (bc_v2i64 (v2f64 VR128:$src2))))]>; def ANDNPDrm : PDI<0x55, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2), - "andnpd {$src2, $dst|$dst, $src2}", + "andnpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))), (memopv2i64 addr:$src2)))]>; @@ -1561,12 +1661,12 @@ let isTwoAddress = 1 in { let isTwoAddress = 1 in { def CMPPDrri : PDIi8<0xC2, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc), - "cmp${cc}pd {$src, $dst|$dst, $src}", + "cmp${cc}pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1, VR128:$src, imm:$cc))]>; def CMPPDrmi : PDIi8<0xC2, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc), - "cmp${cc}pd {$src, $dst|$dst, $src}", + "cmp${cc}pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1, (load addr:$src), imm:$cc))]>; } @@ -1575,48 +1675,48 @@ let isTwoAddress = 1 in { let isTwoAddress = 1 in { def SHUFPDrri : PDIi8<0xC6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "shufpd {$src3, $src2, $dst|$dst, $src2, $src3}", + "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$src3)))]>; def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2, i8imm:$src3), - "shufpd {$src3, $src2, $dst|$dst, $src2, $src3}", + "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR128:$dst, (v2f64 (vector_shuffle - VR128:$src1, (load 
addr:$src2), + VR128:$src1, (memopv2f64 addr:$src2), SHUFP_shuffle_mask:$src3)))]>; let AddedComplexity = 10 in { def UNPCKHPDrr : PDI<0x15, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "unpckhpd {$src2, $dst|$dst, $src2}", + "unpckhpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; def UNPCKHPDrm : PDI<0x15, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "unpckhpd {$src2, $dst|$dst, $src2}", + "unpckhpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle - VR128:$src1, (load addr:$src2), + VR128:$src1, (memopv2f64 addr:$src2), UNPCKH_shuffle_mask)))]>; def UNPCKLPDrr : PDI<0x14, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "unpcklpd {$src2, $dst|$dst, $src2}", + "unpcklpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKL_shuffle_mask)))]>; def UNPCKLPDrm : PDI<0x14, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "unpcklpd {$src2, $dst|$dst, $src2}", + "unpcklpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle - VR128:$src1, (load addr:$src2), + VR128:$src1, (memopv2f64 addr:$src2), UNPCKL_shuffle_mask)))]>; } // AddedComplexity } // isTwoAddress @@ -1627,29 +1727,32 @@ let isTwoAddress = 1 in { // Move Instructions def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movdqa {$src, $dst|$dst, $src}", []>; + "movdqa\t{$src, $dst|$dst, $src}", []>; +let isLoad = 1 in def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "movdqa {$src, $dst|$dst, $src}", + "movdqa\t{$src, $dst|$dst, $src}", [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>; def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), - "movdqa {$src, $dst|$dst, $src}", + "movdqa\t{$src, $dst|$dst, $src}", [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>; +let isLoad = 1 in def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "movdqu {$src, $dst|$dst, $src}", + "movdqu\t{$src, $dst|$dst, $src}", [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>, XS, Requires<[HasSSE2]>; def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), - "movdqu {$src, $dst|$dst, $src}", + "movdqu\t{$src, $dst|$dst, $src}", [/*(store (v2i64 VR128:$src), addr:$dst)*/]>, XS, Requires<[HasSSE2]>; // Intrinsic forms of MOVDQU load and store +let isLoad = 1 in def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "movdqu {$src, $dst|$dst, $src}", + "movdqu\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>, XS, Requires<[HasSSE2]>; def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), - "movdqu {$src, $dst|$dst, $src}", + "movdqu\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>, XS, Requires<[HasSSE2]>; @@ -1658,12 +1761,12 @@ let isTwoAddress = 1 in { multiclass PDI_binop_rm_int opc, string OpcodeStr, Intrinsic IntId, bit Commutable = 0> { def rr : PDI { let isCommutable = Commutable; } def rm : PDI; } @@ -1671,14 +1774,14 @@ multiclass PDI_binop_rm_int opc, string OpcodeStr, Intrinsic IntId, multiclass PDI_binop_rmi_int opc, bits<8> opc2, Format ImmForm, string OpcodeStr, Intrinsic IntId> { def rr : PDI; def rm : PDI; def ri : PDIi8; } @@ -1688,12 +1791,12 @@ multiclass PDI_binop_rmi_int opc, bits<8> opc2, Format ImmForm, multiclass PDI_binop_rm opc, string OpcodeStr, SDNode OpNode, ValueType OpVT, bit 
Commutable = 0> { def rr : PDI { let isCommutable = Commutable; } def rm : PDI; } @@ -1706,12 +1809,12 @@ multiclass PDI_binop_rm opc, string OpcodeStr, SDNode OpNode, multiclass PDI_binop_rm_v2i64 opc, string OpcodeStr, SDNode OpNode, bit Commutable = 0> { def rr : PDI { let isCommutable = Commutable; } def rm : PDI; } @@ -1774,10 +1877,10 @@ defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad", int_x86_sse2_psra_d>; let isTwoAddress = 1 in { def PSLLDQri : PDIi8<0x73, MRM7r, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), - "pslldq {$src2, $dst|$dst, $src2}", []>; + "pslldq\t{$src2, $dst|$dst, $src2}", []>; def PSRLDQri : PDIi8<0x73, MRM3r, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), - "psrldq {$src2, $dst|$dst, $src2}", []>; + "psrldq\t{$src2, $dst|$dst, $src2}", []>; // PSRADQri doesn't exist in SSE[1-3]. } @@ -1798,15 +1901,15 @@ defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>; let isTwoAddress = 1 in { def PANDNrr : PDI<0xDF, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "pandn {$src2, $dst|$dst, $src2}", + "pandn\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1), VR128:$src2)))]>; def PANDNrm : PDI<0xDF, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "pandn {$src2, $dst|$dst, $src2}", + "pandn\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1), - (load addr:$src2))))]>; + (memopv2i64 addr:$src2))))]>; } // SSE2 Integer comparison @@ -1825,13 +1928,13 @@ defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>; // Shuffle and unpack instructions def PSHUFDri : PDIi8<0x70, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2), - "pshufd {$src2, $src1, $dst|$dst, $src1, $src2}", + "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (v4i32 (vector_shuffle VR128:$src1, (undef), PSHUFD_shuffle_mask:$src2)))]>; def PSHUFDmi : PDIi8<0x70, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2), - "pshufd {$src2, $src1, $dst|$dst, $src1, $src2}", + "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (v4i32 (vector_shuffle (bc_v4i32(memopv2i64 addr:$src1)), (undef), @@ -1840,14 +1943,14 @@ def PSHUFDmi : PDIi8<0x70, MRMSrcMem, // SSE2 with ImmT == Imm8 and XS prefix. def PSHUFHWri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2), - "pshufhw {$src2, $src1, $dst|$dst, $src1, $src2}", + "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle VR128:$src1, (undef), PSHUFHW_shuffle_mask:$src2)))]>, XS, Requires<[HasSSE2]>; def PSHUFHWmi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2), - "pshufhw {$src2, $src1, $dst|$dst, $src1, $src2}", + "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle (bc_v8i16 (memopv2i64 addr:$src1)), (undef), @@ -1857,14 +1960,14 @@ def PSHUFHWmi : Ii8<0x70, MRMSrcMem, // SSE2 with ImmT == Imm8 and XD prefix. 
def PSHUFLWri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), - "pshuflw {$src2, $src1, $dst|$dst, $src1, $src2}", + "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle VR128:$src1, (undef), PSHUFLW_shuffle_mask:$src2)))]>, XD, Requires<[HasSSE2]>; def PSHUFLWmi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2), - "pshuflw {$src2, $src1, $dst|$dst, $src1, $src2}", + "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle (bc_v8i16 (memopv2i64 addr:$src1)), (undef), @@ -1875,52 +1978,52 @@ def PSHUFLWmi : Ii8<0x70, MRMSrcMem, let isTwoAddress = 1 in { def PUNPCKLBWrr : PDI<0x60, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpcklbw {$src2, $dst|$dst, $src2}", + "punpcklbw\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v16i8 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKL_shuffle_mask)))]>; def PUNPCKLBWrm : PDI<0x60, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpcklbw {$src2, $dst|$dst, $src2}", + "punpcklbw\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v16i8 (vector_shuffle VR128:$src1, (bc_v16i8 (memopv2i64 addr:$src2)), UNPCKL_shuffle_mask)))]>; def PUNPCKLWDrr : PDI<0x61, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpcklwd {$src2, $dst|$dst, $src2}", + "punpcklwd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKL_shuffle_mask)))]>; def PUNPCKLWDrm : PDI<0x61, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpcklwd {$src2, $dst|$dst, $src2}", + "punpcklwd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle VR128:$src1, (bc_v8i16 (memopv2i64 addr:$src2)), UNPCKL_shuffle_mask)))]>; def PUNPCKLDQrr : PDI<0x62, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpckldq {$src2, $dst|$dst, $src2}", + "punpckldq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4i32 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKL_shuffle_mask)))]>; def PUNPCKLDQrm : PDI<0x62, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpckldq {$src2, $dst|$dst, $src2}", + "punpckldq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)), UNPCKL_shuffle_mask)))]>; def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpcklqdq {$src2, $dst|$dst, $src2}", + "punpcklqdq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKL_shuffle_mask)))]>; def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpcklqdq {$src2, $dst|$dst, $src2}", + "punpcklqdq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2), @@ -1928,52 +2031,52 @@ let isTwoAddress = 1 in { def PUNPCKHBWrr : PDI<0x68, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpckhbw {$src2, $dst|$dst, $src2}", + "punpckhbw\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v16i8 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; def PUNPCKHBWrm : PDI<0x68, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpckhbw {$src2, $dst|$dst, $src2}", + "punpckhbw\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v16i8 (vector_shuffle VR128:$src1, (bc_v16i8 (memopv2i64 addr:$src2)), UNPCKH_shuffle_mask)))]>; def PUNPCKHWDrr : PDI<0x69, MRMSrcReg, (outs VR128:$dst), 
(ins VR128:$src1, VR128:$src2), - "punpckhwd {$src2, $dst|$dst, $src2}", + "punpckhwd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; def PUNPCKHWDrm : PDI<0x69, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpckhwd {$src2, $dst|$dst, $src2}", + "punpckhwd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v8i16 (vector_shuffle VR128:$src1, (bc_v8i16 (memopv2i64 addr:$src2)), UNPCKH_shuffle_mask)))]>; def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpckhdq {$src2, $dst|$dst, $src2}", + "punpckhdq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4i32 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpckhdq {$src2, $dst|$dst, $src2}", + "punpckhdq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)), UNPCKH_shuffle_mask)))]>; def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "punpckhqdq {$src2, $dst|$dst, $src2}", + "punpckhqdq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (vector_shuffle VR128:$src1, VR128:$src2, UNPCKH_shuffle_mask)))]>; def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), - "punpckhqdq {$src2, $dst|$dst, $src2}", + "punpckhqdq\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2), @@ -1983,21 +2086,21 @@ let isTwoAddress = 1 in { // Extract / Insert def PEXTRWri : PDIi8<0xC5, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2), - "pextrw {$src2, $src1, $dst|$dst, $src1, $src2}", + "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1), (iPTR imm:$src2)))]>; let isTwoAddress = 1 in { def PINSRWrri : PDIi8<0xC4, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, GR32:$src2, i32i8imm:$src3), - "pinsrw {$src3, $src2, $dst|$dst, $src2, $src3}", + "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR128:$dst, (v8i16 (X86pinsrw (v8i16 VR128:$src1), GR32:$src2, (iPTR imm:$src3))))]>; def PINSRWrmi : PDIi8<0xC4, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i16mem:$src2, i32i8imm:$src3), - "pinsrw {$src3, $src2, $dst|$dst, $src2, $src3}", + "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR128:$dst, (v8i16 (X86pinsrw (v8i16 VR128:$src1), (i32 (anyext (loadi16 addr:$src2))), @@ -2006,30 +2109,30 @@ let isTwoAddress = 1 in { // Mask creation def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), - "pmovmskb {$src, $dst|$dst, $src}", + "pmovmskb\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>; // Conditional store +let Uses = [EDI] in def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), - "maskmovdqu {$mask, $src|$src, $mask}", - [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, - Imp<[EDI],[]>; + "maskmovdqu\t{$mask, $src|$src, $mask}", + [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>; // Non-temporal stores def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), - "movntpd {$src, $dst|$dst, $src}", + "movntpd\t{$src, $dst|$dst, $src}", [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>; def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), - "movntdq {$src, $dst|$dst, $src}", + "movntdq\t{$src, $dst|$dst, $src}", 
[(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>; def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), - "movnti {$src, $dst|$dst, $src}", + "movnti\t{$src, $dst|$dst, $src}", [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>, TB, Requires<[HasSSE2]>; // Flush cache def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src), - "clflush $src", [(int_x86_sse2_clflush addr:$src)]>, + "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>, TB, Requires<[HasSSE2]>; // Load, store, and memory fence @@ -2043,44 +2146,44 @@ def MFENCE : I<0xAE, MRM6m, (outs), (ins), // FIXME: remove when we can teach regalloc that xor reg, reg is ok. let isReMaterializable = 1 in def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), - "pcmpeqd $dst, $dst", + "pcmpeqd\t$dst, $dst", [(set VR128:$dst, (v2f64 immAllOnesV))]>; // FR64 to 128-bit vector conversion. def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (scalar_to_vector FR64:$src)))]>; def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>; def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (scalar_to_vector GR32:$src)))]>; def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>; def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (bitconvert GR32:$src))]>; def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>; // SSE2 instructions with XS prefix def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), - "movq {$src, $dst|$dst, $src}", + "movq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS, Requires<[HasSSE2]>; def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), - "movq {$src, $dst|$dst, $src}", + "movq\t{$src, $dst|$dst, $src}", [(store (i64 (vector_extract (v2i64 VR128:$src), (iPTR 0))), addr:$dst)]>; @@ -2090,27 +2193,27 @@ def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), // def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))), // (f32 FR32:$src)>; def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (vector_extract (v2f64 VR128:$src), (iPTR 0)))]>; def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))), addr:$dst)]>; def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (vector_extract (v4i32 VR128:$src), (iPTR 0)))]>; def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), - "movd {$src, $dst|$dst, $src}", + 
"movd\t{$src, $dst|$dst, $src}", [(store (i32 (vector_extract (v4i32 VR128:$src), (iPTR 0))), addr:$dst)]>; def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (bitconvert FR32:$src))]>; def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>; @@ -2119,12 +2222,12 @@ def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src), let isTwoAddress = 1 in { def MOVLSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, FR64:$src2), - "movsd {$src2, $dst|$dst, $src2}", []>; + "movsd\t{$src2, $dst|$dst, $src2}", []>; let AddedComplexity = 15 in def MOVLPDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "movsd {$src2, $dst|$dst, $src2}", + "movsd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src1, VR128:$src2, MOVL_shuffle_mask)))]>; @@ -2132,14 +2235,14 @@ let isTwoAddress = 1 in { // Store / copy lower 64-bits of a XMM register. def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src), - "movq {$src, $dst|$dst, $src}", + "movq\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>; // Move to lower bits of a VR128 and zeroing upper bits. // Loading from memory automatically zeroing upper bits. let AddedComplexity = 20 in def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), - "movsd {$src, $dst|$dst, $src}", + "movsd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (vector_shuffle immAllZerosV, (v2f64 (scalar_to_vector @@ -2149,14 +2252,14 @@ let AddedComplexity = 20 in let AddedComplexity = 15 in // movd / movq to XMM register zero-extends def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV, (v4i32 (scalar_to_vector GR32:$src)), MOVL_shuffle_mask)))]>; let AddedComplexity = 20 in def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), - "movd {$src, $dst|$dst, $src}", + "movd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV, (v4i32 (scalar_to_vector (loadi32 addr:$src))), @@ -2165,12 +2268,12 @@ def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src), // Moving from XMM to XMM but still clear upper 64 bits. let AddedComplexity = 15 in def MOVZQI2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movq {$src, $dst|$dst, $src}", + "movq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_movl_dq VR128:$src))]>, XS, Requires<[HasSSE2]>; let AddedComplexity = 20 in def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), - "movq {$src, $dst|$dst, $src}", + "movq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_movl_dq (bitconvert (memopv2i64 addr:$src))))]>, XS, Requires<[HasSSE2]>; @@ -2180,49 +2283,36 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), // SSE3 Instructions //===----------------------------------------------------------------------===// -// SSE3 Instruction Templates: -// -// S3I - SSE3 instructions with TB and OpSize prefixes. -// S3SI - SSE3 instructions with XS prefix. -// S3DI - SSE3 instructions with XD prefix. 
- -class S3SI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, XS, Requires<[HasSSE3]>; -class S3DI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, XD, Requires<[HasSSE3]>; -class S3I o, Format F, dag outs, dag ins, string asm, list pattern> - : I, TB, OpSize, Requires<[HasSSE3]>; - // Move Instructions def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movshdup {$src, $dst|$dst, $src}", + "movshdup\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src, (undef), MOVSHDUP_shuffle_mask)))]>; def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movshdup {$src, $dst|$dst, $src}", + "movshdup\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (vector_shuffle (memopv4f32 addr:$src), (undef), MOVSHDUP_shuffle_mask)))]>; def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movsldup {$src, $dst|$dst, $src}", + "movsldup\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (vector_shuffle VR128:$src, (undef), MOVSLDUP_shuffle_mask)))]>; def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "movsldup {$src, $dst|$dst, $src}", + "movsldup\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v4f32 (vector_shuffle (memopv4f32 addr:$src), (undef), MOVSLDUP_shuffle_mask)))]>; def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movddup {$src, $dst|$dst, $src}", + "movddup\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (vector_shuffle VR128:$src, (undef), SSE_splat_lo_mask)))]>; def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), - "movddup {$src, $dst|$dst, $src}", + "movddup\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2f64 (vector_shuffle (scalar_to_vector (loadf64 addr:$src)), @@ -2233,46 +2323,46 @@ def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), let isTwoAddress = 1 in { def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "addsubps {$src2, $dst|$dst, $src2}", + "addsubps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1, VR128:$src2))]>; def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "addsubps {$src2, $dst|$dst, $src2}", + "addsubps\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1, (load addr:$src2)))]>; def ADDSUBPDrr : S3I<0xD0, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), - "addsubpd {$src2, $dst|$dst, $src2}", + "addsubpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1, VR128:$src2))]>; def ADDSUBPDrm : S3I<0xD0, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), - "addsubpd {$src2, $dst|$dst, $src2}", + "addsubpd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1, (load addr:$src2)))]>; } def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "lddqu {$src, $dst|$dst, $src}", + "lddqu\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>; // Horizontal ops class S3D_Intrr o, string OpcodeStr, Intrinsic IntId> : S3DI; class S3D_Intrm o, string OpcodeStr, Intrinsic IntId> : S3DI; class S3_Intrr o, string OpcodeStr, Intrinsic IntId> : S3I; class S3_Intrm o, string OpcodeStr, Intrinsic IntId> : S3I; let isTwoAddress = 1 in { @@ -2316,43 +2406,310 @@ let AddedComplexity = 20 in // SSSE3 Instructions 
//===----------------------------------------------------------------------===// -// SSE3 Instruction Templates: +// SSSE3 Instruction Templates: // -// SS38I - SSSE3 instructions with T8 and OpSize prefixes. -// SS3AI - SSSE3 instructions with TA and OpSize prefixes. +// SS38I - SSSE3 instructions with T8 prefix. +// SS3AI - SSSE3 instructions with TA prefix. +// +// Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit version +// uses the MMX registers. We put those instructions here because they better +// fit into the SSSE3 instruction category rather than the MMX category. class SS38I o, Format F, dag outs, dag ins, string asm, list pattern> - : I, T8, OpSize, Requires<[HasSSSE3]>; + : I, T8, Requires<[HasSSSE3]>; class SS3AI o, Format F, dag outs, dag ins, string asm, list pattern> - : I, TA, OpSize, Requires<[HasSSSE3]>; + : I, TA, Requires<[HasSSSE3]>; + +/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8. +let isTwoAddress = 1 in { + multiclass SS3I_unop_rm_int_8 opc, string OpcodeStr, + Intrinsic IntId64, Intrinsic IntId128, + bit Commutable = 0> { + def rr64 : SS38I { + let isCommutable = Commutable; + } + def rm64 : SS38I; + + def rr128 : SS38I, + OpSize { + let isCommutable = Commutable; + } + def rm128 : SS38I, OpSize; + } +} -/// SS3I_binop_rm_int - Simple SSSE3 binary operatr whose type is v2i64. +/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16. let isTwoAddress = 1 in { - multiclass SS3I_binop_rm_int opc, string OpcodeStr, Intrinsic IntId, - bit Commutable = 0> { - def rr : SS38I { + multiclass SS3I_unop_rm_int_16 opc, string OpcodeStr, + Intrinsic IntId64, Intrinsic IntId128, + bit Commutable = 0> { + def rr64 : SS38I { let isCommutable = Commutable; } - def rm : SS38I; + def rm64 : SS38I; + + def rr128 : SS38I, + OpSize { + let isCommutable = Commutable; + } + def rm128 : SS38I, OpSize; + } +} + +/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32. +let isTwoAddress = 1 in { + multiclass SS3I_unop_rm_int_32 opc, string OpcodeStr, + Intrinsic IntId64, Intrinsic IntId128, + bit Commutable = 0> { + def rr64 : SS38I { + let isCommutable = Commutable; + } + def rm64 : SS38I; + + def rr128 : SS38I, + OpSize { + let isCommutable = Commutable; + } + def rm128 : SS38I, OpSize; } } -defm PMULHRSW128 : SS3I_binop_rm_int<0x0B, "pmulhrsw", - int_x86_ssse3_pmulhrsw_128, 1>; +defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb", + int_x86_ssse3_pabs_b, + int_x86_ssse3_pabs_b_128>; +defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw", + int_x86_ssse3_pabs_w, + int_x86_ssse3_pabs_w_128>; +defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd", + int_x86_ssse3_pabs_d, + int_x86_ssse3_pabs_d_128>; + +/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8. +let isTwoAddress = 1 in { + multiclass SS3I_binop_rm_int_8 opc, string OpcodeStr, + Intrinsic IntId64, Intrinsic IntId128, + bit Commutable = 0> { + def rr64 : SS38I { + let isCommutable = Commutable; + } + def rm64 : SS38I; + + def rr128 : SS38I, + OpSize { + let isCommutable = Commutable; + } + def rm128 : SS38I, OpSize; + } +} + +/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16. 
+let isTwoAddress = 1 in { + multiclass SS3I_binop_rm_int_16 opc, string OpcodeStr, + Intrinsic IntId64, Intrinsic IntId128, + bit Commutable = 0> { + def rr64 : SS38I { + let isCommutable = Commutable; + } + def rm64 : SS38I; + + def rr128 : SS38I, + OpSize { + let isCommutable = Commutable; + } + def rm128 : SS38I, OpSize; + } +} + +/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32. +let isTwoAddress = 1 in { + multiclass SS3I_binop_rm_int_32 opc, string OpcodeStr, + Intrinsic IntId64, Intrinsic IntId128, + bit Commutable = 0> { + def rr64 : SS38I { + let isCommutable = Commutable; + } + def rm64 : SS38I; + + def rr128 : SS38I, + OpSize { + let isCommutable = Commutable; + } + def rm128 : SS38I, OpSize; + } +} + +defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw", + int_x86_ssse3_phadd_w, + int_x86_ssse3_phadd_w_128, 1>; +defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd", + int_x86_ssse3_phadd_d, + int_x86_ssse3_phadd_d_128, 1>; +defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw", + int_x86_ssse3_phadd_sw, + int_x86_ssse3_phadd_sw_128, 1>; +defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw", + int_x86_ssse3_phsub_w, + int_x86_ssse3_phsub_w_128>; +defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd", + int_x86_ssse3_phsub_d, + int_x86_ssse3_phsub_d_128>; +defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw", + int_x86_ssse3_phsub_sw, + int_x86_ssse3_phsub_sw_128>; +defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw", + int_x86_ssse3_pmadd_ub_sw, + int_x86_ssse3_pmadd_ub_sw_128, 1>; +defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw", + int_x86_ssse3_pmul_hr_sw, + int_x86_ssse3_pmul_hr_sw_128, 1>; +defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb", + int_x86_ssse3_pshuf_b, + int_x86_ssse3_pshuf_b_128>; +defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb", + int_x86_ssse3_psign_b, + int_x86_ssse3_psign_b_128>; +defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw", + int_x86_ssse3_psign_w, + int_x86_ssse3_psign_w_128>; +defm PSIGND : SS3I_binop_rm_int_32<0x09, "psignd", + int_x86_ssse3_psign_d, + int_x86_ssse3_psign_d_128>; + +let isTwoAddress = 1 in { + def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst), + (ins VR64:$src1, VR64:$src2, i16imm:$src3), + "palignr\t{$src2, $dst|$dst, $src2}", + [(set VR64:$dst, + (int_x86_ssse3_palign_r + VR64:$src1, VR64:$src2, + imm:$src3))]>; + def PALIGNR64rm : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst), + (ins VR64:$src1, i64mem:$src2, i16imm:$src3), + "palignr\t{$src2, $dst|$dst, $src2}", + [(set VR64:$dst, + (int_x86_ssse3_palign_r + VR64:$src1, + (bitconvert (memopv2i32 addr:$src2)), + imm:$src3))]>; + + def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2, i32imm:$src3), + "palignr\t{$src2, $dst|$dst, $src2}", + [(set VR128:$dst, + (int_x86_ssse3_palign_r_128 + VR128:$src1, VR128:$src2, + imm:$src3))]>, OpSize; + def PALIGNR128rm : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2, i32imm:$src3), + "palignr\t{$src2, $dst|$dst, $src2}", + [(set VR128:$dst, + (int_x86_ssse3_palign_r_128 + VR128:$src1, + (bitconvert (memopv4i32 addr:$src2)), + imm:$src3))]>, OpSize; +} //===----------------------------------------------------------------------===// // Non-Instruction Patterns //===----------------------------------------------------------------------===// // 128-bit vector undef's. 
+def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
 def : Pat<(v2f64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
 def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
 def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
@@ -2456,6 +2813,11 @@ def : Pat<(vector_shuffle (v4f32 VR128:$src1), (undef),
                    SHUFP_unary_shuffle_mask:$sm),
           (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
       Requires<[HasSSE1]>;
+// Special unary SHUFPDrri case.
+def : Pat<(vector_shuffle (v2f64 VR128:$src1), (undef),
+                   SHUFP_unary_shuffle_mask:$sm),
+          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
+      Requires<[HasSSE2]>;
 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
 def : Pat<(vector_shuffle (memopv4f32 addr:$src1), (undef),
                    SHUFP_unary_shuffle_mask:$sm),
@@ -2620,13 +2982,13 @@ def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
           (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
-                 (load addr:$src2))),
+                 (memopv2i64 addr:$src2))),
           (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
-                 (load addr:$src2))),
+                 (memopv2i64 addr:$src2))),
           (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
-                 (load addr:$src2))),
+                 (memopv2i64 addr:$src2))),
           (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
 // Use movaps / movups for SSE integer load / store (one byte shorter).
@@ -2655,3 +3017,11 @@ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
           (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
           (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
+
+// (vextract (v4i32 bc (v4f32 s2v (f32 load $addr))), 0) -> (i32 load $addr)
+def : Pat<(vector_extract
+            (bc_v4i32 (v4f32 (scalar_to_vector (loadf32 addr:$src)))), (iPTR 0)),
+          (MOV32rm addr:$src)>, Requires<[HasSSE2]>;
+def : Pat<(vector_extract
+            (bc_v2i64 (v2f64 (scalar_to_vector (loadf64 addr:$src)))), (iPTR 0)),
+          (MOV64rm addr:$src)>, Requires<[HasSSE2, In64BitMode]>;
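
As a reading aid for the defm-based SSE2 definitions above: a single instantiation such as defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>; stamps out one register-register and one register-memory record, named by appending the multiclass suffixes to the defm prefix (PACKUSWBrr, PACKUSWBrm). A minimal sketch of that expansion follows; the operand lists and selection patterns are assumptions for illustration, not quotes from this revision.

// Sketch of roughly what the PACKUSWB defm expands to (assumed, for illustration).
def PACKUSWBrr : PDI<0x67, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, VR128:$src2),
                     "packuswb\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_packuswb_128 VR128:$src1,
                                                                  VR128:$src2))]>;
def PACKUSWBrm : PDI<0x67, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i128mem:$src2),
                     "packuswb\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst, (int_x86_sse2_packuswb_128 VR128:$src1,
                                          (bitconvert (memopv2i64 addr:$src2))))]>;

Because the memory form goes through memopv2i64 rather than a bare load, only loads the selector can prove sufficiently aligned get folded into the rm variant, which is the same reason several load operands in the hunks above were switched from load to memopv2f64/memopv2i64.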
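
The SSSE3 multiclasses work the same way but emit two widths per defm: an MMX (VR64) pair and an XMM (VR128) pair, with the OpSize (0x66) prefix only on the 128-bit records. A rough sketch of what defm PABSB : SS3I_unop_rm_int_8<0x1C, "pabsb", int_x86_ssse3_pabs_b, int_x86_ssse3_pabs_b_128>; expands to; again, the operand lists and intrinsic patterns are illustrative assumptions rather than quotes from this revision.

// Sketch only; memory operand types and patterns are assumed.
def PABSBrr64  : SS38I<0x1C, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                       "pabsb\t{$src, $dst|$dst, $src}",
                       [(set VR64:$dst, (int_x86_ssse3_pabs_b VR64:$src))]>;
def PABSBrm64  : SS38I<0x1C, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                       "pabsb\t{$src, $dst|$dst, $src}",
                       [(set VR64:$dst,
                          (int_x86_ssse3_pabs_b
                            (bitconvert (memopv8i8 addr:$src))))]>;
def PABSBrr128 : SS38I<0x1C, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "pabsb\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_ssse3_pabs_b_128 VR128:$src))]>,
                       OpSize;
def PABSBrm128 : SS38I<0x1C, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "pabsb\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                          (int_x86_ssse3_pabs_b_128
                            (bitconvert (memopv16i8 addr:$src))))]>,
                       OpSize;

The shared 0F 38 escape comes from the T8 prefix on the SS38I base class; adding OpSize is what distinguishes the 128-bit encodings from their MMX counterparts.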