diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index 604eea4fd9e..d484695f60e 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by Evan Cheng and is distributed under the
-// University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -13,18 +13,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-// Some 'special' instructions
-def IMPLICIT_DEF_VR64 : I<0, Pseudo, (outs VR64:$dst), (ins),
-                          "#IMPLICIT_DEF $dst",
-                          [(set VR64:$dst, (v8i8 (undef)))]>,
-                        Requires<[HasMMX]>;
-
-// 64-bit vector undef's.
-def : Pat<(v8i8  (undef)), (IMPLICIT_DEF_VR64)>;
-def : Pat<(v4i16 (undef)), (IMPLICIT_DEF_VR64)>;
-def : Pat<(v2i32 (undef)), (IMPLICIT_DEF_VR64)>;
-def : Pat<(v1i64 (undef)), (IMPLICIT_DEF_VR64)>;
-
 //===----------------------------------------------------------------------===//
 // MMX Pattern Fragments
 //===----------------------------------------------------------------------===//
@@ -116,30 +104,35 @@ let isTwoAddress = 1 in {
 //
 multiclass MMXI_binop_rm_v1i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                bit Commutable = 0> {
   def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                 (ins VR64:$src1, VR64:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR64:$dst, (OpNode VR64:$src1, VR64:$src2))]> {
     let isCommutable = Commutable;
   }
   def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                 (ins VR64:$src1, i64mem:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR64:$dst,
                   (OpNode VR64:$src1, (load_mmx addr:$src2)))]>;
 }
 
 multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
-                              string OpcodeStr, Intrinsic IntId> {
+                              string OpcodeStr, Intrinsic IntId,
+                              Intrinsic IntId2> {
   def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                 (ins VR64:$src1, VR64:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]>;
   def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                 (ins VR64:$src1, i64mem:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR64:$dst, (IntId VR64:$src1,
                                   (bitconvert (load_mmx addr:$src2))))]>;
   def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst),
                   (ins VR64:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
-                  [(set VR64:$dst, (IntId VR64:$src1, (i32 imm:$src2)))]>;
+                  [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))]>;
 }
 }
@@ -156,19 +149,28 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", [(int_x86_mmx_femms)]>;
 
 // Data Transfer Instructions
 def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
-                        "movd\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in
+                        "movd\t{$src, $dst|$dst, $src}",
+                        [(set VR64:$dst, (v2i32 (scalar_to_vector GR32:$src)))]>;
+let isSimpleLoad = 1, isReMaterializable = 1 in
 def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
-                        "movd\t{$src, $dst|$dst, $src}", []>;
+                        "movd\t{$src, $dst|$dst, $src}",
+                        [(set VR64:$dst, (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>;
+let mayStore = 1 in
 def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
                         "movd\t{$src, $dst|$dst, $src}", []>;
+let neverHasSideEffects = 1 in
 def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
                              "movd\t{$src, $dst|$dst, $src}", []>;
+let neverHasSideEffects = 1 in
+def MMX_MOVD64from64rr : MMXRI<0x7E, MRMSrcReg, (outs GR64:$dst), (ins VR64:$src),
+                               "movd\t{$src, $dst|$dst, $src}", []>;
+
+let neverHasSideEffects = 1 in
 def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                         "movq\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in
+let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
 def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (load_mmx addr:$src))]>;
@@ -179,13 +181,16 @@ def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
 def MMX_MOVDQ2Qrr : MMXID<0xD6, MRMDestMem, (outs VR64:$dst), (ins VR128:$src),
                           "movdq2q\t{$src, $dst|$dst, $src}",
                           [(set VR64:$dst,
-                           (v1i64 (vector_extract (v2i64 VR128:$src),
-                                  (iPTR 0))))]>;
+                           (v1i64 (bitconvert
+                            (i64 (vector_extract (v2i64 VR128:$src),
+                                  (iPTR 0))))))]>;
 
 def MMX_MOVQ2DQrr : MMXIS<0xD6, MRMDestMem, (outs VR128:$dst), (ins VR64:$src),
                           "movq2dq\t{$src, $dst|$dst, $src}",
-                          [(set VR128:$dst,
-                            (bitconvert (v1i64 VR64:$src)))]>;
+                          [(set VR128:$dst,
+                            (v2i64 (vector_shuffle immAllZerosV,
+                              (v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src)))),
+                              MOVL_shuffle_mask)))]>;
 
 def MMX_MOVNTQmr  : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
                          "movntq\t{$src, $dst|$dst, $src}",
@@ -193,14 +198,14 @@ def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
 
 let AddedComplexity = 15 in
 // movd to MMX register zero-extends
-def MMX_MOVZDI2PDIrr : MMX2I<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
+def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
                              "movd\t{$src, $dst|$dst, $src}",
                              [(set VR64:$dst,
                                (v2i32 (vector_shuffle immAllZerosV,
                                        (v2i32 (scalar_to_vector GR32:$src)),
                                        MMX_MOVL_shuffle_mask)))]>;
 let AddedComplexity = 20 in
-def MMX_MOVZDI2PDIrm : MMX2I<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
+def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
                              "movd\t{$src, $dst|$dst, $src}",
                              [(set VR64:$dst,
                                (v2i32 (vector_shuffle immAllZerosV,
@@ -275,23 +280,23 @@ let isTwoAddress = 1 in {
 
 // Shift Instructions
 defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
-                                    int_x86_mmx_psrl_w>;
+                                    int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
 defm MMX_PSRLD : MMXI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
-                                    int_x86_mmx_psrl_d>;
+                                    int_x86_mmx_psrl_d, int_x86_mmx_psrli_d>;
 defm MMX_PSRLQ : MMXI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
-                                    int_x86_mmx_psrl_q>;
+                                    int_x86_mmx_psrl_q, int_x86_mmx_psrli_q>;
 defm MMX_PSLLW : MMXI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
-                                    int_x86_mmx_psll_w>;
+                                    int_x86_mmx_psll_w, int_x86_mmx_pslli_w>;
 defm MMX_PSLLD : MMXI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
-                                    int_x86_mmx_psll_d>;
+                                    int_x86_mmx_psll_d, int_x86_mmx_pslli_d>;
 defm MMX_PSLLQ : MMXI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
-                                    int_x86_mmx_psll_q>;
+                                    int_x86_mmx_psll_q, int_x86_mmx_pslli_q>;
 defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
-                                    int_x86_mmx_psra_w>;
+                                    int_x86_mmx_psra_w, int_x86_mmx_psrai_w>;
 defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
-                                    int_x86_mmx_psra_d>;
+                                    int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;
 
 // Comparison Instructions
 defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
@@ -416,35 +421,44 @@ def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
                                     MMX_PSHUFW_shuffle_mask:$src2)))]>;
 
 // -- Conversion Instructions
+let neverHasSideEffects = 1 in {
 def MMX_CVTPD2PIrr  : MMX2I<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                             "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;
+let mayLoad = 1 in
 def MMX_CVTPD2PIrm  : MMX2I<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                             "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;
 
 def MMX_CVTPI2PDrr  : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                             "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
+let mayLoad = 1 in
 def MMX_CVTPI2PDrm  : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                             "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
 
 def MMX_CVTPI2PSrr  : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
"cvtpi2ps\t{$src, $dst|$dst, $src}", []>; +let mayLoad = 1 in def MMX_CVTPI2PSrm : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), "cvtpi2ps\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPS2PIrr : MMXI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvtps2pi\t{$src, $dst|$dst, $src}", []>; +let mayLoad = 1 in def MMX_CVTPS2PIrm : MMXI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src), "cvtps2pi\t{$src, $dst|$dst, $src}", []>; def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvttpd2pi\t{$src, $dst|$dst, $src}", []>; +let mayLoad = 1 in def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src), "cvttpd2pi\t{$src, $dst|$dst, $src}", []>; def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvttps2pi\t{$src, $dst|$dst, $src}", []>; +let mayLoad = 1 in def MMX_CVTTPS2PIrm : MMXI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src), "cvttps2pi\t{$src, $dst|$dst, $src}", []>; +} // end neverHasSideEffects + // Extract / Insert def MMX_X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>; @@ -476,24 +490,29 @@ def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src), [(set GR32:$dst, (int_x86_mmx_pmovmskb VR64:$src))]>; // Misc. +let Uses = [EDI] in def MMX_MASKMOVQ : MMXI<0xF7, MRMDestMem, (outs), (ins VR64:$src, VR64:$mask), "maskmovq\t{$mask, $src|$src, $mask}", - [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>, - Imp<[EDI],[]>; + [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>; //===----------------------------------------------------------------------===// // Alias Instructions //===----------------------------------------------------------------------===// // Alias instructions that map zero vector to pxor. -// FIXME: remove when we can teach regalloc that xor reg, reg is ok. let isReMaterializable = 1 in { def MMX_V_SET0 : MMXI<0xEF, MRMInitReg, (outs VR64:$dst), (ins), "pxor\t$dst, $dst", - [(set VR64:$dst, (v1i64 immAllZerosV))]>; + [(set VR64:$dst, (v2i32 immAllZerosV))]>; def MMX_V_SETALLONES : MMXI<0x76, MRMInitReg, (outs VR64:$dst), (ins), "pcmpeqd\t$dst, $dst", - [(set VR64:$dst, (v1i64 immAllOnesV))]>; + [(set VR64:$dst, (v2i32 immAllOnesV))]>; +} + +let Predicates = [HasMMX] in { + def : Pat<(v1i64 immAllZerosV), (MMX_V_SET0)>; + def : Pat<(v4i16 immAllZerosV), (MMX_V_SET0)>; + def : Pat<(v8i8 immAllZerosV), (MMX_V_SET0)>; } //===----------------------------------------------------------------------===// @@ -510,18 +529,6 @@ def : Pat<(store (v2i32 VR64:$src), addr:$dst), def : Pat<(store (v1i64 VR64:$src), addr:$dst), (MMX_MOVQ64mr addr:$dst, VR64:$src)>; -// 64-bit vector all zero's. -def : Pat<(v8i8 immAllZerosV), (MMX_V_SET0)>; -def : Pat<(v4i16 immAllZerosV), (MMX_V_SET0)>; -def : Pat<(v2i32 immAllZerosV), (MMX_V_SET0)>; -def : Pat<(v1i64 immAllZerosV), (MMX_V_SET0)>; - -// 64-bit vector all one's. -def : Pat<(v8i8 immAllOnesV), (MMX_V_SETALLONES)>; -def : Pat<(v4i16 immAllOnesV), (MMX_V_SETALLONES)>; -def : Pat<(v2i32 immAllOnesV), (MMX_V_SETALLONES)>; -def : Pat<(v1i64 immAllOnesV), (MMX_V_SETALLONES)>; - // Bit convert. 
 def : Pat<(v8i8  (bitconvert (v1i64 VR64:$src))), (v8i8  VR64:$src)>;
 def : Pat<(v8i8  (bitconvert (v2i32 VR64:$src))), (v8i8  VR64:$src)>;
@@ -545,28 +552,34 @@ def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
           (MMX_MOVD64to64rr GR64:$src)>;
 def : Pat<(v8i8  (bitconvert (i64 GR64:$src))),
           (MMX_MOVD64to64rr GR64:$src)>;
-
-def MMX_X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>;
+def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
+          (MMX_MOVD64from64rr VR64:$src)>;
+def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
+          (MMX_MOVD64from64rr VR64:$src)>;
+def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
+          (MMX_MOVD64from64rr VR64:$src)>;
+def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
+          (MMX_MOVD64from64rr VR64:$src)>;
 
 // Move scalar to XMM zero-extended
 // movd to XMM register zero-extends
 let AddedComplexity = 15 in {
-  def : Pat<(v8i8 (vector_shuffle immAllZerosV,
-                    (v8i8 (MMX_X86s2vec GR32:$src)), MMX_MOVL_shuffle_mask)),
-            (MMX_MOVZDI2PDIrr GR32:$src)>;
-  def : Pat<(v4i16 (vector_shuffle immAllZerosV,
-                    (v4i16 (MMX_X86s2vec GR32:$src)), MMX_MOVL_shuffle_mask)),
+  def : Pat<(v8i8 (vector_shuffle immAllZerosV_bc,
+                    (bc_v8i8 (v2i32 (scalar_to_vector GR32:$src))),
+                    MMX_MOVL_shuffle_mask)),
             (MMX_MOVZDI2PDIrr GR32:$src)>;
-  def : Pat<(v2i32 (vector_shuffle immAllZerosV,
-                    (v2i32 (MMX_X86s2vec GR32:$src)), MMX_MOVL_shuffle_mask)),
+  def : Pat<(v4i16 (vector_shuffle immAllZerosV_bc,
+                    (bc_v4i16 (v2i32 (scalar_to_vector GR32:$src))),
+                    MMX_MOVL_shuffle_mask)),
             (MMX_MOVZDI2PDIrr GR32:$src)>;
 }
 
-// Scalar to v2i32 / v4i16 / v8i8. The source may be a GR32, but only the lower
+// Scalar to v4i16 / v8i8. The source may be a GR32, but only the lower
 // 8 or 16-bits matter.
-def : Pat<(v8i8  (MMX_X86s2vec GR32:$src)), (MMX_MOVD64rr GR32:$src)>;
-def : Pat<(v4i16 (MMX_X86s2vec GR32:$src)), (MMX_MOVD64rr GR32:$src)>;
-def : Pat<(v2i32 (MMX_X86s2vec GR32:$src)), (MMX_MOVD64rr GR32:$src)>;
+def : Pat<(bc_v8i8  (v2i32 (scalar_to_vector GR32:$src))),
+          (MMX_MOVD64rr GR32:$src)>;
+def : Pat<(bc_v4i16 (v2i32 (scalar_to_vector GR32:$src))),
+          (MMX_MOVD64rr GR32:$src)>;
 
 // Patterns to perform canonical versions of vector shuffling.
 let AddedComplexity = 10 in {
@@ -606,19 +619,35 @@ let AddedComplexity = 20 in {
 def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
                   VR64:$src2)), (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV))),
+def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
                   VR64:$src2)), (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV))),
+def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV_bc))),
                   VR64:$src2)), (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
 def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
                   (load addr:$src2))),
           (MMX_PANDNrm VR64:$src1, addr:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV))),
+def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
                   (load addr:$src2))),
           (MMX_PANDNrm VR64:$src1, addr:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV))),
+def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV_bc))),
                   (load addr:$src2))),
           (MMX_PANDNrm VR64:$src1, addr:$src2)>;
+
+// Move MMX to lower 64-bit of XMM
+def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src)))),
+          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
+
+// Move lower 64-bit of XMM to MMX.
+def : Pat<(v2i32 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
+                                                  (iPTR 0))))),
+          (v2i32 (MMX_MOVDQ2Qrr VR128:$src))>;
+def : Pat<(v4i16 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
+                                                  (iPTR 0))))),
+          (v4i16 (MMX_MOVDQ2Qrr VR128:$src))>;
+def : Pat<(v8i8 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
+                                                 (iPTR 0))))),
+          (v8i8 (MMX_MOVDQ2Qrr VR128:$src))>;
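
Note on the MMXI_binop_rmi_int change above: the effect of the new IntId2 parameter is easiest to see by expanding one instantiation by hand. The sketch below is illustrative only; it is not part of the patch, the record names simply follow TableGen's defm-prefix convention, and the bodies assume the multiclass as reconstructed in the first hunk. The rr/rm forms keep the register-count intrinsic, while the ri form now selects the separate shift-by-immediate intrinsic instead of reusing the register-count one:

    // Hand expansion (sketch) of:
    //   defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
    //                                       int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
    def MMX_PSRLWrr : MMXI<0xD1, MRMSrcReg, (outs VR64:$dst),
                           (ins VR64:$src1, VR64:$src2),
                           "psrlw\t{$src2, $dst|$dst, $src2}",
                           [(set VR64:$dst,
                             (int_x86_mmx_psrl_w VR64:$src1, VR64:$src2))]>;
    def MMX_PSRLWrm : MMXI<0xD1, MRMSrcMem, (outs VR64:$dst),
                           (ins VR64:$src1, i64mem:$src2),
                           "psrlw\t{$src2, $dst|$dst, $src2}",
                           [(set VR64:$dst,
                             (int_x86_mmx_psrl_w VR64:$src1,
                               (bitconvert (load_mmx addr:$src2))))]>;
    // The immediate encoding (opcode 0x71, /2) now maps directly to the
    // psrli intrinsic via IntId2 rather than rounding through psrl.
    def MMX_PSRLWri : MMXIi8<0x71, MRM2r, (outs VR64:$dst),
                             (ins VR64:$src1, i32i8imm:$src2),
                             "psrlw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (int_x86_mmx_psrli_w VR64:$src1, (i32 imm:$src2)))]>;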