X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrInfo.td;h=f3b51dc127dce22f789de580c4b23df50d22dda1;hb=627c00b663f881600b4af1ae135af6ee2cb19c1a;hp=1a0fab3b8644e72614c329db9632f30d8bc39255;hpb=e5f6204cd5d2306379bf8954e280ad35619a38b5;p=oota-llvm.git

diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 1a0fab3b864..f3b51dc127d 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -1,4 +1,4 @@
-//===- X86InstrInfo.td - Describe the X86 Instruction Set -------*- C++ -*-===//
+//===- X86InstrInfo.td - Describe the X86 Instruction Set --*- tablegen -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -37,9 +37,9 @@ def SDTX86SetCC : SDTypeProfile<1, 2,
 
 def SDTX86Ret : SDTypeProfile<0, 1, [SDTCisVT<0, i16>]>;
 
-def SDT_X86CallSeqStart : SDTypeProfile<0, 1, [ SDTCisVT<0, i32> ]>;
-def SDT_X86CallSeqEnd   : SDTypeProfile<0, 2, [ SDTCisVT<0, i32>,
-                                                SDTCisVT<1, i32> ]>;
+def SDT_X86CallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
+def SDT_X86CallSeqEnd   : SDCallSeqEnd<[ SDTCisVT<0, i32>,
+                                         SDTCisVT<1, i32> ]>;
 
 def SDT_X86Call : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
 
@@ -55,6 +55,10 @@ def SDT_X86TLSTP : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
 
 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
 
+def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
+
+def X86bsf     : SDNode<"X86ISD::BSF",      SDTIntUnaryOp>;
+def X86bsr     : SDNode<"X86ISD::BSR",      SDTIntUnaryOp>;
 def X86shld    : SDNode<"X86ISD::SHLD",     SDTIntShiftDOp>;
 def X86shrd    : SDNode<"X86ISD::SHRD",     SDTIntShiftDOp>;
 
@@ -73,7 +77,7 @@ def X86callseq_start :
                        [SDNPHasChain, SDNPOutFlag]>;
 def X86callseq_end :
                  SDNode<"ISD::CALLSEQ_END",   SDT_X86CallSeqEnd,
-                        [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+                        [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
 
 def X86call    : SDNode<"X86ISD::CALL",     SDT_X86Call,
                         [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
@@ -99,6 +103,8 @@ def X86TLStp : SDNode<"X86ISD::THREAD_POINTER", SDT_X86TLSTP, []>;
 
 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                         [SDNPHasChain]>;
+def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
+                        [SDNPHasChain, SDNPOptInFlag]>;
 
 //===----------------------------------------------------------------------===//
 // X86 Operand Definitions.
@@ -249,7 +255,8 @@ def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
 // Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become sub / add
 // which can clobber EFLAGS.
 let Defs = [ESP, EFLAGS], Uses = [ESP] in {
-def ADJCALLSTACKDOWN : I<0, Pseudo, (outs), (ins i32imm:$amt), "#ADJCALLSTACKDOWN",
+def ADJCALLSTACKDOWN : I<0, Pseudo, (outs), (ins i32imm:$amt),
+                         "#ADJCALLSTACKDOWN",
                       [(X86callseq_start imm:$amt)]>;
 def ADJCALLSTACKUP   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                          "#ADJCALLSTACKUP",
@@ -257,6 +264,7 @@ def ADJCALLSTACKUP   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
 }
 
 def IMPLICIT_USE : I<0, Pseudo, (outs), (ins variable_ops), "#IMPLICIT_USE", []>;
+let isImplicitDef = 1 in {
 def IMPLICIT_DEF : I<0, Pseudo, (outs variable_ops), (ins), "#IMPLICIT_DEF", []>;
 def IMPLICIT_DEF_GR8  : I<0, Pseudo, (outs GR8:$dst), (ins),
                           "#IMPLICIT_DEF $dst", [(set GR8:$dst, (undef))]>;
@@ -268,6 +276,7 @@ def IMPLICIT_DEF_GR16  : I<0, Pseudo, (outs GR16:$dst), (ins),
 def IMPLICIT_DEF_GR32  : I<0, Pseudo, (outs GR32:$dst), (ins),
                           "#IMPLICIT_DEF $dst", [(set GR32:$dst, (undef))]>;
+}
 
 // Nop
 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
@@ -290,11 +299,11 @@ let isBranch = 1, isTerminator = 1 in
   class IBr<bits<8> opcode, dag ins, string asm, list<dag> pattern> :
         I<opcode, RawFrm, (outs), ins, asm, pattern>;
 
-// Indirect branches
 let isBranch = 1, isBarrier = 1 in
   def JMP : IBr<0xE9, (ins brtarget:$dst), "jmp\t$dst", [(br bb:$dst)]>;
 
-let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
+// Indirect branches
+let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
   def JMP32r     : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
                      [(brind GR32:$dst)]>;
   def JMP32m     : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
@@ -356,15 +365,30 @@ let isCall = 1 in
   }
 
 // Tail call stuff.
+
+def TAILCALL : I<0, Pseudo, (outs), (ins ),
+                 "#TAILCALL",
+                 []>;
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset),
+                 "#TC_RETURN $dst $offset",
+                 []>;
+
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-  def TAILJMPd : IBr<0xE9, (ins i32imm:$dst), "jmp\t${dst:call} # TAIL CALL",
+def TCRETURNri : I<0, Pseudo, (outs), (ins GR32:$dst, i32imm:$offset),
+                 "#TC_RETURN $dst $offset",
                  []>;
+
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp\t{*}$dst # TAIL CALL",
+  def TAILJMPd : IBr<0xE9, (ins i32imm:$dst), "jmp\t${dst:call} # TAILCALL",
                  []>;
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst # TAILCALL",
+                 []>;
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
   def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst),
-                 "jmp\t{*}$dst # TAIL CALL", []>;
+                 "jmp\t{*}$dst # TAILCALL", []>;
 
 //===----------------------------------------------------------------------===//
 //  Miscellaneous Instructions...
@@ -423,6 +447,39 @@ def XCHG32rm : I<0x87, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
                 "xchg{l}\t{$src2|$src1}, {$src1|$src2}", []>;
 
+// Bit scan instructions.
+let Defs = [EFLAGS] in {
+def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+                "bsf{w}\t{$src, $dst|$dst, $src}",
+                [(set GR16:$dst, (X86bsf GR16:$src)), (implicit EFLAGS)]>, TB;
+def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+                "bsf{w}\t{$src, $dst|$dst, $src}",
+                [(set GR16:$dst, (X86bsf (loadi16 addr:$src))),
+                 (implicit EFLAGS)]>, TB;
+def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+                "bsf{l}\t{$src, $dst|$dst, $src}",
+                [(set GR32:$dst, (X86bsf GR32:$src)), (implicit EFLAGS)]>, TB;
+def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+                "bsf{l}\t{$src, $dst|$dst, $src}",
+                [(set GR32:$dst, (X86bsf (loadi32 addr:$src))),
+                 (implicit EFLAGS)]>, TB;
+
+def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
+                "bsr{w}\t{$src, $dst|$dst, $src}",
+                [(set GR16:$dst, (X86bsr GR16:$src)), (implicit EFLAGS)]>, TB;
+def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
+                "bsr{w}\t{$src, $dst|$dst, $src}",
+                [(set GR16:$dst, (X86bsr (loadi16 addr:$src))),
+                 (implicit EFLAGS)]>, TB;
+def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
+                "bsr{l}\t{$src, $dst|$dst, $src}",
+                [(set GR32:$dst, (X86bsr GR32:$src)), (implicit EFLAGS)]>, TB;
+def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
+                "bsr{l}\t{$src, $dst|$dst, $src}",
+                [(set GR32:$dst, (X86bsr (loadi32 addr:$src))),
+                 (implicit EFLAGS)]>, TB;
+} // Defs = [EFLAGS]
+
 def LEA16r   : I<0x8D, MRMSrcMem, (outs GR16:$dst), (ins i32mem:$src),
                  "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
@@ -506,7 +563,7 @@ def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
                 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
                 "mov{l}\t{$src, $dst|$dst, $src}", []>;
-let isReMaterializable = 1 in {
+let isReMaterializable = 1, neverHasSideEffects = 1 in {
 def MOV8ri  : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
                    "mov{b}\t{$src, $dst|$dst, $src}",
                    [(set GR8:$dst, imm:$src)]>;
@@ -527,7 +584,7 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
                    "mov{l}\t{$src, $dst|$dst, $src}",
                    [(store (i32 imm:$src), addr:$dst)]>;
 
-let isLoad = 1 in {
+let isLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
 def MOV8rm  : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
                 "mov{b}\t{$src, $dst|$dst, $src}",
                 [(set GR8:$dst, (load addr:$src))]>;
@@ -647,30 +704,19 @@ let isTwoAddress = 1 in {
 
 // Conditional moves
 let Uses = [EFLAGS] in {
+let isCommutable = 1 in {
 def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_B, EFLAGS))]>,
                   TB, OpSize;
-def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovb\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_B, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_B, EFLAGS))]>,
                   TB;
-def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovb\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_B, EFLAGS))]>,
-                  TB;
 def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
@@ -678,324 +724,327 @@ def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_AE, EFLAGS))]>,
                   TB, OpSize;
-def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovae\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_AE, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_AE, EFLAGS))]>,
                   TB;
-def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovae\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_AE, EFLAGS))]>,
-                  TB;
-
 def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_E, EFLAGS))]>,
                   TB, OpSize;
-def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmove\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_E, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_E, EFLAGS))]>,
                   TB;
-def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmove\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_E, EFLAGS))]>,
-                  TB;
-
 def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NE, EFLAGS))]>,
                   TB, OpSize;
-def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovne\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NE, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NE, EFLAGS))]>,
                   TB;
-def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovne\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_NE, EFLAGS))]>,
-                  TB;
-
 def CMOVBE16rr: I<0x46, MRMSrcReg,       // if <=u, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_BE, EFLAGS))]>,
                   TB, OpSize;
-def CMOVBE16rm: I<0x46, MRMSrcMem,       // if <=u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovbe\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_BE, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVBE32rr: I<0x46, MRMSrcReg,       // if <=u, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_BE, EFLAGS))]>,
                   TB;
-def CMOVBE32rm: I<0x46, MRMSrcMem,       // if <=u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovbe\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_BE, EFLAGS))]>,
-                  TB;
-
 def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_A, EFLAGS))]>,
                   TB, OpSize;
-def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmova\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_A, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_A, EFLAGS))]>,
                   TB;
-def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmova\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_A, EFLAGS))]>,
-                  TB;
-
 def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_L, EFLAGS))]>,
                   TB, OpSize;
-def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovl\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_L, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_L, EFLAGS))]>,
                   TB;
-def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovl\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_L, EFLAGS))]>,
-                  TB;
-
 def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_GE, EFLAGS))]>,
                   TB, OpSize;
-def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovge\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_GE, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_GE, EFLAGS))]>,
                   TB;
-def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovge\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_GE, EFLAGS))]>,
-                  TB;
-
 def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_LE, EFLAGS))]>,
                   TB, OpSize;
-def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovle\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_LE, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_LE, EFLAGS))]>,
                   TB;
-def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovle\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_LE, EFLAGS))]>,
-                  TB;
-
 def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_G, EFLAGS))]>,
                   TB, OpSize;
-def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovg\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_G, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_G, EFLAGS))]>,
                   TB;
-def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovg\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_G, EFLAGS))]>,
-                  TB;
-
 def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_S, EFLAGS))]>,
                   TB, OpSize;
-def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovs\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_S, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_S, EFLAGS))]>,
                   TB;
-def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovs\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_S, EFLAGS))]>,
-                  TB;
-
 def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NS, EFLAGS))]>,
                   TB, OpSize;
-def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovns\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NS, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NS, EFLAGS))]>,
                   TB;
-def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovns\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_NS, EFLAGS))]>,
-                  TB;
-
 def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_P, EFLAGS))]>,
                   TB, OpSize;
-def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovp\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_P, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_P, EFLAGS))]>,
                   TB;
-def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovp\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                   X86_COND_P, EFLAGS))]>,
-                  TB;
-
 def CMOVNP16rr : I<0x4B, MRMSrcReg,       // if !parity, GR16 = GR16
                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "cmovnp\t{$src2, $dst|$dst, $src2}",
                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                   TB, OpSize;
-def CMOVNP16rm : I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovnp\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                   X86_COND_NP, EFLAGS))]>,
-                  TB, OpSize;
 def CMOVNP32rr : I<0x4B, MRMSrcReg,       // if !parity, GR32 = GR32
                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
$dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NP, EFLAGS))]>, TB; +} // isCommutable = 1 + def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; + +def CMOVB16rm : I<0x42, MRMSrcMem, // if , + TB, OpSize; +def CMOVB32rm : I<0x42, MRMSrcMem, // if , + TB; +def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16] + (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), + "cmovae\t{$src2, $dst|$dst, $src2}", + [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), + X86_COND_AE, EFLAGS))]>, + TB, OpSize; +def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32] + (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + "cmovae\t{$src2, $dst|$dst, $src2}", + [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), + X86_COND_AE, EFLAGS))]>, + TB; +def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16] + (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), + "cmove\t{$src2, $dst|$dst, $src2}", + [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), + X86_COND_E, EFLAGS))]>, + TB, OpSize; +def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32] + (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + "cmove\t{$src2, $dst|$dst, $src2}", + [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), + X86_COND_E, EFLAGS))]>, + TB; +def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16] + (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), + "cmovne\t{$src2, $dst|$dst, $src2}", + [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), + X86_COND_NE, EFLAGS))]>, + TB, OpSize; +def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32] + (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + "cmovne\t{$src2, $dst|$dst, $src2}", + [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), + X86_COND_NE, EFLAGS))]>, + TB; +def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16] + (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), + "cmovbe\t{$src2, $dst|$dst, $src2}", + [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), + X86_COND_BE, EFLAGS))]>, + TB, OpSize; +def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32] + (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + "cmovbe\t{$src2, $dst|$dst, $src2}", + [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), + X86_COND_BE, EFLAGS))]>, + TB; +def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16] + (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), + "cmova\t{$src2, $dst|$dst, $src2}", + [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), + X86_COND_A, EFLAGS))]>, + TB, OpSize; +def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32] + (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + "cmova\t{$src2, $dst|$dst, $src2}", + [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), + X86_COND_A, EFLAGS))]>, + TB; +def CMOVL16rm : I<0x4C, MRMSrcMem, // if , + TB, OpSize; +def CMOVL32rm : I<0x4C, MRMSrcMem, // if , + TB; +def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16] + (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), + "cmovge\t{$src2, $dst|$dst, $src2}", + [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), + X86_COND_GE, EFLAGS))]>, + TB, OpSize; +def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32] + (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + "cmovge\t{$src2, $dst|$dst, $src2}", + [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 
+                                   X86_COND_GE, EFLAGS))]>,
+                  TB;
+def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
+                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+                  "cmovle\t{$src2, $dst|$dst, $src2}",
+                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                   X86_COND_LE, EFLAGS))]>,
+                  TB, OpSize;
+def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
+                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+                  "cmovle\t{$src2, $dst|$dst, $src2}",
+                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+                                   X86_COND_LE, EFLAGS))]>,
+                  TB;
+def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
+                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+                  "cmovg\t{$src2, $dst|$dst, $src2}",
+                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                   X86_COND_G, EFLAGS))]>,
+                  TB, OpSize;
+def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
+                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+                  "cmovg\t{$src2, $dst|$dst, $src2}",
+                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+                                   X86_COND_G, EFLAGS))]>,
+                  TB;
+def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
+                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+                  "cmovs\t{$src2, $dst|$dst, $src2}",
+                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                   X86_COND_S, EFLAGS))]>,
+                  TB, OpSize;
+def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
+                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+                  "cmovs\t{$src2, $dst|$dst, $src2}",
+                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+                                   X86_COND_S, EFLAGS))]>,
+                  TB;
+def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
+                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+                  "cmovns\t{$src2, $dst|$dst, $src2}",
+                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                   X86_COND_NS, EFLAGS))]>,
+                  TB, OpSize;
+def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
+                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+                  "cmovns\t{$src2, $dst|$dst, $src2}",
+                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+                                   X86_COND_NS, EFLAGS))]>,
+                  TB;
+def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
+                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+                  "cmovp\t{$src2, $dst|$dst, $src2}",
+                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                   X86_COND_P, EFLAGS))]>,
+                  TB, OpSize;
+def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
+                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+                  "cmovp\t{$src2, $dst|$dst, $src2}",
+                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+                                   X86_COND_P, EFLAGS))]>,
+                  TB;
+def CMOVNP16rm : I<0x4B, MRMSrcMem,       // if !parity, GR16 = [mem16]
+                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+                  "cmovnp\t{$src2, $dst|$dst, $src2}",
+                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                   X86_COND_NP, EFLAGS))]>,
+                  TB, OpSize;
 } // Uses = [EFLAGS]
 
@@ -1051,9 +1100,11 @@ let isTwoAddress = 0, CodeSize = 2 in {
   def INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
                [(store (add (loadi8 addr:$dst), 1), addr:$dst)]>;
   def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
-               [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>, OpSize;
+               [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
+               OpSize, Requires<[In32BitMode]>;
   def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
-               [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>;
+               [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
+               Requires<[In32BitMode]>;
 }
 
 let CodeSize = 2 in
@@ -1071,9 +1122,11 @@ let isTwoAddress = 0, CodeSize = 2 in {
   def DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
                [(store (add (loadi8 addr:$dst), -1), addr:$dst)]>;
   def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
-               [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>, OpSize;
+               [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
+               OpSize, Requires<[In32BitMode]>;
   def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
-               [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>;
+               [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
+               Requires<[In32BitMode]>;
 }
 } // Defs = [EFLAGS]
 
@@ -1853,6 +1906,7 @@ let isTwoAddress = 0 in {
                  [(store (add (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
 }
 
+let Uses = [EFLAGS] in {
 let isCommutable = 1 in {  // X = ADC Y, Z --> X = ADC Z, Y
 def ADC32rr  : I<0x11, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "adc{l}\t{$src2, $dst|$dst, $src2}",
@@ -1879,6 +1933,7 @@ let isTwoAddress = 0 in {
                  "adc{l}\t{$src2, $dst|$dst, $src2}",
                  [(store (adde (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
 }
+} // Uses = [EFLAGS]
 
 def SUB8rr   : I<0x28, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
                  "sub{b}\t{$src2, $dst|$dst, $src2}",
                  [(set GR8:$dst, (sub GR8:$src1, GR8:$src2))]>;
@@ -1945,6 +2000,7 @@ let isTwoAddress = 0 in {
                  [(store (sub (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
 }
 
+let Uses = [EFLAGS] in {
 def SBB32rr    : I<0x19, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "sbb{l}\t{$src2, $dst|$dst, $src2}",
                   [(set GR32:$dst, (sube GR32:$src1, GR32:$src2))]>;
@@ -1972,6 +2028,7 @@ def SBB32ri : Ii32<0x81, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2
 def SBB32ri8 : Ii8<0x83, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "sbb{l}\t{$src2, $dst|$dst, $src2}",
                    [(set GR32:$dst, (sube GR32:$src1, i32immSExt8:$src2))]>;
+} // Uses = [EFLAGS]
 } // Defs = [EFLAGS]
 
 let Defs = [EFLAGS] in {
@@ -2406,7 +2463,7 @@ def CDQ : I<0x99, RawFrm, (outs), (ins),
 
 // Alias instructions that map movr0 to xor.
 // FIXME: remove when we can teach regalloc that xor reg, reg is ok.
-let Defs = [EFLAGS], isReMaterializable = 1 in {
+let Defs = [EFLAGS], isReMaterializable = 1, neverHasSideEffects = 1 in {
 def MOV8r0  : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
                 "xor{b}\t$dst, $dst",
                 [(set GR8:$dst, 0)]>;
@@ -2429,7 +2486,7 @@ def MOV16_rr : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16_:$src),
                 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
 def MOV32_rr : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32_:$src),
                 "mov{l}\t{$src, $dst|$dst, $src}", []>;
-let isLoad = 1 in {
+let isLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
 def MOV16_rm : I<0x8B, MRMSrcMem, (outs GR16_:$dst), (ins i16mem:$src),
                 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
 def MOV32_rm : I<0x8B, MRMSrcMem, (outs GR32_:$dst), (ins i32mem:$src),
@@ -2511,13 +2568,23 @@ def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
           (MOV32mi addr:$dst, texternalsym:$src)>;
 
 // Calls
+// tailcall stuff
 def : Pat<(X86tailcall GR32:$dst),
-          (CALL32r GR32:$dst)>;
+          (TAILCALL)>;
 
 def : Pat<(X86tailcall (i32 tglobaladdr:$dst)),
-          (CALLpcrel32 tglobaladdr:$dst)>;
+          (TAILCALL)>;
 def : Pat<(X86tailcall (i32 texternalsym:$dst)),
-          (CALLpcrel32 texternalsym:$dst)>;
+          (TAILCALL)>;
+
+def : Pat<(X86tcret GR32:$dst, imm:$off),
+          (TCRETURNri GR32:$dst, imm:$off)>;
+
+def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
+          (TCRETURNdi texternalsym:$dst, imm:$off)>;
+
+def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
+          (TCRETURNdi texternalsym:$dst, imm:$off)>;
 
 def : Pat<(X86call (i32 tglobaladdr:$dst)),
           (CALLpcrel32 tglobaladdr:$dst)>;
@@ -2581,6 +2648,10 @@ def : Pat<(i16 (anyext (loadi8 addr:$src))), (MOVZX16rm8 addr:$src)>;
 def : Pat<(i32 (anyext (loadi8  addr:$src))), (MOVZX32rm8  addr:$src)>;
 def : Pat<(i32 (anyext (loadi16 addr:$src))), (MOVZX32rm16 addr:$src)>;
 
+// (and (i32 load), 255) -> (zextload i8)
+def : Pat<(i32 (and (loadi32 addr:$src), (i32 255))), (MOVZX32rm8 addr:$src)>;
+def : Pat<(i32 (and (loadi32 addr:$src), (i32 65535))),(MOVZX32rm16 addr:$src)>;
+
 //===----------------------------------------------------------------------===//
 // Some peepholes
 //===----------------------------------------------------------------------===//
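A rough C-level sketch of what the two new zero-extend-load patterns in the last hunk enable; the function names below are made up for illustration and nothing here is part of the patch:

/* With the new (and (loadi32 addr), 255) and (and (loadi32 addr), 65535)
   patterns, a masked 32-bit load like these can be selected as a single
   zero-extending byte/word load (movzbl / movzwl from memory, i.e.
   MOVZX32rm8 / MOVZX32rm16) instead of a full 32-bit mov followed by an and. */
unsigned low_byte(const unsigned *p) { return *p & 255u;   }
unsigned low_half(const unsigned *p) { return *p & 65535u; }

And a hedged sketch of the kind of call the new X86tcret / TCRETURN / TAILJMP machinery is aimed at: a call in tail position that, when tail call optimization fires, can be emitted as a direct jump ("jmp g # TAILCALL") instead of call plus ret. The functions f and g are hypothetical:

int g(int);
/* Candidate for TC_RETURN lowering: g's result is returned unchanged,
   so the call can reuse the caller's stack frame. */
int f(int x) { return g(x + 1); }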