From 071a279e94e30d51aff3b46a4651d686982488a0 Mon Sep 17 00:00:00 2001 From: Evan Cheng Date: Tue, 11 Sep 2007 19:55:27 +0000 Subject: [PATCH] Remove (somewhat confusing) Imp<> helper, use let Defs = [], Uses = [] instead. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@41863 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/ARM/ARMInstrInfo.td | 48 ++-- lib/Target/ARM/ARMInstrThumb.td | 6 +- lib/Target/ARM/ARMInstrVFP.td | 3 +- lib/Target/Alpha/AlphaInstrInfo.td | 6 +- lib/Target/Mips/MipsInstrInfo.td | 6 +- lib/Target/PowerPC/PPCInstr64Bit.td | 4 +- lib/Target/PowerPC/PPCInstrInfo.td | 10 +- lib/Target/Sparc/SparcInstrInfo.td | 6 +- lib/Target/Target.td | 7 - lib/Target/X86/X86InstrFPStack.td | 23 +- lib/Target/X86/X86InstrInfo.td | 361 +++++++++++++++------------- lib/Target/X86/X86InstrMMX.td | 4 +- lib/Target/X86/X86InstrSSE.td | 4 +- lib/Target/X86/X86InstrX86-64.td | 100 ++++---- 14 files changed, 328 insertions(+), 260 deletions(-) diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index fe26e252773..8e59848611f 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -517,31 +517,35 @@ multiclass AsI1_bin_irs opcod, string opc, PatFrag opnode> { /// ASI1_bin_s_irs - Similar to AsI1_bin_irs except it sets the 's' bit so the /// instruction modifies the CSPR register. +let Defs = [CPSR] in { multiclass ASI1_bin_s_irs opcod, string opc, PatFrag opnode> { def ri : AI1, Imp<[], [CPSR]>; + [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>; def rr : AI1, Imp<[], [CPSR]>; + [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>; def rs : AI1, Imp<[], [CPSR]>; + [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>; +} } /// AI1_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test /// patterns. Similar to AsI1_bin_irs except the instruction does not produce /// a explicit result, only implicitly set CPSR. +let Defs = [CPSR] in { multiclass AI1_cmp_irs opcod, string opc, PatFrag opnode> { def ri : AI1, Imp<[], [CPSR]>; + [(opnode GPR:$a, so_imm:$b)]>; def rr : AI1, Imp<[], [CPSR]>; + [(opnode GPR:$a, GPR:$b)]>; def rs : AI1, Imp<[], [CPSR]>; + [(opnode GPR:$a, so_reg:$b)]>; +} } /// AI_unary_rrot - A unary operation with two forms: one whose operand is a @@ -620,16 +624,18 @@ class JTI2 opcod, dag oops, dag iops, string asm, list pattern> /// AsXI1_bin_c_irs - Same as AsI1_bin_irs but without the predicate operand and /// setting carry bit. But it can optionally set CPSR. 
+let Uses = [CPSR] in { multiclass AsXI1_bin_c_irs opcod, string opc, PatFrag opnode> { def ri : AXI1, Imp<[CPSR], []>; + [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>; def rr : AXI1, Imp<[CPSR], []>; + [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>; def rs : AXI1, Imp<[CPSR], []>; + [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>; +} } //===----------------------------------------------------------------------===// @@ -655,15 +661,17 @@ PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx, i32imm:$size), "${instid:label} ${cpidx:cpentry}", []>; +let Defs = [SP], Uses = [SP] in { def ADJCALLSTACKUP : PseudoInst<(outs), (ins i32imm:$amt, pred:$p), "@ ADJCALLSTACKUP $amt", - [(ARMcallseq_end imm:$amt)]>, Imp<[SP],[SP]>; + [(ARMcallseq_end imm:$amt)]>; def ADJCALLSTACKDOWN : PseudoInst<(outs), (ins i32imm:$amt, pred:$p), "@ ADJCALLSTACKDOWN $amt", - [(ARMcallseq_start imm:$amt)]>, Imp<[SP],[SP]>; + [(ARMcallseq_start imm:$amt)]>; +} def DWARF_LOC : PseudoInst<(outs), (ins i32imm:$line, i32imm:$col, i32imm:$file), @@ -963,12 +971,14 @@ def MOVrx : AsI1<0xD, (outs GPR:$dst), (ins GPR:$src), DPRdMisc, // These aren't really mov instructions, but we have to define them this way // due to flag operands. +let Defs = [CPSR] in { def MOVsrl_flag : AI1<0xD, (outs GPR:$dst), (ins GPR:$src), DPRdMisc, "mov", "s $dst, $src, lsr #1", - [(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, Imp<[], [CPSR]>; + [(set GPR:$dst, (ARMsrl_flag GPR:$src))]>; def MOVsra_flag : AI1<0xD, (outs GPR:$dst), (ins GPR:$src), DPRdMisc, "mov", "s $dst, $src, asr #1", - [(set GPR:$dst, (ARMsra_flag GPR:$src))]>, Imp<[], [CPSR]>; + [(set GPR:$dst, (ARMsra_flag GPR:$src))]>; +} //===----------------------------------------------------------------------===// // Extend Instructions. @@ -1034,20 +1044,24 @@ def RSBrs : AsI1<0x3, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPRSoReg, [(set GPR:$dst, (sub so_reg:$b, GPR:$a))]>; // RSB with 's' bit set. +let Defs = [CPSR] in { def RSBSri : AI1<0x3, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPRIm, "rsb", "s $dst, $a, $b", - [(set GPR:$dst, (subc so_imm:$b, GPR:$a))]>, Imp<[], [CPSR]>; + [(set GPR:$dst, (subc so_imm:$b, GPR:$a))]>; def RSBSrs : AI1<0x3, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPRSoReg, "rsb", "s $dst, $a, $b", - [(set GPR:$dst, (subc so_reg:$b, GPR:$a))]>, Imp<[], [CPSR]>; + [(set GPR:$dst, (subc so_reg:$b, GPR:$a))]>; +} // FIXME: Do not allow RSC to be predicated for now. But they can set CPSR. +let Uses = [CPSR] in { def RSCri : AXI1<0x7, (outs GPR:$dst), (ins GPR:$a, so_imm:$b, cc_out:$s), DPRIm, "rsc${s} $dst, $a, $b", - [(set GPR:$dst, (sube so_imm:$b, GPR:$a))]>, Imp<[CPSR], []>; + [(set GPR:$dst, (sube so_imm:$b, GPR:$a))]>; def RSCrs : AXI1<0x7, (outs GPR:$dst), (ins GPR:$a, so_reg:$b, cc_out:$s), DPRSoReg, "rsc${s} $dst, $a, $b", - [(set GPR:$dst, (sube so_reg:$b, GPR:$a))]>, Imp<[CPSR], []>; + [(set GPR:$dst, (sube so_reg:$b, GPR:$a))]>; +} // (sub X, imm) gets canonicalized to (add X, -imm). Match this form. def : ARMPat<(add GPR:$src, so_imm_neg:$imm), diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 278d182cbbd..bc8161a1fed 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -160,15 +160,17 @@ def t_addrmode_sp : Operand, // Miscellaneous Instructions. 
// +let Defs = [SP], Uses = [SP] in { def tADJCALLSTACKUP : PseudoInst<(outs), (ins i32imm:$amt), "@ tADJCALLSTACKUP $amt", - [(ARMcallseq_end imm:$amt)]>, Imp<[SP],[SP]>, Requires<[IsThumb]>; + [(ARMcallseq_end imm:$amt)]>, Requires<[IsThumb]>; def tADJCALLSTACKDOWN : PseudoInst<(outs), (ins i32imm:$amt), "@ tADJCALLSTACKDOWN $amt", - [(ARMcallseq_start imm:$amt)]>, Imp<[SP],[SP]>, Requires<[IsThumb]>; + [(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb]>; +} let isNotDuplicable = 1 in def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td index 4fca6055c01..5b7a4034efd 100644 --- a/lib/Target/ARM/ARMInstrVFP.td +++ b/lib/Target/ARM/ARMInstrVFP.td @@ -285,7 +285,8 @@ def FMDRR : ADI<(outs DPR:$dst), (ins GPR:$src1, GPR:$src2), // FMSRR: GPR -> SPR -def FMSTAT : ASI<(outs), (ins), "fmstat", "", [(arm_fmstat)]>, Imp<[], [CPSR]>; +let Defs = [CPSR] in +def FMSTAT : ASI<(outs), (ins), "fmstat", "", [(arm_fmstat)]>; // FMXR: GPR -> VFP Sstem reg diff --git a/lib/Target/Alpha/AlphaInstrInfo.td b/lib/Target/Alpha/AlphaInstrInfo.td index 7f77d86e239..602ab7aa8a0 100644 --- a/lib/Target/Alpha/AlphaInstrInfo.td +++ b/lib/Target/Alpha/AlphaInstrInfo.td @@ -147,11 +147,11 @@ def IDEF_F64 : PseudoInstAlpha<(outs F8RC:$RA), (ins), ";#idef $RA", def WTF : PseudoInstAlpha<(outs), (ins variable_ops), "#wtf", [], s_pseudo>; -let isLoad = 1, hasCtrlDep = 1 in { +let isLoad = 1, hasCtrlDep = 1, Defs = [R30], Uses = [R30] in { def ADJUSTSTACKUP : PseudoInstAlpha<(outs), (ins s64imm:$amt), "; ADJUP $amt", - [(callseq_start imm:$amt)], s_pseudo>, Imp<[R30],[R30]>; + [(callseq_start imm:$amt)], s_pseudo>; def ADJUSTSTACKDOWN : PseudoInstAlpha<(outs), (ins s64imm:$amt), "; ADJDOWN $amt", - [(callseq_end imm:$amt)], s_pseudo>, Imp<[R30],[R30]>; + [(callseq_end imm:$amt)], s_pseudo>; } def ALTENT : PseudoInstAlpha<(outs), (ins s64imm:$TARGET), "$$$TARGET..ng:\n", [], s_pseudo>; def PCLABEL : PseudoInstAlpha<(outs), (ins s64imm:$num), "PCMARKER_$num:\n",[], s_pseudo>; diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index b11a91d2945..b1f975482aa 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -341,12 +341,14 @@ class Pseudo pattern>: MipsInst; // As stack alignment is always done with addiu, we need a 16-bit immediate +let Defs = [SP], Uses = [SP] in { def ADJCALLSTACKDOWN : Pseudo<(outs), (ins uimm16:$amt), "!ADJCALLSTACKDOWN $amt", - [(callseq_start imm:$amt)]>, Imp<[SP],[SP]>; + [(callseq_start imm:$amt)]>; def ADJCALLSTACKUP : Pseudo<(outs), (ins uimm16:$amt), "!ADJCALLSTACKUP $amt", - [(callseq_end imm:$amt)]>, Imp<[SP],[SP]>; + [(callseq_end imm:$amt)]>; +} def IMPLICIT_DEF_CPURegs : Pseudo<(outs CPURegs:$dst), (ins), "!IMPLICIT_DEF $dst", diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td index f55ce6cacb4..9d728cfc8ff 100644 --- a/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/lib/Target/PowerPC/PPCInstr64Bit.td @@ -130,11 +130,11 @@ def MTCTR8 : XFXForm_7_ext<31, 467, 9, (outs), (ins G8RC:$rS), PPC970_DGroup_First, PPC970_Unit_FXU; } +let Defs = [X1], Uses = [X1] in def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi), "${:comment} DYNALLOC8 $result, $negsize, $fpsi", [(set G8RC:$result, - (PPCdynalloc G8RC:$negsize, iaddr:$fpsi))]>, - Imp<[X1],[X1]>; + (PPCdynalloc G8RC:$negsize, iaddr:$fpsi))]>; def MTLR8 : XFXForm_7_ext<31, 467, 8, (outs), (ins G8RC:$rS), "mtlr $rS", SprMTSPR>, diff --git 
a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 9813c967652..1591148b880 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -297,22 +297,24 @@ def FPContractions : Predicate<"!NoExcessFPPrecision">; // Pseudo-instructions: let hasCtrlDep = 1 in { +let Defs = [R1], Uses = [R1] in { def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt), "${:comment} ADJCALLSTACKDOWN", - [(callseq_start imm:$amt)]>, Imp<[R1],[R1]>; + [(callseq_start imm:$amt)]>; def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt), "${:comment} ADJCALLSTACKUP", - [(callseq_end imm:$amt)]>, Imp<[R1],[R1]>; + [(callseq_end imm:$amt)]>; +} def UPDATE_VRSAVE : Pseudo<(outs GPRC:$rD), (ins GPRC:$rS), "UPDATE_VRSAVE $rD, $rS", []>; } +let Defs = [R1], Uses = [R1] in def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "${:comment} DYNALLOC $result, $negsize, $fpsi", [(set GPRC:$result, - (PPCdynalloc GPRC:$negsize, iaddr:$fpsi))]>, - Imp<[R1],[R1]>; + (PPCdynalloc GPRC:$negsize, iaddr:$fpsi))]>; def IMPLICIT_DEF_GPRC: Pseudo<(outs GPRC:$rD), (ins), "${:comment}IMPLICIT_DEF_GPRC $rD", diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td index ff2ed871701..c41c191a5d8 100644 --- a/lib/Target/Sparc/SparcInstrInfo.td +++ b/lib/Target/Sparc/SparcInstrInfo.td @@ -201,12 +201,14 @@ multiclass F3_12np Op3Val> { class Pseudo pattern> : InstSP; +let Defs = [O6], Uses = [O6] in { def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt), "!ADJCALLSTACKDOWN $amt", - [(callseq_start imm:$amt)]>, Imp<[O6],[O6]>; + [(callseq_start imm:$amt)]>; def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt), "!ADJCALLSTACKUP $amt", - [(callseq_end imm:$amt)]>, Imp<[O6],[O6]>; + [(callseq_end imm:$amt)]>; +} def IMPLICIT_DEF_Int : Pseudo<(outs IntRegs:$dst), (ins), "!IMPLICIT_DEF $dst", [(set IntRegs:$dst, (undef))]>; diff --git a/lib/Target/Target.td b/lib/Target/Target.td index 84f62273d51..4e495fb75cd 100644 --- a/lib/Target/Target.td +++ b/lib/Target/Target.td @@ -203,13 +203,6 @@ class Instruction { string DisableEncoding = ""; } -/// Imp - Helper class for specifying the implicit uses/defs set for an -/// instruction. -class Imp uses, list defs> { - list Uses = uses; - list Defs = defs; -} - /// Predicates - These are extra conditionals which are turned into instruction /// selector matching code. Currently each predicate is just a string. class Predicate { diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index 1c76d718dbb..d47add6d1ae 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -141,14 +141,16 @@ def FpGETRESULT64 : FpI_<(outs RFP64:$dst), (ins), SpecialFP, def FpGETRESULT80 : FpI_<(outs RFP80:$dst), (ins), SpecialFP, [(set RFP80:$dst, X86fpget)]>; // FPR = ST(0) +let Defs = [ST0] in { def FpSETRESULT32 : FpI_<(outs), (ins RFP32:$src), SpecialFP, - [(X86fpset RFP32:$src)]>, Imp<[], [ST0]>;// ST(0) = FPR + [(X86fpset RFP32:$src)]>;// ST(0) = FPR def FpSETRESULT64 : FpI_<(outs), (ins RFP64:$src), SpecialFP, - [(X86fpset RFP64:$src)]>, Imp<[], [ST0]>;// ST(0) = FPR + [(X86fpset RFP64:$src)]>;// ST(0) = FPR def FpSETRESULT80 : FpI_<(outs), (ins RFP80:$src), SpecialFP, - [(X86fpset RFP80:$src)]>, Imp<[], [ST0]>;// ST(0) = FPR + [(X86fpset RFP80:$src)]>;// ST(0) = FPR +} // FpI - Floating Point Psuedo Instruction template. Predicated on FPStack. 
// Note that f80-only instructions are used even in SSE mode and use FpI_ @@ -486,26 +488,29 @@ def UCOM_Fpr80 : FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, def UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, [(X86cmp RFP80:$lhs, RFP80:$rhs)]>; // CC = ST(0) cmp ST(i) +let Uses = [ST0] in { def UCOM_Fr : FPI<0xE0, AddRegFrm, // FPSW = cmp ST(0) with ST(i) (outs), (ins RST:$reg), - "fucom\t$reg">, DD, Imp<[ST0],[]>; + "fucom\t$reg">, DD; def UCOM_FPr : FPI<0xE8, AddRegFrm, // FPSW = cmp ST(0) with ST(i), pop (outs), (ins RST:$reg), - "fucomp\t$reg">, DD, Imp<[ST0],[]>; + "fucomp\t$reg">, DD; def UCOM_FPPr : FPI<0xE9, RawFrm, // cmp ST(0) with ST(1), pop, pop (outs), (ins), - "fucompp">, DA, Imp<[ST0],[]>; + "fucompp">, DA; def UCOM_FIr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i) (outs), (ins RST:$reg), - "fucomi\t{$reg, %st(0)|%ST(0), $reg}">, DB, Imp<[ST0],[]>; + "fucomi\t{$reg, %st(0)|%ST(0), $reg}">, DB; def UCOM_FIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop (outs), (ins RST:$reg), - "fucomip\t{$reg, %st(0)|%ST(0), $reg}">, DF, Imp<[ST0],[]>; + "fucomip\t{$reg, %st(0)|%ST(0), $reg}">, DF; +} // Floating point flag ops. +let Defs = [AX] in def FNSTSW8r : I<0xE0, RawFrm, // AX = fp flags - (outs), (ins), "fnstsw", []>, DF, Imp<[],[AX]>; + (outs), (ins), "fnstsw", []>, DF; def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control world (outs), (ins i16mem:$dst), "fnstcw\t$dst", []>; diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 4168c1965d4..9e0dc6d3fac 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -246,12 +246,13 @@ def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>; // ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into // a stack adjustment and the codegen must know that they may modify the stack // pointer before prolog-epilog rewriting occurs. +let Defs = [ESP], Uses = [ESP] in { def ADJCALLSTACKDOWN : I<0, Pseudo, (outs), (ins i32imm:$amt), "#ADJCALLSTACKDOWN", - [(X86callseq_start imm:$amt)]>, Imp<[ESP],[ESP]>; + [(X86callseq_start imm:$amt)]>; def ADJCALLSTACKUP : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2), "#ADJCALLSTACKUP", - [(X86callseq_end imm:$amt1, imm:$amt2)]>, - Imp<[ESP],[ESP]>; + [(X86callseq_end imm:$amt1, imm:$amt2)]>; +} def IMPLICIT_USE : I<0, Pseudo, (outs), (ins variable_ops), "#IMPLICIT_USE", []>; def IMPLICIT_DEF : I<0, Pseudo, (outs variable_ops), (ins), @@ -364,13 +365,17 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in //===----------------------------------------------------------------------===// // Miscellaneous Instructions... 
// +let Defs = [EBP, ESP], Uses = [EBP, ESP] in def LEAVE : I<0xC9, RawFrm, - (outs), (ins), "leave", []>, Imp<[EBP,ESP],[EBP,ESP]>; + (outs), (ins), "leave", []>; + +let Defs = [ESP], Uses = [ESP] in { def POP32r : I<0x58, AddRegFrm, - (outs GR32:$reg), (ins), "pop{l}\t$reg", []>, Imp<[ESP],[ESP]>; + (outs GR32:$reg), (ins), "pop{l}\t$reg", []>; def PUSH32r : I<0x50, AddRegFrm, - (outs), (ins GR32:$reg), "push{l}\t$reg", []>, Imp<[ESP],[ESP]>; + (outs), (ins GR32:$reg), "push{l}\t$reg", []>; +} def MovePCtoStack : I<0, Pseudo, (outs), (ins piclabel:$label), "call\t$label", []>; @@ -419,77 +424,71 @@ def LEA32r : I<0x8D, MRMSrcMem, "lea{l}\t{$src|$dst}, {$dst|$src}", [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>; +let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI] in { def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}", - [(X86rep_movs i8)]>, - Imp<[ECX,EDI,ESI], [ECX,EDI,ESI]>, REP; + [(X86rep_movs i8)]>, REP; def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}", - [(X86rep_movs i16)]>, - Imp<[ECX,EDI,ESI], [ECX,EDI,ESI]>, REP, OpSize; + [(X86rep_movs i16)]>, REP, OpSize; def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}", - [(X86rep_movs i32)]>, - Imp<[ECX,EDI,ESI], [ECX,EDI,ESI]>, REP; + [(X86rep_movs i32)]>, REP; +} +let Defs = [ECX,EDI], Uses = [AL,ECX,EDI] in def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}", - [(X86rep_stos i8)]>, - Imp<[AL,ECX,EDI], [ECX,EDI]>, REP; + [(X86rep_stos i8)]>, REP; +let Defs = [ECX,EDI], Uses = [AX,ECX,EDI] in def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}", - [(X86rep_stos i16)]>, - Imp<[AX,ECX,EDI], [ECX,EDI]>, REP, OpSize; + [(X86rep_stos i16)]>, REP, OpSize; +let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI] in def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}", - [(X86rep_stos i32)]>, - Imp<[EAX,ECX,EDI], [ECX,EDI]>, REP; + [(X86rep_stos i32)]>, REP; +let Defs = [RAX, RDX] in def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>, - TB, Imp<[],[RAX,RDX]>; + TB; //===----------------------------------------------------------------------===// // Input/Output Instructions... 
// +let Defs = [AL], Uses = [DX] in def IN8rr : I<0xEC, RawFrm, (outs), (ins), - "in{b}\t{%dx, %al|%AL, %DX}", - []>, Imp<[DX], [AL]>; + "in{b}\t{%dx, %al|%AL, %DX}", []>; +let Defs = [AX], Uses = [DX] in def IN16rr : I<0xED, RawFrm, (outs), (ins), - "in{w}\t{%dx, %ax|%AX, %DX}", - []>, Imp<[DX], [AX]>, OpSize; + "in{w}\t{%dx, %ax|%AX, %DX}", []>, OpSize; +let Defs = [EAX], Uses = [DX] in def IN32rr : I<0xED, RawFrm, (outs), (ins), - "in{l}\t{%dx, %eax|%EAX, %DX}", - []>, Imp<[DX],[EAX]>; + "in{l}\t{%dx, %eax|%EAX, %DX}", []>; +let Defs = [AL] in def IN8ri : Ii8<0xE4, RawFrm, (outs), (ins i16i8imm:$port), - "in{b}\t{$port, %al|%AL, $port}", - []>, - Imp<[], [AL]>; + "in{b}\t{$port, %al|%AL, $port}", []>; +let Defs = [AX] in def IN16ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port), - "in{w}\t{$port, %ax|%AX, $port}", - []>, - Imp<[], [AX]>, OpSize; + "in{w}\t{$port, %ax|%AX, $port}", []>, OpSize; +let Defs = [EAX] in def IN32ri : Ii8<0xE5, RawFrm, (outs), (ins i16i8imm:$port), - "in{l}\t{$port, %eax|%EAX, $port}", - []>, - Imp<[],[EAX]>; + "in{l}\t{$port, %eax|%EAX, $port}", []>; +let Uses = [DX, AL] in def OUT8rr : I<0xEE, RawFrm, (outs), (ins), - "out{b}\t{%al, %dx|%DX, %AL}", - []>, Imp<[DX, AL], []>; + "out{b}\t{%al, %dx|%DX, %AL}", []>; +let Uses = [DX, AX] in def OUT16rr : I<0xEF, RawFrm, (outs), (ins), - "out{w}\t{%ax, %dx|%DX, %AX}", - []>, Imp<[DX, AX], []>, OpSize; + "out{w}\t{%ax, %dx|%DX, %AX}", []>, OpSize; +let Uses = [DX, EAX] in def OUT32rr : I<0xEF, RawFrm, (outs), (ins), - "out{l}\t{%eax, %dx|%DX, %EAX}", - []>, Imp<[DX, EAX], []>; + "out{l}\t{%eax, %dx|%DX, %EAX}", []>; +let Uses = [AL] in def OUT8ir : Ii8<0xE6, RawFrm, (outs), (ins i16i8imm:$port), - "out{b}\t{%al, $port|$port, %AL}", - []>, - Imp<[AL], []>; + "out{b}\t{%al, $port|$port, %AL}", []>; +let Uses = [AX] in def OUT16ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port), - "out{w}\t{%ax, $port|$port, %AX}", - []>, - Imp<[AX], []>, OpSize; + "out{w}\t{%ax, $port|$port, %AX}", []>, OpSize; +let Uses = [EAX] in def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port), - "out{l}\t{%eax, $port|$port, %EAX}", - []>, - Imp<[EAX], []>; + "out{l}\t{%eax, $port|$port, %EAX}", []>; //===----------------------------------------------------------------------===// // Move Instructions... @@ -548,71 +547,90 @@ def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), // // Extra precision multiplication +let Defs = [AL,AH], Uses = [AL] in def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src", // FIXME: Used for 8-bit mul, ignore result upper 8 bits. // This probably ought to be moved to a def : Pat<> if the // syntax can be accepted. - [(set AL, (mul AL, GR8:$src))]>, - Imp<[AL],[AL,AH]>; // AL,AH = AL*GR8 + [(set AL, (mul AL, GR8:$src))]>; // AL,AH = AL*GR8 +let Defs = [AX,DX], Uses = [AX] in def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src), "mul{w}\t$src", []>, - Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*GR16 -def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src), "mul{l}\t$src", []>, - Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*GR32 + OpSize; // AX,DX = AX*GR16 +let Defs = [EAX,EDX], Uses = [EAX] in +def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src), "mul{l}\t$src", []>; + // EAX,EDX = EAX*GR32 +let Defs = [AL,AH], Uses = [AL] in def MUL8m : I<0xF6, MRM4m, (outs), (ins i8mem :$src), "mul{b}\t$src", // FIXME: Used for 8-bit mul, ignore result upper 8 bits. // This probably ought to be moved to a def : Pat<> if the // syntax can be accepted. 
- [(set AL, (mul AL, (loadi8 addr:$src)))]>, - Imp<[AL],[AL,AH]>; // AL,AH = AL*[mem8] + [(set AL, (mul AL, (loadi8 addr:$src)))]>; // AL,AH = AL*[mem8] +let Defs = [AX,DX], Uses = [AX] in def MUL16m : I<0xF7, MRM4m, (outs), (ins i16mem:$src), - "mul{w}\t$src", []>, Imp<[AX],[AX,DX]>, - OpSize; // AX,DX = AX*[mem16] + "mul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16] +let Defs = [EAX,EDX], Uses = [EAX] in def MUL32m : I<0xF7, MRM4m, (outs), (ins i32mem:$src), - "mul{l}\t$src", []>, Imp<[EAX],[EAX,EDX]>;// EAX,EDX = EAX*[mem32] + "mul{l}\t$src", []>; // EAX,EDX = EAX*[mem32] -def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>, - Imp<[AL],[AL,AH]>; // AL,AH = AL*GR8 +let Defs = [AL,AH], Uses = [AL] in +def IMUL8r : I<0xF6, MRM5r, (outs), (ins GR8:$src), "imul{b}\t$src", []>; + // AL,AH = AL*GR8 +let Defs = [AX,DX], Uses = [AX] in def IMUL16r : I<0xF7, MRM5r, (outs), (ins GR16:$src), "imul{w}\t$src", []>, - Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*GR16 -def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>, - Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*GR32 + OpSize; // AX,DX = AX*GR16 +let Defs = [EAX,EDX], Uses = [EAX] in +def IMUL32r : I<0xF7, MRM5r, (outs), (ins GR32:$src), "imul{l}\t$src", []>; + // EAX,EDX = EAX*GR32 +let Defs = [AL,AH], Uses = [AL] in def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src), - "imul{b}\t$src", []>, Imp<[AL],[AL,AH]>; // AL,AH = AL*[mem8] + "imul{b}\t$src", []>; // AL,AH = AL*[mem8] +let Defs = [AX,DX], Uses = [AX] in def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src), - "imul{w}\t$src", []>, Imp<[AX],[AX,DX]>, - OpSize; // AX,DX = AX*[mem16] + "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16] +let Defs = [EAX,EDX], Uses = [EAX] in def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src), - "imul{l}\t$src", []>, - Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*[mem32] + "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32] // unsigned division/remainder +let Defs = [AX], Uses = [AL,AH] in def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH - "div{b}\t$src", []>, Imp<[AX],[AX]>; + "div{b}\t$src", []>; +let Defs = [AX,DX], Uses = [AX,DX] in def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX - "div{w}\t$src", []>, Imp<[AX,DX],[AX,DX]>, OpSize; + "div{w}\t$src", []>, OpSize; +let Defs = [EAX,EDX], Uses = [EAX,EDX] in def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX - "div{l}\t$src", []>, Imp<[EAX,EDX],[EAX,EDX]>; + "div{l}\t$src", []>; +let Defs = [AX], Uses = [AL,AH] in def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH - "div{b}\t$src", []>, Imp<[AX],[AX]>; + "div{b}\t$src", []>; +let Defs = [AX,DX], Uses = [AX,DX] in def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX - "div{w}\t$src", []>, Imp<[AX,DX],[AX,DX]>, OpSize; + "div{w}\t$src", []>, OpSize; +let Defs = [EAX,EDX], Uses = [EAX,EDX] in def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX - "div{l}\t$src", []>, Imp<[EAX,EDX],[EAX,EDX]>; + "div{l}\t$src", []>; // Signed division/remainder. 
+let Defs = [AX], Uses = [AL,AH] in def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH - "idiv{b}\t$src", []>, Imp<[AX],[AX]>; + "idiv{b}\t$src", []>; +let Defs = [AX,DX], Uses = [AX,DX] in def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX - "idiv{w}\t$src", []>, Imp<[AX,DX],[AX,DX]>, OpSize; + "idiv{w}\t$src", []>, OpSize; +let Defs = [EAX,EDX], Uses = [EAX,EDX] in def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX - "idiv{l}\t$src", []>, Imp<[EAX,EDX],[EAX,EDX]>; + "idiv{l}\t$src", []>; +let Defs = [AX], Uses = [AL,AH] in def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH - "idiv{b}\t$src", []>, Imp<[AX],[AX]>; + "idiv{b}\t$src", []>; +let Defs = [AX,DX], Uses = [AX,DX] in def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX - "idiv{w}\t$src", []>, Imp<[AX,DX],[AX,DX]>, OpSize; + "idiv{w}\t$src", []>, OpSize; +let Defs = [EAX,EDX], Uses = [EAX,EDX] in def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX - "idiv{l}\t$src", []>, Imp<[EAX,EDX],[EAX,EDX]>; + "idiv{l}\t$src", []>; //===----------------------------------------------------------------------===// @@ -1291,15 +1309,17 @@ let isTwoAddress = 0 in { } // Shift instructions +let Uses = [CL] in { def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src), "shl{b}\t{%cl, $dst|$dst, %CL}", - [(set GR8:$dst, (shl GR8:$src, CL))]>, Imp<[CL],[]>; + [(set GR8:$dst, (shl GR8:$src, CL))]>; def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src), "shl{w}\t{%cl, $dst|$dst, %CL}", - [(set GR16:$dst, (shl GR16:$src, CL))]>, Imp<[CL],[]>, OpSize; + [(set GR16:$dst, (shl GR16:$src, CL))]>, OpSize; def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src), "shl{l}\t{%cl, $dst|$dst, %CL}", - [(set GR32:$dst, (shl GR32:$src, CL))]>, Imp<[CL],[]>; + [(set GR32:$dst, (shl GR32:$src, CL))]>; +} def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), "shl{b}\t{$src2, $dst|$dst, $src2}", @@ -1322,18 +1342,17 @@ def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1), "shl{l}\t$dst", []>; let isTwoAddress = 0 in { + let Uses = [CL] in { def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst), "shl{b}\t{%cl, $dst|$dst, %CL}", - [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (shl (loadi8 addr:$dst), CL), addr:$dst)]>; def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst), "shl{w}\t{%cl, $dst|$dst, %CL}", - [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>, OpSize; + [(store (shl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize; def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst), "shl{l}\t{%cl, $dst|$dst, %CL}", - [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (shl (loadi32 addr:$dst), CL), addr:$dst)]>; + } def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, i8imm:$src), "shl{b}\t{$src, $dst|$dst, $src}", [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -1358,15 +1377,17 @@ let isTwoAddress = 0 in { [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>; } +let Uses = [CL] in { def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src), "shr{b}\t{%cl, $dst|$dst, %CL}", - [(set GR8:$dst, (srl GR8:$src, CL))]>, Imp<[CL],[]>; + [(set GR8:$dst, (srl GR8:$src, CL))]>; def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src), "shr{w}\t{%cl, $dst|$dst, %CL}", - [(set GR16:$dst, (srl GR16:$src, CL))]>, Imp<[CL],[]>, OpSize; + [(set GR16:$dst, (srl 
GR16:$src, CL))]>, OpSize; def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src), "shr{l}\t{%cl, $dst|$dst, %CL}", - [(set GR32:$dst, (srl GR32:$src, CL))]>, Imp<[CL],[]>; + [(set GR32:$dst, (srl GR32:$src, CL))]>; +} def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2), "shr{b}\t{$src2, $dst|$dst, $src2}", @@ -1390,18 +1411,18 @@ def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1), [(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>; let isTwoAddress = 0 in { + let Uses = [CL] in { def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst), "shr{b}\t{%cl, $dst|$dst, %CL}", - [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (srl (loadi8 addr:$dst), CL), addr:$dst)]>; def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst), "shr{w}\t{%cl, $dst|$dst, %CL}", [(store (srl (loadi16 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>, OpSize; + OpSize; def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst), "shr{l}\t{%cl, $dst|$dst, %CL}", - [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (srl (loadi32 addr:$dst), CL), addr:$dst)]>; + } def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src), "shr{b}\t{$src, $dst|$dst, $src}", [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -1425,15 +1446,17 @@ let isTwoAddress = 0 in { [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>; } +let Uses = [CL] in { def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src), "sar{b}\t{%cl, $dst|$dst, %CL}", - [(set GR8:$dst, (sra GR8:$src, CL))]>, Imp<[CL],[]>; + [(set GR8:$dst, (sra GR8:$src, CL))]>; def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src), "sar{w}\t{%cl, $dst|$dst, %CL}", - [(set GR16:$dst, (sra GR16:$src, CL))]>, Imp<[CL],[]>, OpSize; + [(set GR16:$dst, (sra GR16:$src, CL))]>, OpSize; def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src), "sar{l}\t{%cl, $dst|$dst, %CL}", - [(set GR32:$dst, (sra GR32:$src, CL))]>, Imp<[CL],[]>; + [(set GR32:$dst, (sra GR32:$src, CL))]>; +} def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), "sar{b}\t{$src2, $dst|$dst, $src2}", @@ -1458,18 +1481,17 @@ def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1), [(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>; let isTwoAddress = 0 in { + let Uses = [CL] in { def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst), "sar{b}\t{%cl, $dst|$dst, %CL}", - [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (sra (loadi8 addr:$dst), CL), addr:$dst)]>; def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst), "sar{w}\t{%cl, $dst|$dst, %CL}", - [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>, OpSize; + [(store (sra (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize; def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst), "sar{l}\t{%cl, $dst|$dst, %CL}", - [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (sra (loadi32 addr:$dst), CL), addr:$dst)]>; + } def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, i8imm:$src), "sar{b}\t{$src, $dst|$dst, $src}", [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -1496,15 +1518,17 @@ let isTwoAddress = 0 in { // Rotate instructions // FIXME: provide shorter instructions when imm8 == 1 +let Uses = [CL] in { def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "rol{b}\t{%cl, $dst|$dst, %CL}", - [(set GR8:$dst, (rotl GR8:$src, CL))]>, Imp<[CL],[]>; + [(set GR8:$dst, (rotl GR8:$src, CL))]>; def ROL16rCL : I<0xD3, MRM0r, (outs 
GR16:$dst), (ins GR16:$src), "rol{w}\t{%cl, $dst|$dst, %CL}", - [(set GR16:$dst, (rotl GR16:$src, CL))]>, Imp<[CL],[]>, OpSize; + [(set GR16:$dst, (rotl GR16:$src, CL))]>, OpSize; def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src), "rol{l}\t{%cl, $dst|$dst, %CL}", - [(set GR32:$dst, (rotl GR32:$src, CL))]>, Imp<[CL],[]>; + [(set GR32:$dst, (rotl GR32:$src, CL))]>; +} def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), "rol{b}\t{$src2, $dst|$dst, $src2}", @@ -1528,18 +1552,17 @@ def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1), [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>; let isTwoAddress = 0 in { + let Uses = [CL] in { def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst), "rol{b}\t{%cl, $dst|$dst, %CL}", - [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)]>; def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst), "rol{w}\t{%cl, $dst|$dst, %CL}", - [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>, OpSize; + [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize; def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst), "rol{l}\t{%cl, $dst|$dst, %CL}", - [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)]>; + } def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, i8imm:$src), "rol{b}\t{$src, $dst|$dst, $src}", [(store (rotl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -1564,15 +1587,17 @@ let isTwoAddress = 0 in { [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>; } +let Uses = [CL] in { def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "ror{b}\t{%cl, $dst|$dst, %CL}", - [(set GR8:$dst, (rotr GR8:$src, CL))]>, Imp<[CL],[]>; + [(set GR8:$dst, (rotr GR8:$src, CL))]>; def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src), "ror{w}\t{%cl, $dst|$dst, %CL}", - [(set GR16:$dst, (rotr GR16:$src, CL))]>, Imp<[CL],[]>, OpSize; + [(set GR16:$dst, (rotr GR16:$src, CL))]>, OpSize; def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src), "ror{l}\t{%cl, $dst|$dst, %CL}", - [(set GR32:$dst, (rotr GR32:$src, CL))]>, Imp<[CL],[]>; + [(set GR32:$dst, (rotr GR32:$src, CL))]>; +} def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), "ror{b}\t{$src2, $dst|$dst, $src2}", @@ -1596,18 +1621,17 @@ def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1), [(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>; let isTwoAddress = 0 in { + let Uses = [CL] in { def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst), "ror{b}\t{%cl, $dst|$dst, %CL}", - [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)]>; def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst), "ror{w}\t{%cl, $dst|$dst, %CL}", - [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>, OpSize; + [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)]>, OpSize; def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst), "ror{l}\t{%cl, $dst|$dst, %CL}", - [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)]>; + } def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src), "ror{b}\t{$src, $dst|$dst, $src}", [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -1635,22 +1659,22 @@ let isTwoAddress = 0 in { // Double shift instructions (generalizations of rotate) +let Uses = [CL] in { def SHLD32rrCL : 
I<0xA5, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "shld{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}", - [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, - Imp<[CL],[]>, TB; + [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB; def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}", - [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, - Imp<[CL],[]>, TB; + [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB; def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "shld{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}", [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>, - Imp<[CL],[]>, TB, OpSize; + TB, OpSize; def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}", [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>, - Imp<[CL],[]>, TB, OpSize; + TB, OpSize; +} let isCommutable = 1 in { // These instructions commute to each other. def SHLD32rri8 : Ii8<0xA4, MRMDestReg, @@ -1680,16 +1704,16 @@ def SHRD16rri8 : Ii8<0xAC, MRMDestReg, } let isTwoAddress = 0 in { + let Uses = [CL] in { def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2), "shld{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}", [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL), - addr:$dst)]>, - Imp<[CL],[]>, TB; + addr:$dst)]>, TB; def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2), "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, %CL}", [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL), - addr:$dst)]>, - Imp<[CL],[]>, TB; + addr:$dst)]>, TB; + } def SHLD32mri8 : Ii8<0xA4, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2, i8imm:$src3), "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}", @@ -1703,16 +1727,16 @@ let isTwoAddress = 0 in { (i8 imm:$src3)), addr:$dst)]>, TB; + let Uses = [CL] in { def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), "shld{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}", [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL), - addr:$dst)]>, - Imp<[CL],[]>, TB, OpSize; + addr:$dst)]>, TB, OpSize; def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, %CL}", [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL), - addr:$dst)]>, - Imp<[CL],[]>, TB, OpSize; + addr:$dst)]>, TB, OpSize; + } def SHLD16mri8 : Ii8<0xA4, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2, i8imm:$src3), "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}", @@ -1730,25 +1754,31 @@ let isTwoAddress = 0 in { // Arithmetic. let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y -def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2), +def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst), + (ins GR8 :$src1, GR8 :$src2), "add{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (add GR8:$src1, GR8:$src2))]>; let isConvertibleToThreeAddress = 1 in { // Can transform into LEA. 
-def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "add{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (add GR16:$src1, GR16:$src2))]>, OpSize; -def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "add{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (add GR32:$src1, GR32:$src2))]>; } // end isConvertibleToThreeAddress } // end isCommutable -def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), +def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst), + (ins GR8 :$src1, i8mem :$src2), "add{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (add GR8:$src1, (load addr:$src2)))]>; -def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), +def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), + (ins GR16:$src1, i16mem:$src2), "add{w}\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (add GR16:$src1, (load addr:$src2)))]>, OpSize; -def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), + [(set GR16:$dst, (add GR16:$src1, (load addr:$src2)))]>,OpSize; +def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), + (ins GR32:$src1, i32mem:$src2), "add{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (add GR32:$src1, (load addr:$src2)))]>; @@ -1757,19 +1787,22 @@ def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2), [(set GR8:$dst, (add GR8:$src1, imm:$src2))]>; let isConvertibleToThreeAddress = 1 in { // Can transform into LEA. -def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2), +def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst), + (ins GR16:$src1, i16imm:$src2), "add{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (add GR16:$src1, imm:$src2))]>, OpSize; -def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2), +def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst), + (ins GR32:$src1, i32imm:$src2), "add{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (add GR32:$src1, imm:$src2))]>; -def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2), +def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst), + (ins GR16:$src1, i16i8imm:$src2), "add{w}\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2))]>, - OpSize; -def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2), + [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2))]>, OpSize; +def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst), + (ins GR32:$src1, i32i8imm:$src2), "add{l}\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2))]>; + [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2))]>; } let isTwoAddress = 0 in { @@ -2033,8 +2066,10 @@ def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32 // Condition code ops, incl. set if equal/not equal/... 
-def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>, Imp<[AH],[]>; // flags = AH -def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, Imp<[],[AH]>; // AH = flags +let Uses = [AH] in +def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH +let Defs = [AH] in +def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags def SETEr : I<0x94, MRM0r, (outs GR8 :$dst), (ins), @@ -2296,15 +2331,19 @@ def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src), "movz{wl|x}\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB; +let Defs = [AX], Uses = [AL] in def CBW : I<0x98, RawFrm, (outs), (ins), - "{cbtw|cbw}", []>, Imp<[AL],[AX]>, OpSize; // AX = signext(AL) + "{cbtw|cbw}", []>, OpSize; // AX = signext(AL) +let Defs = [EAX], Uses = [AX] in def CWDE : I<0x98, RawFrm, (outs), (ins), - "{cwtl|cwde}", []>, Imp<[AX],[EAX]>; // EAX = signext(AX) + "{cwtl|cwde}", []>; // EAX = signext(AX) +let Defs = [AX,DX], Uses = [AX] in def CWD : I<0x99, RawFrm, (outs), (ins), - "{cwtd|cwd}", []>, Imp<[AX],[AX,DX]>, OpSize; // DX:AX = signext(AX) + "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX) +let Defs = [EAX,EDX], Uses = [EAX] in def CDQ : I<0x99, RawFrm, (outs), (ins), - "{cltd|cdq}", []>, Imp<[EAX],[EAX,EDX]>; // EDX:EAX = signext(EAX) + "{cltd|cdq}", []>; // EDX:EAX = signext(EAX) //===----------------------------------------------------------------------===// @@ -2351,10 +2390,10 @@ def MOV32_mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32_:$src), // Thread Local Storage Instructions // +let Uses = [EBX] in def TLS_addr : I<0, Pseudo, (outs GR32:$dst), (ins i32imm:$sym), "leal\t${sym:mem}(,%ebx,1), $dst", - [(set GR32:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>, - Imp<[EBX],[]>; + [(set GR32:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>; let AddedComplexity = 10 in def TLS_gs_rr : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src), diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 733dd37babd..b7024bc8766 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -476,10 +476,10 @@ def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src), [(set GR32:$dst, (int_x86_mmx_pmovmskb VR64:$src))]>; // Misc. 
+let Uses = [EDI] in def MMX_MASKMOVQ : MMXI<0xF7, MRMDestMem, (outs), (ins VR64:$src, VR64:$mask), "maskmovq\t{$mask, $src|$src, $mask}", - [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>, - Imp<[EDI],[]>; + [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>; //===----------------------------------------------------------------------===// // Alias Instructions diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index d4f35fa5b5e..63f041be7fe 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -2012,10 +2012,10 @@ def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>; // Conditional store +let Uses = [EDI] in def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask), "maskmovdqu\t{$mask, $src|$src, $mask}", - [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, - Imp<[EDI],[]>; + [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>; // Non-temporal stores def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), diff --git a/lib/Target/X86/X86InstrX86-64.td b/lib/Target/X86/X86InstrX86-64.td index 991f8a0a582..794bb70db90 100644 --- a/lib/Target/X86/X86InstrX86-64.td +++ b/lib/Target/X86/X86InstrX86-64.td @@ -113,12 +113,15 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in { //===----------------------------------------------------------------------===// // Miscellaneous Instructions... // +let Defs = [RBP,RSP], Uses = [RBP,RSP] in def LEAVE64 : I<0xC9, RawFrm, - (outs), (ins), "leave", []>, Imp<[RBP,RSP],[RBP,RSP]>; + (outs), (ins), "leave", []>; +let Defs = [RSP], Uses = [RSP] in { def POP64r : I<0x58, AddRegFrm, - (outs GR64:$reg), (ins), "pop{q}\t$reg", []>, Imp<[RSP],[RSP]>; + (outs GR64:$reg), (ins), "pop{q}\t$reg", []>; def PUSH64r : I<0x50, AddRegFrm, - (outs), (ins GR64:$reg), "push{q}\t$reg", []>, Imp<[RSP],[RSP]>; + (outs), (ins GR64:$reg), "push{q}\t$reg", []>; +} def LEA64_32r : I<0x8D, MRMSrcMem, (outs GR32:$dst), (ins lea64_32mem:$src), @@ -142,12 +145,12 @@ def XCHG64rm : RI<0x87, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), "xchg{q}\t{$src2|$src1}, {$src1|$src2}", []>; // Repeat string ops +let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}", - [(X86rep_movs i64)]>, - Imp<[RCX,RDI,RSI], [RCX,RDI,RSI]>, REP; + [(X86rep_movs i64)]>, REP; +let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}", - [(X86rep_stos i64)]>, - Imp<[RAX,RCX,RDI], [RCX,RDI]>, REP; + [(X86rep_stos i64)]>, REP; //===----------------------------------------------------------------------===// // Move Instructions... @@ -211,11 +214,13 @@ def MOVZX64rm16: RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), "movz{wq|x}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB; +let Defs = [RAX], Uses = [EAX] in def CDQE : RI<0x98, RawFrm, (outs), (ins), - "{cltq|cdqe}", []>, Imp<[EAX],[RAX]>; // RAX = signext(EAX) + "{cltq|cdqe}", []>; // RAX = signext(EAX) +let Defs = [RAX,RDX], Uses = [RAX] in def CQO : RI<0x99, RawFrm, (outs), (ins), - "{cqto|cqo}", []>, Imp<[RAX],[RAX,RDX]>; // RDX:RAX = signext(RAX) + "{cqto|cqo}", []>; // RDX:RAX = signext(RAX) //===----------------------------------------------------------------------===// // Arithmetic Instructions... 
@@ -334,20 +339,18 @@ def SBB64mi8 : RIi8<0x83, MRM3m, (outs), (ins i64mem:$dst, i64i8imm :$src2), [(store (sube (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; // Unsigned multiplication +let Defs = [RAX,RDX], Uses = [RAX] in { def MUL64r : RI<0xF7, MRM4r, (outs), (ins GR64:$src), - "mul{q}\t$src", []>, - Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*GR64 + "mul{q}\t$src", []>; // RAX,RDX = RAX*GR64 def MUL64m : RI<0xF7, MRM4m, (outs), (ins i64mem:$src), - "mul{q}\t$src", []>, - Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*[mem64] + "mul{q}\t$src", []>; // RAX,RDX = RAX*[mem64] // Signed multiplication def IMUL64r : RI<0xF7, MRM5r, (outs), (ins GR64:$src), - "imul{q}\t$src", []>, - Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*GR64 + "imul{q}\t$src", []>; // RAX,RDX = RAX*GR64 def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src), - "imul{q}\t$src", []>, - Imp<[RAX],[RAX,RDX]>; // RAX,RDX = RAX*[mem64] + "imul{q}\t$src", []>; // RAX,RDX = RAX*[mem64] +} let isTwoAddress = 1 in { let isCommutable = 1 in @@ -379,16 +382,18 @@ def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8 [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>; // Unsigned division / remainder +let Defs = [RAX,RDX], Uses = [RAX,RDX] in { def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX - "div{q}\t$src", []>, Imp<[RAX,RDX],[RAX,RDX]>; + "div{q}\t$src", []>; def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX - "div{q}\t$src", []>, Imp<[RAX,RDX],[RAX,RDX]>; + "div{q}\t$src", []>; // Signed division / remainder def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX - "idiv{q}\t$src", []>, Imp<[RAX,RDX],[RAX,RDX]>; + "idiv{q}\t$src", []>; def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX - "idiv{q}\t$src", []>, Imp<[RAX,RDX],[RAX,RDX]>; + "idiv{q}\t$src", []>; +} // Unary instructions let CodeSize = 2 in { @@ -431,10 +436,10 @@ def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst" // Shift instructions let isTwoAddress = 1 in { +let Uses = [CL] in def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src), "shl{q}\t{%cl, $dst|$dst, %CL}", - [(set GR64:$dst, (shl GR64:$src, CL))]>, - Imp<[CL],[]>; + [(set GR64:$dst, (shl GR64:$src, CL))]>; def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), "shl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>; @@ -442,10 +447,10 @@ def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1), "shl{q}\t$dst", []>; } // isTwoAddress +let Uses = [CL] in def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst), "shl{q}\t{%cl, $dst|$dst, %CL}", - [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (shl (loadi64 addr:$dst), CL), addr:$dst)]>; def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, i8imm:$src), "shl{q}\t{$src, $dst|$dst, $src}", [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -454,10 +459,10 @@ def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst), [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>; let isTwoAddress = 1 in { +let Uses = [CL] in def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src), "shr{q}\t{%cl, $dst|$dst, %CL}", - [(set GR64:$dst, (srl GR64:$src, CL))]>, - Imp<[CL],[]>; + [(set GR64:$dst, (srl GR64:$src, CL))]>; def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), "shr{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (srl GR64:$src1, (i8 
imm:$src2)))]>; @@ -466,10 +471,10 @@ def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1), [(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>; } // isTwoAddress +let Uses = [CL] in def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst), "shr{q}\t{%cl, $dst|$dst, %CL}", - [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (srl (loadi64 addr:$dst), CL), addr:$dst)]>; def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, i8imm:$src), "shr{q}\t{$src, $dst|$dst, $src}", [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -478,9 +483,10 @@ def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst), [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>; let isTwoAddress = 1 in { +let Uses = [CL] in def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src), "sar{q}\t{%cl, $dst|$dst, %CL}", - [(set GR64:$dst, (sra GR64:$src, CL))]>, Imp<[CL],[]>; + [(set GR64:$dst, (sra GR64:$src, CL))]>; def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), "sar{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>; @@ -489,10 +495,10 @@ def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1), [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>; } // isTwoAddress +let Uses = [CL] in def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst), "sar{q}\t{%cl, $dst|$dst, %CL}", - [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (sra (loadi64 addr:$dst), CL), addr:$dst)]>; def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, i8imm:$src), "sar{q}\t{$src, $dst|$dst, $src}", [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -502,9 +508,10 @@ def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst), // Rotate instructions let isTwoAddress = 1 in { +let Uses = [CL] in def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src), "rol{q}\t{%cl, $dst|$dst, %CL}", - [(set GR64:$dst, (rotl GR64:$src, CL))]>, Imp<[CL],[]>; + [(set GR64:$dst, (rotl GR64:$src, CL))]>; def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), "rol{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>; @@ -513,10 +520,10 @@ def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>; } // isTwoAddress +let Uses = [CL] in def ROL64mCL : I<0xD3, MRM0m, (outs), (ins i64mem:$dst), "rol{q}\t{%cl, $dst|$dst, %CL}", - [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>; def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src), "rol{q}\t{$src, $dst|$dst, $src}", [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -525,9 +532,10 @@ def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst), [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>; let isTwoAddress = 1 in { +let Uses = [CL] in def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src), "ror{q}\t{%cl, $dst|$dst, %CL}", - [(set GR64:$dst, (rotr GR64:$src, CL))]>, Imp<[CL],[]>; + [(set GR64:$dst, (rotr GR64:$src, CL))]>; def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), "ror{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>; @@ -536,10 +544,10 @@ def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1), [(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>; } // isTwoAddress +let Uses = [CL] in def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins 
i64mem:$dst), "ror{q}\t{%cl, $dst|$dst, %CL}", - [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>, - Imp<[CL],[]>; + [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)]>; def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, i8imm:$src), "ror{q}\t{$src, $dst|$dst, $src}", [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -549,12 +557,12 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst), // Double shift instructions (generalizations of rotate) let isTwoAddress = 1 in { +let Uses = [CL] in { def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, - Imp<[CL],[]>, TB; + "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB; def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, - Imp<[CL],[]>, TB; + "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB; +} let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction def SHLD64rri8 : RIi8<0xA4, MRMDestReg, @@ -570,12 +578,12 @@ def SHRD64rri8 : RIi8<0xAC, MRMDestReg, // Temporary hack: there is no patterns associated with these instructions // so we have to tell tblgen that these do not produce results. +let Uses = [CL] in { def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), - "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, - Imp<[CL],[]>, TB; + "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB; def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), - "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, - Imp<[CL],[]>, TB; + "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", []>, TB; +} def SHLD64mri8 : RIi8<0xA4, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2, i8imm:$src3), "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>, -- 2.34.1
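
For reference, the shape of the change is the same across every target touched above. The snippet below is an illustration only, not text from the patch: MYPSEUDO and MYSHIFTrCL are made-up names standing in for real definitions such as ADJCALLSTACKDOWN and SHL32rCL, and the classes and registers they reference (I, Pseudo, GR32, i32imm, ESP, CL, shl) are assumed to come from the existing X86 .td files.

// Before: implicit register uses/defs were attached through the Imp<> helper
// that this patch removes from Target.td (argument order: uses, then defs).
//
//   class Imp<list<Register> uses, list<Register> defs> {
//     list<Register> Uses = uses;
//     list<Register> Defs = defs;
//   }
//
//   def MYPSEUDO : I<0, Pseudo, (outs), (ins i32imm:$amt),
//                    "#MYPSEUDO", []>, Imp<[ESP], [ESP]>;

// After: the Uses/Defs fields of Instruction are set directly.  A single def
// can take its own `let ... in`, and a braced block covers several defs.
let Defs = [ESP], Uses = [ESP] in
def MYPSEUDO : I<0, Pseudo, (outs), (ins i32imm:$amt),
                 "#MYPSEUDO", []>;

let Uses = [CL] in {
def MYSHIFTrCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src),
                   "shl{l}\t{%cl, $dst|$dst, %CL}",
                   [(set GR32:$dst, (shl GR32:$src, CL))]>;
}

The braced `let` form is what most hunks above use: one Defs/Uses declaration is shared by a group of related defs instead of repeating an Imp<> base on each one.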