From e5f6204cd5d2306379bf8954e280ad35619a38b5 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Sat, 29 Sep 2007 00:00:36 +0000
Subject: [PATCH] Enabling new condition code modeling scheme.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@42459 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Target/TargetOptions.h |    5 -
 lib/Target/TargetMachine.cpp        |    6 -
 lib/Target/X86/X86FloatingPoint.cpp |   33 -
 lib/Target/X86/X86ISelLowering.cpp  |  255 +------
 lib/Target/X86/X86ISelLowering.h    |    9 +-
 lib/Target/X86/X86InstrFPStack.td   |   65 +-
 lib/Target/X86/X86InstrInfo.cpp     |   72 +-
 lib/Target/X86/X86InstrInfo.td      | 1015 +++++----------------------
 lib/Target/X86/X86InstrSSE.td       |  132 +---
 lib/Target/X86/X86InstrX86-64.td    |  280 ++------
 lib/Target/X86/X86RegisterInfo.cpp  |   90 ---
 11 files changed, 308 insertions(+), 1654 deletions(-)

diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index 0d2911eb1fe..7421f96b7bc 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -73,11 +73,6 @@ namespace llvm {
   /// ExceptionHandling - This flag indicates that exception information should
   /// be emitted.
   extern bool ExceptionHandling;
-
-  /// NewCCModeling - This temporary flag indicates whether to use the new
-  /// condition code modeling scheme.
-  extern bool NewCCModeling;
-
 } // End llvm namespace

 #endif
diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp
index 3c00428a460..6c00a3f492b 100644
--- a/lib/Target/TargetMachine.cpp
+++ b/lib/Target/TargetMachine.cpp
@@ -31,7 +31,6 @@ namespace llvm {
   bool UseSoftFloat;
   bool NoZerosInBSS;
   bool ExceptionHandling;
-  bool NewCCModeling;
   Reloc::Model RelocationModel;
   CodeModel::Model CMModel;
 }
@@ -117,11 +116,6 @@ namespace {
               clEnumValN(CodeModel::Large, "large", " Large code model"),
               clEnumValEnd));
-  cl::opt EnableNewCCModeling("new-cc-modeling-scheme", cl::desc("New CC modeling scheme."), cl::location(NewCCModeling), cl::init(false));
 }
 //---------------------------------------------------------------------------
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 37a9f6016ce..90813b57ef8 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -437,39 +437,6 @@ static const TableEntry OpcodeTable[] = {
   { X86::MUL_FpI32m32 , X86::MUL_FI32m },
   { X86::MUL_FpI32m64 , X86::MUL_FI32m },
   { X86::MUL_FpI32m80 , X86::MUL_FI32m },
-
-  // TEMPORARY
-  { X86::NEW_CMOVBE_Fp32 , X86::CMOVBE_F },
-  { X86::NEW_CMOVBE_Fp64 , X86::CMOVBE_F },
-  { X86::NEW_CMOVBE_Fp80 , X86::CMOVBE_F },
-  { X86::NEW_CMOVB_Fp32 , X86::CMOVB_F },
-  { X86::NEW_CMOVB_Fp64 , X86::CMOVB_F },
-  { X86::NEW_CMOVB_Fp80 , X86::CMOVB_F },
-  { X86::NEW_CMOVE_Fp32 , X86::CMOVE_F },
-  { X86::NEW_CMOVE_Fp64 , X86::CMOVE_F },
-  { X86::NEW_CMOVE_Fp80 , X86::CMOVE_F },
-  { X86::NEW_CMOVNBE_Fp32 , X86::CMOVNBE_F },
-  { X86::NEW_CMOVNBE_Fp64 , X86::CMOVNBE_F },
-  { X86::NEW_CMOVNBE_Fp80 , X86::CMOVNBE_F },
-  { X86::NEW_CMOVNB_Fp32 , X86::CMOVNB_F },
-  { X86::NEW_CMOVNB_Fp64 , X86::CMOVNB_F },
-  { X86::NEW_CMOVNB_Fp80 , X86::CMOVNB_F },
-  { X86::NEW_CMOVNE_Fp32 , X86::CMOVNE_F },
-  { X86::NEW_CMOVNE_Fp64 , X86::CMOVNE_F },
-  { X86::NEW_CMOVNE_Fp80 , X86::CMOVNE_F },
-  { X86::NEW_CMOVNP_Fp32 , X86::CMOVNP_F },
-  { X86::NEW_CMOVNP_Fp64 , X86::CMOVNP_F },
-  { X86::NEW_CMOVNP_Fp80 , X86::CMOVNP_F },
-  { X86::NEW_CMOVP_Fp32 , X86::CMOVP_F },
-  { X86::NEW_CMOVP_Fp64 , X86::CMOVP_F },
-  { X86::NEW_CMOVP_Fp80 , X86::CMOVP_F },
-  {
X86::NEW_UCOM_FpIr32 , X86::UCOM_FIr }, - { X86::NEW_UCOM_FpIr64 , X86::UCOM_FIr }, - { X86::NEW_UCOM_FpIr80 , X86::UCOM_FIr }, - { X86::NEW_UCOM_Fpr32 , X86::UCOM_Fr }, - { X86::NEW_UCOM_Fpr64 , X86::UCOM_Fr }, - { X86::NEW_UCOM_Fpr80 , X86::UCOM_Fr }, - { X86::SIN_Fp32 , X86::SIN_F }, { X86::SIN_Fp64 , X86::SIN_F }, { X86::SIN_Fp80 , X86::SIN_F }, diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 6d3e4db38d3..9dc4dd69c2c 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -3356,15 +3356,12 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, DAG.getConstant(32, MVT::i8)); - SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)}; - SDOperand Cond = NewCCModeling - ? DAG.getNode(X86ISD::CMP_NEW, MVT::i32, - AndNode, DAG.getConstant(0, MVT::i8)) - : DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1); + SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32, + AndNode, DAG.getConstant(0, MVT::i8)); SDOperand Hi, Lo; SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); - unsigned Opc = NewCCModeling ? X86ISD::CMOV_NEW : X86ISD::CMOV; + unsigned Opc = X86ISD::CMOV; VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag); SmallVector Ops; if (Op.getOpcode() == ISD::SHL_PARTS) { @@ -3372,43 +3369,27 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { Ops.push_back(Tmp3); Ops.push_back(CC); Ops.push_back(Cond); - if (NewCCModeling) - Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); - else { - Hi = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size()); - Cond = Hi.getValue(1); - } + Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); Ops.clear(); Ops.push_back(Tmp3); Ops.push_back(Tmp1); Ops.push_back(CC); Ops.push_back(Cond); - if (NewCCModeling) - Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); - else - Lo = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size()); + Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); } else { Ops.push_back(Tmp2); Ops.push_back(Tmp3); Ops.push_back(CC); Ops.push_back(Cond); - if (NewCCModeling) - Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); - else { - Lo = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size()); - Cond = Lo.getValue(1); - } + Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); Ops.clear(); Ops.push_back(Tmp3); Ops.push_back(Tmp1); Ops.push_back(CC); Ops.push_back(Cond); - if (NewCCModeling) - Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); - else - Hi = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size()); + Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size()); } VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32); @@ -3674,54 +3655,7 @@ SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); } -SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG, - SDOperand Chain) { - assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); - SDOperand Cond; - SDOperand Op0 = Op.getOperand(0); - SDOperand Op1 = Op.getOperand(1); - SDOperand CC = Op.getOperand(2); - ISD::CondCode SetCCOpcode = cast(CC)->get(); - const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); - const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); - bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType()); - unsigned X86CC; - - if (translateX86CC(cast(CC)->get(), 
isFP, X86CC, - Op0, Op1, DAG)) { - SDOperand Ops1[] = { Chain, Op0, Op1 }; - Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1); - SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; - return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); - } - - assert(isFP && "Illegal integer SetCC!"); - - SDOperand COps[] = { Chain, Op0, Op1 }; - Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1); - - switch (SetCCOpcode) { - default: assert(false && "Illegal floating point SetCC!"); - case ISD::SETOEQ: { // !PF & ZF - SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond }; - SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); - SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8), - Tmp1.getValue(1) }; - SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); - return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); - } - case ISD::SETUNE: { // PF | !ZF - SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond }; - SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2); - SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8), - Tmp1.getValue(1) }; - SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2); - return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); - } - } -} - -SDOperand X86TargetLowering::LowerSETCC_New(SDOperand Op, SelectionDAG &DAG) { +SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); SDOperand Cond; SDOperand Op0 = Op.getOperand(0); @@ -3733,27 +3667,27 @@ SDOperand X86TargetLowering::LowerSETCC_New(SDOperand Op, SelectionDAG &DAG) { if (translateX86CC(cast(CC)->get(), isFP, X86CC, Op0, Op1, DAG)) { - Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Op0, Op1); - return DAG.getNode(X86ISD::SETCC_NEW, MVT::i8, + Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); + return DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86CC, MVT::i8), Cond); } assert(isFP && "Illegal integer SetCC!"); - Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Op0, Op1); + Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1); switch (SetCCOpcode) { default: assert(false && "Illegal floating point SetCC!"); case ISD::SETOEQ: { // !PF & ZF - SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8, + SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_NP, MVT::i8), Cond); - SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8, + SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_E, MVT::i8), Cond); return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); } case ISD::SETUNE: { // PF | !ZF - SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8, + SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_P, MVT::i8), Cond); - SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8, + SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_NE, MVT::i8), Cond); return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); } @@ -3763,13 +3697,11 @@ SDOperand X86TargetLowering::LowerSETCC_New(SDOperand Op, SelectionDAG &DAG) { SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { bool addTest = true; - SDOperand Chain = DAG.getEntryNode(); SDOperand Cond = Op.getOperand(0); SDOperand CC; - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); if (Cond.getOpcode() == ISD::SETCC) - Cond = LowerSETCC(Cond, DAG, Chain); + Cond = LowerSETCC(Cond, DAG); if (Cond.getOpcode() == X86ISD::SETCC) { CC = Cond.getOperand(0); 
@@ -3786,58 +3718,9 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { ! ((X86ScalarSSEf32 && Op.getValueType()==MVT::f32) || (X86ScalarSSEf64 && Op.getValueType()==MVT::f64)) && !hasFPCMov(cast(CC)->getSignExtended()); - if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) && - !IllegalFPCMov) { - SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; - Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); - addTest = false; - } - } - - if (addTest) { - CC = DAG.getConstant(X86::COND_NE, MVT::i8); - SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; - Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); - } - - VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); - SmallVector Ops; - // X86ISD::CMOV means set the result (which is operand 1) to the RHS if - // condition is true. - Ops.push_back(Op.getOperand(2)); - Ops.push_back(Op.getOperand(1)); - Ops.push_back(CC); - Ops.push_back(Cond.getValue(1)); - return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); -} - -SDOperand X86TargetLowering::LowerSELECT_New(SDOperand Op, SelectionDAG &DAG) { - bool addTest = true; - SDOperand Cond = Op.getOperand(0); - SDOperand CC; - - if (Cond.getOpcode() == ISD::SETCC) - Cond = LowerSETCC_New(Cond, DAG); - - if (Cond.getOpcode() == X86ISD::SETCC_NEW) { - CC = Cond.getOperand(0); - - // If condition flag is set by a X86ISD::CMP, then make a copy of it - // (since flag operand cannot be shared). Use it as the condition setting - // operand in place of the X86ISD::SETCC. - // If the X86ISD::SETCC has more than one use, then perhaps it's better - // to use a test instead of duplicating the X86ISD::CMP (for register - // pressure reason)? - SDOperand Cmp = Cond.getOperand(1); - unsigned Opc = Cmp.getOpcode(); - bool IllegalFPCMov = - ! ((X86ScalarSSEf32 && Op.getValueType()==MVT::f32) || - (X86ScalarSSEf64 && Op.getValueType()==MVT::f64)) && - !hasFPCMov(cast(CC)->getSignExtended()); - if ((Opc == X86ISD::CMP_NEW || - Opc == X86ISD::COMI_NEW || - Opc == X86ISD::UCOMI_NEW) && - !IllegalFPCMov) { + if ((Opc == X86ISD::CMP || + Opc == X86ISD::COMI || + Opc == X86ISD::UCOMI) && !IllegalFPCMov) { Cond = DAG.getNode(Opc, MVT::i32, Cmp.getOperand(0), Cmp.getOperand(1)); addTest = false; } @@ -3845,7 +3728,7 @@ SDOperand X86TargetLowering::LowerSELECT_New(SDOperand Op, SelectionDAG &DAG) { if (addTest) { CC = DAG.getConstant(X86::COND_NE, MVT::i8); - Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Cond, + Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); } @@ -3858,7 +3741,7 @@ SDOperand X86TargetLowering::LowerSELECT_New(SDOperand Op, SelectionDAG &DAG) { Ops.push_back(Op.getOperand(1)); Ops.push_back(CC); Ops.push_back(Cond); - return DAG.getNode(X86ISD::CMOV_NEW, VTs, 2, &Ops[0], Ops.size()); + return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); } SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { @@ -3867,10 +3750,9 @@ SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { SDOperand Cond = Op.getOperand(1); SDOperand Dest = Op.getOperand(2); SDOperand CC; - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); if (Cond.getOpcode() == ISD::SETCC) - Cond = LowerSETCC(Cond, DAG, Chain); + Cond = LowerSETCC(Cond, DAG); if (Cond.getOpcode() == X86ISD::SETCC) { CC = Cond.getOperand(0); @@ -3883,46 +3765,9 @@ SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { // pressure reason)? 
SDOperand Cmp = Cond.getOperand(1); unsigned Opc = Cmp.getOpcode(); - if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) { - SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) }; - Cond = DAG.getNode(Opc, VTs, 2, Ops, 3); - addTest = false; - } - } - - if (addTest) { - CC = DAG.getConstant(X86::COND_NE, MVT::i8); - SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) }; - Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3); - } - return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), - Cond, Op.getOperand(2), CC, Cond.getValue(1)); -} - -SDOperand X86TargetLowering::LowerBRCOND_New(SDOperand Op, SelectionDAG &DAG) { - bool addTest = true; - SDOperand Chain = Op.getOperand(0); - SDOperand Cond = Op.getOperand(1); - SDOperand Dest = Op.getOperand(2); - SDOperand CC; - - if (Cond.getOpcode() == ISD::SETCC) - Cond = LowerSETCC_New(Cond, DAG); - - if (Cond.getOpcode() == X86ISD::SETCC_NEW) { - CC = Cond.getOperand(0); - - // If condition flag is set by a X86ISD::CMP, then make a copy of it - // (since flag operand cannot be shared). Use it as the condition setting - // operand in place of the X86ISD::SETCC. - // If the X86ISD::SETCC has more than one use, then perhaps it's better - // to use a test instead of duplicating the X86ISD::CMP (for register - // pressure reason)? - SDOperand Cmp = Cond.getOperand(1); - unsigned Opc = Cmp.getOpcode(); - if (Opc == X86ISD::CMP_NEW || - Opc == X86ISD::COMI_NEW || - Opc == X86ISD::UCOMI_NEW) { + if (Opc == X86ISD::CMP || + Opc == X86ISD::COMI || + Opc == X86ISD::UCOMI) { Cond = DAG.getNode(Opc, MVT::i32, Cmp.getOperand(0), Cmp.getOperand(1)); addTest = false; } @@ -3930,9 +3775,9 @@ SDOperand X86TargetLowering::LowerBRCOND_New(SDOperand Op, SelectionDAG &DAG) { if (addTest) { CC = DAG.getConstant(X86::COND_NE, MVT::i8); - Cond= DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); + Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8)); } - return DAG.getNode(X86ISD::BRCOND_NEW, Op.getValueType(), + return DAG.getNode(X86ISD::BRCOND, Op.getValueType(), Chain, Op.getOperand(2), CC, Cond); } @@ -4535,21 +4380,10 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { SDOperand RHS = Op.getOperand(2); translateX86CC(CC, true, X86CC, LHS, RHS, DAG); - if (NewCCModeling) { - Opc = (Opc == X86ISD::UCOMI) ? 
X86ISD::UCOMI_NEW : X86ISD::COMI_NEW; - SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); - SDOperand SetCC = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8, - DAG.getConstant(X86CC, MVT::i8), Cond); - return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); - } else { - const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag); - SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS }; - SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3); - VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag); - SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond }; - SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2); - return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); - } + SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); + SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, + DAG.getConstant(X86CC, MVT::i8), Cond); + return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); } } } @@ -4721,15 +4555,9 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::FABS: return LowerFABS(Op, DAG); case ISD::FNEG: return LowerFNEG(Op, DAG); case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); - case ISD::SETCC: return NewCCModeling - ? LowerSETCC_New(Op, DAG) - : LowerSETCC(Op, DAG, DAG.getEntryNode()); - case ISD::SELECT: return NewCCModeling - ? LowerSELECT_New(Op, DAG) - : LowerSELECT(Op, DAG); - case ISD::BRCOND: return NewCCModeling - ? LowerBRCOND_New(Op, DAG) - : LowerBRCOND(Op, DAG); + case ISD::SETCC: return LowerSETCC(Op, DAG); + case ISD::SELECT: return LowerSELECT(Op, DAG); + case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::CALL: return LowerCALL(Op, DAG); case ISD::RET: return LowerRET(Op, DAG); @@ -4773,17 +4601,11 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::TAILCALL: return "X86ISD::TAILCALL"; case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; case X86ISD::CMP: return "X86ISD::CMP"; - case X86ISD::CMP_NEW: return "X86ISD::CMP_NEW"; case X86ISD::COMI: return "X86ISD::COMI"; - case X86ISD::COMI_NEW: return "X86ISD::COMI_NEW"; case X86ISD::UCOMI: return "X86ISD::UCOMI"; - case X86ISD::UCOMI_NEW: return "X86ISD::UCOMI_NEW"; case X86ISD::SETCC: return "X86ISD::SETCC"; - case X86ISD::SETCC_NEW: return "X86ISD::SETCC_NEW"; case X86ISD::CMOV: return "X86ISD::CMOV"; - case X86ISD::CMOV_NEW: return "X86ISD::CMOV_NEW"; case X86ISD::BRCOND: return "X86ISD::BRCOND"; - case X86ISD::BRCOND_NEW: return "X86ISD::BRCOND_NEW"; case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; @@ -4902,13 +4724,7 @@ X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI, case X86::CMOV_FR64: case X86::CMOV_V4F32: case X86::CMOV_V2F64: - case X86::CMOV_V2I64: - - case X86::NEW_CMOV_FR32: - case X86::NEW_CMOV_FR64: - case X86::NEW_CMOV_V4F32: - case X86::NEW_CMOV_V2F64: - case X86::NEW_CMOV_V2I64: { + case X86::CMOV_V2I64: { // To "insert" a SELECT_CC instruction, we actually have to insert the // diamond control-flow pattern. 
The incoming instruction knows the // destination vreg to set, the condition code register to branch on, the @@ -5065,7 +4881,6 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, switch (Opc) { default: break; case X86ISD::SETCC: - case X86ISD::SETCC_NEW: KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL); break; } diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 6e70a0b3fa9..9af46801d2c 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -117,26 +117,22 @@ namespace llvm { /// X86 compare and logical compare instructions. CMP, COMI, UCOMI, - CMP_NEW, COMI_NEW, UCOMI_NEW, /// X86 SetCC. Operand 1 is condition code, and operand 2 is the flag /// operand produced by a CMP instruction. SETCC, - SETCC_NEW, /// X86 conditional moves. Operand 1 and operand 2 are the two values /// to select from (operand 1 is a R/W operand). Operand 3 is the /// condition code, and operand 4 is the flag operand produced by a CMP /// or TEST instruction. It also writes a flag result. CMOV, - CMOV_NEW, /// X86 conditional branches. Operand 1 is the chain operand, operand 2 /// is the block to branch if condition is true, operand 3 is the /// condition code, and operand 4 is the flag operand produced by a CMP /// or TEST instruction. BRCOND, - BRCOND_NEW, /// Return with a flag operand. Operand 1 is the chain operand, operand /// 2 is the number of bytes of stack to pop. @@ -430,12 +426,9 @@ namespace llvm { SDOperand LowerFABS(SDOperand Op, SelectionDAG &DAG); SDOperand LowerFNEG(SDOperand Op, SelectionDAG &DAG); SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG, SDOperand Chain); - SDOperand LowerSETCC_New(SDOperand Op, SelectionDAG &DAG); + SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG); SDOperand LowerSELECT(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSELECT_New(SDOperand Op, SelectionDAG &DAG); SDOperand LowerBRCOND(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerBRCOND_New(SDOperand Op, SelectionDAG &DAG); SDOperand LowerMEMSET(SDOperand Op, SelectionDAG &DAG); SDOperand LowerMEMCPYInline(SDOperand Dest, SDOperand Source, SDOperand Chain, unsigned Size, unsigned Align, diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index ec6373262e4..a26a4ffc710 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -299,17 +299,20 @@ def TST_F : FPI<0xE4, RawFrm, (outs), (ins), "ftst">, D9; // Floating point cmovs. 
multiclass FPCMov { - def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2), CondMovFP, + def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2), + CondMovFP, [(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2, - cc))]>; - def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2), CondMovFP, + cc, EFLAGS))]>; + def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2), + CondMovFP, [(set RFP64:$dst, (X86cmov RFP64:$src1, RFP64:$src2, - cc))]>; - def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), CondMovFP, + cc, EFLAGS))]>; + def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), + CondMovFP, [(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2, - cc))]>; + cc, EFLAGS))]>; } -let isTwoAddress = 1 in { +let Uses = [EFLAGS], isTwoAddress = 1 in { defm CMOVB : FPCMov; defm CMOVBE : FPCMov; defm CMOVE : FPCMov; @@ -320,31 +323,6 @@ defm CMOVNE : FPCMov; defm CMOVNP : FPCMov; } -multiclass NEW_FPCMov { - def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2), - CondMovFP, - [(set RFP32:$dst, (X86cmov_new RFP32:$src1, RFP32:$src2, - cc, EFLAGS))]>; - def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2), - CondMovFP, - [(set RFP64:$dst, (X86cmov_new RFP64:$src1, RFP64:$src2, - cc, EFLAGS))]>; - def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), - CondMovFP, - [(set RFP80:$dst, (X86cmov_new RFP80:$src1, RFP80:$src2, - cc, EFLAGS))]>; -} -let Uses = [EFLAGS], isTwoAddress = 1 in { -defm NEW_CMOVB : NEW_FPCMov; -defm NEW_CMOVBE : NEW_FPCMov; -defm NEW_CMOVE : NEW_FPCMov; -defm NEW_CMOVP : NEW_FPCMov; -defm NEW_CMOVNB : NEW_FPCMov; -defm NEW_CMOVNBE: NEW_FPCMov; -defm NEW_CMOVNE : NEW_FPCMov; -defm NEW_CMOVNP : NEW_FPCMov; -} - // These are not factored because there's no clean way to pass DA/DB. 
def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins), "fcmovb\t{$op, %st(0)|%ST(0), $op}">, DA; @@ -507,30 +485,17 @@ let Defs = [EFLAGS] in { def UCOM_Fpr32 : FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP, []>; // FPSW = cmp ST(0) with ST(i) def UCOM_FpIr32: FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP, - [(X86cmp RFP32:$lhs, RFP32:$rhs)]>; // CC = ST(0) cmp ST(i) + [(X86cmp RFP32:$lhs, RFP32:$rhs), + (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i) def UCOM_Fpr64 : FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP, []>; // FPSW = cmp ST(0) with ST(i) def UCOM_FpIr64: FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP, - [(X86cmp RFP64:$lhs, RFP64:$rhs)]>; // CC = ST(0) cmp ST(i) + [(X86cmp RFP64:$lhs, RFP64:$rhs), + (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i) def UCOM_Fpr80 : FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, []>; // FPSW = cmp ST(0) with ST(i) def UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, - [(X86cmp RFP80:$lhs, RFP80:$rhs)]>; // CC = ST(0) cmp ST(i) - -def NEW_UCOM_Fpr32 : FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP, - []>; // FPSW = cmp ST(0) with ST(i) -def NEW_UCOM_FpIr32: FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP, - [(X86cmp_new RFP32:$lhs, RFP32:$rhs), - (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i) -def NEW_UCOM_Fpr64 : FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP, - []>; // FPSW = cmp ST(0) with ST(i) -def NEW_UCOM_FpIr64: FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP, - [(X86cmp_new RFP64:$lhs, RFP64:$rhs), - (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i) -def NEW_UCOM_Fpr80 : FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, - []>; // FPSW = cmp ST(0) with ST(i) -def NEW_UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP, - [(X86cmp_new RFP80:$lhs, RFP80:$rhs), + [(X86cmp RFP80:$lhs, RFP80:$rhs), (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i) } diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index e510369399b..59b81168121 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -386,68 +386,28 @@ static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) { case X86::JNP: return X86::COND_NP; case X86::JO: return X86::COND_O; case X86::JNO: return X86::COND_NO; - // TEMPORARY - case X86::NEW_JE: return X86::COND_E; - case X86::NEW_JNE: return X86::COND_NE; - case X86::NEW_JL: return X86::COND_L; - case X86::NEW_JLE: return X86::COND_LE; - case X86::NEW_JG: return X86::COND_G; - case X86::NEW_JGE: return X86::COND_GE; - case X86::NEW_JB: return X86::COND_B; - case X86::NEW_JBE: return X86::COND_BE; - case X86::NEW_JA: return X86::COND_A; - case X86::NEW_JAE: return X86::COND_AE; - case X86::NEW_JS: return X86::COND_S; - case X86::NEW_JNS: return X86::COND_NS; - case X86::NEW_JP: return X86::COND_P; - case X86::NEW_JNP: return X86::COND_NP; - case X86::NEW_JO: return X86::COND_O; - case X86::NEW_JNO: return X86::COND_NO; - } } unsigned X86::GetCondBranchFromCond(X86::CondCode CC) { - if (!NewCCModeling) { - switch (CC) { - default: assert(0 && "Illegal condition code!"); - case X86::COND_E: return X86::JE; - case X86::COND_NE: return X86::JNE; - case X86::COND_L: return X86::JL; - case X86::COND_LE: return X86::JLE; - case X86::COND_G: return X86::JG; - case X86::COND_GE: return X86::JGE; - case X86::COND_B: return X86::JB; - case X86::COND_BE: return X86::JBE; - case X86::COND_A: return X86::JA; - case X86::COND_AE: return X86::JAE; - case X86::COND_S: return X86::JS; - case X86::COND_NS: 
return X86::JNS; - case X86::COND_P: return X86::JP; - case X86::COND_NP: return X86::JNP; - case X86::COND_O: return X86::JO; - case X86::COND_NO: return X86::JNO; - } - } - switch (CC) { default: assert(0 && "Illegal condition code!"); - case X86::COND_E: return X86::NEW_JE; - case X86::COND_NE: return X86::NEW_JNE; - case X86::COND_L: return X86::NEW_JL; - case X86::COND_LE: return X86::NEW_JLE; - case X86::COND_G: return X86::NEW_JG; - case X86::COND_GE: return X86::NEW_JGE; - case X86::COND_B: return X86::NEW_JB; - case X86::COND_BE: return X86::NEW_JBE; - case X86::COND_A: return X86::NEW_JA; - case X86::COND_AE: return X86::NEW_JAE; - case X86::COND_S: return X86::NEW_JS; - case X86::COND_NS: return X86::NEW_JNS; - case X86::COND_P: return X86::NEW_JP; - case X86::COND_NP: return X86::NEW_JNP; - case X86::COND_O: return X86::NEW_JO; - case X86::COND_NO: return X86::NEW_JNO; + case X86::COND_E: return X86::JE; + case X86::COND_NE: return X86::JNE; + case X86::COND_L: return X86::JL; + case X86::COND_LE: return X86::JLE; + case X86::COND_G: return X86::JG; + case X86::COND_GE: return X86::JGE; + case X86::COND_B: return X86::JB; + case X86::COND_BE: return X86::JBE; + case X86::COND_A: return X86::JA; + case X86::COND_AE: return X86::JAE; + case X86::COND_S: return X86::JS; + case X86::COND_NS: return X86::JNS; + case X86::COND_P: return X86::JP; + case X86::COND_NP: return X86::JNP; + case X86::COND_O: return X86::JO; + case X86::COND_NO: return X86::JNO; } } diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 86e102e1d1a..1a0fab3b864 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -23,22 +23,15 @@ def SDTIntShiftDOp: SDTypeProfile<1, 3, def SDTX86CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>; -def SDTX86Cmov : SDTypeProfile<1, 3, - [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, - SDTCisVT<3, i8>]>; -def SDTX86Cmov_NEW : SDTypeProfile<1, 4, +def SDTX86Cmov : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>, SDTCisVT<4, i32>]>; -def SDTX86BrCond : SDTypeProfile<0, 2, - [SDTCisVT<0, OtherVT>, SDTCisVT<1, i8>]>; -def SDTX86BrCond_NEW : SDTypeProfile<0, 3, +def SDTX86BrCond : SDTypeProfile<0, 3, [SDTCisVT<0, OtherVT>, SDTCisVT<1, i8>, SDTCisVT<2, i32>]>; -def SDTX86SetCC : SDTypeProfile<1, 1, - [SDTCisVT<0, i8>, SDTCisVT<1, i8>]>; -def SDTX86SetCC_NEW : SDTypeProfile<1, 2, +def SDTX86SetCC : SDTypeProfile<1, 2, [SDTCisVT<0, i8>, SDTCisVT<1, i8>, SDTCisVT<2, i32>]>; @@ -65,20 +58,12 @@ def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>; def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>; def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>; -def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest, - [SDNPHasChain, SDNPOutFlag]>; -def X86cmp_new : SDNode<"X86ISD::CMP_NEW" , SDTX86CmpTest>; +def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>; -def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov, - [SDNPInFlag, SDNPOutFlag]>; -def X86cmov_new: SDNode<"X86ISD::CMOV_NEW", SDTX86Cmov_NEW>; +def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>; def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond, - [SDNPHasChain, SDNPInFlag]>; -def X86brcond_new : SDNode<"X86ISD::BRCOND_NEW", SDTX86BrCond_NEW, [SDNPHasChain]>; -def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC, - [SDNPInFlag, SDNPOutFlag]>; -def X86setcc_new : SDNode<"X86ISD::SETCC_NEW", SDTX86SetCC_NEW>; +def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>; def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret, [SDNPHasChain, SDNPOptInFlag]>; @@ -319,76 
+304,39 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1 in { // Conditional branches let Uses = [EFLAGS] in { def JE : IBr<0x84, (ins brtarget:$dst), "je\t$dst", - [(X86brcond bb:$dst, X86_COND_E)]>, TB; + [(X86brcond bb:$dst, X86_COND_E, EFLAGS)]>, TB; def JNE : IBr<0x85, (ins brtarget:$dst), "jne\t$dst", - [(X86brcond bb:$dst, X86_COND_NE)]>, TB; + [(X86brcond bb:$dst, X86_COND_NE, EFLAGS)]>, TB; def JL : IBr<0x8C, (ins brtarget:$dst), "jl\t$dst", - [(X86brcond bb:$dst, X86_COND_L)]>, TB; + [(X86brcond bb:$dst, X86_COND_L, EFLAGS)]>, TB; def JLE : IBr<0x8E, (ins brtarget:$dst), "jle\t$dst", - [(X86brcond bb:$dst, X86_COND_LE)]>, TB; + [(X86brcond bb:$dst, X86_COND_LE, EFLAGS)]>, TB; def JG : IBr<0x8F, (ins brtarget:$dst), "jg\t$dst", - [(X86brcond bb:$dst, X86_COND_G)]>, TB; + [(X86brcond bb:$dst, X86_COND_G, EFLAGS)]>, TB; def JGE : IBr<0x8D, (ins brtarget:$dst), "jge\t$dst", - [(X86brcond bb:$dst, X86_COND_GE)]>, TB; + [(X86brcond bb:$dst, X86_COND_GE, EFLAGS)]>, TB; def JB : IBr<0x82, (ins brtarget:$dst), "jb\t$dst", - [(X86brcond bb:$dst, X86_COND_B)]>, TB; + [(X86brcond bb:$dst, X86_COND_B, EFLAGS)]>, TB; def JBE : IBr<0x86, (ins brtarget:$dst), "jbe\t$dst", - [(X86brcond bb:$dst, X86_COND_BE)]>, TB; + [(X86brcond bb:$dst, X86_COND_BE, EFLAGS)]>, TB; def JA : IBr<0x87, (ins brtarget:$dst), "ja\t$dst", - [(X86brcond bb:$dst, X86_COND_A)]>, TB; + [(X86brcond bb:$dst, X86_COND_A, EFLAGS)]>, TB; def JAE : IBr<0x83, (ins brtarget:$dst), "jae\t$dst", - [(X86brcond bb:$dst, X86_COND_AE)]>, TB; + [(X86brcond bb:$dst, X86_COND_AE, EFLAGS)]>, TB; def JS : IBr<0x88, (ins brtarget:$dst), "js\t$dst", - [(X86brcond bb:$dst, X86_COND_S)]>, TB; + [(X86brcond bb:$dst, X86_COND_S, EFLAGS)]>, TB; def JNS : IBr<0x89, (ins brtarget:$dst), "jns\t$dst", - [(X86brcond bb:$dst, X86_COND_NS)]>, TB; + [(X86brcond bb:$dst, X86_COND_NS, EFLAGS)]>, TB; def JP : IBr<0x8A, (ins brtarget:$dst), "jp\t$dst", - [(X86brcond bb:$dst, X86_COND_P)]>, TB; + [(X86brcond bb:$dst, X86_COND_P, EFLAGS)]>, TB; def JNP : IBr<0x8B, (ins brtarget:$dst), "jnp\t$dst", - [(X86brcond bb:$dst, X86_COND_NP)]>, TB; + [(X86brcond bb:$dst, X86_COND_NP, EFLAGS)]>, TB; def JO : IBr<0x80, (ins brtarget:$dst), "jo\t$dst", - [(X86brcond bb:$dst, X86_COND_O)]>, TB; + [(X86brcond bb:$dst, X86_COND_O, EFLAGS)]>, TB; def JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst", - [(X86brcond bb:$dst, X86_COND_NO)]>, TB; -} // Uses = [EFLAGS] - -let Uses = [EFLAGS] in { -def NEW_JE : IBr<0x84, (ins brtarget:$dst), "je\t$dst", - [(X86brcond_new bb:$dst, X86_COND_E, EFLAGS)]>, TB; -def NEW_JNE : IBr<0x85, (ins brtarget:$dst), "jne\t$dst", - [(X86brcond_new bb:$dst, X86_COND_NE, EFLAGS)]>, TB; -def NEW_JL : IBr<0x8C, (ins brtarget:$dst), "jl\t$dst", - [(X86brcond_new bb:$dst, X86_COND_L, EFLAGS)]>, TB; -def NEW_JLE : IBr<0x8E, (ins brtarget:$dst), "jle\t$dst", - [(X86brcond_new bb:$dst, X86_COND_LE, EFLAGS)]>, TB; -def NEW_JG : IBr<0x8F, (ins brtarget:$dst), "jg\t$dst", - [(X86brcond_new bb:$dst, X86_COND_G, EFLAGS)]>, TB; -def NEW_JGE : IBr<0x8D, (ins brtarget:$dst), "jge\t$dst", - [(X86brcond_new bb:$dst, X86_COND_GE, EFLAGS)]>, TB; - -def NEW_JB : IBr<0x82, (ins brtarget:$dst), "jb\t$dst", - [(X86brcond_new bb:$dst, X86_COND_B, EFLAGS)]>, TB; -def NEW_JBE : IBr<0x86, (ins brtarget:$dst), "jbe\t$dst", - [(X86brcond_new bb:$dst, X86_COND_BE, EFLAGS)]>, TB; -def NEW_JA : IBr<0x87, (ins brtarget:$dst), "ja\t$dst", - [(X86brcond_new bb:$dst, X86_COND_A, EFLAGS)]>, TB; -def NEW_JAE : IBr<0x83, (ins brtarget:$dst), "jae\t$dst", - [(X86brcond_new bb:$dst, 
X86_COND_AE, EFLAGS)]>, TB; - -def NEW_JS : IBr<0x88, (ins brtarget:$dst), "js\t$dst", - [(X86brcond_new bb:$dst, X86_COND_S, EFLAGS)]>, TB; -def NEW_JNS : IBr<0x89, (ins brtarget:$dst), "jns\t$dst", - [(X86brcond_new bb:$dst, X86_COND_NS, EFLAGS)]>, TB; -def NEW_JP : IBr<0x8A, (ins brtarget:$dst), "jp\t$dst", - [(X86brcond_new bb:$dst, X86_COND_P, EFLAGS)]>, TB; -def NEW_JNP : IBr<0x8B, (ins brtarget:$dst), "jnp\t$dst", - [(X86brcond_new bb:$dst, X86_COND_NP, EFLAGS)]>, TB; -def NEW_JO : IBr<0x80, (ins brtarget:$dst), "jo\t$dst", - [(X86brcond_new bb:$dst, X86_COND_O, EFLAGS)]>, TB; -def NEW_JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst", - [(X86brcond_new bb:$dst, X86_COND_NO, EFLAGS)]>, TB; + [(X86brcond bb:$dst, X86_COND_NO, EFLAGS)]>, TB; } // Uses = [EFLAGS] //===----------------------------------------------------------------------===// @@ -703,700 +651,349 @@ def CMOVB16rr : I<0x42, MRMSrcReg, // if , + X86_COND_B, EFLAGS))]>, TB, OpSize; def CMOVB16rm : I<0x42, MRMSrcMem, // if , + X86_COND_B, EFLAGS))]>, TB, OpSize; def CMOVB32rr : I<0x42, MRMSrcReg, // if , + X86_COND_B, EFLAGS))]>, TB; def CMOVB32rm : I<0x42, MRMSrcMem, // if , + X86_COND_B, EFLAGS))]>, TB; def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovae\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_AE))]>, + X86_COND_AE, EFLAGS))]>, TB, OpSize; def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovae\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_AE))]>, + X86_COND_AE, EFLAGS))]>, TB, OpSize; def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovae\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_AE))]>, + X86_COND_AE, EFLAGS))]>, TB; def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovae\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_AE))]>, + X86_COND_AE, EFLAGS))]>, TB; def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmove\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_E))]>, + X86_COND_E, EFLAGS))]>, TB, OpSize; def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmove\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_E))]>, + X86_COND_E, EFLAGS))]>, TB, OpSize; def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmove\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_E))]>, + X86_COND_E, EFLAGS))]>, TB; def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmove\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_E))]>, + X86_COND_E, EFLAGS))]>, TB; def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovne\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_NE))]>, + X86_COND_NE, EFLAGS))]>, TB, OpSize; def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, 
i16mem:$src2), "cmovne\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_NE))]>, + X86_COND_NE, EFLAGS))]>, TB, OpSize; def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovne\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_NE))]>, + X86_COND_NE, EFLAGS))]>, TB; def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovne\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_NE))]>, + X86_COND_NE, EFLAGS))]>, TB; def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovbe\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_BE))]>, + X86_COND_BE, EFLAGS))]>, TB, OpSize; def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovbe\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_BE))]>, + X86_COND_BE, EFLAGS))]>, TB, OpSize; def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovbe\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_BE))]>, + X86_COND_BE, EFLAGS))]>, TB; def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovbe\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_BE))]>, + X86_COND_BE, EFLAGS))]>, TB; def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmova\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_A))]>, + X86_COND_A, EFLAGS))]>, TB, OpSize; def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmova\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_A))]>, + X86_COND_A, EFLAGS))]>, TB, OpSize; def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmova\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_A))]>, + X86_COND_A, EFLAGS))]>, TB; def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmova\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_A))]>, + X86_COND_A, EFLAGS))]>, TB; def CMOVL16rr : I<0x4C, MRMSrcReg, // if , + X86_COND_L, EFLAGS))]>, TB, OpSize; def CMOVL16rm : I<0x4C, MRMSrcMem, // if , + X86_COND_L, EFLAGS))]>, TB, OpSize; def CMOVL32rr : I<0x4C, MRMSrcReg, // if , + X86_COND_L, EFLAGS))]>, TB; def CMOVL32rm : I<0x4C, MRMSrcMem, // if , + X86_COND_L, EFLAGS))]>, TB; def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovge\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_GE))]>, + X86_COND_GE, EFLAGS))]>, TB, OpSize; def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovge\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_GE))]>, + X86_COND_GE, EFLAGS))]>, TB, OpSize; def CMOVGE32rr: I<0x4D, MRMSrcReg, // if 
>=s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovge\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_GE))]>, + X86_COND_GE, EFLAGS))]>, TB; def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovge\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_GE))]>, + X86_COND_GE, EFLAGS))]>, TB; def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovle\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_LE))]>, + X86_COND_LE, EFLAGS))]>, TB, OpSize; def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovle\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_LE))]>, + X86_COND_LE, EFLAGS))]>, TB, OpSize; def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovle\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_LE))]>, + X86_COND_LE, EFLAGS))]>, TB; def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovle\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_LE))]>, + X86_COND_LE, EFLAGS))]>, TB; def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovg\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_G))]>, + X86_COND_G, EFLAGS))]>, TB, OpSize; def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovg\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_G))]>, + X86_COND_G, EFLAGS))]>, TB, OpSize; def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovg\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_G))]>, + X86_COND_G, EFLAGS))]>, TB; def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovg\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_G))]>, + X86_COND_G, EFLAGS))]>, TB; def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovs\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_S))]>, + X86_COND_S, EFLAGS))]>, TB, OpSize; def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovs\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_S))]>, + X86_COND_S, EFLAGS))]>, TB, OpSize; def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovs\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_S))]>, + X86_COND_S, EFLAGS))]>, TB; def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovs\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_S))]>, + X86_COND_S, EFLAGS))]>, TB; def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = 
GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovns\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_NS))]>, + X86_COND_NS, EFLAGS))]>, TB, OpSize; def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovns\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_NS))]>, + X86_COND_NS, EFLAGS))]>, TB, OpSize; def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovns\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_NS))]>, + X86_COND_NS, EFLAGS))]>, TB; def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovns\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_NS))]>, + X86_COND_NS, EFLAGS))]>, TB; def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovp\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_P))]>, + X86_COND_P, EFLAGS))]>, TB, OpSize; def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovp\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_P))]>, + X86_COND_P, EFLAGS))]>, TB, OpSize; def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovp\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_P))]>, + X86_COND_P, EFLAGS))]>, TB; def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovp\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_P))]>, + X86_COND_P, EFLAGS))]>, TB; def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, - X86_COND_NP))]>, + X86_COND_NP, EFLAGS))]>, TB, OpSize; def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), - X86_COND_NP))]>, + X86_COND_NP, EFLAGS))]>, TB, OpSize; def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, - X86_COND_NP))]>, + X86_COND_NP, EFLAGS))]>, TB; def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), - X86_COND_NP))]>, - TB; - - -def NEW_CMOVB16rr : I<0x42, MRMSrcReg, // if , - TB, OpSize; -def NEW_CMOVB16rm : I<0x42, MRMSrcMem, // if , - TB, OpSize; -def NEW_CMOVB32rr : I<0x42, MRMSrcReg, // if , - TB; -def NEW_CMOVB32rm : I<0x42, MRMSrcMem, // if , - TB; - -def NEW_CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_AE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = 
[mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_AE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_AE, EFLAGS))]>, - TB; -def NEW_CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_AE, EFLAGS))]>, - TB; - -def NEW_CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_E, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_E, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_E, EFLAGS))]>, - TB; -def NEW_CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_E, EFLAGS))]>, - TB; - -def NEW_CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_NE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_NE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_NE, EFLAGS))]>, - TB; -def NEW_CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_NE, EFLAGS))]>, - TB; - -def NEW_CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_BE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_BE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_BE, EFLAGS))]>, - TB; -def NEW_CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32] - (outs GR32:$dst), 
(ins GR32:$src1, i32mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_BE, EFLAGS))]>, - TB; - -def NEW_CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_A, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_A, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_A, EFLAGS))]>, - TB; -def NEW_CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_A, EFLAGS))]>, - TB; - -def NEW_CMOVL16rr : I<0x4C, MRMSrcReg, // if , - TB, OpSize; -def NEW_CMOVL16rm : I<0x4C, MRMSrcMem, // if , - TB, OpSize; -def NEW_CMOVL32rr : I<0x4C, MRMSrcReg, // if , - TB; -def NEW_CMOVL32rm : I<0x4C, MRMSrcMem, // if , - TB; - -def NEW_CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_GE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_GE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_GE, EFLAGS))]>, - TB; -def NEW_CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_GE, EFLAGS))]>, - TB; - -def NEW_CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_LE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_LE, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_LE, EFLAGS))]>, - TB; -def NEW_CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_LE, EFLAGS))]>, - TB; - -def NEW_CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - 
"cmovg\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_G, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_G, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_G, EFLAGS))]>, - TB; -def NEW_CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_G, EFLAGS))]>, - TB; - -def NEW_CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_S, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_S, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_S, EFLAGS))]>, - TB; -def NEW_CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_S, EFLAGS))]>, - TB; - -def NEW_CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_NS, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_NS, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_NS, EFLAGS))]>, - TB; -def NEW_CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_NS, EFLAGS))]>, - TB; - -def NEW_CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_P, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_P, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, 
GR32:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_P, EFLAGS))]>, - TB; -def NEW_CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), - X86_COND_P, EFLAGS))]>, - TB; - -def NEW_CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16 - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2, - X86_COND_NP, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16] - (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2), - X86_COND_NP, EFLAGS))]>, - TB, OpSize; -def NEW_CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32 - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2, - X86_COND_NP, EFLAGS))]>, - TB; -def NEW_CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32] - (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", - [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; } // Uses = [EFLAGS] @@ -2442,116 +2039,63 @@ def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8 let Defs = [EFLAGS] in { let isCommutable = 1 in { // TEST X, Y --> TEST Y, X def TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2), - "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR8:$src1, GR8:$src2), 0)]>; -def TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), - "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR16:$src1, GR16:$src2), 0)]>, OpSize; -def TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), - "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR32:$src1, GR32:$src2), 0)]>; -} - -def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2), - "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0)]>; -def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2), - "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR16:$src1, (loadi16 addr:$src2)), 0)]>, - OpSize; -def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2), - "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR32:$src1, (loadi32 addr:$src2)), 0)]>; - -def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8 - (outs), (ins GR8:$src1, i8imm:$src2), - "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR8:$src1, imm:$src2), 0)]>; -def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16 - (outs), (ins GR16:$src1, i16imm:$src2), - "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR16:$src1, imm:$src2), 0)]>, OpSize; -def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32 - (outs), (ins GR32:$src1, i32imm:$src2), - "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR32:$src1, imm:$src2), 0)]>; - -def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8 - (outs), (ins i8mem:$src1, i8imm:$src2), - "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and (loadi8 addr:$src1), imm:$src2), 0)]>; -def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16 - (outs), (ins i16mem:$src1, i16imm:$src2), - "test{w}\t{$src2, $src1|$src1, $src2}", - 
[(X86cmp (and (loadi16 addr:$src1), imm:$src2), 0)]>, - OpSize; -def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32 - (outs), (ins i32mem:$src1, i32imm:$src2), - "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and (loadi32 addr:$src1), imm:$src2), 0)]>; -} // Defs = [EFLAGS] - - -let Defs = [EFLAGS] in { -let isCommutable = 1 in { // TEST X, Y --> TEST Y, X -def NEW_TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2), "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR8:$src1, GR8:$src2), 0), + [(X86cmp (and GR8:$src1, GR8:$src2), 0), (implicit EFLAGS)]>; -def NEW_TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), +def TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR16:$src1, GR16:$src2), 0), + [(X86cmp (and GR16:$src1, GR16:$src2), 0), (implicit EFLAGS)]>, OpSize; -def NEW_TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), +def TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR32:$src1, GR32:$src2), 0), + [(X86cmp (and GR32:$src1, GR32:$src2), 0), (implicit EFLAGS)]>; } -def NEW_TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2), +def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2), "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR8:$src1, (loadi8 addr:$src2)), 0), + [(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0), (implicit EFLAGS)]>; -def NEW_TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2), +def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2), "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR16:$src1, (loadi16 addr:$src2)), 0), + [(X86cmp (and GR16:$src1, (loadi16 addr:$src2)), 0), (implicit EFLAGS)]>, OpSize; -def NEW_TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2), +def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2), "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR32:$src1, (loadi32 addr:$src2)), 0), + [(X86cmp (and GR32:$src1, (loadi32 addr:$src2)), 0), (implicit EFLAGS)]>; -def NEW_TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8 +def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8 (outs), (ins GR8:$src1, i8imm:$src2), "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR8:$src1, imm:$src2), 0), + [(X86cmp (and GR8:$src1, imm:$src2), 0), (implicit EFLAGS)]>; -def NEW_TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16 +def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16 (outs), (ins GR16:$src1, i16imm:$src2), "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR16:$src1, imm:$src2), 0), + [(X86cmp (and GR16:$src1, imm:$src2), 0), (implicit EFLAGS)]>, OpSize; -def NEW_TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32 +def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32 (outs), (ins GR32:$src1, i32imm:$src2), "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR32:$src1, imm:$src2), 0), + [(X86cmp (and GR32:$src1, imm:$src2), 0), (implicit EFLAGS)]>; -def NEW_TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8 +def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8 (outs), (ins i8mem:$src1, i8imm:$src2), "test{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and (loadi8 addr:$src1), imm:$src2), 0), + [(X86cmp (and (loadi8 addr:$src1), imm:$src2), 0), (implicit EFLAGS)]>; -def NEW_TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16 +def 
TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16 (outs), (ins i16mem:$src1, i16imm:$src2), "test{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and (loadi16 addr:$src1), imm:$src2), 0), + [(X86cmp (and (loadi16 addr:$src1), imm:$src2), 0), (implicit EFLAGS)]>, OpSize; -def NEW_TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32 +def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32 (outs), (ins i32mem:$src1, i32imm:$src2), "test{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and (loadi32 addr:$src1), imm:$src2), 0), + [(X86cmp (and (loadi32 addr:$src1), imm:$src2), 0), (implicit EFLAGS)]>; } // Defs = [EFLAGS] @@ -2566,465 +2110,239 @@ let Uses = [EFLAGS] in { def SETEr : I<0x94, MRM0r, (outs GR8 :$dst), (ins), "sete\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_E))]>, + [(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>, TB; // GR8 = == def SETEm : I<0x94, MRM0m, (outs), (ins i8mem:$dst), "sete\t$dst", - [(store (X86setcc X86_COND_E), addr:$dst)]>, + [(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>, TB; // [mem8] = == def SETNEr : I<0x95, MRM0r, (outs GR8 :$dst), (ins), "setne\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_NE))]>, + [(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>, TB; // GR8 = != def SETNEm : I<0x95, MRM0m, (outs), (ins i8mem:$dst), "setne\t$dst", - [(store (X86setcc X86_COND_NE), addr:$dst)]>, + [(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>, TB; // [mem8] = != def SETLr : I<0x9C, MRM0r, (outs GR8 :$dst), (ins), "setl\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_L))]>, + [(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>, TB; // GR8 = < signed def SETLm : I<0x9C, MRM0m, (outs), (ins i8mem:$dst), "setl\t$dst", - [(store (X86setcc X86_COND_L), addr:$dst)]>, + [(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>, TB; // [mem8] = < signed def SETGEr : I<0x9D, MRM0r, (outs GR8 :$dst), (ins), "setge\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_GE))]>, + [(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>, TB; // GR8 = >= signed def SETGEm : I<0x9D, MRM0m, (outs), (ins i8mem:$dst), "setge\t$dst", - [(store (X86setcc X86_COND_GE), addr:$dst)]>, + [(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>, TB; // [mem8] = >= signed def SETLEr : I<0x9E, MRM0r, (outs GR8 :$dst), (ins), "setle\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_LE))]>, + [(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>, TB; // GR8 = <= signed def SETLEm : I<0x9E, MRM0m, (outs), (ins i8mem:$dst), "setle\t$dst", - [(store (X86setcc X86_COND_LE), addr:$dst)]>, + [(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>, TB; // [mem8] = <= signed def SETGr : I<0x9F, MRM0r, (outs GR8 :$dst), (ins), "setg\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_G))]>, + [(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>, TB; // GR8 = > signed def SETGm : I<0x9F, MRM0m, (outs), (ins i8mem:$dst), "setg\t$dst", - [(store (X86setcc X86_COND_G), addr:$dst)]>, + [(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>, TB; // [mem8] = > signed def SETBr : I<0x92, MRM0r, (outs GR8 :$dst), (ins), "setb\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_B))]>, + [(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>, TB; // GR8 = < unsign def SETBm : I<0x92, MRM0m, (outs), (ins i8mem:$dst), "setb\t$dst", - [(store (X86setcc X86_COND_B), addr:$dst)]>, + [(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>, TB; // [mem8] = < unsign def SETAEr : I<0x93, MRM0r, (outs GR8 :$dst), (ins), "setae\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_AE))]>, + [(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>, TB; // GR8 = >= unsign def SETAEm : I<0x93, 
MRM0m, (outs), (ins i8mem:$dst), "setae\t$dst", - [(store (X86setcc X86_COND_AE), addr:$dst)]>, + [(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>, TB; // [mem8] = >= unsign def SETBEr : I<0x96, MRM0r, (outs GR8 :$dst), (ins), "setbe\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_BE))]>, + [(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>, TB; // GR8 = <= unsign def SETBEm : I<0x96, MRM0m, (outs), (ins i8mem:$dst), "setbe\t$dst", - [(store (X86setcc X86_COND_BE), addr:$dst)]>, + [(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>, TB; // [mem8] = <= unsign def SETAr : I<0x97, MRM0r, (outs GR8 :$dst), (ins), "seta\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_A))]>, + [(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>, TB; // GR8 = > signed def SETAm : I<0x97, MRM0m, (outs), (ins i8mem:$dst), "seta\t$dst", - [(store (X86setcc X86_COND_A), addr:$dst)]>, + [(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>, TB; // [mem8] = > signed def SETSr : I<0x98, MRM0r, (outs GR8 :$dst), (ins), "sets\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_S))]>, + [(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>, TB; // GR8 = def SETSm : I<0x98, MRM0m, (outs), (ins i8mem:$dst), "sets\t$dst", - [(store (X86setcc X86_COND_S), addr:$dst)]>, + [(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>, TB; // [mem8] = def SETNSr : I<0x99, MRM0r, (outs GR8 :$dst), (ins), "setns\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_NS))]>, + [(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>, TB; // GR8 = ! def SETNSm : I<0x99, MRM0m, (outs), (ins i8mem:$dst), "setns\t$dst", - [(store (X86setcc X86_COND_NS), addr:$dst)]>, + [(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>, TB; // [mem8] = ! def SETPr : I<0x9A, MRM0r, (outs GR8 :$dst), (ins), "setp\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_P))]>, + [(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>, TB; // GR8 = parity def SETPm : I<0x9A, MRM0m, (outs), (ins i8mem:$dst), "setp\t$dst", - [(store (X86setcc X86_COND_P), addr:$dst)]>, + [(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>, TB; // [mem8] = parity def SETNPr : I<0x9B, MRM0r, (outs GR8 :$dst), (ins), "setnp\t$dst", - [(set GR8:$dst, (X86setcc X86_COND_NP))]>, + [(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>, TB; // GR8 = not parity def SETNPm : I<0x9B, MRM0m, (outs), (ins i8mem:$dst), "setnp\t$dst", - [(store (X86setcc X86_COND_NP), addr:$dst)]>, + [(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>, TB; // [mem8] = not parity } // Uses = [EFLAGS] -let Uses = [EFLAGS] in { -def NEW_SETEr : I<0x94, MRM0r, - (outs GR8 :$dst), (ins), - "sete\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_E, EFLAGS))]>, - TB; // GR8 = == -def NEW_SETEm : I<0x94, MRM0m, - (outs), (ins i8mem:$dst), - "sete\t$dst", - [(store (X86setcc_new X86_COND_E, EFLAGS), addr:$dst)]>, - TB; // [mem8] = == -def NEW_SETNEr : I<0x95, MRM0r, - (outs GR8 :$dst), (ins), - "setne\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_NE, EFLAGS))]>, - TB; // GR8 = != -def NEW_SETNEm : I<0x95, MRM0m, - (outs), (ins i8mem:$dst), - "setne\t$dst", - [(store (X86setcc_new X86_COND_NE, EFLAGS), addr:$dst)]>, - TB; // [mem8] = != -def NEW_SETLr : I<0x9C, MRM0r, - (outs GR8 :$dst), (ins), - "setl\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_L, EFLAGS))]>, - TB; // GR8 = < signed -def NEW_SETLm : I<0x9C, MRM0m, - (outs), (ins i8mem:$dst), - "setl\t$dst", - [(store (X86setcc_new X86_COND_L, EFLAGS), addr:$dst)]>, - TB; // [mem8] = < signed -def NEW_SETGEr : I<0x9D, MRM0r, - (outs GR8 :$dst), (ins), - "setge\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_GE, EFLAGS))]>, 
- TB; // GR8 = >= signed -def NEW_SETGEm : I<0x9D, MRM0m, - (outs), (ins i8mem:$dst), - "setge\t$dst", - [(store (X86setcc_new X86_COND_GE, EFLAGS), addr:$dst)]>, - TB; // [mem8] = >= signed -def NEW_SETLEr : I<0x9E, MRM0r, - (outs GR8 :$dst), (ins), - "setle\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_LE, EFLAGS))]>, - TB; // GR8 = <= signed -def NEW_SETLEm : I<0x9E, MRM0m, - (outs), (ins i8mem:$dst), - "setle\t$dst", - [(store (X86setcc_new X86_COND_LE, EFLAGS), addr:$dst)]>, - TB; // [mem8] = <= signed -def NEW_SETGr : I<0x9F, MRM0r, - (outs GR8 :$dst), (ins), - "setg\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_G, EFLAGS))]>, - TB; // GR8 = > signed -def NEW_SETGm : I<0x9F, MRM0m, - (outs), (ins i8mem:$dst), - "setg\t$dst", - [(store (X86setcc_new X86_COND_G, EFLAGS), addr:$dst)]>, - TB; // [mem8] = > signed - -def NEW_SETBr : I<0x92, MRM0r, - (outs GR8 :$dst), (ins), - "setb\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_B, EFLAGS))]>, - TB; // GR8 = < unsign -def NEW_SETBm : I<0x92, MRM0m, - (outs), (ins i8mem:$dst), - "setb\t$dst", - [(store (X86setcc_new X86_COND_B, EFLAGS), addr:$dst)]>, - TB; // [mem8] = < unsign -def NEW_SETAEr : I<0x93, MRM0r, - (outs GR8 :$dst), (ins), - "setae\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_AE, EFLAGS))]>, - TB; // GR8 = >= unsign -def NEW_SETAEm : I<0x93, MRM0m, - (outs), (ins i8mem:$dst), - "setae\t$dst", - [(store (X86setcc_new X86_COND_AE, EFLAGS), addr:$dst)]>, - TB; // [mem8] = >= unsign -def NEW_SETBEr : I<0x96, MRM0r, - (outs GR8 :$dst), (ins), - "setbe\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_BE, EFLAGS))]>, - TB; // GR8 = <= unsign -def NEW_SETBEm : I<0x96, MRM0m, - (outs), (ins i8mem:$dst), - "setbe\t$dst", - [(store (X86setcc_new X86_COND_BE, EFLAGS), addr:$dst)]>, - TB; // [mem8] = <= unsign -def NEW_SETAr : I<0x97, MRM0r, - (outs GR8 :$dst), (ins), - "seta\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_A, EFLAGS))]>, - TB; // GR8 = > signed -def NEW_SETAm : I<0x97, MRM0m, - (outs), (ins i8mem:$dst), - "seta\t$dst", - [(store (X86setcc_new X86_COND_A, EFLAGS), addr:$dst)]>, - TB; // [mem8] = > signed - -def NEW_SETSr : I<0x98, MRM0r, - (outs GR8 :$dst), (ins), - "sets\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_S, EFLAGS))]>, - TB; // GR8 = -def NEW_SETSm : I<0x98, MRM0m, - (outs), (ins i8mem:$dst), - "sets\t$dst", - [(store (X86setcc_new X86_COND_S, EFLAGS), addr:$dst)]>, - TB; // [mem8] = -def NEW_SETNSr : I<0x99, MRM0r, - (outs GR8 :$dst), (ins), - "setns\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_NS, EFLAGS))]>, - TB; // GR8 = ! -def NEW_SETNSm : I<0x99, MRM0m, - (outs), (ins i8mem:$dst), - "setns\t$dst", - [(store (X86setcc_new X86_COND_NS, EFLAGS), addr:$dst)]>, - TB; // [mem8] = ! 
-def NEW_SETPr : I<0x9A, MRM0r, - (outs GR8 :$dst), (ins), - "setp\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_P, EFLAGS))]>, - TB; // GR8 = parity -def NEW_SETPm : I<0x9A, MRM0m, - (outs), (ins i8mem:$dst), - "setp\t$dst", - [(store (X86setcc_new X86_COND_P, EFLAGS), addr:$dst)]>, - TB; // [mem8] = parity -def NEW_SETNPr : I<0x9B, MRM0r, - (outs GR8 :$dst), (ins), - "setnp\t$dst", - [(set GR8:$dst, (X86setcc_new X86_COND_NP, EFLAGS))]>, - TB; // GR8 = not parity -def NEW_SETNPm : I<0x9B, MRM0m, - (outs), (ins i8mem:$dst), - "setnp\t$dst", - [(store (X86setcc_new X86_COND_NP, EFLAGS), addr:$dst)]>, - TB; // [mem8] = not parity -} // Uses = [EFLAGS] - - -//def : Pat<(X86setcc_new X86_COND_E, EFLAGS), (SETEr)>; // Integer comparisons let Defs = [EFLAGS] in { def CMP8rr : I<0x38, MRMDestReg, (outs), (ins GR8 :$src1, GR8 :$src2), "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR8:$src1, GR8:$src2)]>; + [(X86cmp GR8:$src1, GR8:$src2), (implicit EFLAGS)]>; def CMP16rr : I<0x39, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR16:$src1, GR16:$src2)]>, OpSize; + [(X86cmp GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize; def CMP32rr : I<0x39, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR32:$src1, GR32:$src2)]>; + [(X86cmp GR32:$src1, GR32:$src2), (implicit EFLAGS)]>; def CMP8mr : I<0x38, MRMDestMem, (outs), (ins i8mem :$src1, GR8 :$src2), "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi8 addr:$src1), GR8:$src2)]>; + [(X86cmp (loadi8 addr:$src1), GR8:$src2), + (implicit EFLAGS)]>; def CMP16mr : I<0x39, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi16 addr:$src1), GR16:$src2)]>, OpSize; + [(X86cmp (loadi16 addr:$src1), GR16:$src2), + (implicit EFLAGS)]>, OpSize; def CMP32mr : I<0x39, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi32 addr:$src1), GR32:$src2)]>; + [(X86cmp (loadi32 addr:$src1), GR32:$src2), + (implicit EFLAGS)]>; def CMP8rm : I<0x3A, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2), "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR8:$src1, (loadi8 addr:$src2))]>; + [(X86cmp GR8:$src1, (loadi8 addr:$src2)), + (implicit EFLAGS)]>; def CMP16rm : I<0x3B, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR16:$src1, (loadi16 addr:$src2))]>, OpSize; + [(X86cmp GR16:$src1, (loadi16 addr:$src2)), + (implicit EFLAGS)]>, OpSize; def CMP32rm : I<0x3B, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR32:$src1, (loadi32 addr:$src2))]>; + [(X86cmp GR32:$src1, (loadi32 addr:$src2)), + (implicit EFLAGS)]>; def CMP8ri : Ii8<0x80, MRM7r, (outs), (ins GR8:$src1, i8imm:$src2), "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR8:$src1, imm:$src2)]>; + [(X86cmp GR8:$src1, imm:$src2), (implicit EFLAGS)]>; def CMP16ri : Ii16<0x81, MRM7r, (outs), (ins GR16:$src1, i16imm:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR16:$src1, imm:$src2)]>, OpSize; + [(X86cmp GR16:$src1, imm:$src2), + (implicit EFLAGS)]>, OpSize; def CMP32ri : Ii32<0x81, MRM7r, (outs), (ins GR32:$src1, i32imm:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR32:$src1, imm:$src2)]>; + [(X86cmp GR32:$src1, imm:$src2), (implicit EFLAGS)]>; def CMP8mi : Ii8 <0x80, MRM7m, (outs), (ins i8mem :$src1, i8imm :$src2), "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi8 
addr:$src1), imm:$src2)]>; -def CMP16mi : Ii16<0x81, MRM7m, - (outs), (ins i16mem:$src1, i16imm:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi16 addr:$src1), imm:$src2)]>, OpSize; -def CMP32mi : Ii32<0x81, MRM7m, - (outs), (ins i32mem:$src1, i32imm:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi32 addr:$src1), imm:$src2)]>; -def CMP16ri8 : Ii8<0x83, MRM7r, - (outs), (ins GR16:$src1, i16i8imm:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR16:$src1, i16immSExt8:$src2)]>, OpSize; -def CMP16mi8 : Ii8<0x83, MRM7m, - (outs), (ins i16mem:$src1, i16i8imm:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi16 addr:$src1), i16immSExt8:$src2)]>, OpSize; -def CMP32mi8 : Ii8<0x83, MRM7m, - (outs), (ins i32mem:$src1, i32i8imm:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi32 addr:$src1), i32immSExt8:$src2)]>; -def CMP32ri8 : Ii8<0x83, MRM7r, - (outs), (ins GR32:$src1, i32i8imm:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR32:$src1, i32immSExt8:$src2)]>; -} // Defs = [EFLAGS] - -let Defs = [EFLAGS] in { -def NEW_CMP8rr : I<0x38, MRMDestReg, - (outs), (ins GR8 :$src1, GR8 :$src2), - "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR8:$src1, GR8:$src2), (implicit EFLAGS)]>; -def NEW_CMP16rr : I<0x39, MRMDestReg, - (outs), (ins GR16:$src1, GR16:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize; -def NEW_CMP32rr : I<0x39, MRMDestReg, - (outs), (ins GR32:$src1, GR32:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR32:$src1, GR32:$src2), (implicit EFLAGS)]>; -def NEW_CMP8mr : I<0x38, MRMDestMem, - (outs), (ins i8mem :$src1, GR8 :$src2), - "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi8 addr:$src1), GR8:$src2), - (implicit EFLAGS)]>; -def NEW_CMP16mr : I<0x39, MRMDestMem, - (outs), (ins i16mem:$src1, GR16:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi16 addr:$src1), GR16:$src2), - (implicit EFLAGS)]>, OpSize; -def NEW_CMP32mr : I<0x39, MRMDestMem, - (outs), (ins i32mem:$src1, GR32:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi32 addr:$src1), GR32:$src2), - (implicit EFLAGS)]>; -def NEW_CMP8rm : I<0x3A, MRMSrcMem, - (outs), (ins GR8 :$src1, i8mem :$src2), - "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR8:$src1, (loadi8 addr:$src2)), - (implicit EFLAGS)]>; -def NEW_CMP16rm : I<0x3B, MRMSrcMem, - (outs), (ins GR16:$src1, i16mem:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR16:$src1, (loadi16 addr:$src2)), - (implicit EFLAGS)]>, OpSize; -def NEW_CMP32rm : I<0x3B, MRMSrcMem, - (outs), (ins GR32:$src1, i32mem:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR32:$src1, (loadi32 addr:$src2)), - (implicit EFLAGS)]>; -def NEW_CMP8ri : Ii8<0x80, MRM7r, - (outs), (ins GR8:$src1, i8imm:$src2), - "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR8:$src1, imm:$src2), (implicit EFLAGS)]>; -def NEW_CMP16ri : Ii16<0x81, MRM7r, - (outs), (ins GR16:$src1, i16imm:$src2), - "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR16:$src1, imm:$src2), - (implicit EFLAGS)]>, OpSize; -def NEW_CMP32ri : Ii32<0x81, MRM7r, - (outs), (ins GR32:$src1, i32imm:$src2), - "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR32:$src1, imm:$src2), (implicit EFLAGS)]>; -def NEW_CMP8mi : Ii8 <0x80, MRM7m, - (outs), (ins i8mem :$src1, i8imm :$src2), - "cmp{b}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi8 addr:$src1), imm:$src2), + 
[(X86cmp (loadi8 addr:$src1), imm:$src2), (implicit EFLAGS)]>; -def NEW_CMP16mi : Ii16<0x81, MRM7m, +def CMP16mi : Ii16<0x81, MRM7m, (outs), (ins i16mem:$src1, i16imm:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi16 addr:$src1), imm:$src2), + [(X86cmp (loadi16 addr:$src1), imm:$src2), (implicit EFLAGS)]>, OpSize; -def NEW_CMP32mi : Ii32<0x81, MRM7m, +def CMP32mi : Ii32<0x81, MRM7m, (outs), (ins i32mem:$src1, i32imm:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi32 addr:$src1), imm:$src2), + [(X86cmp (loadi32 addr:$src1), imm:$src2), (implicit EFLAGS)]>; -def NEW_CMP16ri8 : Ii8<0x83, MRM7r, +def CMP16ri8 : Ii8<0x83, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR16:$src1, i16immSExt8:$src2), + [(X86cmp GR16:$src1, i16immSExt8:$src2), (implicit EFLAGS)]>, OpSize; -def NEW_CMP16mi8 : Ii8<0x83, MRM7m, +def CMP16mi8 : Ii8<0x83, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2), "cmp{w}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi16 addr:$src1), i16immSExt8:$src2), + [(X86cmp (loadi16 addr:$src1), i16immSExt8:$src2), (implicit EFLAGS)]>, OpSize; -def NEW_CMP32mi8 : Ii8<0x83, MRM7m, +def CMP32mi8 : Ii8<0x83, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi32 addr:$src1), i32immSExt8:$src2), + [(X86cmp (loadi32 addr:$src1), i32immSExt8:$src2), (implicit EFLAGS)]>; -def NEW_CMP32ri8 : Ii8<0x83, MRM7r, +def CMP32ri8 : Ii8<0x83, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2), "cmp{l}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR32:$src1, i32immSExt8:$src2), + [(X86cmp GR32:$src1, i32immSExt8:$src2), (implicit EFLAGS)]>; } // Defs = [EFLAGS] @@ -3233,20 +2551,13 @@ def : Pat<(truncstorei1 GR8:$src, addr:$dst), // Comparisons. 
// TEST R,R is smaller than CMP R,0 -def : Pat<(X86cmp GR8:$src1, 0), +def : Pat<(parallel (X86cmp GR8:$src1, 0), (implicit EFLAGS)), (TEST8rr GR8:$src1, GR8:$src1)>; -def : Pat<(X86cmp GR16:$src1, 0), +def : Pat<(parallel (X86cmp GR16:$src1, 0), (implicit EFLAGS)), (TEST16rr GR16:$src1, GR16:$src1)>; -def : Pat<(X86cmp GR32:$src1, 0), +def : Pat<(parallel (X86cmp GR32:$src1, 0), (implicit EFLAGS)), (TEST32rr GR32:$src1, GR32:$src1)>; -def : Pat<(parallel (X86cmp_new GR8:$src1, 0), (implicit EFLAGS)), - (NEW_TEST8rr GR8:$src1, GR8:$src1)>; -def : Pat<(parallel (X86cmp_new GR16:$src1, 0), (implicit EFLAGS)), - (NEW_TEST16rr GR16:$src1, GR16:$src1)>; -def : Pat<(parallel (X86cmp_new GR32:$src1, 0), (implicit EFLAGS)), - (NEW_TEST32rr GR32:$src1, GR32:$src1)>; - // {s|z}extload bool -> {s|z}extload byte def : Pat<(sextloadi16i1 addr:$src), (MOVSX16rm8 addr:$src)>; def : Pat<(sextloadi32i1 addr:$src), (MOVSX32rm8 addr:$src)>; diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 54dd872e2bc..c7731c1dfbe 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -33,12 +33,8 @@ def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>; def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>; def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>; def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest, - [SDNPHasChain, SDNPOutFlag]>; -def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest, - [SDNPHasChain, SDNPOutFlag]>; -def X86comi_new: SDNode<"X86ISD::COMI_NEW", SDTX86CmpTest, [SDNPHasChain]>; -def X86ucomi_new: SDNode<"X86ISD::UCOMI_NEW",SDTX86CmpTest>; +def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>; def X86s2vec : SDNode<"X86ISD::S2VEC", SDTypeProfile<1, 1, []>, []>; def X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>; def X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>; @@ -271,54 +267,30 @@ let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in { def CMOV_FR32 : I<0, Pseudo, (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond), "#CMOV_FR32 PSEUDO!", - [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond))]>; - def CMOV_FR64 : I<0, Pseudo, - (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond), - "#CMOV_FR64 PSEUDO!", - [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond))]>; - def CMOV_V4F32 : I<0, Pseudo, - (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), - "#CMOV_V4F32 PSEUDO!", - [(set VR128:$dst, - (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>; - def CMOV_V2F64 : I<0, Pseudo, - (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), - "#CMOV_V2F64 PSEUDO!", - [(set VR128:$dst, - (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>; - def CMOV_V2I64 : I<0, Pseudo, - (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), - "#CMOV_V2I64 PSEUDO!", - [(set VR128:$dst, - (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond)))]>; - - def NEW_CMOV_FR32 : I<0, Pseudo, - (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond), - "#CMOV_FR32 PSEUDO!", - [(set FR32:$dst, (X86cmov_new FR32:$t, FR32:$f, imm:$cond, + [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond, EFLAGS))]>; - def NEW_CMOV_FR64 : I<0, Pseudo, + def CMOV_FR64 : I<0, Pseudo, (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond), "#CMOV_FR64 PSEUDO!", - [(set FR64:$dst, (X86cmov_new FR64:$t, FR64:$f, imm:$cond, + [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond, EFLAGS))]>; - def NEW_CMOV_V4F32 : I<0, Pseudo, + def CMOV_V4F32 : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), "#CMOV_V4F32 PSEUDO!", [(set 
VR128:$dst, - (v4f32 (X86cmov_new VR128:$t, VR128:$f, imm:$cond, + (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)))]>; - def NEW_CMOV_V2F64 : I<0, Pseudo, + def CMOV_V2F64 : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), "#CMOV_V2F64 PSEUDO!", [(set VR128:$dst, - (v2f64 (X86cmov_new VR128:$t, VR128:$f, imm:$cond, + (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)))]>; - def NEW_CMOV_V2I64 : I<0, Pseudo, + def CMOV_V2I64 : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond), "#CMOV_V2I64 PSEUDO!", [(set VR128:$dst, - (v2i64 (X86cmov_new VR128:$t, VR128:$f, imm:$cond, + (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)))]>; } @@ -396,17 +368,10 @@ let isTwoAddress = 1 in { let Defs = [EFLAGS] in { def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2), "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86cmp FR32:$src1, FR32:$src2)]>; + [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>; def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2), "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86cmp FR32:$src1, (loadf32 addr:$src2))]>; - -def NEW_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2), - "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new FR32:$src1, FR32:$src2), (implicit EFLAGS)]>; -def NEW_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2), - "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new FR32:$src1, (loadf32 addr:$src2)), + [(X86cmp FR32:$src1, (loadf32 addr:$src2)), (implicit EFLAGS)]>; } // Defs = [EFLAGS] @@ -425,40 +390,26 @@ let isTwoAddress = 1 in { } let Defs = [EFLAGS] in { -def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86ucomi (v4f32 VR128:$src1), VR128:$src2)]>; -def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2))]>; - -def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "comiss\t{$src2, $src1|$src1, $src2}", - [(X86comi (v4f32 VR128:$src1), VR128:$src2)]>; -def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "comiss\t{$src2, $src1|$src1, $src2}", - [(X86comi (v4f32 VR128:$src1), (load addr:$src2))]>; - -def NEW_Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), +def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86ucomi_new (v4f32 VR128:$src1), VR128:$src2), + [(X86ucomi (v4f32 VR128:$src1), VR128:$src2), (implicit EFLAGS)]>; -def NEW_Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), +def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), "ucomiss\t{$src2, $src1|$src1, $src2}", - [(X86ucomi_new (v4f32 VR128:$src1), (load addr:$src2)), + [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)), (implicit EFLAGS)]>; -def NEW_Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), +def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), "comiss\t{$src2, $src1|$src1, $src2}", - [(X86comi_new (v4f32 VR128:$src1), VR128:$src2), + [(X86comi (v4f32 VR128:$src1), VR128:$src2), (implicit EFLAGS)]>; -def NEW_Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), +def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), "comiss\t{$src2, $src1|$src1, $src2}", - [(X86comi_new (v4f32 VR128:$src1), (load addr:$src2)), + [(X86comi (v4f32 VR128:$src1), (load addr:$src2)), (implicit 
EFLAGS)]>; } // Defs = [EFLAGS] @@ -1095,17 +1046,10 @@ let isTwoAddress = 1 in { let Defs = [EFLAGS] in { def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2), "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86cmp FR64:$src1, FR64:$src2)]>; + [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>; def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2), "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86cmp FR64:$src1, (loadf64 addr:$src2))]>; - -def NEW_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2), - "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new FR64:$src1, FR64:$src2), (implicit EFLAGS)]>; -def NEW_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2), - "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new FR64:$src1, (loadf64 addr:$src2)), + [(X86cmp FR64:$src1, (loadf64 addr:$src2)), (implicit EFLAGS)]>; } @@ -1126,38 +1070,20 @@ let isTwoAddress = 1 in { let Defs = [EFLAGS] in { def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>; -def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2))]>; - -def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "comisd\t{$src2, $src1|$src1, $src2}", - [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2))]>; -def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), - "comisd\t{$src2, $src1|$src1, $src2}", - [(X86comi (v2f64 VR128:$src1), (load addr:$src2))]>; - -def NEW_Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2), - "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86ucomi_new (v2f64 VR128:$src1), (v2f64 VR128:$src2)), + [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)), (implicit EFLAGS)]>; -def NEW_Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), - (ins VR128:$src1, f128mem:$src2), +def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2), "ucomisd\t{$src2, $src1|$src1, $src2}", - [(X86ucomi_new (v2f64 VR128:$src1), (load addr:$src2)), + [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)), (implicit EFLAGS)]>; -def NEW_Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2), +def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), "comisd\t{$src2, $src1|$src1, $src2}", - [(X86comi_new (v2f64 VR128:$src1), (v2f64 VR128:$src2)), + [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)), (implicit EFLAGS)]>; -def NEW_Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), - (ins VR128:$src1, f128mem:$src2), +def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), "comisd\t{$src2, $src1|$src1, $src2}", - [(X86comi_new (v2f64 VR128:$src1), (load addr:$src2)), + [(X86comi (v2f64 VR128:$src1), (load addr:$src2)), (implicit EFLAGS)]>; } // Defs = EFLAGS] diff --git a/lib/Target/X86/X86InstrX86-64.td b/lib/Target/X86/X86InstrX86-64.td index ec6b9095f07..c7434159478 100644 --- a/lib/Target/X86/X86InstrX86-64.td +++ b/lib/Target/X86/X86InstrX86-64.td @@ -723,89 +723,51 @@ let Defs = [EFLAGS] in { let isCommutable = 1 in def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR64:$src1, GR64:$src2), 0)]>; -def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), - "test{q}\t{$src2, $src1|$src1, 
$src2}", - [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0)]>; -def TEST64ri32 : RIi32<0xF7, MRM0r, (outs), (ins GR64:$src1, i64i32imm:$src2), - "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0)]>; -def TEST64mi32 : RIi32<0xF7, MRM0m, (outs), (ins i64mem:$src1, i64i32imm:$src2), - "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0)]>; - -def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR64:$src1, GR64:$src2)]>; -def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi64 addr:$src1), GR64:$src2)]>; -def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR64:$src1, (loadi64 addr:$src2))]>; -def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR64:$src1, i64immSExt32:$src2)]>; -def CMP64mi32 : RIi32<0x81, MRM7m, (outs), (ins i64mem:$src1, i64i32imm:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2)]>; -def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2)]>; -def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), - "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp GR64:$src1, i64immSExt8:$src2)]>; -} // Defs = [EFLAGS] - -let Defs = [EFLAGS] in { -let isCommutable = 1 in -def NEW_TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), - "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR64:$src1, GR64:$src2), 0), + [(X86cmp (and GR64:$src1, GR64:$src2), 0), (implicit EFLAGS)]>; -def NEW_TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), +def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR64:$src1, (loadi64 addr:$src2)), 0), + [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0), (implicit EFLAGS)]>; -def NEW_TEST64ri32 : RIi32<0xF7, MRM0r, (outs), +def TEST64ri32 : RIi32<0xF7, MRM0r, (outs), (ins GR64:$src1, i64i32imm:$src2), "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and GR64:$src1, i64immSExt32:$src2), 0), + [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0), (implicit EFLAGS)]>; -def NEW_TEST64mi32 : RIi32<0xF7, MRM0m, (outs), +def TEST64mi32 : RIi32<0xF7, MRM0m, (outs), (ins i64mem:$src1, i64i32imm:$src2), "test{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (and (loadi64 addr:$src1), i64immSExt32:$src2), 0), + [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0), (implicit EFLAGS)]>; -def NEW_CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), +def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR64:$src1, GR64:$src2), + [(X86cmp GR64:$src1, GR64:$src2), (implicit EFLAGS)]>; -def NEW_CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), +def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi64 addr:$src1), GR64:$src2), + [(X86cmp (loadi64 addr:$src1), GR64:$src2), (implicit EFLAGS)]>; -def NEW_CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), +def CMP64rm : 
RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR64:$src1, (loadi64 addr:$src2)), + [(X86cmp GR64:$src1, (loadi64 addr:$src2)), (implicit EFLAGS)]>; -def NEW_CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2), +def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR64:$src1, i64immSExt32:$src2), + [(X86cmp GR64:$src1, i64immSExt32:$src2), (implicit EFLAGS)]>; -def NEW_CMP64mi32 : RIi32<0x81, MRM7m, (outs), +def CMP64mi32 : RIi32<0x81, MRM7m, (outs), (ins i64mem:$src1, i64i32imm:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi64 addr:$src1), i64immSExt32:$src2), + [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2), (implicit EFLAGS)]>; -def NEW_CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), +def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new (loadi64 addr:$src1), i64immSExt8:$src2), + [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2), (implicit EFLAGS)]>; -def NEW_CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), +def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), "cmp{q}\t{$src2, $src1|$src1, $src2}", - [(X86cmp_new GR64:$src1, i64immSExt8:$src2), + [(X86cmp GR64:$src1, i64immSExt8:$src2), (implicit EFLAGS)]>; } // Defs = [EFLAGS] @@ -815,282 +777,141 @@ def CMOVB64rr : RI<0x42, MRMSrcReg, // if , TB; + X86_COND_B, EFLAGS))]>, TB; def CMOVB64rm : RI<0x42, MRMSrcMem, // if , TB; + X86_COND_B, EFLAGS))]>, TB; def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovae\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_AE))]>, TB; + X86_COND_AE, EFLAGS))]>, TB; def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovae\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_AE))]>, TB; + X86_COND_AE, EFLAGS))]>, TB; def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmove\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_E))]>, TB; + X86_COND_E, EFLAGS))]>, TB; def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmove\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_E))]>, TB; + X86_COND_E, EFLAGS))]>, TB; def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovne\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_NE))]>, TB; + X86_COND_NE, EFLAGS))]>, TB; def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovne\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_NE))]>, TB; + X86_COND_NE, EFLAGS))]>, TB; def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovbe\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_BE))]>, TB; + X86_COND_BE, EFLAGS))]>, TB; def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), 
"cmovbe\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_BE))]>, TB; + X86_COND_BE, EFLAGS))]>, TB; def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmova\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_A))]>, TB; + X86_COND_A, EFLAGS))]>, TB; def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmova\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_A))]>, TB; + X86_COND_A, EFLAGS))]>, TB; def CMOVL64rr : RI<0x4C, MRMSrcReg, // if , TB; + X86_COND_L, EFLAGS))]>, TB; def CMOVL64rm : RI<0x4C, MRMSrcMem, // if , TB; + X86_COND_L, EFLAGS))]>, TB; def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovge\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_GE))]>, TB; + X86_COND_GE, EFLAGS))]>, TB; def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovge\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_GE))]>, TB; + X86_COND_GE, EFLAGS))]>, TB; def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovle\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_LE))]>, TB; + X86_COND_LE, EFLAGS))]>, TB; def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovle\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_LE))]>, TB; + X86_COND_LE, EFLAGS))]>, TB; def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovg\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_G))]>, TB; + X86_COND_G, EFLAGS))]>, TB; def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovg\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_G))]>, TB; + X86_COND_G, EFLAGS))]>, TB; def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovs\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_S))]>, TB; + X86_COND_S, EFLAGS))]>, TB; def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovs\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_S))]>, TB; + X86_COND_S, EFLAGS))]>, TB; def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovns\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_NS))]>, TB; + X86_COND_NS, EFLAGS))]>, TB; def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovns\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_NS))]>, TB; + X86_COND_NS, EFLAGS))]>, TB; def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovp\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, 
GR64:$src2, - X86_COND_P))]>, TB; + X86_COND_P, EFLAGS))]>, TB; def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovp\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_P))]>, TB; + X86_COND_P, EFLAGS))]>, TB; def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, - X86_COND_NP))]>, TB; + X86_COND_NP, EFLAGS))]>, TB; def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "cmovnp\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), - X86_COND_NP))]>, TB; - -def NEW_CMOVB64rr : RI<0x42, MRMSrcReg, // if , TB; -def NEW_CMOVB64rm : RI<0x42, MRMSrcMem, // if , TB; -def NEW_CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_AE, EFLAGS))]>, TB; -def NEW_CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_AE, EFLAGS))]>, TB; -def NEW_CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_E, EFLAGS))]>, TB; -def NEW_CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_E, EFLAGS))]>, TB; -def NEW_CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_NE, EFLAGS))]>, TB; -def NEW_CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_NE, EFLAGS))]>, TB; -def NEW_CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_BE, EFLAGS))]>, TB; -def NEW_CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_BE, EFLAGS))]>, TB; -def NEW_CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_A, EFLAGS))]>, TB; -def NEW_CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_A, EFLAGS))]>, TB; -def NEW_CMOVL64rr : RI<0x4C, MRMSrcReg, // if , TB; -def NEW_CMOVL64rm : RI<0x4C, MRMSrcMem, // if , TB; -def NEW_CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = 
GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_GE, EFLAGS))]>, TB; -def NEW_CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_GE, EFLAGS))]>, TB; -def NEW_CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_LE, EFLAGS))]>, TB; -def NEW_CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_LE, EFLAGS))]>, TB; -def NEW_CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_G, EFLAGS))]>, TB; -def NEW_CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_G, EFLAGS))]>, TB; -def NEW_CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_S, EFLAGS))]>, TB; -def NEW_CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_S, EFLAGS))]>, TB; -def NEW_CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_NS, EFLAGS))]>, TB; -def NEW_CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_NS, EFLAGS))]>, TB; -def NEW_CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_P, EFLAGS))]>, TB; -def NEW_CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), - X86_COND_P, EFLAGS))]>, TB; -def NEW_CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64 - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2, - X86_COND_NP, EFLAGS))]>, TB; -def NEW_CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64] - (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; } // isTwoAddress @@ -1279,12 +1100,9 @@ def : Pat<(X86tailcall GR64:$dst), // Comparisons. 
 // TEST R,R is smaller than CMP R,0
-def : Pat<(X86cmp GR64:$src1, 0),
+def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
 (TEST64rr GR64:$src1, GR64:$src1)>;
-def : Pat<(parallel (X86cmp_new GR64:$src1, 0), (implicit EFLAGS)),
- (NEW_TEST64rr GR64:$src1, GR64:$src1)>;
-
 // {s|z}extload bool -> {s|z}extload byte
 def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 08843fced0b..e98966ff4cb 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -716,34 +716,6 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
 { X86::MUL32r, X86::MUL32m },
 { X86::MUL64r, X86::MUL64m },
 { X86::MUL8r, X86::MUL8m },
-
- // TEMPORARY
- { X86::NEW_CMP16ri, X86::NEW_CMP16mi },
- { X86::NEW_CMP16ri8,X86::NEW_CMP16mi8 },
- { X86::NEW_CMP32ri, X86::NEW_CMP32mi },
- { X86::NEW_CMP32ri8,X86::NEW_CMP32mi8 },
- { X86::NEW_CMP64ri32,X86::NEW_CMP64mi32 },
- { X86::NEW_CMP64ri8,X86::NEW_CMP64mi8 },
- { X86::NEW_CMP8ri, X86::NEW_CMP8mi },
- { X86::NEW_SETAEr, X86::NEW_SETAEm },
- { X86::NEW_SETAr, X86::NEW_SETAm },
- { X86::NEW_SETBEr, X86::NEW_SETBEm },
- { X86::NEW_SETBr, X86::NEW_SETBm },
- { X86::NEW_SETEr, X86::NEW_SETEm },
- { X86::NEW_SETGEr, X86::NEW_SETGEm },
- { X86::NEW_SETGr, X86::NEW_SETGm },
- { X86::NEW_SETLEr, X86::NEW_SETLEm },
- { X86::NEW_SETLr, X86::NEW_SETLm },
- { X86::NEW_SETNEr, X86::NEW_SETNEm },
- { X86::NEW_SETNPr, X86::NEW_SETNPm },
- { X86::NEW_SETNSr, X86::NEW_SETNSm },
- { X86::NEW_SETPr, X86::NEW_SETPm },
- { X86::NEW_SETSr, X86::NEW_SETSm },
- { X86::NEW_TEST16ri,X86::NEW_TEST16mi },
- { X86::NEW_TEST32ri,X86::NEW_TEST32mi },
- { X86::NEW_TEST64ri32, X86::NEW_TEST64mi32 },
- { X86::NEW_TEST8ri, X86::NEW_TEST8mi },
-
 { X86::SETAEr, X86::SETAEm },
 { X86::SETAr, X86::SETAm },
 { X86::SETBEr, X86::SETBEm },
@@ -854,23 +826,6 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
 { X86::MOVZX32rr8, X86::MOVZX32rm8 },
 { X86::MOVZX64rr16, X86::MOVZX64rm16 },
 { X86::MOVZX64rr8, X86::MOVZX64rm8 },
-
- // TEMPORARY
- { X86::NEW_CMP16rr, X86::NEW_CMP16rm },
- { X86::NEW_CMP32rr, X86::NEW_CMP32rm },
- { X86::NEW_CMP64rr, X86::NEW_CMP64rm },
- { X86::NEW_CMP8rr, X86::NEW_CMP8rm },
- { X86::NEW_Int_COMISDrr, X86::NEW_Int_COMISDrm },
- { X86::NEW_Int_COMISSrr, X86::NEW_Int_COMISSrm },
- { X86::NEW_Int_UCOMISDrr, X86::NEW_Int_UCOMISDrm },
- { X86::NEW_Int_UCOMISSrr, X86::NEW_Int_UCOMISSrm },
- { X86::NEW_TEST16rr, X86::NEW_TEST16rm },
- { X86::NEW_TEST32rr, X86::NEW_TEST32rm },
- { X86::NEW_TEST64rr, X86::NEW_TEST64rm },
- { X86::NEW_TEST8rr, X86::NEW_TEST8rm },
- { X86::NEW_UCOMISDrr, X86::NEW_UCOMISDrm },
- { X86::NEW_UCOMISSrr, X86::NEW_UCOMISSrm },
-
 { X86::PSHUFDri, X86::PSHUFDmi },
 { X86::PSHUFHWri, X86::PSHUFHWmi },
 { X86::PSHUFLWri, X86::PSHUFLWmi },
@@ -1004,51 +959,6 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
 { X86::MULPSrr, X86::MULPSrm },
 { X86::MULSDrr, X86::MULSDrm },
 { X86::MULSSrr, X86::MULSSrm },
-
- // TEMPORARY
- { X86::NEW_CMOVA16rr, X86::NEW_CMOVA16rm },
- { X86::NEW_CMOVA32rr, X86::NEW_CMOVA32rm },
- { X86::NEW_CMOVA64rr, X86::NEW_CMOVA64rm },
- { X86::NEW_CMOVAE16rr, X86::NEW_CMOVAE16rm },
- { X86::NEW_CMOVAE32rr, X86::NEW_CMOVAE32rm },
- { X86::NEW_CMOVAE64rr, X86::NEW_CMOVAE64rm },
- { X86::NEW_CMOVB16rr, X86::NEW_CMOVB16rm },
- { X86::NEW_CMOVB32rr, X86::NEW_CMOVB32rm },
- { X86::NEW_CMOVB64rr, X86::NEW_CMOVB64rm },
- { X86::NEW_CMOVBE16rr, X86::NEW_CMOVBE16rm },
- { X86::NEW_CMOVBE32rr, X86::NEW_CMOVBE32rm },
- { X86::NEW_CMOVBE64rr, X86::NEW_CMOVBE64rm },
- { X86::NEW_CMOVE16rr, X86::NEW_CMOVE16rm },
- { X86::NEW_CMOVE32rr, X86::NEW_CMOVE32rm },
- { X86::NEW_CMOVE64rr, X86::NEW_CMOVE64rm },
- { X86::NEW_CMOVG16rr, X86::NEW_CMOVG16rm },
- { X86::NEW_CMOVG32rr, X86::NEW_CMOVG32rm },
- { X86::NEW_CMOVG64rr, X86::NEW_CMOVG64rm },
- { X86::NEW_CMOVGE16rr, X86::NEW_CMOVGE16rm },
- { X86::NEW_CMOVGE32rr, X86::NEW_CMOVGE32rm },
- { X86::NEW_CMOVGE64rr, X86::NEW_CMOVGE64rm },
- { X86::NEW_CMOVL16rr, X86::NEW_CMOVL16rm },
- { X86::NEW_CMOVL32rr, X86::NEW_CMOVL32rm },
- { X86::NEW_CMOVL64rr, X86::NEW_CMOVL64rm },
- { X86::NEW_CMOVLE16rr, X86::NEW_CMOVLE16rm },
- { X86::NEW_CMOVLE32rr, X86::NEW_CMOVLE32rm },
- { X86::NEW_CMOVLE64rr, X86::NEW_CMOVLE64rm },
- { X86::NEW_CMOVNE16rr, X86::NEW_CMOVNE16rm },
- { X86::NEW_CMOVNE32rr, X86::NEW_CMOVNE32rm },
- { X86::NEW_CMOVNE64rr, X86::NEW_CMOVNE64rm },
- { X86::NEW_CMOVNP16rr, X86::NEW_CMOVNP16rm },
- { X86::NEW_CMOVNP32rr, X86::NEW_CMOVNP32rm },
- { X86::NEW_CMOVNP64rr, X86::NEW_CMOVNP64rm },
- { X86::NEW_CMOVNS16rr, X86::NEW_CMOVNS16rm },
- { X86::NEW_CMOVNS32rr, X86::NEW_CMOVNS32rm },
- { X86::NEW_CMOVNS64rr, X86::NEW_CMOVNS64rm },
- { X86::NEW_CMOVP16rr, X86::NEW_CMOVP16rm },
- { X86::NEW_CMOVP32rr, X86::NEW_CMOVP32rm },
- { X86::NEW_CMOVP64rr, X86::NEW_CMOVP64rm },
- { X86::NEW_CMOVS16rr, X86::NEW_CMOVS16rm },
- { X86::NEW_CMOVS32rr, X86::NEW_CMOVS32rm },
- { X86::NEW_CMOVS64rr, X86::NEW_CMOVS64rm },
-
 { X86::OR16rr, X86::OR16rm },
 { X86::OR32rr, X86::OR32rm },
 { X86::OR64rr, X86::OR64rm },
-- 
2.34.1