#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetOptions.h"
SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
DAG.getConstant(32, MVT::i8));
SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)};
- SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
+ SDOperand Cond = NewCCModeling
+ ? DAG.getNode(X86ISD::CMP_NEW, MVT::i32,
+ AndNode, DAG.getConstant(0, MVT::i8))
+ : DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
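+  // With the new CC modeling, the X86ISD::CMP_NEW above yields the EFLAGS
+  // result as an ordinary i32 value, so it can feed both CMOV_NEW nodes
+  // below without copying a glued flag between them.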
SDOperand Hi, Lo;
SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
-
+ unsigned Opc = NewCCModeling ? X86ISD::CMOV_NEW : X86ISD::CMOV;
VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
SmallVector<SDOperand, 4> Ops;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
- InFlag = Hi.getValue(1);
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else {
+ Hi = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
+ Cond = Hi.getValue(1);
+ }
Ops.clear();
Ops.push_back(Tmp3);
Ops.push_back(Tmp1);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else
+ Lo = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
} else {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
- InFlag = Lo.getValue(1);
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else {
+ Lo = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
+ Cond = Lo.getValue(1);
+ }
Ops.clear();
Ops.push_back(Tmp3);
Ops.push_back(Tmp1);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else
+ Hi = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
}
VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
}
}
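+/// LowerSETCC_New - Lower ISD::SETCC under the new condition-code modeling:
+/// the comparison is emitted as an X86ISD::CMP_NEW node whose i32 result is
+/// the EFLAGS value, and X86ISD::SETCC_NEW consumes that value as a normal
+/// operand rather than through a glued flag.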
+SDOperand X86TargetLowering::LowerSETCC_New(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
+ SDOperand Op0 = Op.getOperand(0);
+ SDOperand Op1 = Op.getOperand(1);
+ SDOperand CC = Op.getOperand(2);
+ ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+ bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
+ unsigned X86CC;
+
+ SDOperand Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Op0, Op1);
+ if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
+ Op0, Op1, DAG))
+ return DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
+
+ assert(isFP && "Illegal integer SetCC!");
+
+ switch (SetCCOpcode) {
+ default: assert(false && "Illegal floating point SetCC!");
+ case ISD::SETOEQ: { // !PF & ZF
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_E, MVT::i8), Cond);
+ return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETUNE: { // PF | !ZF
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_P, MVT::i8), Cond);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
+ return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
+ }
+ }
+}
+
+
SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
bool addTest = true;
SDOperand Chain = DAG.getEntryNode();
return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}
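+/// LowerSELECT_New - Lower ISD::SELECT under the new condition-code modeling.
+/// Roughly, (select (setcc a, b, cc), t, f) becomes
+///   (X86ISD::CMOV_NEW f, t, X86cc, (X86ISD::CMP_NEW a, b))
+/// with the compare's i32 EFLAGS result passed as an explicit operand.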
+SDOperand X86TargetLowering::LowerSELECT_New(SDOperand Op, SelectionDAG &DAG) {
+ bool addTest = true;
+ SDOperand Cond = Op.getOperand(0);
+ SDOperand CC;
+
+ if (Cond.getOpcode() == ISD::SETCC)
+ Cond = LowerSETCC_New(Cond, DAG);
+
+ if (Cond.getOpcode() == X86ISD::SETCC_NEW) {
+ CC = Cond.getOperand(0);
+
+    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
+    // (since the flag operand cannot be shared). Use it as the condition
+    // setting operand in place of the X86ISD::SETCC.
+    // If the X86ISD::SETCC has more than one use, then perhaps it's better
+    // to use a test instead of duplicating the X86ISD::CMP (for register
+    // pressure reasons)?
+ SDOperand Cmp = Cond.getOperand(1);
+ unsigned Opc = Cmp.getOpcode();
+ bool IllegalFPCMov =
+ ! ((X86ScalarSSEf32 && Op.getValueType()==MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getValueType()==MVT::f64)) &&
+ !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
+ if ((Opc == X86ISD::CMP_NEW ||
+ Opc == X86ISD::COMI_NEW ||
+ Opc == X86ISD::UCOMI_NEW) &&
+ !IllegalFPCMov) {
+ Cond = DAG.getNode(Opc, MVT::i32, Cmp.getOperand(0), Cmp.getOperand(1));
+ addTest = false;
+ }
+ }
+
+ if (addTest) {
+ CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Cond,
+ DAG.getConstant(0, MVT::i8));
+ }
+
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
+ MVT::Flag);
+ SmallVector<SDOperand, 4> Ops;
+ // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
+ // condition is true.
+ Ops.push_back(Op.getOperand(2));
+ Ops.push_back(Op.getOperand(1));
+ Ops.push_back(CC);
+ Ops.push_back(Cond);
+ return DAG.getNode(X86ISD::CMOV_NEW, VTs, 2, &Ops[0], Ops.size());
+}
+
SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
bool addTest = true;
SDOperand Chain = Op.getOperand(0);
Cond, Op.getOperand(2), CC, Cond.getValue(1));
}
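+/// LowerBRCOND_New - Lower ISD::BRCOND under the new condition-code modeling:
+/// the branch becomes an X86ISD::BRCOND_NEW node taking the condition code
+/// and the i32 EFLAGS value as explicit operands, with no flag glue.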
+SDOperand X86TargetLowering::LowerBRCOND_New(SDOperand Op, SelectionDAG &DAG) {
+ bool addTest = true;
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand Cond = Op.getOperand(1);
+ SDOperand Dest = Op.getOperand(2);
+ SDOperand CC;
+
+ if (Cond.getOpcode() == ISD::SETCC)
+ Cond = LowerSETCC_New(Cond, DAG);
+
+ if (Cond.getOpcode() == X86ISD::SETCC_NEW) {
+ CC = Cond.getOperand(0);
+
+    // If the condition flag is set by an X86ISD::CMP, then make a copy of it
+    // (since the flag operand cannot be shared). Use it as the condition
+    // setting operand in place of the X86ISD::SETCC.
+    // If the X86ISD::SETCC has more than one use, then perhaps it's better
+    // to use a test instead of duplicating the X86ISD::CMP (for register
+    // pressure reasons)?
+ SDOperand Cmp = Cond.getOperand(1);
+ unsigned Opc = Cmp.getOpcode();
+ if (Opc == X86ISD::CMP_NEW ||
+ Opc == X86ISD::COMI_NEW ||
+ Opc == X86ISD::UCOMI_NEW) {
+ Cond = DAG.getNode(Opc, MVT::i32, Cmp.getOperand(0), Cmp.getOperand(1));
+ addTest = false;
+ }
+ }
+
+ if (addTest) {
+ CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+    Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Cond,
+                       DAG.getConstant(0, MVT::i8));
+ }
+ return DAG.getNode(X86ISD::BRCOND_NEW, Op.getValueType(),
+ Chain, Op.getOperand(2), CC, Cond);
+}
+
SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
SDOperand RHS = Op.getOperand(2);
translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
- SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
- SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
- VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
- SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
- SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
- return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
+ if (NewCCModeling) {
+ Opc = (Opc == X86ISD::UCOMI) ? X86ISD::UCOMI_NEW : X86ISD::COMI_NEW;
+ SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
+ SDOperand SetCC = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
+ return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
+ } else {
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+ SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
+ SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
+ VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
+ SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
+ SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
+ return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
+ }
}
}
}
case ISD::FABS: return LowerFABS(Op, DAG);
case ISD::FNEG: return LowerFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
- case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode());
- case ISD::SELECT: return LowerSELECT(Op, DAG);
- case ISD::BRCOND: return LowerBRCOND(Op, DAG);
+ case ISD::SETCC: return NewCCModeling
+ ? LowerSETCC_New(Op, DAG)
+ : LowerSETCC(Op, DAG, DAG.getEntryNode());
+ case ISD::SELECT: return NewCCModeling
+ ? LowerSELECT_New(Op, DAG)
+ : LowerSELECT(Op, DAG);
+ case ISD::BRCOND: return NewCCModeling
+ ? LowerBRCOND_New(Op, DAG)
+ : LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::CMP: return "X86ISD::CMP";
+ case X86ISD::CMP_NEW: return "X86ISD::CMP_NEW";
case X86ISD::COMI: return "X86ISD::COMI";
+ case X86ISD::COMI_NEW: return "X86ISD::COMI_NEW";
case X86ISD::UCOMI: return "X86ISD::UCOMI";
+ case X86ISD::UCOMI_NEW: return "X86ISD::UCOMI_NEW";
case X86ISD::SETCC: return "X86ISD::SETCC";
+ case X86ISD::SETCC_NEW: return "X86ISD::SETCC_NEW";
case X86ISD::CMOV: return "X86ISD::CMOV";
+ case X86ISD::CMOV_NEW: return "X86ISD::CMOV_NEW";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
+ case X86ISD::BRCOND_NEW: return "X86ISD::BRCOND_NEW";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86::CMOV_FR64:
case X86::CMOV_V4F32:
case X86::CMOV_V2F64:
- case X86::CMOV_V2I64: {
+ case X86::CMOV_V2I64:
+
+ case X86::NEW_CMOV_FR32:
+ case X86::NEW_CMOV_FR64:
+ case X86::NEW_CMOV_V4F32:
+ case X86::NEW_CMOV_V2F64:
+ case X86::NEW_CMOV_V2I64: {
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
switch (Opc) {
default: break;
case X86ISD::SETCC:
+ case X86ISD::SETCC_NEW:
KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
break;
}
def SDTX86Cmov : SDTypeProfile<1, 3,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>]>;
+def SDTX86Cmov_NEW : SDTypeProfile<1, 4,
+ [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
+ SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
def SDTX86BrCond : SDTypeProfile<0, 2,
[SDTCisVT<0, OtherVT>, SDTCisVT<1, i8>]>;
+def SDTX86BrCond_NEW : SDTypeProfile<0, 3,
+ [SDTCisVT<0, OtherVT>,
+ SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC : SDTypeProfile<1, 1,
[SDTCisVT<0, i8>, SDTCisVT<1, i8>]>;
+def SDTX86SetCC_NEW : SDTypeProfile<1, 2,
+ [SDTCisVT<0, i8>,
+ SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
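+// Each *_NEW profile takes one extra i32 operand compared to its old
+// counterpart: the EFLAGS value produced by the new-style compare nodes.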
def SDTX86Ret : SDTypeProfile<0, 1, [SDTCisVT<0, i16>]>;
def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest,
[SDNPHasChain, SDNPOutFlag]>;
+def X86cmp_new : SDNode<"X86ISD::CMP_NEW" , SDTX86CmpTest>;
-def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov,
+def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov,
[SDNPInFlag, SDNPOutFlag]>;
+def X86cmov_new: SDNode<"X86ISD::CMOV_NEW", SDTX86Cmov_NEW>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
[SDNPHasChain, SDNPInFlag]>;
+def X86brcond_new : SDNode<"X86ISD::BRCOND_NEW", SDTX86BrCond_NEW,
+ [SDNPHasChain]>;
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC,
[SDNPInFlag, SDNPOutFlag]>;
+def X86setcc_new : SDNode<"X86ISD::SETCC_NEW", SDTX86SetCC_NEW>;
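+// None of the *_new nodes use SDNPInFlag/SDNPOutFlag: the lowering code
+// passes the EFLAGS value around as a plain i32 SDOperand, and only
+// X86brcond_new still carries a chain.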
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
[SDNPHasChain, SDNPOptInFlag]>;
}
// Conditional branches
+let Uses = [EFLAGS] in {
def JE : IBr<0x84, (ins brtarget:$dst), "je\t$dst",
[(X86brcond bb:$dst, X86_COND_E)]>, TB;
def JNE : IBr<0x85, (ins brtarget:$dst), "jne\t$dst",
              [(X86brcond bb:$dst, X86_COND_NE)]>, TB;
def JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst",
[(X86brcond bb:$dst, X86_COND_NO)]>, TB;
+} // Uses = [EFLAGS]
+
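+// New-style conditional branches: the selection patterns match
+// X86brcond_new with EFLAGS as an explicit operand, and the instructions
+// declare Uses = [EFLAGS] so the flag dependence survives after selection.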
+let Uses = [EFLAGS] in {
+def NEW_JE : IBr<0x84, (ins brtarget:$dst), "je\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_E, EFLAGS)]>, TB;
+def NEW_JNE : IBr<0x85, (ins brtarget:$dst), "jne\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_NE, EFLAGS)]>, TB;
+def NEW_JL : IBr<0x8C, (ins brtarget:$dst), "jl\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_L, EFLAGS)]>, TB;
+def NEW_JLE : IBr<0x8E, (ins brtarget:$dst), "jle\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_LE, EFLAGS)]>, TB;
+def NEW_JG : IBr<0x8F, (ins brtarget:$dst), "jg\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_G, EFLAGS)]>, TB;
+def NEW_JGE : IBr<0x8D, (ins brtarget:$dst), "jge\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_GE, EFLAGS)]>, TB;
+
+def NEW_JB : IBr<0x82, (ins brtarget:$dst), "jb\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_B, EFLAGS)]>, TB;
+def NEW_JBE : IBr<0x86, (ins brtarget:$dst), "jbe\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_BE, EFLAGS)]>, TB;
+def NEW_JA : IBr<0x87, (ins brtarget:$dst), "ja\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_A, EFLAGS)]>, TB;
+def NEW_JAE : IBr<0x83, (ins brtarget:$dst), "jae\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_AE, EFLAGS)]>, TB;
+
+def NEW_JS : IBr<0x88, (ins brtarget:$dst), "js\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_S, EFLAGS)]>, TB;
+def NEW_JNS : IBr<0x89, (ins brtarget:$dst), "jns\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_NS, EFLAGS)]>, TB;
+def NEW_JP : IBr<0x8A, (ins brtarget:$dst), "jp\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_P, EFLAGS)]>, TB;
+def NEW_JNP : IBr<0x8B, (ins brtarget:$dst), "jnp\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_NP, EFLAGS)]>, TB;
+def NEW_JO : IBr<0x80, (ins brtarget:$dst), "jo\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_O, EFLAGS)]>, TB;
+def NEW_JNO : IBr<0x81, (ins brtarget:$dst), "jno\t$dst",
+ [(X86brcond_new bb:$dst, X86_COND_NO, EFLAGS)]>, TB;
+} // Uses = [EFLAGS]
//===----------------------------------------------------------------------===//
// Call Instructions...
// All calls clobber the non-callee saved registers...
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
- XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7] in {
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS] in {
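+// EFLAGS joins the call-clobbered set so that, with flags modeled as a real
+// register rather than glue, flag values are not assumed live across calls.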
def CALLpcrel32 : I<0xE8, RawFrm, (outs), (ins i32imm:$dst, variable_ops),
"call\t${dst:call}", []>;
def CALL32r : I<0xFF, MRM2r, (outs), (ins GR32:$dst, variable_ops),
let isTwoAddress = 1 in {
// Conditional moves
+let Uses = [EFLAGS] in {
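+// Conditional moves now read EFLAGS explicitly; the NEW_CMOV* variants
+// below additionally select from X86cmov_new, which takes the EFLAGS value
+// as its final operand.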
def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"cmovb\t{$src2, $dst|$dst, $src2}",
TB;
+def NEW_CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovb\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_B, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovb\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_B, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovb\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_B, EFLAGS))]>,
+ TB;
+def NEW_CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovb\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_B, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovae\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_AE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovae\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_AE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovae\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_AE, EFLAGS))]>,
+ TB;
+def NEW_CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovae\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_AE, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmove\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_E, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmove\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_E, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmove\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_E, EFLAGS))]>,
+ TB;
+def NEW_CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmove\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_E, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovne\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_NE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovne\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_NE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovne\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_NE, EFLAGS))]>,
+ TB;
+def NEW_CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovne\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_NE, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovbe\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_BE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovbe\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_BE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovbe\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_BE, EFLAGS))]>,
+ TB;
+def NEW_CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovbe\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_BE, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmova\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_A, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmova\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_A, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmova\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_A, EFLAGS))]>,
+ TB;
+def NEW_CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmova\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_A, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovl\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_L, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovl\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_L, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovl\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_L, EFLAGS))]>,
+ TB;
+def NEW_CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovl\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_L, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovge\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_GE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovge\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_GE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovge\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_GE, EFLAGS))]>,
+ TB;
+def NEW_CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovge\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_GE, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovle\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_LE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovle\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_LE, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovle\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_LE, EFLAGS))]>,
+ TB;
+def NEW_CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovle\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_LE, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovg\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_G, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovg\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_G, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovg\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_G, EFLAGS))]>,
+ TB;
+def NEW_CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovg\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_G, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovs\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_S, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovs\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_S, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovs\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_S, EFLAGS))]>,
+ TB;
+def NEW_CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovs\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_S, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovns\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_NS, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovns\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_NS, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovns\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_NS, EFLAGS))]>,
+ TB;
+def NEW_CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovns\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_NS, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovp\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_P, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovp\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_P, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovp\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_P, EFLAGS))]>,
+ TB;
+def NEW_CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovp\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_P, EFLAGS))]>,
+ TB;
+
+def NEW_CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "cmovnp\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, GR16:$src2,
+ X86_COND_NP, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
+ (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
+ "cmovnp\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, (X86cmov_new GR16:$src1, (loadi16 addr:$src2),
+ X86_COND_NP, EFLAGS))]>,
+ TB, OpSize;
+def NEW_CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "cmovnp\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, GR32:$src2,
+ X86_COND_NP, EFLAGS))]>,
+ TB;
+def NEW_CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
+ (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
+ "cmovnp\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, (X86cmov_new GR32:$src1, (loadi32 addr:$src2),
+ X86_COND_NP, EFLAGS))]>,
+ TB;
+} // Uses = [EFLAGS]
+
+
// unary instructions
let CodeSize = 2 in {
let Defs = [EFLAGS] in {
//===----------------------------------------------------------------------===//
// Test instructions are just like AND, except they don't generate a result.
//
- let Defs = [EFLAGS] in {
+let Defs = [EFLAGS] in {
let isCommutable = 1 in { // TEST X, Y --> TEST Y, X
def TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
} // Defs = [EFLAGS]
+let Defs = [EFLAGS] in {
+let isCommutable = 1 in { // TEST X, Y --> TEST Y, X
+def NEW_TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2),
+ "test{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR8:$src1, GR8:$src2), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
+ "test{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR16:$src1, GR16:$src2), 0),
+ (implicit EFLAGS)]>,
+ OpSize;
+def NEW_TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
+ "test{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR32:$src1, GR32:$src2), 0),
+ (implicit EFLAGS)]>;
+}
+
+def NEW_TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
+ "test{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR8:$src1, (loadi8 addr:$src2)), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2),
+ "test{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR16:$src1, (loadi16 addr:$src2)), 0),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
+ "test{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR32:$src1, (loadi32 addr:$src2)), 0),
+ (implicit EFLAGS)]>;
+
+def NEW_TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8
+ (outs), (ins GR8:$src1, i8imm:$src2),
+ "test{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR8:$src1, imm:$src2), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16
+ (outs), (ins GR16:$src1, i16imm:$src2),
+ "test{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR16:$src1, imm:$src2), 0),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32
+ (outs), (ins GR32:$src1, i32imm:$src2),
+ "test{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR32:$src1, imm:$src2), 0),
+ (implicit EFLAGS)]>;
+
+def NEW_TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
+ (outs), (ins i8mem:$src1, i8imm:$src2),
+ "test{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and (loadi8 addr:$src1), imm:$src2), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
+ (outs), (ins i16mem:$src1, i16imm:$src2),
+ "test{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and (loadi16 addr:$src1), imm:$src2), 0),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
+ (outs), (ins i32mem:$src1, i32imm:$src2),
+ "test{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and (loadi32 addr:$src1), imm:$src2), 0),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
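+// The NEW_TEST* patterns match X86cmp_new of an AND against zero together
+// with (implicit EFLAGS), so the flag value computed at the DAG level maps
+// onto the physical EFLAGS register defined by the instruction.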
+
+
// Condition code ops, incl. set if equal/not equal/...
let Defs = [EFLAGS], Uses = [AH] in
def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>; // flags = AH
let Defs = [AH], Uses = [EFLAGS] in
def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
+let Uses = [EFLAGS] in {
def SETEr : I<0x94, MRM0r,
(outs GR8 :$dst), (ins),
"sete\t$dst",
"setnp\t$dst",
[(store (X86setcc X86_COND_NP), addr:$dst)]>,
TB; // [mem8] = not parity
+} // Uses = [EFLAGS]
+
+let Uses = [EFLAGS] in {
+def NEW_SETEr : I<0x94, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "sete\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_E, EFLAGS))]>,
+ TB; // GR8 = ==
+def NEW_SETEm : I<0x94, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "sete\t$dst",
+ [(store (X86setcc_new X86_COND_E, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = ==
+def NEW_SETNEr : I<0x95, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setne\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_NE, EFLAGS))]>,
+ TB; // GR8 = !=
+def NEW_SETNEm : I<0x95, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setne\t$dst",
+ [(store (X86setcc_new X86_COND_NE, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = !=
+def NEW_SETLr : I<0x9C, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setl\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_L, EFLAGS))]>,
+ TB; // GR8 = < signed
+def NEW_SETLm : I<0x9C, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setl\t$dst",
+ [(store (X86setcc_new X86_COND_L, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = < signed
+def NEW_SETGEr : I<0x9D, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setge\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_GE, EFLAGS))]>,
+ TB; // GR8 = >= signed
+def NEW_SETGEm : I<0x9D, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setge\t$dst",
+ [(store (X86setcc_new X86_COND_GE, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = >= signed
+def NEW_SETLEr : I<0x9E, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setle\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_LE, EFLAGS))]>,
+ TB; // GR8 = <= signed
+def NEW_SETLEm : I<0x9E, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setle\t$dst",
+ [(store (X86setcc_new X86_COND_LE, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = <= signed
+def NEW_SETGr : I<0x9F, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setg\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_G, EFLAGS))]>,
+ TB; // GR8 = > signed
+def NEW_SETGm : I<0x9F, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setg\t$dst",
+ [(store (X86setcc_new X86_COND_G, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = > signed
+
+def NEW_SETBr : I<0x92, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setb\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_B, EFLAGS))]>,
+ TB; // GR8 = < unsign
+def NEW_SETBm : I<0x92, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setb\t$dst",
+ [(store (X86setcc_new X86_COND_B, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = < unsign
+def NEW_SETAEr : I<0x93, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setae\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_AE, EFLAGS))]>,
+ TB; // GR8 = >= unsign
+def NEW_SETAEm : I<0x93, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setae\t$dst",
+ [(store (X86setcc_new X86_COND_AE, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = >= unsign
+def NEW_SETBEr : I<0x96, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setbe\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_BE, EFLAGS))]>,
+ TB; // GR8 = <= unsign
+def NEW_SETBEm : I<0x96, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setbe\t$dst",
+ [(store (X86setcc_new X86_COND_BE, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = <= unsign
+def NEW_SETAr : I<0x97, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "seta\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_A, EFLAGS))]>,
+               TB; // GR8 = > unsign
+def NEW_SETAm : I<0x97, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "seta\t$dst",
+ [(store (X86setcc_new X86_COND_A, EFLAGS), addr:$dst)]>,
+               TB; // [mem8] = > unsign
+
+def NEW_SETSr : I<0x98, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "sets\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_S, EFLAGS))]>,
+ TB; // GR8 = <sign bit>
+def NEW_SETSm : I<0x98, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "sets\t$dst",
+ [(store (X86setcc_new X86_COND_S, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = <sign bit>
+def NEW_SETNSr : I<0x99, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setns\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_NS, EFLAGS))]>,
+ TB; // GR8 = !<sign bit>
+def NEW_SETNSm : I<0x99, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setns\t$dst",
+ [(store (X86setcc_new X86_COND_NS, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = !<sign bit>
+def NEW_SETPr : I<0x9A, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setp\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_P, EFLAGS))]>,
+ TB; // GR8 = parity
+def NEW_SETPm : I<0x9A, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setp\t$dst",
+ [(store (X86setcc_new X86_COND_P, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = parity
+def NEW_SETNPr : I<0x9B, MRM0r,
+ (outs GR8 :$dst), (ins),
+ "setnp\t$dst",
+ [(set GR8:$dst, (X86setcc_new X86_COND_NP, EFLAGS))]>,
+ TB; // GR8 = not parity
+def NEW_SETNPm : I<0x9B, MRM0m,
+ (outs), (ins i8mem:$dst),
+ "setnp\t$dst",
+ [(store (X86setcc_new X86_COND_NP, EFLAGS), addr:$dst)]>,
+ TB; // [mem8] = not parity
+} // Uses = [EFLAGS]
+
+
+//def : Pat<(X86setcc_new X86_COND_E, EFLAGS), (SETEr)>;
// Integer comparisons
let Defs = [EFLAGS] in {
[(X86cmp GR32:$src1, i32immSExt8:$src2)]>;
} // Defs = [EFLAGS]
+let Defs = [EFLAGS] in {
+def NEW_CMP8rr : I<0x38, MRMDestReg,
+ (outs), (ins GR8 :$src1, GR8 :$src2),
+ "cmp{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR8:$src1, GR8:$src2), (implicit EFLAGS)]>;
+def NEW_CMP16rr : I<0x39, MRMDestReg,
+ (outs), (ins GR16:$src1, GR16:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize;
+def NEW_CMP32rr : I<0x39, MRMDestReg,
+ (outs), (ins GR32:$src1, GR32:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR32:$src1, GR32:$src2), (implicit EFLAGS)]>;
+def NEW_CMP8mr : I<0x38, MRMDestMem,
+ (outs), (ins i8mem :$src1, GR8 :$src2),
+ "cmp{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi8 addr:$src1), GR8:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP16mr : I<0x39, MRMDestMem,
+ (outs), (ins i16mem:$src1, GR16:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi16 addr:$src1), GR16:$src2),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_CMP32mr : I<0x39, MRMDestMem,
+ (outs), (ins i32mem:$src1, GR32:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi32 addr:$src1), GR32:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP8rm : I<0x3A, MRMSrcMem,
+ (outs), (ins GR8 :$src1, i8mem :$src2),
+ "cmp{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR8:$src1, (loadi8 addr:$src2)),
+ (implicit EFLAGS)]>;
+def NEW_CMP16rm : I<0x3B, MRMSrcMem,
+ (outs), (ins GR16:$src1, i16mem:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR16:$src1, (loadi16 addr:$src2)),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_CMP32rm : I<0x3B, MRMSrcMem,
+ (outs), (ins GR32:$src1, i32mem:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR32:$src1, (loadi32 addr:$src2)),
+ (implicit EFLAGS)]>;
+def NEW_CMP8ri : Ii8<0x80, MRM7r,
+ (outs), (ins GR8:$src1, i8imm:$src2),
+ "cmp{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR8:$src1, imm:$src2), (implicit EFLAGS)]>;
+def NEW_CMP16ri : Ii16<0x81, MRM7r,
+ (outs), (ins GR16:$src1, i16imm:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR16:$src1, imm:$src2),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_CMP32ri : Ii32<0x81, MRM7r,
+ (outs), (ins GR32:$src1, i32imm:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR32:$src1, imm:$src2), (implicit EFLAGS)]>;
+def NEW_CMP8mi : Ii8 <0x80, MRM7m,
+ (outs), (ins i8mem :$src1, i8imm :$src2),
+ "cmp{b}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi8 addr:$src1), imm:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP16mi : Ii16<0x81, MRM7m,
+ (outs), (ins i16mem:$src1, i16imm:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi16 addr:$src1), imm:$src2),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_CMP32mi : Ii32<0x81, MRM7m,
+ (outs), (ins i32mem:$src1, i32imm:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi32 addr:$src1), imm:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP16ri8 : Ii8<0x83, MRM7r,
+ (outs), (ins GR16:$src1, i16i8imm:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR16:$src1, i16immSExt8:$src2),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_CMP16mi8 : Ii8<0x83, MRM7m,
+ (outs), (ins i16mem:$src1, i16i8imm:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi16 addr:$src1), i16immSExt8:$src2),
+ (implicit EFLAGS)]>, OpSize;
+def NEW_CMP32mi8 : Ii8<0x83, MRM7m,
+ (outs), (ins i32mem:$src1, i32i8imm:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi32 addr:$src1), i32immSExt8:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP32ri8 : Ii8<0x83, MRM7r,
+ (outs), (ins GR32:$src1, i32i8imm:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR32:$src1, i32immSExt8:$src2),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
// Sign/Zero extenders
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
"movs{bw|x}\t{$src, $dst|$dst, $src}",
def : Pat<(X86cmp GR32:$src1, 0),
(TEST32rr GR32:$src1, GR32:$src1)>;
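+// Comparing a register against zero is selected as TEST reg,reg: it sets
+// the relevant flags identically and avoids encoding an immediate.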
+def : Pat<(parallel (X86cmp_new GR8:$src1, 0), (implicit EFLAGS)),
+ (NEW_TEST8rr GR8:$src1, GR8:$src1)>;
+def : Pat<(parallel (X86cmp_new GR16:$src1, 0), (implicit EFLAGS)),
+ (NEW_TEST16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(parallel (X86cmp_new GR32:$src1, 0), (implicit EFLAGS)),
+ (NEW_TEST32rr GR32:$src1, GR32:$src1)>;
+
// {s|z}extload bool -> {s|z}extload byte
def : Pat<(sextloadi16i1 addr:$src), (MOVSX16rm8 addr:$src)>;
def : Pat<(sextloadi32i1 addr:$src), (MOVSX32rm8 addr:$src)>;
[(X86cmp GR64:$src1, i64immSExt8:$src2)]>;
} // Defs = [EFLAGS]
+let Defs = [EFLAGS] in {
+let isCommutable = 1 in
+def NEW_TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "test{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR64:$src1, GR64:$src2), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
+ "test{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR64:$src1, (loadi64 addr:$src2)), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
+ (ins GR64:$src1, i64i32imm:$src2),
+ "test{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and GR64:$src1, i64immSExt32:$src2), 0),
+ (implicit EFLAGS)]>;
+def NEW_TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
+ (ins i64mem:$src1, i64i32imm:$src2),
+ "test{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
+ (implicit EFLAGS)]>;
+
+def NEW_CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR64:$src1, GR64:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi64 addr:$src1), GR64:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR64:$src1, (loadi64 addr:$src2)),
+ (implicit EFLAGS)]>;
+def NEW_CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR64:$src1, i64immSExt32:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP64mi32 : RIi32<0x81, MRM7m, (outs),
+ (ins i64mem:$src1, i64i32imm:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi64 addr:$src1), i64immSExt32:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new (loadi64 addr:$src1), i64immSExt8:$src2),
+ (implicit EFLAGS)]>;
+def NEW_CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}",
+ [(X86cmp_new GR64:$src1, i64immSExt8:$src2),
+ (implicit EFLAGS)]>;
+} // Defs = [EFLAGS]
+
// Conditional moves
-let isTwoAddress = 1 in {
+let Uses = [EFLAGS], isTwoAddress = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"cmovb\t{$src2, $dst|$dst, $src2}",
(outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"cmovnp\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
- X86_COND_NP))]>, TB;
+ X86_COND_NP))]>, TB;
+
+def NEW_CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovb\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_B, EFLAGS))]>, TB;
+def NEW_CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovb\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_B, EFLAGS))]>, TB;
+def NEW_CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovae\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_AE, EFLAGS))]>, TB;
+def NEW_CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovae\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_AE, EFLAGS))]>, TB;
+def NEW_CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmove\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_E, EFLAGS))]>, TB;
+def NEW_CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmove\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_E, EFLAGS))]>, TB;
+def NEW_CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovne\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_NE, EFLAGS))]>, TB;
+def NEW_CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovne\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_NE, EFLAGS))]>, TB;
+def NEW_CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovbe\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_BE, EFLAGS))]>, TB;
+def NEW_CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovbe\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_BE, EFLAGS))]>, TB;
+def NEW_CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmova\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_A, EFLAGS))]>, TB;
+def NEW_CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmova\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_A, EFLAGS))]>, TB;
+def NEW_CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovl\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_L, EFLAGS))]>, TB;
+def NEW_CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovl\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_L, EFLAGS))]>, TB;
+def NEW_CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovge\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_GE, EFLAGS))]>, TB;
+def NEW_CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovge\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_GE, EFLAGS))]>, TB;
+def NEW_CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovle\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_LE, EFLAGS))]>, TB;
+def NEW_CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovle\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_LE, EFLAGS))]>, TB;
+def NEW_CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovg\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_G, EFLAGS))]>, TB;
+def NEW_CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovg\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_G, EFLAGS))]>, TB;
+def NEW_CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovs\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_S, EFLAGS))]>, TB;
+def NEW_CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovs\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_S, EFLAGS))]>, TB;
+def NEW_CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovns\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_NS, EFLAGS))]>, TB;
+def NEW_CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovns\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_NS, EFLAGS))]>, TB;
+def NEW_CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovp\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_P, EFLAGS))]>, TB;
+def NEW_CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovp\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_P, EFLAGS))]>, TB;
+def NEW_CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
+ (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
+ "cmovnp\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, GR64:$src2,
+ X86_COND_NP, EFLAGS))]>, TB;
+def NEW_CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
+ (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
+ "cmovnp\t{$src2, $dst|$dst, $src2}",
+ [(set GR64:$dst, (X86cmov_new GR64:$src1, (loadi64 addr:$src2),
+ X86_COND_NP, EFLAGS))]>, TB;
} // isTwoAddress
//===----------------------------------------------------------------------===//
def : Pat<(X86cmp GR64:$src1, 0),
(TEST64rr GR64:$src1, GR64:$src1)>;
+def : Pat<(parallel (X86cmp_new GR64:$src1, 0), (implicit EFLAGS)),
+ (NEW_TEST64rr GR64:$src1, GR64:$src1)>;
+
// {s|z}extload bool -> {s|z}extload byte
def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;