setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
if (Subtarget->is64Bit()) {
- setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
+ setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
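+ // (Promote widens the unsigned i32 source to i64 via zero-extension, and
+ // the legalizer then expands the i64 case.)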
} else {
- if (X86ScalarSSEf64) {
+ if (!UseSoftFloat && !NoImplicitFloat && X86ScalarSSEf64) {
// We have an impenetrably clever algorithm for ui64->double only.
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
// We have a faster algorithm for ui32->single only.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
- } else
+ } else {
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
+ }
}
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// this operation.
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
- // SSE has no i16 to fp conversion, only i32
- if (X86ScalarSSEf32) {
- setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
- // f32 and f64 cases are Legal, f80 case is not
- setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
+
+ if (!UseSoftFloat && !NoImplicitFloat) {
+ // SSE has no i16 to fp conversion, only i32
+ if (X86ScalarSSEf32) {
+ setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
+ // f32 and f64 cases are Legal, f80 case is not
+ setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
+ } else {
+ setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
+ setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
+ }
} else {
- setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
- setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
+ setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
+ setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote);
}
// In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
- setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
+ setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
setOperationAction(ISD::TRUNCATE, MVT::v8i8, Expand);
setOperationAction(ISD::SELECT, MVT::v8i8, Promote);
setOperationAction(ISD::SELECT, MVT::v4i16, Promote);
if (!UseSoftFloat && Subtarget->hasSSE2()) {
addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
- // FIXME: Unfortunately -soft-float means XMM registers cannot be used even
- // for integer operations.
+ // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
+ // registers cannot be used even for integer operations.
addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
+
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
+
if (Subtarget->is64Bit()) {
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
setOperationAction(ISD::UMULO, MVT::i32, Custom);
setOperationAction(ISD::UMULO, MVT::i64, Custom);
+ if (!Subtarget->is64Bit()) {
+ // These libcalls are not available in 32-bit mode.
+ setLibcallName(RTLIB::SHL_I128, 0);
+ setLibcallName(RTLIB::SRL_I128, 0);
+ setLibcallName(RTLIB::SRA_I128, 0);
+ }
+
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::SRA);
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::STORE);
+ if (Subtarget->is64Bit())
+ setTargetDAGCombine(ISD::MUL);
computeRegisterProperties();
// FIXME: This turns off use of xmm stores for memset/memcpy on targets like
// linux. This is because the stack realignment code can't handle certain
// cases like PR2962. This should be removed when PR2962 is fixed.
- if (Subtarget->getStackAlignment() >= 16) {
+ if (!NoImplicitFloat && Subtarget->getStackAlignment() >= 16) {
if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
return MVT::v4i32;
if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
return MVT::i32;
}
-
/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
// For info on fast calling convention see Fast Calling Convention (tail call)
// implementation LowerX86_32FastCCCallTo.
-/// AddLiveIn - This helper function adds the specified physical register to the
-/// MachineFunction as a live in value. It also creates a corresponding virtual
-/// register for it.
-static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
- const TargetRegisterClass *RC) {
- assert(RC->contains(PReg) && "Not the correct regclass!");
- unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
- MF.getRegInfo().addLiveIn(PReg, VReg);
- return VReg;
-}
-
/// CallIsStructReturn - Determines whether a CALL node uses struct return
/// semantics.
static bool CallIsStructReturn(CallSDNode *TheCall) {
assert(0 && "Unknown argument type!");
}
- unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
+ unsigned Reg = DAG.getMachineFunction().addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it is really passed promoted to 32
assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
- assert(!(NumXMMRegs && UseSoftFloat) &&
+ assert(!(NumXMMRegs && UseSoftFloat && NoImplicitFloat) &&
"SSE register cannot be used when SSE is disabled!");
- if (UseSoftFloat || !Subtarget->hasSSE1()) {
+ if (UseSoftFloat || NoImplicitFloat || !Subtarget->hasSSE1())
// Kernel mode asks for SSE to be disabled, so don't push them
// on the stack.
TotalNumXMMRegs = 0;
- }
+
// For X86-64, if there are vararg parameters that are passed via
// registers, then we must store them to their spots on the stack so they
// may be loaded by dereferencing the result of va_next.
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
DAG.getIntPtrConstant(VarArgsGPOffset));
for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
- unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
- X86::GR64RegisterClass);
+ unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
+ X86::GR64RegisterClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
DAG.getIntPtrConstant(VarArgsFPOffset));
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
- unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
- X86::VR128RegisterClass);
+ unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
+ X86::VR128RegisterClass);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::v4f32);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
// Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
// source words for the shuffle, to aid later transformations.
bool AllWordsInNewV = true;
+ bool InOrder[2] = { true, true };
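+ // InOrder[0] and InOrder[1] record whether the low and high four words,
+ // respectively, are still in identity order (word i at position i).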
for (unsigned i = 0; i != 8; ++i) {
int idx = MaskVals[i];
+ if (idx != (int)i)
+ InOrder[i/4] = false;
if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
continue;
AllWordsInNewV = false;
// If we've eliminated the use of V2, and the new mask is a pshuflw or
// pshufhw, that's as cheap as it gets. Return the new shuffle.
- if (pshufhw || pshuflw) {
+ if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
MaskV.clear();
for (unsigned i = 0; i != 8; ++i)
MaskV.push_back((MaskVals[i] < 0) ? DAG.getUNDEF(MVT::i16)
SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
SDValue InsElt;
-
+
+ // If Elt0 and Elt1 are defined, are consecutive, and can be pulled out
+ // together with a single extract, extract the word and insert it whole.
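+ // (e.g. Elt0 == 4, Elt1 == 5: word 2 of the source already holds both
+ // bytes, so one extract of word 2 suffices.)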
+ if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
+ InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
+ DAG.getIntPtrConstant(Elt1 / 2));
+ NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
+ DAG.getIntPtrConstant(i));
+ continue;
+ }
+
// If Elt1 is defined, extract it from the appropriate source. If the
- // source byte is not also odd, shift the extracted word left 8 bits.
+ // source byte is not also odd, shift the extracted word left 8 bits;
+ // otherwise clear the bottom 8 bits if we need to do an OR.
if (Elt1 >= 0) {
InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
DAG.getIntPtrConstant(Elt1 / 2));
if ((Elt1 & 1) == 0)
InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
DAG.getConstant(8, TLI.getShiftAmountTy()));
+ else if (Elt0 >= 0)
+ InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
+ DAG.getConstant(0xFF00, MVT::i16));
}
// If Elt0 is defined, extract it from the appropriate source. If the
// source byte is not also even, shift the extracted word right 8 bits. If
if ((Elt0 & 1) != 0)
InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
DAG.getConstant(8, TLI.getShiftAmountTy()));
+ else if (Elt1 >= 0)
+ InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
+ DAG.getConstant(0x00FF, MVT::i16));
InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
: InsElt0;
}
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
// FIXME there isn't really any debug info here, should come from the parent
DebugLoc dl = CP->getDebugLoc();
- SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(),
- getPointerTy(),
- CP->getAlignment());
+ SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
+ CP->getAlignment());
Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
}
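+// GetTLSADDR - Build the X86ISD::TLSADDR node for symbol@TLSGD, threading the
+// optional incoming flag, and copy out the result that the lowered
+// tls_get_addr call leaves in ReturnReg (EAX on x86-32, RAX on x86-64).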
+static SDValue
+GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
+ SDValue *InFlag, const MVT PtrVT, unsigned ReturnReg) {
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ DebugLoc dl = GA->getDebugLoc();
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ GA->getValueType(0),
+ GA->getOffset());
+ if (InFlag) {
+ SDValue Ops[] = { Chain, TGA, *InFlag };
+ Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
+ } else {
+ SDValue Ops[] = { Chain, TGA };
+ Chain = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
+ }
+ SDValue Flag = Chain.getValue(1);
+ return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
+}
+
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
PtrVT), InFlag);
InFlag = Chain.getValue(1);
- // emit leal symbol@TLSGD(,%ebx,1), %eax
- SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
- GA->getValueType(0),
- GA->getOffset());
- SDValue Ops[] = { Chain, TGA, InFlag };
- SDValue Result = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 3);
- InFlag = Result.getValue(2);
- Chain = Result.getValue(1);
-
- // call ___tls_get_addr. This function receives its argument in
- // the register EAX.
- Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Result, InFlag);
- InFlag = Chain.getValue(1);
-
- NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops1[] = { Chain,
- DAG.getTargetExternalSymbol("___tls_get_addr",
- PtrVT),
- DAG.getRegister(X86::EAX, PtrVT),
- DAG.getRegister(X86::EBX, PtrVT),
- InFlag };
- Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops1, 5);
- InFlag = Chain.getValue(1);
-
- return DAG.getCopyFromReg(Chain, dl, X86::EAX, PtrVT, InFlag);
+ return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const MVT PtrVT) {
- SDValue InFlag, Chain;
- DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
-
- // emit leaq symbol@TLSGD(%rip), %rdi
- SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
- GA->getValueType(0),
- GA->getOffset());
- SDValue Ops[] = { DAG.getEntryNode(), TGA};
- SDValue Result = DAG.getNode(X86ISD::TLSADDR, dl, NodeTys, Ops, 2);
- Chain = Result.getValue(1);
- InFlag = Result.getValue(2);
-
- // call __tls_get_addr. This function receives its argument in
- // the register RDI.
- Chain = DAG.getCopyToReg(Chain, dl, X86::RDI, Result, InFlag);
- InFlag = Chain.getValue(1);
-
- NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops1[] = { Chain,
- DAG.getTargetExternalSymbol("__tls_get_addr",
- PtrVT),
- DAG.getRegister(X86::RDI, PtrVT),
- InFlag };
- Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops1, 4);
- InFlag = Chain.getValue(1);
-
- return DAG.getCopyFromReg(Chain, dl, X86::RAX, PtrVT, InFlag);
+ return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT, X86::RAX);
}
// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT PtrVT, TLSModel::Model model) {
+ const MVT PtrVT, TLSModel::Model model,
+ bool is64Bit) {
DebugLoc dl = GA->getDebugLoc();
// Get the Thread Pointer
- SDValue ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER,
- DebugLoc::getUnknownLoc(), PtrVT);
+ SDValue Base = DAG.getNode(X86ISD::SegmentBaseAddress,
+ DebugLoc::getUnknownLoc(), PtrVT,
+ DAG.getRegister(is64Bit ? X86::FS : X86::GS,
+ MVT::i32));
+
+ SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Base,
+ NULL, 0);
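+ // (Per the ELF TLS ABI, the thread pointer sits at offset 0 from the
+ // segment base, %gs on x86-32 and %fs on x86-64, hence the zero-offset
+ // load through SegmentBaseAddress.)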
+
// emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
// exec)
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
switch (model) {
case TLSModel::GeneralDynamic:
case TLSModel::LocalDynamic: // not implemented
- case TLSModel::InitialExec: // not implemented
- case TLSModel::LocalExec: // not implemented
return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
- default:
- assert (0 && "Unknown TLS model");
+
+ case TLSModel::InitialExec:
+ case TLSModel::LocalExec:
+ return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, true);
}
} else {
switch (model) {
case TLSModel::InitialExec:
case TLSModel::LocalExec:
- return LowerToTLSExecModel(GA, DAG, getPointerTy(), model);
- default:
- assert (0 && "Unknown TLS model");
+ return LowerToTLSExecModel(GA, DAG, getPointerTy(), model, false);
}
}
+ assert(0 && "Unreachable");
+ return SDValue();
}
SDValue
int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
- StackSlot,
- PseudoSourceValue::getFixedStack(SSFI), 0);
+ StackSlot,
+ PseudoSourceValue::getFixedStack(SSFI), 0);
// Build the FILD
SDVTList Tys;
CV0.push_back(ConstantInt::get(APInt(32, 0)));
CV0.push_back(ConstantInt::get(APInt(32, 0)));
Constant *C0 = ConstantVector::get(CV0);
- SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 4);
+ SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
std::vector<Constant*> CV1;
CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4530000000000000ULL))));
CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4330000000000000ULL))));
Constant *C1 = ConstantVector::get(CV1);
- SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 4);
+ SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
SmallVector<SDValue, 4> MaskVec;
MaskVec.push_back(DAG.getConstant(0, MVT::i32));
CV.push_back(C);
}
Constant *C = ConstantVector::get(CV);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
false, 16);
CV.push_back(C);
}
Constant *C = ConstantVector::get(CV);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
false, 16);
CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
}
Constant *C = ConstantVector::get(CV);
- SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
false, 16);
CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
}
C = ConstantVector::get(CV);
- CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
+ CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
SDValue Mask2 = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
false, 16);
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
-SDValue X86TargetLowering::EmitTest(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
+ SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
+ // CF and OF aren't always set the way we want. Determine which
+ // of these we need.
+ bool NeedCF = false;
+ bool NeedOF = false;
+ switch (X86CC) {
+ case X86::COND_A: case X86::COND_AE:
+ case X86::COND_B: case X86::COND_BE:
+ NeedCF = true;
+ break;
+ case X86::COND_G: case X86::COND_GE:
+ case X86::COND_L: case X86::COND_LE:
+ case X86::COND_O: case X86::COND_NO:
+ NeedOF = true;
+ break;
+ default: break;
+ }
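+ // (e.g. COND_B is CF==1 and COND_L is SF!=OF; a reused arithmetic result
+ // is not guaranteed to model those bits the way TEST would.)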
+
// See if we can use the EFLAGS value from the operand instead of
- // doing a separate TEST.
- if (Op.getResNo() == 0) {
+ // doing a separate TEST. TEST always sets OF and CF to 0, so unless
+ // we prove that the arithmetic won't overflow, we can't use OF or CF.
+ if (Op.getResNo() == 0 && !NeedOF && !NeedCF) {
unsigned Opcode = 0;
unsigned NumOperands = 0;
switch (Op.getNode()->getOpcode()) {
break;
}
if (Opcode != 0) {
- const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::i32);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0, e = NumOperands; i != e; ++i)
+ for (unsigned i = 0; i != NumOperands; ++i)
Ops.push_back(Op.getOperand(i));
- SDValue New = DAG.getNode(Opcode, dl, VTs, 2, &Ops[0], Ops.size());
+ SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
DAG.ReplaceAllUsesWith(Op, New);
return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
-SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, SelectionDAG &DAG) {
+SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
+ SelectionDAG &DAG) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
if (C->getAPIntValue() == 0)
- return EmitTest(Op0, DAG);
+ return EmitTest(Op0, X86CC, DAG);
DebugLoc dl = Op0.getDebugLoc();
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
- SDValue Cond = EmitCmp(Op0, Op1, DAG);
+ SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG);
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86CC, MVT::i8), Cond);
}
!isScalarFPTypeInSSEReg(VT)) // FPStack?
IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
- if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || Opc == X86ISD::BT) { // FIXME
+ if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
+ Opc == X86ISD::BT) { // FIXME
Cond = Cmp;
addTest = false;
}
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- Cond = EmitTest(Cond, DAG);
+ Cond = EmitTest(Cond, X86::COND_NE, DAG);
}
- const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(),
- MVT::Flag);
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
SmallVector<SDValue, 4> Ops;
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
// condition is true.
Ops.push_back(Op.getOperand(1));
Ops.push_back(CC);
Ops.push_back(Cond);
- return DAG.getNode(X86ISD::CMOV, dl, VTs, 2, &Ops[0], Ops.size());
+ return DAG.getNode(X86ISD::CMOV, dl, VTs, &Ops[0], Ops.size());
}
// isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- Cond = EmitTest(Cond, DAG);
+ Cond = EmitTest(Cond, X86::COND_NE, DAG);
}
return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cond);
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
- case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
+ case X86ISD::SegmentBaseAddress: return "X86ISD::SegmentBaseAddress";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
case X86ISD::UMUL: return "X86ISD::UMUL";
case X86ISD::INC: return "X86ISD::INC";
case X86ISD::DEC: return "X86ISD::DEC";
+ case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
}
}
return Subtarget->is64Bit() || NumBits1 < 64;
}
+bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
+ // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
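+ // (e.g. "movl %esi, %eax" clears bits 63:32 of %rax as a side effect, so
+ // the i32->i64 zext costs no extra instruction.)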
+ return Ty1 == Type::Int32Ty && Ty2 == Type::Int64Ty && Subtarget->is64Bit();
+}
+
+bool X86TargetLowering::isZExtFree(MVT VT1, MVT VT2) const {
+ // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
+ return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
+}
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
newMBB->addSuccessor(newMBB);
// Insert instructions into newMBB based on incoming instruction
- assert(bInstr->getNumOperands() < 8 && "unexpected number of operands");
+ assert(bInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+ "unexpected number of operands");
DebugLoc dl = bInstr->getDebugLoc();
MachineOperand& destOper = bInstr->getOperand(0);
- MachineOperand* argOpers[6];
+ MachineOperand* argOpers[2 + X86AddrNumOperands];
int numArgs = bInstr->getNumOperands() - 1;
for (int i=0; i < numArgs; ++i)
argOpers[i] = &bInstr->getOperand(i+1);
// x86 address has 5 operands: base, index, scale, displacement, and segment
- int lastAddrIndx = 3; // [0,3]
- int valArgIndx = 4;
+ int lastAddrIndx = X86AddrNumOperands - 1; // [0,4]
+ int valArgIndx = lastAddrIndx + 1;
unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
DebugLoc dl = bInstr->getDebugLoc();
// Insert instructions into newMBB based on incoming instruction
// There are 8 "real" operands plus 9 implicit def/uses, ignored here.
- assert(bInstr->getNumOperands() < 18 && "unexpected number of operands");
+ assert(bInstr->getNumOperands() < X86AddrNumOperands + 14 &&
+ "unexpected number of operands");
MachineOperand& dest1Oper = bInstr->getOperand(0);
MachineOperand& dest2Oper = bInstr->getOperand(1);
- MachineOperand* argOpers[6];
- for (int i=0; i < 6; ++i)
+ MachineOperand* argOpers[2 + X86AddrNumOperands];
+ for (int i=0; i < 2 + X86AddrNumOperands; ++i)
argOpers[i] = &bInstr->getOperand(i+2);
// x86 address has 5 operands: base, index, scale, displacement, and segment
- int lastAddrIndx = 3; // [0,3]
+ int lastAddrIndx = X86AddrNumOperands - 1; // [0,4]
unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
// add 4 to displacement.
- for (int i=0; i <= lastAddrIndx-1; ++i)
+ for (int i=0; i <= lastAddrIndx-2; ++i)
(*MIB).addOperand(*argOpers[i]);
MachineOperand newOp3 = *(argOpers[3]);
if (newOp3.isImm())
else
newOp3.setOffset(newOp3.getOffset()+4);
(*MIB).addOperand(newOp3);
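+ // Re-attach the segment operand, which the address-copying loop above
+ // stopped short of.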
+ (*MIB).addOperand(*argOpers[lastAddrIndx]);
// t3/4 are defined later, at the bottom of the loop
unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
tt2 = t2;
}
- assert((argOpers[4]->isReg() || argOpers[4]->isImm()) &&
+ int valArgIndx = lastAddrIndx + 1;
+ assert((argOpers[valArgIndx]->isReg() ||
+ argOpers[valArgIndx]->isImm()) &&
"invalid operand");
unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
- if (argOpers[4]->isReg())
+ if (argOpers[valArgIndx]->isReg())
MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5);
else
MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5);
if (regOpcL != X86::MOV32rr)
MIB.addReg(tt1);
- (*MIB).addOperand(*argOpers[4]);
- assert(argOpers[5]->isReg() == argOpers[4]->isReg());
- assert(argOpers[5]->isImm() == argOpers[4]->isImm());
- if (argOpers[5]->isReg())
+ (*MIB).addOperand(*argOpers[valArgIndx]);
+ assert(argOpers[valArgIndx + 1]->isReg() ==
+ argOpers[valArgIndx]->isReg());
+ assert(argOpers[valArgIndx + 1]->isImm() ==
+ argOpers[valArgIndx]->isImm());
+ if (argOpers[valArgIndx + 1]->isReg())
MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6);
else
MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6);
if (regOpcH != X86::MOV32rr)
MIB.addReg(tt2);
- (*MIB).addOperand(*argOpers[5]);
+ (*MIB).addOperand(*argOpers[valArgIndx + 1]);
MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EAX);
MIB.addReg(t1);
DebugLoc dl = mInstr->getDebugLoc();
// Insert instructions into newMBB based on incoming instruction
- assert(mInstr->getNumOperands() < 8 && "unexpected number of operands");
+ assert(mInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+ "unexpected number of operands");
MachineOperand& destOper = mInstr->getOperand(0);
- MachineOperand* argOpers[6];
+ MachineOperand* argOpers[2 + X86AddrNumOperands];
int numArgs = mInstr->getNumOperands() - 1;
for (int i=0; i < numArgs; ++i)
argOpers[i] = &mInstr->getOperand(i+1);
// x86 address has 5 operands: base, index, scale, displacement, and segment
- int lastAddrIndx = 3; // [0,3]
- int valArgIndx = 4;
+ int lastAddrIndx = X86AddrNumOperands - 1; // [0,4]
+ int valArgIndx = lastAddrIndx + 1;
unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
AM.Disp = Op.getImm();
}
addFullAddress(BuildMI(BB, dl, TII->get(Opc)), AM)
- .addReg(MI->getOperand(4).getReg());
+ .addReg(MI->getOperand(X86AddrNumOperands).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(BB, dl, TII->get(X86::FLDCW16m)), CWFrameIdx);
/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
-/// order.
+/// order. In the case of v2i64, it will see if it can rewrite the
+/// shuffle to be an appropriate build vector so it can take advantage of
+/// PerformBuildVectorCombine.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI) {
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
DebugLoc dl = N->getDebugLoc();
MVT VT = N->getValueType(0);
MVT EVT = VT.getVectorElementType();
SDValue PermMask = N->getOperand(2);
unsigned NumElems = PermMask.getNumOperands();
+
+ // For x86-32 machines, if we see an insert and then a shuffle in a v2i64
+ // where the upper half is 0, it is advantageous to rewrite it as a build
+ // vector of (0, val) so it can use movq.
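+ // e.g. shuffle <(insert undef, X, 0), (build_vector 0, 0)>, <0, 2>
+ // --> build_vector X, 0, which isel can match as a single movq.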
+ if (VT == MVT::v2i64) {
+ SDValue In[2];
+ In[0] = N->getOperand(0);
+ In[1] = N->getOperand(1);
+ unsigned Idx0 =
+ cast<ConstantSDNode>(PermMask.getOperand(0))->getZExtValue();
+ unsigned Idx1 =
+ cast<ConstantSDNode>(PermMask.getOperand(1))->getZExtValue();
+ if (In[0].getValueType().getVectorNumElements() == NumElems &&
+ In[Idx0/2].getOpcode() == ISD::INSERT_VECTOR_ELT &&
+ In[Idx1/2].getOpcode() == ISD::BUILD_VECTOR) {
+ ConstantSDNode* InsertVecIdx =
+ dyn_cast<ConstantSDNode>(In[Idx0/2].getOperand(2));
+ if (InsertVecIdx &&
+ InsertVecIdx->getZExtValue() == (Idx0 % 2) &&
+ isZeroNode(In[Idx1/2].getOperand(Idx1 % 2))) {
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, VT,
+ In[Idx0/2].getOperand(1),
+ In[Idx1/2].getOperand(Idx1 % 2));
+ }
+ }
+ }
+
+ // Try to combine a vector_shuffle into a 128-bit load.
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDNode *Base = NULL;
if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, Base,
DAG, MFI, TLI))
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
- DebugLoc dl = N->getDebugLoc();
+ const X86Subtarget *Subtarget) {
+ DebugLoc DL = N->getDebugLoc();
SDValue Cond = N->getOperand(0);
-
+ // Get the LHS/RHS of the select.
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+
// If we have SSE[12] support, try to form min/max nodes.
if (Subtarget->hasSSE2() &&
- (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
- if (Cond.getOpcode() == ISD::SETCC) {
- // Get the LHS/RHS of the select.
- SDValue LHS = N->getOperand(1);
- SDValue RHS = N->getOperand(2);
- ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
-
- unsigned Opcode = 0;
- if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
- switch (CC) {
- default: break;
- case ISD::SETOLE: // (X <= Y) ? X : Y -> min
- case ISD::SETULE:
- case ISD::SETLE:
- if (!UnsafeFPMath) break;
- // FALL THROUGH.
- case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
- case ISD::SETLT:
- Opcode = X86ISD::FMIN;
- break;
+ (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) &&
+ Cond.getOpcode() == ISD::SETCC) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
- case ISD::SETOGT: // (X > Y) ? X : Y -> max
- case ISD::SETUGT:
- case ISD::SETGT:
- if (!UnsafeFPMath) break;
- // FALL THROUGH.
- case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
- case ISD::SETGE:
- Opcode = X86ISD::FMAX;
- break;
- }
- } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
- switch (CC) {
- default: break;
- case ISD::SETOGT: // (X > Y) ? Y : X -> min
- case ISD::SETUGT:
- case ISD::SETGT:
- if (!UnsafeFPMath) break;
- // FALL THROUGH.
- case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
- case ISD::SETGE:
- Opcode = X86ISD::FMIN;
- break;
+ unsigned Opcode = 0;
+ if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
+ switch (CC) {
+ default: break;
+ case ISD::SETOLE: // (X <= Y) ? X : Y -> min
+ case ISD::SETULE:
+ case ISD::SETLE:
+ if (!UnsafeFPMath) break;
+ // FALL THROUGH.
+ case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
+ case ISD::SETLT:
+ Opcode = X86ISD::FMIN;
+ break;
- case ISD::SETOLE: // (X <= Y) ? Y : X -> max
- case ISD::SETULE:
- case ISD::SETLE:
- if (!UnsafeFPMath) break;
- // FALL THROUGH.
- case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
- case ISD::SETLT:
- Opcode = X86ISD::FMAX;
- break;
- }
+ case ISD::SETOGT: // (X > Y) ? X : Y -> max
+ case ISD::SETUGT:
+ case ISD::SETGT:
+ if (!UnsafeFPMath) break;
+ // FALL THROUGH.
+ case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
+ case ISD::SETGE:
+ Opcode = X86ISD::FMAX;
+ break;
}
+ } else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
+ switch (CC) {
+ default: break;
+ case ISD::SETOGT: // (X > Y) ? Y : X -> min
+ case ISD::SETUGT:
+ case ISD::SETGT:
+ if (!UnsafeFPMath) break;
+ // FALL THROUGH.
+ case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
+ case ISD::SETGE:
+ Opcode = X86ISD::FMIN;
+ break;
- if (Opcode)
- return DAG.getNode(Opcode, dl, N->getValueType(0), LHS, RHS);
+ case ISD::SETOLE: // (X <= Y) ? Y : X -> max
+ case ISD::SETULE:
+ case ISD::SETLE:
+ if (!UnsafeFPMath) break;
+ // FALL THROUGH.
+ case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
+ case ISD::SETLT:
+ Opcode = X86ISD::FMAX;
+ break;
+ }
}
+ if (Opcode)
+ return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
+ }
+
+ // If this is a select between two integer constants, try to do some
+ // optimizations.
+ if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
+ if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
+ // Don't do this for crazy integer types.
+ if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
+ // If this is efficiently invertible, canonicalize the LHSC/RHSC values
+ // so that TrueC (the true value) is larger than FalseC.
+ bool NeedsCondInvert = false;
+
+ if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
+ // Efficiently invertible.
+ (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
+ (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
+ isa<ConstantSDNode>(Cond.getOperand(1))))) {
+ NeedsCondInvert = true;
+ std::swap(TrueC, FalseC);
+ }
+
+ // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
+ if (FalseC->getAPIntValue() == 0 &&
+ TrueC->getAPIntValue().isPowerOf2()) {
+ if (NeedsCondInvert) // Invert the condition if needed.
+ Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
+ DAG.getConstant(1, Cond.getValueType()));
+
+ // Zero extend the condition if needed.
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
+
+ unsigned ShAmt = TrueC->getAPIntValue().logBase2();
+ return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
+ DAG.getConstant(ShAmt, MVT::i8));
+ }
+
+ // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
+ if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
+ if (NeedsCondInvert) // Invert the condition if needed.
+ Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
+ DAG.getConstant(1, Cond.getValueType()));
+
+ // Zero extend the condition if needed.
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
+ FalseC->getValueType(0), Cond);
+ return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
+ SDValue(FalseC, 0));
+ }
+
+ // Optimize cases that will turn into an LEA instruction. This requires
+ // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
+ if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
+ uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
+ if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
+
+ bool isFastMultiplier = false;
+ if (Diff < 10) {
+ switch ((unsigned char)Diff) {
+ default: break;
+ case 1: // result = add base, cond
+ case 2: // result = lea base( , cond*2)
+ case 3: // result = lea base(cond, cond*2)
+ case 4: // result = lea base( , cond*4)
+ case 5: // result = lea base(cond, cond*4)
+ case 8: // result = lea base( , cond*8)
+ case 9: // result = lea base(cond, cond*8)
+ isFastMultiplier = true;
+ break;
+ }
+ }
+
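+ // e.g. (Cond ? 7 : 4) has Diff == 3 and becomes lea 4(cond, cond*2).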
+ if (isFastMultiplier) {
+ APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
+ if (NeedsCondInvert) // Invert the condition if needed.
+ Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
+ DAG.getConstant(1, Cond.getValueType()));
+
+ // Zero extend the condition if needed.
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
+ Cond);
+ // Scale the condition by the difference.
+ if (Diff != 1)
+ Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
+ DAG.getConstant(Diff, Cond.getValueType()));
+
+ // Add the base if non-zero.
+ if (FalseC->getAPIntValue() != 0)
+ Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
+ SDValue(FalseC, 0));
+ return Cond;
+ }
+ }
+ }
+ }
+
+ return SDValue();
+}
+
+/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
+static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ DebugLoc DL = N->getDebugLoc();
+
+ // If the flag operand isn't dead, don't touch this CMOV.
+ if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
+ return SDValue();
+
+ // If this is a select between two integer constants, try to do some
+ // optimizations. Note that the operands are ordered the opposite of SELECT
+ // operands.
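+ // (SELECT is (Cond, TrueVal, FalseVal); the X86ISD::CMOV seen here is
+ // (FalseVal, TrueVal, CC, Cond).)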
+ if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
+ if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+ // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
+ // larger than FalseC (the false value).
+ X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
+
+ if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
+ CC = X86::GetOppositeBranchCondition(CC);
+ std::swap(TrueC, FalseC);
+ }
+
+ // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
+ // This is efficient for any integer data type (including i8/i16) and
+ // shift amount.
+ if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
+ SDValue Cond = N->getOperand(3);
+ Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
+ DAG.getConstant(CC, MVT::i8), Cond);
+
+ // Zero extend the condition if needed.
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
+
+ unsigned ShAmt = TrueC->getAPIntValue().logBase2();
+ Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
+ DAG.getConstant(ShAmt, MVT::i8));
+ if (N->getNumValues() == 2) // Dead flag value?
+ return DCI.CombineTo(N, Cond, SDValue());
+ return Cond;
+ }
+
+ // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
+ // for any integer data type, including i8/i16.
+ if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
+ SDValue Cond = N->getOperand(3);
+ Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
+ DAG.getConstant(CC, MVT::i8), Cond);
+
+ // Zero extend the condition if needed.
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
+ FalseC->getValueType(0), Cond);
+ Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
+ SDValue(FalseC, 0));
+
+ if (N->getNumValues() == 2) // Dead flag value?
+ return DCI.CombineTo(N, Cond, SDValue());
+ return Cond;
+ }
+
+ // Optimize cases that will turn into an LEA instruction. This requires
+ // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
+ if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
+ uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
+ if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
+
+ bool isFastMultiplier = false;
+ if (Diff < 10) {
+ switch ((unsigned char)Diff) {
+ default: break;
+ case 1: // result = add base, cond
+ case 2: // result = lea base( , cond*2)
+ case 3: // result = lea base(cond, cond*2)
+ case 4: // result = lea base( , cond*4)
+ case 5: // result = lea base(cond, cond*4)
+ case 8: // result = lea base( , cond*8)
+ case 9: // result = lea base(cond, cond*8)
+ isFastMultiplier = true;
+ break;
+ }
+ }
+
+ if (isFastMultiplier) {
+ APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
+ SDValue Cond = N->getOperand(3);
+ Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
+ DAG.getConstant(CC, MVT::i8), Cond);
+ // Zero extend the condition if needed.
+ Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
+ Cond);
+ // Scale the condition by the difference.
+ if (Diff != 1)
+ Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
+ DAG.getConstant(Diff, Cond.getValueType()));
+
+ // Add the base if non-zero.
+ if (FalseC->getAPIntValue() != 0)
+ Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
+ SDValue(FalseC, 0));
+ if (N->getNumValues() == 2) // Dead flag value?
+ return DCI.CombineTo(N, Cond, SDValue());
+ return Cond;
+ }
+ }
+ }
}
+ return SDValue();
+}
+
+
+/// PerformMulCombine - Optimize a single multiply by a constant into two
+/// multiplies in order to implement it with two cheaper instructions, e.g.
+/// LEA + SHL, LEA + LEA.
+static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ if (DAG.getMachineFunction().
+ getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ return SDValue();
+
+ if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
+ return SDValue();
+
+ MVT VT = N->getValueType(0);
+ if (VT != MVT::i64)
+ return SDValue();
+
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+ if (!C)
+ return SDValue();
+ uint64_t MulAmt = C->getZExtValue();
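+ // pow2, 3, 5, and 9 are already a single SHL or LEA; nothing to split.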
+ if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
+ return SDValue();
+
+ uint64_t MulAmt1 = 0;
+ uint64_t MulAmt2 = 0;
+ if ((MulAmt % 9) == 0) {
+ MulAmt1 = 9;
+ MulAmt2 = MulAmt / 9;
+ } else if ((MulAmt % 5) == 0) {
+ MulAmt1 = 5;
+ MulAmt2 = MulAmt / 5;
+ } else if ((MulAmt % 3) == 0) {
+ MulAmt1 = 3;
+ MulAmt2 = MulAmt / 3;
+ }
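+ // e.g. MulAmt == 45 splits as 9 * 5 (two LEAs); MulAmt == 40 splits as
+ // 5 * 8 (an LEA and a SHL).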
+ if (MulAmt2 &&
+ (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
+ DebugLoc DL = N->getDebugLoc();
+
+ if (isPowerOf2_64(MulAmt2) &&
+ !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
+ // If the second multiplier is pow2, issue it first. We want the multiply by
+ // 3, 5, or 9 to be folded into the addressing mode unless the lone use
+ // is an add.
+ std::swap(MulAmt1, MulAmt2);
+
+ SDValue NewMul;
+ if (isPowerOf2_64(MulAmt1))
+ NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
+ else
+ NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
+ DAG.getConstant(MulAmt1, VT));
+
+ if (isPowerOf2_64(MulAmt2))
+ NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
+ DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
+ else
+ NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
+ DAG.getConstant(MulAmt2, VT));
+ // Do not add new nodes to DAG combiner worklist.
+ DCI.CombineTo(N, NewMul, false);
+ }
return SDValue();
}
+
/// PerformShiftCombine - Transforms vector shift nodes to use vector shifts
/// when possible.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
SDValue ShAmtOp = N->getOperand(1);
MVT EltVT = VT.getVectorElementType();
- DebugLoc dl = N->getDebugLoc();
+ DebugLoc DL = N->getDebugLoc();
SDValue BaseShAmt;
if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
unsigned NumElts = VT.getVectorNumElements();
}
} else if (ShAmtOp.getOpcode() == ISD::VECTOR_SHUFFLE &&
isSplatMask(ShAmtOp.getOperand(2).getNode())) {
- BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ShAmtOp,
+ BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, ShAmtOp,
DAG.getIntPtrConstant(0));
} else
return SDValue();
if (EltVT.bitsGT(MVT::i32))
- BaseShAmt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BaseShAmt);
+ BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt);
else if (EltVT.bitsLT(MVT::i32))
- BaseShAmt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BaseShAmt);
+ BaseShAmt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, BaseShAmt);
// The shift amount is identical so we can do a vector shift.
SDValue ValOp = N->getOperand(0);
break;
case ISD::SHL:
if (VT == MVT::v2i64)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
ValOp, BaseShAmt);
if (VT == MVT::v4i32)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
ValOp, BaseShAmt);
if (VT == MVT::v8i16)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
ValOp, BaseShAmt);
break;
case ISD::SRA:
if (VT == MVT::v4i32)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
ValOp, BaseShAmt);
if (VT == MVT::v8i16)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
ValOp, BaseShAmt);
break;
case ISD::SRL:
if (VT == MVT::v2i64)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
ValOp, BaseShAmt);
if (VT == MVT::v4i32)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
ValOp, BaseShAmt);
if (VT == MVT::v8i16)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
ValOp, BaseShAmt);
break;
/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget *Subtarget) {
// Turn load->store of MMX types into GPR load/stores. This avoids clobbering
// the FP state in cases where an emms may be missing.
// A preferable solution to the general problem is to figure out the right
// places to insert EMMS. This qualifies as a quick hack.
+
+ // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
StoreSDNode *St = cast<StoreSDNode>(N);
- if (St->getValue().getValueType().isVector() &&
- St->getValue().getValueType().getSizeInBits() == 64 &&
+ MVT VT = St->getValue().getValueType();
+ if (VT.getSizeInBits() != 64)
+ return SDValue();
+
+ bool F64IsLegal = !UseSoftFloat && !NoImplicitFloat && Subtarget->hasSSE2();
+ if ((VT.isVector() ||
+ (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
isa<LoadSDNode>(St->getValue()) &&
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
Ops.push_back(ChainVal->getOperand(i));
}
}
- if (Ld) {
- DebugLoc dl = N->getDebugLoc();
- // If we are a 64-bit capable x86, lower to a single movq load/store pair.
- if (Subtarget->is64Bit()) {
- SDValue NewLd = DAG.getLoad(MVT::i64, dl, Ld->getChain(),
- Ld->getBasePtr(), Ld->getSrcValue(),
- Ld->getSrcValueOffset(), Ld->isVolatile(),
- Ld->getAlignment());
- SDValue NewChain = NewLd.getValue(1);
- if (TokenFactorIndex != -1) {
- Ops.push_back(NewChain);
- NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Ops[0],
- Ops.size());
- }
- return DAG.getStore(NewChain, dl, NewLd, St->getBasePtr(),
- St->getSrcValue(), St->getSrcValueOffset(),
- St->isVolatile(), St->getAlignment());
- }
- // Otherwise, lower to two 32-bit copies.
- SDValue LoAddr = Ld->getBasePtr();
- SDValue HiAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, LoAddr,
- DAG.getConstant(4, MVT::i32));
+ if (!Ld || !ISD::isNormalLoad(Ld))
+ return SDValue();
- SDValue LoLd = DAG.getLoad(MVT::i32, dl, Ld->getChain(), LoAddr,
- Ld->getSrcValue(), Ld->getSrcValueOffset(),
- Ld->isVolatile(), Ld->getAlignment());
- SDValue HiLd = DAG.getLoad(MVT::i32, dl, Ld->getChain(), HiAddr,
- Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
- Ld->isVolatile(),
- MinAlign(Ld->getAlignment(), 4));
+ // If this is not the MMX case, i.e. we are just turning i64 load/store
+ // into f64 load/store, avoid the transformation if there are multiple
+ // uses of the loaded value.
+ if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
+ return SDValue();
- SDValue NewChain = LoLd.getValue(1);
+ DebugLoc LdDL = Ld->getDebugLoc();
+ DebugLoc StDL = N->getDebugLoc();
+ // If we are a 64-bit capable x86, lower to a single movq load/store pair.
+ // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
+ // pair instead.
+ if (Subtarget->is64Bit() || F64IsLegal) {
+ MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
+ SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(),
+ Ld->getBasePtr(), Ld->getSrcValue(),
+ Ld->getSrcValueOffset(), Ld->isVolatile(),
+ Ld->getAlignment());
+ SDValue NewChain = NewLd.getValue(1);
if (TokenFactorIndex != -1) {
- Ops.push_back(LoLd);
- Ops.push_back(HiLd);
- NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Ops[0],
+ Ops.push_back(NewChain);
+ NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
Ops.size());
}
-
- LoAddr = St->getBasePtr();
- HiAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, LoAddr,
- DAG.getConstant(4, MVT::i32));
-
- SDValue LoSt = DAG.getStore(NewChain, dl, LoLd, LoAddr,
+ return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
St->getSrcValue(), St->getSrcValueOffset(),
St->isVolatile(), St->getAlignment());
- SDValue HiSt = DAG.getStore(NewChain, dl, HiLd, HiAddr,
- St->getSrcValue(),
- St->getSrcValueOffset() + 4,
- St->isVolatile(),
- MinAlign(St->getAlignment(), 4));
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoSt, HiSt);
}
+
+ // Otherwise, lower to two pairs of 32-bit loads / stores.
+ SDValue LoAddr = Ld->getBasePtr();
+ SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
+ DAG.getConstant(4, MVT::i32));
+
+ SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
+ Ld->getSrcValue(), Ld->getSrcValueOffset(),
+ Ld->isVolatile(), Ld->getAlignment());
+ SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
+ Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
+ Ld->isVolatile(),
+ MinAlign(Ld->getAlignment(), 4));
+
+ SDValue NewChain = LoLd.getValue(1);
+ if (TokenFactorIndex != -1) {
+ Ops.push_back(LoLd);
+ Ops.push_back(HiLd);
+ NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
+ Ops.size());
+ }
+
+ LoAddr = St->getBasePtr();
+ HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
+ DAG.getConstant(4, MVT::i32));
+
+ SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
+ St->getSrcValue(), St->getSrcValueOffset(),
+ St->isVolatile(), St->getAlignment());
+ SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
+ St->getSrcValue(),
+ St->getSrcValueOffset() + 4,
+ St->isVolatile(),
+ MinAlign(St->getAlignment(), 4));
+ return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
}
return SDValue();
}
case ISD::BUILD_VECTOR:
return PerformBuildVectorCombine(N, DAG, DCI, Subtarget, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
+ case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
+ case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
}
if (DestReg) {
Res.first = DestReg;
- Res.second = Res.second = X86::GR8RegisterClass;
+ Res.second = X86::GR8RegisterClass;
}
} else if (VT == MVT::i32) {
unsigned DestReg = 0;
}
if (DestReg) {
Res.first = DestReg;
- Res.second = Res.second = X86::GR32RegisterClass;
+ Res.second = X86::GR32RegisterClass;
}
} else if (VT == MVT::i64) {
unsigned DestReg = 0;
}
if (DestReg) {
Res.first = DestReg;
- Res.second = Res.second = X86::GR64RegisterClass;
+ Res.second = X86::GR64RegisterClass;
}
}
} else if (Res.second == X86::FR32RegisterClass ||