setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
+ // We don't accept any truncstore of integer registers.
+ setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
+ setTruncStoreAction(MVT::i32, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
+ setTruncStoreAction(MVT::i16, MVT::i8, Expand);
+
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAARG , MVT::Other, Expand);
AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
}
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
/// it exists, skipping a possible ISD::TokenFactor.
static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
- if (Chain.getOpcode()==X86ISD::TAILCALL) {
+ if (Chain.getOpcode() == X86ISD::TAILCALL) {
return Chain;
- } else if (Chain.getOpcode()==ISD::TokenFactor) {
+ } else if (Chain.getOpcode() == ISD::TokenFactor) {
if (Chain.getNumOperands() &&
- Chain.getOperand(0).getOpcode()==X86ISD::TAILCALL)
+ Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
return Chain.getOperand(0);
}
return Chain;
}
-
+
/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
SDOperand TailCall = Chain;
SDOperand TargetAddress = TailCall.getOperand(1);
SDOperand StackAdjustment = TailCall.getOperand(2);
- assert ( ((TargetAddress.getOpcode() == ISD::Register &&
+ assert(((TargetAddress.getOpcode() == ISD::Register &&
(cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
          TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
         "Expecting a global address, external symbol, or register");
- assert( StackAdjustment.getOpcode() == ISD::Constant &&
- "Expecting a const value");
+ assert(StackAdjustment.getOpcode() == ISD::Constant &&
+ "Expecting a const value");
SmallVector<SDOperand,8> Operands;
Operands.push_back(Chain.getOperand(0));
// If this is an FP return with ScalarSSE, we need to move the value from
// an XMM register onto the fp-stack.
- if ((X86ScalarSSEf32 && RVLocs[0].getValVT()==MVT::f32) ||
- (X86ScalarSSEf64 && RVLocs[0].getValVT()==MVT::f64)) {
+ if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
SDOperand MemLoc;
// If this is a load into a scalarsse value, don't store the loaded value
// back to the stack, only to reload it: just replace the scalar-sse load.
if (ISD::isNON_EXTLoad(Value.Val) &&
- (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
+ Chain.reachesChainWithoutSideEffects(Value.getOperand(0))) {
Chain = Value.getOperand(0);
MemLoc = Value.getOperand(1);
} else {
// If we are using ScalarSSE, store ST(0) to the stack and reload it into
// an XMM register.
- if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
- (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
+ if (isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
SDOperand StoreLoc;
const Value *SrcVal = 0;
int SrcValOffset = 0;
return None;
}
+
+// IsPossiblyOverwrittenArgumentOfTailCall - Check whether the operand could be
+// overwritten when lowering the outgoing arguments of a tail call. Currently
+// the implementation is very conservative and assumes that any argument
+// originating from FORMAL_ARGUMENTS or from a CopyFromReg of a virtual
+// register could be overwritten by direct lowering.
+// Possible improvement:
+// Check the MERGE_VALUES corresponding to FORMAL_ARGUMENTS for CopyFromReg
+// nodes that indicate arguments passed inreg, which also need not be lowered
+// to a safe stack slot.
+static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op) {
+  RegisterSDNode *OpReg = NULL;
+ if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
+      (Op.getOpcode() == ISD::CopyFromReg &&
+       (OpReg = cast<RegisterSDNode>(Op.getOperand(1))) &&
+       OpReg->getReg() >= MRegisterInfo::FirstVirtualRegister))
+ return true;
+ return false;
+}
+
+// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
+// specified by "Src" to the address "Dst", with size and alignment information
+// taken from the parameter attribute. The copy is passed as a byval function
+// parameter.
+static SDOperand
+CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
+ unsigned Flags, SelectionDAG &DAG) {
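+  // The byval alignment is stored as its log2 in the ByValAlign field; the
+  // byval size occupies the bits above ByValSizeOffs.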
+ unsigned Align = 1 <<
+ ((Flags & ISD::ParamFlags::ByValAlign) >> ISD::ParamFlags::ByValAlignOffs);
+ unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
+ ISD::ParamFlags::ByValSizeOffs;
+ SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
+ SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
+ SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
+ return DAG.getMemcpy(Chain, Dst, Src, SizeNode, AlignNode, AlwaysInline);
+}
+
SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo *MFI,
SDOperand Root, unsigned i) {
// Create the nodes corresponding to a load from this parameter slot.
+ unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
+ bool isByVal = Flags & ISD::ParamFlags::ByVal;
+
+ // FIXME: For now, all byval parameter objects are marked mutable. This
+ // can be changed with more analysis.
int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
+ VA.getLocMemOffset(), !isByVal);
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
-
- unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
-
- if (Flags & ISD::ParamFlags::ByVal)
+ if (isByVal)
return FIN;
- else
- return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
+ return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
}
SDOperand
SmallVector<SDOperand, 8> MemOps;
SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
- DAG.getConstant(VarArgsGPOffset,
- getPointerTy()));
+ DAG.getIntPtrConstant(VarArgsGPOffset));
for (; NumIntRegs != 6; ++NumIntRegs) {
unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
X86::GR64RegisterClass);
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(8, getPointerTy()));
+ DAG.getIntPtrConstant(8));
}
// Now store the XMM (fp + vector) parameter registers.
FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
- DAG.getConstant(VarArgsFPOffset, getPointerTy()));
+ DAG.getIntPtrConstant(VarArgsFPOffset));
for (; NumXMMRegs != 8; ++NumXMMRegs) {
unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
X86::VR128RegisterClass);
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(16, getPointerTy()));
+ DAG.getIntPtrConstant(16));
}
if (!MemOps.empty())
Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
&ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
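+
+/// LowerMemOpCallTo - Lower an argument that is passed on the stack: byval
+/// arguments are copied to their stack slot, all others are stored there.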
+SDOperand
+X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
+ const SDOperand &StackPtr,
+ const CCValAssign &VA,
+ SDOperand Chain,
+ SDOperand Arg) {
+ SDOperand PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
+ unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
+  if (Flags & ISD::ParamFlags::ByVal)
+    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
+ return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
+}
+
SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
SDOperand Chain = Op.getOperand(0);
MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
}
- Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes));
SDOperand RetAddrFrIdx, NewRetAddrFrIdx;
if (IsTailCall) {
SDOperand StackPtr;
- // Walk the register/memloc assignments, inserting copies/loads.
- // For tail calls, lower arguments first to the stack slot where they would
- // normally - in case of a normal function call - be.
+  // Walk the register/memloc assignments, inserting copies/loads. For tail
+  // calls, lower arguments that might be overwritten to the stack slots where
+  // they would go on a normal function call.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
- assert(VA.isMemLoc());
- if (StackPtr.Val == 0)
- StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
-
- MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
- Arg));
+ if (!IsTailCall || IsPossiblyOverwrittenArgumentOfTailCall(Arg)) {
+ assert(VA.isMemLoc());
+ if (StackPtr.Val == 0)
+ StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
+ }
}
}
InFlag = Chain.getValue(1);
}
- // Copy from stack slots to stack slot of a tail called function. This needs
- // to be done because if we would lower the arguments directly to their real
- // stack slot we might end up overwriting each other.
- // TODO: To make this more efficient (sometimes saving a store/load) we could
- // analyse the arguments and emit this store/load/store sequence only for
- // arguments which would be overwritten otherwise.
+  // For tail calls, lower the arguments to their 'real' stack slots.
if (IsTailCall) {
SmallVector<SDOperand, 8> MemOpChains2;
- SDOperand PtrOff;
SDOperand FIN;
int FI = 0;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc()) {
+ assert(VA.isMemLoc());
+ SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
-
- // Get source stack slot.
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(),
- getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
FIN = DAG.getFrameIndex(FI, MVT::i32);
+ SDOperand Source = Arg;
+ if (IsPossiblyOverwrittenArgumentOfTailCall(Arg)) {
+          // Copy from the incoming argument's stack slot to the stack slot of
+          // the tail-called function. This needs to be done because lowering
+          // the arguments directly to their real stack slots could overwrite
+          // arguments that have not been copied yet.
+          // Get the source stack slot.
+ Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
+ if (StackPtr.Val == 0)
+ StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
+ Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source);
+          if ((Flags & ISD::ParamFlags::ByVal) == 0)
+ Source = DAG.getLoad(VA.getValVT(), Chain, Source, NULL, 0);
+ }
+
if (Flags & ISD::ParamFlags::ByVal) {
// Copy relative to framepointer.
- unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
- ISD::ParamFlags::ByValAlignOffs);
-
- unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
- ISD::ParamFlags::ByValSizeOffs;
-
- SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
- SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
- SDOperand AlwaysInline = DAG.getConstant(1, MVT::i1);
-
- MemOpChains2.push_back(DAG.getMemcpy(Chain, FIN, PtrOff, SizeNode,
- AlignNode,AlwaysInline));
+ MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
+ Flags, DAG));
} else {
- SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff,
- NULL, 0);
// Store relative to framepointer.
- MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0));
- }
+ MemOpChains2.push_back(DAG.getStore(Chain, Source, FIN, NULL, 0));
+ }
}
}
if (!MemOpChains2.empty())
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
- &MemOpChains2[0], MemOpChains.size());
+ &MemOpChains2[0], MemOpChains2.size());
// Store the return address to the appropriate stack slot.
if (FPDiff)
if (IsTailCall) {
Ops.push_back(Chain);
- Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
- Ops.push_back(DAG.getConstant(0, getPointerTy()));
+ Ops.push_back(DAG.getIntPtrConstant(NumBytes));
+ Ops.push_back(DAG.getIntPtrConstant(0));
if (InFlag.Val)
Ops.push_back(InFlag);
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
// Add argument registers to the end of the list so that they are known live
// into the call.
- if (IsTailCall)
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
if (InFlag.Val)
Ops.push_back(InFlag);
// Returns a flag for retval copy to use.
Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, getPointerTy()),
- DAG.getConstant(NumBytesForCalleeToPush,
- getPointerTy()),
+ DAG.getIntPtrConstant(NumBytes),
+ DAG.getIntPtrConstant(NumBytesForCalleeToPush),
InFlag);
InFlag = Chain.getValue(1);
}
-//===----------------------------------------------------------------------===//
-// FastCall Calling Convention implementation
-//===----------------------------------------------------------------------===//
-//
-// The X86 'fastcall' calling convention passes up to two integer arguments in
-// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
-// and requires that the callee pop its arguments off the stack (allowing proper
-// tail calls), and has the same return value conventions as C calling convs.
-//
-// This calling convention always arranges for the callee pop value to be 8n+4
-// bytes, which is needed for tail recursion elimination and stack alignment
-// reasons.
-
-SDOperand
-X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
- const SDOperand &StackPtr,
- const CCValAssign &VA,
- SDOperand Chain,
- SDOperand Arg) {
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
- unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
- if (Flags & ISD::ParamFlags::ByVal) {
- unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
- ISD::ParamFlags::ByValAlignOffs);
-
- unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
- ISD::ParamFlags::ByValSizeOffs;
-
- SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
- SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
- SDOperand AlwaysInline = DAG.getConstant(1, MVT::i32);
-
- return DAG.getMemcpy(Chain, PtrOff, Arg, SizeNode, AlignNode,
- AlwaysInline);
- } else {
- return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
- }
-}
-
//===----------------------------------------------------------------------===//
// Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//
if (ThisElt.Val)
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
- DAG.getConstant(i/2, TLI.getPointerTy()));
+ DAG.getIntPtrConstant(i/2));
}
}
First = false;
}
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
- DAG.getConstant(i, TLI.getPointerTy()));
+ DAG.getIntPtrConstant(i));
}
}
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
- DAG.getConstant(0, getPointerTy()));
+ DAG.getIntPtrConstant(0));
} else if (MVT::getSizeInBits(VT) == 64) {
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Idx == 0)
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
- DAG.getConstant(0, getPointerTy()));
+ DAG.getIntPtrConstant(0));
}
return SDOperand();
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
- N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(),getPointerTy());
+ N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue());
return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
}
-
- N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
- unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
- MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
- SmallVector<SDOperand, 4> MaskVec;
- for (unsigned i = 0; i < 4; ++i)
- MaskVec.push_back(DAG.getConstant((i == Idx) ? i+4 : i, MaskEVT));
- return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
- &MaskVec[0], MaskVec.size()));
+ return SDOperand();
}
SDOperand
StackSlot, NULL, 0);
// These are really Legal; caller falls through into that case.
- if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32)
+ if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
return Result;
- if (SrcVT==MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64)
- return Result;
- if (SrcVT==MVT::i64 && Op.getValueType() != MVT::f80 &&
+ if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
Subtarget->is64Bit())
return Result;
// Build the FILD
SDVTList Tys;
- bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
- (X86ScalarSSEf64 && Op.getValueType() == MVT::f64);
+ bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
if (useSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
else
// These are really Legal.
if (Op.getValueType() == MVT::i32 &&
- X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32)
- return std::make_pair(SDOperand(), SDOperand());
- if (Op.getValueType() == MVT::i32 &&
- X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)
+ isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDOperand(), SDOperand());
if (Subtarget->is64Bit() &&
Op.getValueType() == MVT::i64 &&
SDOperand Chain = DAG.getEntryNode();
SDOperand Value = Op.getOperand(0);
- if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) ||
- (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) {
+ if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
}
// And if it is bigger, shrink it first.
if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
- Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1);
+ Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1));
SrcVT = VT;
SrcTy = MVT::getTypeForValueType(SrcVT);
}
DAG.getConstant(32, MVT::i32));
SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
- DAG.getConstant(0, getPointerTy()));
+ DAG.getIntPtrConstant(0));
}
// Clear first operand sign bit.
SDOperand Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
MVT::ValueType VT = Op.getValueType();
+
bool IllegalFPCMov = false;
- if (VT == MVT::f32 && !X86ScalarSSEf32)
- IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
- else if (VT == MVT::f64 && !X86ScalarSSEf64)
- IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
- else if (VT == MVT::f80)
+ if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) &&
+ !isScalarFPTypeInSSEReg(VT)) // FPStack?
IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
+
if ((Opc == X86ISD::CMP ||
Opc == X86ISD::COMI ||
Opc == X86ISD::UCOMI) && !IllegalFPCMov) {
SDOperand Flag;
MVT::ValueType IntPtr = getPointerTy();
- MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+ MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
Flag = Chain.getValue(1);
if (AVT > MVT::i8) {
if (I) {
unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
- Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
+ Count = DAG.getIntPtrConstant(I->getValue() / UBytes);
BytesLeft = I->getValue() % UBytes;
} else {
assert(AVT >= MVT::i32 &&
}
unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
- SDOperand Count = DAG.getConstant(Size / UBytes, getPointerTy());
+ SDOperand Count = DAG.getIntPtrConstant(Size / UBytes);
BytesLeft = Size % UBytes;
SDOperand InFlag(0, 0);
MemOps.push_back(Store);
// Store fp_offset
- FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(4, getPointerTy()));
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
Store = DAG.getStore(Op.getOperand(0),
DAG.getConstant(VarArgsFPOffset, MVT::i32),
FIN, SV->getValue(), SV->getOffset());
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
- FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(4, getPointerTy()));
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV->getValue(),
SV->getOffset());
MemOps.push_back(Store);
// Store ptr to reg_save_area.
- FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(8, getPointerTy()));
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV->getValue(),
SV->getOffset());
if (i == 2)
break;
SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr,
- DAG.getConstant(8, getPointerTy()));
+ DAG.getIntPtrConstant(8));
DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr,
- DAG.getConstant(8, getPointerTy()));
+ DAG.getIntPtrConstant(8));
}
return Chain;
}
SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
- DAG.getConstant(4, getPointerTy()));
+ DAG.getIntPtrConstant(4));
}
SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op,
if (Subtarget->is64Bit())
return SDOperand();
- return DAG.getConstant(8, getPointerTy());
+ return DAG.getIntPtrConstant(8);
}
SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG)
getPointerTy());
SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
- DAG.getConstant(-4UL, getPointerTy()));
+ DAG.getIntPtrConstant(-4UL));
StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
Chain = DAG.getCopyToReg(Chain, X86::ECX, StoreAddr);
SrcValueSDNode *TrmpSV = cast<SrcValueSDNode>(Op.getOperand(4));
+ const X86InstrInfo *TII =
+ ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
+
if (Subtarget->is64Bit()) {
- return SDOperand(); // not yet supported
+ SDOperand OutChains[6];
+
+ // Large code-model.
+
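+    // The stores below emit the following code sequence:
+    //   movabsq <fptr>, %r11     (bytes  0..9)
+    //   movabsq <nest>, %r10     (bytes 10..19)
+    //   jmpq    *%r11            (bytes 20..22)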
+ const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r);
+ const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri);
+
+ const unsigned char N86R10 =
+ ((X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10);
+ const unsigned char N86R11 =
+ ((X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11);
+
+    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX.W + REX.B prefix
+
+ // Load the pointer to the nested function into R11.
+ unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
+ SDOperand Addr = Trmp;
+ OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
+ TrmpSV->getValue(), TrmpSV->getOffset());
+
+ Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64));
+ OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpSV->getValue(),
+ TrmpSV->getOffset() + 2, false, 2);
+
+ // Load the 'nest' parameter value into R10.
+ // R10 is specified in X86CallingConv.td
+ OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
+ Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64));
+ OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
+ TrmpSV->getValue(), TrmpSV->getOffset() + 10);
+
+ Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64));
+ OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
+ TrmpSV->getOffset() + 12, false, 2);
+
+ // Jump to the nested function.
+ OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
+ Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64));
+ OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
+ TrmpSV->getValue(), TrmpSV->getOffset() + 20);
+
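+      // ModRM byte: mod=11 (register direct), reg=4 (the /4 opcode extension
+      // of an indirect jmp), r/m=r11 (top bit supplied by REX.B).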
+ unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
+ Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64));
+ OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr,
+ TrmpSV->getValue(), TrmpSV->getOffset() + 22);
+
+ SDOperand Ops[] =
+ { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) };
+ return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
} else {
Function *Func = (Function *)
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
break;
}
- const X86InstrInfo *TII =
- ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
-
SDOperand OutChains[4];
SDOperand Addr, Disp;
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
- unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
- unsigned char N86Reg = ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
+ const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
+ const unsigned char N86Reg =
+ ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Trmp, TrmpSV->getValue(), TrmpSV->getOffset());
OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
TrmpSV->getOffset() + 1, false, 1);
- unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
+ const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
                                TrmpSV->getValue(), TrmpSV->getOffset() + 5);