#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
+#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSE = Subtarget->hasSSE2();
+ X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
// Set up the TargetLowering object.
setSetCCResultContents(ZeroOrOneSetCCResult);
setSchedulingPreference(SchedulingForRegPressure);
setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
- setStackPointerRegisterToSaveRestore(X86::ESP);
+ setStackPointerRegisterToSaveRestore(X86StackPtr);
if (!Subtarget->isTargetDarwin())
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
addRegisterClass(MVT::i8, X86::GR8RegisterClass);
addRegisterClass(MVT::i16, X86::GR16RegisterClass);
addRegisterClass(MVT::i32, X86::GR32RegisterClass);
+ if (Subtarget->is64Bit())
+ addRegisterClass(MVT::i64, X86::GR64RegisterClass);
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
- if (X86ScalarSSE)
- // No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
- setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
- else
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
+ } else {
+ if (X86ScalarSSE)
+ // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
+ setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
+ else
+ setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
+ }
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// this operation.
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
- // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
- // isn't legal.
- setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
- setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
+ if (!Subtarget->is64Bit()) {
+ // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
+ setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
+ setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
+ }
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
- if (X86ScalarSSE && !Subtarget->hasSSE3())
- // Expand FP_TO_UINT into a select.
- // FIXME: We would like to use a Custom expander here eventually to do
- // the optimal thing for SSE vs. the default expansion in the legalizer.
- setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
- else
- // With SSE3 we can use fisttpll to convert to a signed i64.
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
+ } else {
+ if (X86ScalarSSE && !Subtarget->hasSSE3())
+ // Expand FP_TO_UINT into a select.
+ // FIXME: We would like to use a Custom expander here eventually to do
+ // the optimal thing for SSE vs. the default expansion in the legalizer.
+ setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
+ else
+ // With SSE3 we can use fisttpll to convert to a signed i64.
+ setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
+ }
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
setOperationAction(ISD::BR_CC , MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
+ if (Subtarget->is64Bit())
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
+
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
+ setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
+ setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
+ }
+
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
setOperationAction(ISD::SELECT , MVT::i8 , Promote);
-
// X86 wants to expand cmov itself.
setOperationAction(ISD::SELECT , MVT::i16 , Custom);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::f32 , Custom);
setOperationAction(ISD::SETCC , MVT::f64 , Custom);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::SELECT , MVT::i64 , Custom);
+ setOperationAction(ISD::SETCC , MVT::i64 , Custom);
+ }
// X86 ret instruction may pop stack.
setOperationAction(ISD::RET , MVT::Other, Custom);
// Darwin ABI issue.
setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
+ setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
+ setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
+ setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
+ }
// 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+ if (Subtarget->is64Bit())
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+ // We have target-specific dag combine patterns for the following nodes:
+ setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+
computeRegisterProperties();
// FIXME: These should be based on subtarget info. Plus, the values should
/// HowToPassCCCArgument - Returns how a formal argument of the specified type
/// should be passed. If it is through stack, returns the size of the stack
-/// frame; if it is through XMM register, returns the number of XMM registers
+/// slot; if it is through XMM register, returns the number of XMM registers
/// that are needed.
static void
HowToPassCCCArgument(MVT::ValueType ObjectVT, unsigned NumXMMRegs,
unsigned &ObjSize, unsigned &ObjXMMRegs) {
- NumXMMRegs = 0;
+ ObjXMMRegs = 0;
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
- case MVT::i1:
case MVT::i8: ObjSize = 1; break;
case MVT::i16: ObjSize = 2; break;
case MVT::i32: ObjSize = 4; break;
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- if (NumXMMRegs < 3)
+ if (NumXMMRegs < 4)
ObjXMMRegs = 1;
else
ObjSize = 16;
//
// [ESP] -- return address
// [ESP + 4] -- first argument (leftmost lexically)
- // [ESP + 8] -- second argument, if first argument is four bytes in size
+ // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
// ...
//
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
- unsigned XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2 };
+ static const unsigned XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
+ };
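+ // Up to four vector arguments are passed in XMM0-XMM3; any further
+ // vector arguments are passed on the stack.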
for (unsigned i = 0; i < NumArgs; ++i) {
MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
unsigned ArgIncrement = 4;
unsigned ObjSize = 0;
unsigned ObjXMMRegs = 0;
HowToPassCCCArgument(ObjectVT, NumXMMRegs, ObjSize, ObjXMMRegs);
- if (ObjSize >= 8)
+ if (ObjSize > 4)
ArgIncrement = ObjSize;
SDOperand ArgValue;
if (ObjXMMRegs) {
// Passed in a XMM register.
unsigned Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
- X86::VR128RegisterClass);
+ X86::VR128RegisterClass);
ArgValue= DAG.getCopyFromReg(Root, Reg, ObjectVT);
ArgValues.push_back(ArgValue);
NumXMMRegs += ObjXMMRegs;
} else {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ if (ObjSize == 16)
+ ArgOffset = ((ArgOffset + 15) / 16) * 16;
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
if (isVarArg)
VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
- ReturnAddrIndex = 0; // No return address slot generated yet.
- BytesToPopOnReturn = 0; // Callee pops nothing.
+ RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
+ ReturnAddrIndex = 0; // No return address slot generated yet.
+ BytesToPopOnReturn = 0; // Callee pops nothing.
BytesCallerReserves = ArgOffset;
// If this is a struct return on Darwin/X86, the callee pops the hidden struct
// Return the new list of results.
std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
Op.Val->value_end());
- return DAG.getNode(ISD::MERGE_VALUES, RetVTs, ArgValues);
+ return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
}
// Keep track of the number of XMM regs passed so far.
unsigned NumXMMRegs = 0;
static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
};
// Count how many bytes are to be pushed on the stack.
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- if (NumXMMRegs < 3)
+ if (NumXMMRegs < 4)
++NumXMMRegs;
- else
+ else {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ NumBytes = ((NumBytes + 15) / 16) * 16;
NumBytes += 16;
+ }
break;
}
}
NumXMMRegs = 0;
std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
std::vector<SDOperand> MemOpChains;
- SDOperand StackPtr = DAG.getRegister(X86::ESP, getPointerTy());
+ SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
for (unsigned i = 0; i != NumOps; ++i) {
SDOperand Arg = Op.getOperand(5+2*i);
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- if (NumXMMRegs < 3) {
+ if (NumXMMRegs < 4) {
RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
NumXMMRegs++;
} else {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ ArgOffset = ((ArgOffset + 15) / 16) * 16;
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOpChains);
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into registers.
std::vector<SDOperand> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are known live
+ // into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+
if (InFlag.Val)
Ops.push_back(InFlag);
Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
- NodeTys, Ops);
+ NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
Ops.push_back(InFlag);
- Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
+ Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
if (RetVT != MVT::Other)
InFlag = Chain.getValue(1);
std::vector<SDOperand> Ops;
Ops.push_back(Chain);
Ops.push_back(InFlag);
- SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
+ SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
+ &Ops[0], Ops.size());
Chain = RetVal.getValue(1);
InFlag = RetVal.getValue(2);
if (X86ScalarSSE) {
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(RetVT));
Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
+ Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
RetVal = DAG.getLoad(RetVT, Chain, StackSlot,
DAG.getSrcValue(NULL));
Chain = RetVal.getValue(1);
// Otherwise, merge everything together with a MERGE_VALUES node.
NodeTys.push_back(MVT::Other);
ResultVals.push_back(Chain);
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, ResultVals);
+ SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
+ &ResultVals[0], ResultVals.size());
return Res.getValue(Op.ResNo);
}
+
//===----------------------------------------------------------------------===//
-// Fast Calling Convention implementation
+// X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//
-//
-// The X86 'fast' calling convention passes up to two integer arguments in
-// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
-// and requires that the callee pop its arguments off the stack (allowing proper
-// tail calls), and has the same return value conventions as C calling convs.
-//
-// This calling convention always arranges for the callee pop value to be 8n+4
-// bytes, which is needed for tail recursion elimination and stack alignment
-// reasons.
-//
-// Note that this can be enhanced in the future to pass fp vals in registers
-// (when we have a global fp allocator) and do other tricks.
-//
-
-// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
-// to pass in registers. 0 is none, 1 is is "use EAX", 2 is "use EAX and
-// EDX". Anything more is illegal.
-//
-// FIXME: The linscan register allocator currently has problem with
-// coalescing. At the time of this writing, whenever it decides to coalesce
-// a physreg with a virtreg, this increases the size of the physreg's live
-// range, and the live range cannot ever be reduced. This causes problems if
-// too many physregs are coaleced with virtregs, which can cause the register
-// allocator to wedge itself.
-//
-// This code triggers this problem more often if we pass args in registers,
-// so disable it until this is fixed.
-//
-// NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
-// about code being dead.
-//
-static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
-
-/// HowToPassFastCCArgument - Returns how an formal argument of the specified
+/// HowToPassX86_64CCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is through stack, returns the size of the stack
-/// frame; if it is through integer or XMM register, returns the number of
+/// slot; if it is through integer or XMM register, returns the number of
+/// integer or XMM registers that are needed.
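+/// Integer arguments use up to six GPRs and FP/vector arguments use up to
+/// eight XMM registers, as specified by the AMD64 ABI.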
static void
-HowToPassFastCCArgument(MVT::ValueType ObjectVT,
- unsigned NumIntRegs, unsigned NumXMMRegs,
- unsigned &ObjSize, unsigned &ObjIntRegs,
- unsigned &ObjXMMRegs) {
+HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT,
+ unsigned NumIntRegs, unsigned NumXMMRegs,
+ unsigned &ObjSize, unsigned &ObjIntRegs,
+ unsigned &ObjXMMRegs) {
ObjSize = 0;
- NumIntRegs = 0;
- NumXMMRegs = 0;
+ ObjIntRegs = 0;
+ ObjXMMRegs = 0;
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
- case MVT::i1:
case MVT::i8:
- if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
- ObjIntRegs = 1;
- else
- ObjSize = 1;
- break;
case MVT::i16:
- if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
- ObjIntRegs = 1;
- else
- ObjSize = 2;
- break;
case MVT::i32:
- if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
- ObjIntRegs = 1;
- else
- ObjSize = 4;
- break;
case MVT::i64:
- if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
- ObjIntRegs = 2;
- } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
+ if (NumIntRegs < 6)
ObjIntRegs = 1;
- ObjSize = 4;
- } else
- ObjSize = 8;
- case MVT::f32:
- ObjSize = 4;
+ else {
+ switch (ObjectVT) {
+ default: break;
+ case MVT::i8: ObjSize = 1; break;
+ case MVT::i16: ObjSize = 2; break;
+ case MVT::i32: ObjSize = 4; break;
+ case MVT::i64: ObjSize = 8; break;
+ }
+ }
break;
+ case MVT::f32:
case MVT::f64:
- ObjSize = 8;
- break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- if (NumXMMRegs < 3)
+ if (NumXMMRegs < 8)
ObjXMMRegs = 1;
- else
- ObjSize = 16;
+ else {
+ switch (ObjectVT) {
+ default: break;
+ case MVT::f32: ObjSize = 4; break;
+ case MVT::f64: ObjSize = 8; break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64: ObjSize = 16; break;
+ }
break;
}
+ }
}
SDOperand
-X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
- unsigned NumArgs = Op.Val->getNumValues()-1;
+X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
+ unsigned NumArgs = Op.Val->getNumValues() - 1;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
std::vector<SDOperand> ArgValues;
- // Add DAG nodes to load the arguments... On entry to a function the stack
- // frame looks like this:
+ // Add DAG nodes to load the arguments... On entry to a function on the X86,
+ // the stack frame looks like this:
//
- // [ESP] -- return address
- // [ESP + 4] -- first nonreg argument (leftmost lexically)
- // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
+ // [RSP] -- return address
+ // [RSP + 8] -- first nonreg argument (leftmost lexically)
+ // [RSP +16] -- second nonreg argument, if 1st argument is <= 8 bytes in size
// ...
+ //
unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
-
- // Keep track of the number of integer regs passed so far. This can be either
- // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
- // used).
- unsigned NumIntRegs = 0;
+ unsigned NumIntRegs = 0; // Int regs used for parameter passing.
unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
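+ // AMD64 ABI integer argument registers, in argument order, at each width.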
+ static const unsigned GPR8ArgRegs[] = {
+ X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
+ };
+ static const unsigned GPR16ArgRegs[] = {
+ X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
+ };
+ static const unsigned GPR32ArgRegs[] = {
+ X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
+ };
+ static const unsigned GPR64ArgRegs[] = {
+ X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
+ };
static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
-
+
for (unsigned i = 0; i < NumArgs; ++i) {
MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
- unsigned ArgIncrement = 4;
+ unsigned ArgIncrement = 8;
unsigned ObjSize = 0;
unsigned ObjIntRegs = 0;
unsigned ObjXMMRegs = 0;
- HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
- ObjSize, ObjIntRegs, ObjXMMRegs);
- if (ObjSize >= 8)
+ // FIXME: __int128 and long double support?
+ HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
+ ObjSize, ObjIntRegs, ObjXMMRegs);
+ if (ObjSize > 8)
ArgIncrement = ObjSize;
- unsigned Reg;
+ unsigned Reg = 0;
SDOperand ArgValue;
if (ObjIntRegs || ObjXMMRegs) {
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
- case MVT::i1:
case MVT::i8:
- Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
- X86::GR8RegisterClass);
- ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8);
- break;
case MVT::i16:
- Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
- X86::GR16RegisterClass);
- ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16);
- break;
case MVT::i32:
- Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
- X86::GR32RegisterClass);
- ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
- break;
- case MVT::i64:
- Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
- X86::GR32RegisterClass);
- ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
- if (ObjIntRegs == 2) {
- Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
- SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32);
- ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
+ case MVT::i64: {
+ TargetRegisterClass *RC = NULL;
+ switch (ObjectVT) {
+ default: break;
+ case MVT::i8:
+ RC = X86::GR8RegisterClass;
+ Reg = GPR8ArgRegs[NumIntRegs];
+ break;
+ case MVT::i16:
+ RC = X86::GR16RegisterClass;
+ Reg = GPR16ArgRegs[NumIntRegs];
+ break;
+ case MVT::i32:
+ RC = X86::GR32RegisterClass;
+ Reg = GPR32ArgRegs[NumIntRegs];
+ break;
+ case MVT::i64:
+ RC = X86::GR64RegisterClass;
+ Reg = GPR64ArgRegs[NumIntRegs];
+ break;
}
+ Reg = AddLiveIn(MF, Reg, RC);
+ ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
break;
+ }
+ case MVT::f32:
+ case MVT::f64:
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
- case MVT::v2f64:
- Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
+ case MVT::v2f64: {
+ TargetRegisterClass *RC= (ObjectVT == MVT::f32) ?
+ X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ?
+ X86::FR64RegisterClass : X86::VR128RegisterClass);
+ Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC);
ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
break;
}
+ }
NumIntRegs += ObjIntRegs;
NumXMMRegs += ObjXMMRegs;
- }
-
- if (ObjSize) {
+ } else if (ObjSize) {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ if (ObjSize == 16)
+ ArgOffset = ((ArgOffset + 15) / 16) * 16;
// Create the SelectionDAG nodes corresponding to a load from this
// parameter.
int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- if (ObjectVT == MVT::i64 && ObjIntRegs) {
- SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
- DAG.getSrcValue(NULL));
- ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
- } else
- ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
- DAG.getSrcValue(NULL));
+ ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
+ DAG.getSrcValue(NULL));
ArgOffset += ArgIncrement; // Move on to the next argument.
}
ArgValues.push_back(ArgValue);
}
- ArgValues.push_back(Root);
+ // If the function takes variable number of arguments, make a frame index for
+ // the start of the first vararg value... for expansion of llvm.va_start.
+ if (isVarArg) {
+ // For X86-64, if there are vararg parameters that are passed via
+ // registers, then we must store them to their spots on the stack so they
+ // may be loaded by dereferencing the result of va_next.
+ VarArgsGPOffset = NumIntRegs * 8;
+ VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
+ VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
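+ // The register save area holds all six integer argument registers
+ // (8 bytes each) followed by all eight XMM argument registers (16 bytes
+ // each), 16-byte aligned.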
+ RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
+
+ // Store the integer parameter registers.
+ std::vector<SDOperand> MemOps;
+ SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
+ SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
+ DAG.getConstant(VarArgsGPOffset, getPointerTy()));
+ for (; NumIntRegs != 6; ++NumIntRegs) {
+ unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
+ X86::GR64RegisterClass);
+ SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
+ SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
+ Val, FIN, DAG.getSrcValue(NULL));
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(8, getPointerTy()));
+ }
- // Make sure the instruction takes 8n+4 bytes to make sure the start of the
- // arguments and the arguments after the retaddr has been pushed are aligned.
- if ((ArgOffset & 7) == 0)
- ArgOffset += 4;
+ // Now store the XMM (fp + vector) parameter registers.
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
+ DAG.getConstant(VarArgsFPOffset, getPointerTy()));
+ for (; NumXMMRegs != 8; ++NumXMMRegs) {
+ unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
+ X86::VR128RegisterClass);
+ SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
+ SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
+ Val, FIN, DAG.getSrcValue(NULL));
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(16, getPointerTy()));
+ }
+ if (!MemOps.empty())
+ Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOps[0], MemOps.size());
+ }
- VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
- ReturnAddrIndex = 0; // No return address slot generated yet.
- BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
- BytesCallerReserves = 0;
+ ArgValues.push_back(Root);
- // Finally, inform the code generator which regs we return values in.
- switch (getValueType(MF.getFunction()->getReturnType())) {
- default: assert(0 && "Unknown type!");
- case MVT::isVoid: break;
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- MF.addLiveOut(X86::EAX);
- break;
- case MVT::i64:
- MF.addLiveOut(X86::EAX);
- MF.addLiveOut(X86::EDX);
- break;
- case MVT::f32:
- case MVT::f64:
- MF.addLiveOut(X86::ST0);
- break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- MF.addLiveOut(X86::XMM0);
- break;
- }
+ ReturnAddrIndex = 0; // No return address slot generated yet.
+ BytesToPopOnReturn = 0; // Callee pops nothing.
+ BytesCallerReserves = ArgOffset;
// Return the new list of results.
std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
Op.Val->value_end());
- return DAG.getNode(ISD::MERGE_VALUES, RetVTs, ArgValues);
+ return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
}
- SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG) {
+SDOperand
+X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG) {
SDOperand Chain = Op.getOperand(0);
unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
// Count how many bytes are to be pushed on the stack.
unsigned NumBytes = 0;
-
- // Keep track of the number of integer regs passed so far. This can be either
- // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
- // used).
- unsigned NumIntRegs = 0;
+ unsigned NumIntRegs = 0; // Int regs used for parameter passing.
unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
- static const unsigned GPRArgRegs[][2] = {
- { X86::AL, X86::DL },
- { X86::AX, X86::DX },
- { X86::EAX, X86::EDX }
+ static const unsigned GPR8ArgRegs[] = {
+ X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
+ };
+ static const unsigned GPR16ArgRegs[] = {
+ X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
+ };
+ static const unsigned GPR32ArgRegs[] = {
+ X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
+ };
+ static const unsigned GPR64ArgRegs[] = {
+ X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
for (unsigned i = 0; i != NumOps; ++i) {
SDOperand Arg = Op.getOperand(5+2*i);
+ MVT::ValueType ArgVT = Arg.getValueType();
- switch (Arg.getValueType()) {
+ switch (ArgVT) {
default: assert(0 && "Unknown value type!");
case MVT::i8:
case MVT::i16:
case MVT::i32:
- if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
+ case MVT::i64:
+ if (NumIntRegs < 6)
++NumIntRegs;
- break;
- }
- // Fall through
- case MVT::f32:
- NumBytes += 4;
+ else
+ NumBytes += 8;
break;
+ case MVT::f32:
case MVT::f64:
- NumBytes += 8;
- break;
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- if (NumXMMRegs < 3)
+ if (NumXMMRegs < 8)
NumXMMRegs++;
- else
+ else if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
+ NumBytes += 8;
+ else {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ NumBytes = ((NumBytes + 15) / 16) * 16;
NumBytes += 16;
+ }
break;
}
}
- // Make sure the instruction takes 8n+4 bytes to make sure the start of the
- // arguments and the arguments after the retaddr has been pushed are aligned.
- if ((NumBytes & 7) == 0)
- NumBytes += 4;
-
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
// Arguments go on the stack in reverse order, as specified by the ABI.
unsigned ArgOffset = 0;
NumIntRegs = 0;
+ NumXMMRegs = 0;
std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
std::vector<SDOperand> MemOpChains;
- SDOperand StackPtr = DAG.getRegister(X86::ESP, getPointerTy());
+ SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
for (unsigned i = 0; i != NumOps; ++i) {
SDOperand Arg = Op.getOperand(5+2*i);
+ MVT::ValueType ArgVT = Arg.getValueType();
- switch (Arg.getValueType()) {
+ switch (ArgVT) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i8:
case MVT::i16:
case MVT::i32:
- if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
- RegsToPass.push_back(
- std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs],
- Arg));
+ case MVT::i64:
+ if (NumIntRegs < 6) {
+ unsigned Reg = 0;
+ switch (ArgVT) {
+ default: break;
+ case MVT::i8: Reg = GPR8ArgRegs[NumIntRegs]; break;
+ case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break;
+ case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break;
+ case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break;
+ }
+ RegsToPass.push_back(std::make_pair(Reg, Arg));
++NumIntRegs;
- break;
+ } else {
+ SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
+ Arg, PtrOff, DAG.getSrcValue(NULL)));
+ ArgOffset += 8;
}
- // Fall through
- case MVT::f32: {
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Arg, PtrOff, DAG.getSrcValue(NULL)));
- ArgOffset += 4;
- break;
- }
- case MVT::f64: {
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
- Arg, PtrOff, DAG.getSrcValue(NULL)));
- ArgOffset += 8;
break;
- }
+ case MVT::f32:
+ case MVT::f64:
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
- if (NumXMMRegs < 3) {
+ if (NumXMMRegs < 8) {
RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
NumXMMRegs++;
} else {
+ if (ArgVT != MVT::f32 && ArgVT != MVT::f64) {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ ArgOffset = ((ArgOffset + 15) / 16) * 16;
+ }
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
Arg, PtrOff, DAG.getSrcValue(NULL)));
- ArgOffset += 16;
+ if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
+ ArgOffset += 8;
+ else
+ ArgOffset += 16;
}
}
}
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOpChains);
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into registers.
InFlag = Chain.getValue(1);
}
+ if (isVarArg) {
+ // From AMD64 ABI document:
+ // For calls that may call functions that use varargs or stdargs
+ // (prototype-less calls or calls to functions containing ellipsis (...) in
+ // the declaration) %al is used as a hidden argument to specify the number
+ // of SSE registers used. The contents of %al do not need to match exactly
+ // the number of registers, but must be an upper bound on the number of SSE
+ // registers used and is in the range 0 - 8 inclusive.
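+ // For example, a varargs call that passes a single double in XMM0 gets
+ // %al = 1 here; the ABI would also permit any larger bound up to 8.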
+ Chain = DAG.getCopyToReg(Chain, X86::AL,
+ DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
std::vector<SDOperand> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are known live
+ // into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+
if (InFlag.Val)
Ops.push_back(InFlag);
// FIXME: Do not generate X86ISD::TAILCALL for now.
Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
- NodeTys, Ops);
+ NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
NodeTys.clear();
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
- Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
+ Ops.push_back(DAG.getConstant(0, getPointerTy()));
Ops.push_back(InFlag);
- Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
+ Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
if (RetVT != MVT::Other)
InFlag = Chain.getValue(1);
NodeTys.push_back(MVT::i16);
break;
case MVT::i32:
- if (Op.Val->getValueType(1) == MVT::i32) {
- Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
+ Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ NodeTys.push_back(MVT::i32);
+ break;
+ case MVT::i64:
+ if (Op.Val->getValueType(1) == MVT::i64) {
+ // FIXME: __int128 support?
+ Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0));
- Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
+ Chain = DAG.getCopyFromReg(Chain, X86::RDX, MVT::i64,
Chain.getValue(2)).getValue(1);
ResultVals.push_back(Chain.getValue(0));
- NodeTys.push_back(MVT::i32);
+ NodeTys.push_back(MVT::i64);
} else {
- Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
+ Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0));
}
- NodeTys.push_back(MVT::i32);
+ NodeTys.push_back(MVT::i64);
break;
+ case MVT::f32:
+ case MVT::f64:
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
+ // FIXME: long double support?
Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0));
NodeTys.push_back(RetVT);
break;
- case MVT::f32:
- case MVT::f64: {
- std::vector<MVT::ValueType> Tys;
- Tys.push_back(MVT::f64);
- Tys.push_back(MVT::Other);
- Tys.push_back(MVT::Flag);
- std::vector<SDOperand> Ops;
- Ops.push_back(Chain);
- Ops.push_back(InFlag);
- SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
- Chain = RetVal.getValue(1);
- InFlag = RetVal.getValue(2);
- if (X86ScalarSSE) {
- // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
- // shouldn't be necessary except that RFP cannot be live across
- // multiple blocks. When stackifier is fixed, they can be uncoupled.
- MachineFunction &MF = DAG.getMachineFunction();
- int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
- SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
- Tys.clear();
- Tys.push_back(MVT::Other);
- Ops.clear();
- Ops.push_back(Chain);
- Ops.push_back(RetVal);
- Ops.push_back(StackSlot);
- Ops.push_back(DAG.getValueType(RetVT));
- Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
- RetVal = DAG.getLoad(RetVT, Chain, StackSlot,
- DAG.getSrcValue(NULL));
- Chain = RetVal.getValue(1);
- }
-
- if (RetVT == MVT::f32 && !X86ScalarSSE)
- // FIXME: we would really like to remember that this FP_ROUND
- // operation is okay to eliminate if we allow excess FP precision.
- RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
- ResultVals.push_back(RetVal);
- NodeTys.push_back(RetVT);
- break;
- }
}
-
// If the function returns void, just return the chain.
if (ResultVals.empty())
return Chain;
// Otherwise, merge everything together with a MERGE_VALUES node.
NodeTys.push_back(MVT::Other);
ResultVals.push_back(Chain);
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, ResultVals);
+ SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
+ &ResultVals[0], ResultVals.size());
return Res.getValue(Op.ResNo);
}
-SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
- if (ReturnAddrIndex == 0) {
- // Set up a frame object for the return address.
- MachineFunction &MF = DAG.getMachineFunction();
- ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
- }
-
- return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
-}
-
+//===----------------------------------------------------------------------===//
+// Fast Calling Convention implementation
+//===----------------------------------------------------------------------===//
+//
+// The X86 'fast' calling convention passes up to two integer arguments in
+// registers (an appropriate portion of EAX/EDX) and passes arguments in C
+// order. It requires that the callee pop its arguments off the stack
+// (allowing proper tail calls), and it has the same return value
+// conventions as the C calling convention.
+//
+// This calling convention always arranges for the callee pop value to be 8n+4
+// bytes, which is needed for tail recursion elimination and stack alignment
+// reasons.
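+// For example, 4 bytes of arguments plus the 4-byte return address come to
+// 8 bytes, keeping the stack 8-byte aligned across the call.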
+//
+// Note that this can be enhanced in the future to pass fp vals in registers
+// (when we have a global fp allocator) and do other tricks.
+//
+
+// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
+// to pass in registers.  0 is none, 1 is "use EAX", 2 is "use EAX and EDX".
+// Anything more is illegal.
+//
+// FIXME: The linscan register allocator has problems coalescing physregs with
+// virtregs, and passing args in registers triggers that more often, so keep
+// this at 0 until it is fixed.
+#define FASTCC_NUM_INT_ARGS_INREGS 0
+/// HowToPassFastCCArgument - Returns how a formal argument of the specified
+/// type should be passed. If it is through stack, returns the size of the stack
+/// slot; if it is through integer or XMM register, returns the number of
+/// integer or XMM registers that are needed.
+static void
+HowToPassFastCCArgument(MVT::ValueType ObjectVT,
+ unsigned NumIntRegs, unsigned NumXMMRegs,
+ unsigned &ObjSize, unsigned &ObjIntRegs,
+ unsigned &ObjXMMRegs) {
+ ObjSize = 0;
+ ObjIntRegs = 0;
+ ObjXMMRegs = 0;
-std::pair<SDOperand, SDOperand> X86TargetLowering::
-LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
- SelectionDAG &DAG) {
- SDOperand Result;
- if (Depth) // Depths > 0 not supported yet!
- Result = DAG.getConstant(0, getPointerTy());
- else {
- SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
- if (!isFrameAddress)
- // Just load the return address
- Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
- DAG.getSrcValue(NULL));
+ switch (ObjectVT) {
+ default: assert(0 && "Unhandled argument type!");
+ case MVT::i8:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+ if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
+ ObjIntRegs = 1;
+ else
+#endif
+ ObjSize = 1;
+ break;
+ case MVT::i16:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+ if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
+ ObjIntRegs = 1;
+ else
+#endif
+ ObjSize = 2;
+ break;
+ case MVT::i32:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+ if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
+ ObjIntRegs = 1;
+ else
+#endif
+ ObjSize = 4;
+ break;
+ case MVT::i64:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+ if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
+ ObjIntRegs = 2;
+ } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
+ ObjIntRegs = 1;
+ ObjSize = 4;
+ } else
+#endif
+ ObjSize = 8;
+ break;
+ case MVT::f32:
+ ObjSize = 4;
+ break;
+ case MVT::f64:
+ ObjSize = 8;
+ break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ if (NumXMMRegs < 4)
+ ObjXMMRegs = 1;
else
- Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
- DAG.getConstant(4, MVT::i32));
+ ObjSize = 16;
+ break;
}
- return std::make_pair(Result, Chain);
}
-/// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
-/// which corresponds to the condition code.
-static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
- switch (X86CC) {
- default: assert(0 && "Unknown X86 conditional code!");
- case X86ISD::COND_A: return X86::JA;
- case X86ISD::COND_AE: return X86::JAE;
- case X86ISD::COND_B: return X86::JB;
- case X86ISD::COND_BE: return X86::JBE;
- case X86ISD::COND_E: return X86::JE;
- case X86ISD::COND_G: return X86::JG;
- case X86ISD::COND_GE: return X86::JGE;
- case X86ISD::COND_L: return X86::JL;
- case X86ISD::COND_LE: return X86::JLE;
- case X86ISD::COND_NE: return X86::JNE;
- case X86ISD::COND_NO: return X86::JNO;
- case X86ISD::COND_NP: return X86::JNP;
- case X86ISD::COND_NS: return X86::JNS;
- case X86ISD::COND_O: return X86::JO;
- case X86ISD::COND_P: return X86::JP;
- case X86ISD::COND_S: return X86::JS;
- }
-}
+SDOperand
+X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
+ unsigned NumArgs = Op.Val->getNumValues()-1;
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ SDOperand Root = Op.getOperand(0);
+ std::vector<SDOperand> ArgValues;
-/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86
-/// specific condition code. It returns a false if it cannot do a direct
-/// translation. X86CC is the translated CondCode. Flip is set to true if the
-/// the order of comparison operands should be flipped.
-static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
- unsigned &X86CC, bool &Flip) {
- Flip = false;
- X86CC = X86ISD::COND_INVALID;
- if (!isFP) {
- switch (SetCCOpcode) {
- default: break;
- case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
- case ISD::SETGT: X86CC = X86ISD::COND_G; break;
- case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
- case ISD::SETLT: X86CC = X86ISD::COND_L; break;
- case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
- case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
- case ISD::SETULT: X86CC = X86ISD::COND_B; break;
- case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
- case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
- case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
- }
- } else {
- // On a floating point condition, the flags are set as follows:
- // ZF PF CF op
- // 0 | 0 | 0 | X > Y
- // 0 | 0 | 1 | X < Y
- // 1 | 0 | 0 | X == Y
- // 1 | 1 | 1 | unordered
- switch (SetCCOpcode) {
- default: break;
- case ISD::SETUEQ:
- case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
- case ISD::SETOLT: Flip = true; // Fallthrough
- case ISD::SETOGT:
- case ISD::SETGT: X86CC = X86ISD::COND_A; break;
- case ISD::SETOLE: Flip = true; // Fallthrough
- case ISD::SETOGE:
- case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
- case ISD::SETUGT: Flip = true; // Fallthrough
- case ISD::SETULT:
- case ISD::SETLT: X86CC = X86ISD::COND_B; break;
- case ISD::SETUGE: Flip = true; // Fallthrough
- case ISD::SETULE:
- case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
- case ISD::SETONE:
- case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
- case ISD::SETUO: X86CC = X86ISD::COND_P; break;
- case ISD::SETO: X86CC = X86ISD::COND_NP; break;
- }
- }
+ // Add DAG nodes to load the arguments... On entry to a function the stack
+ // frame looks like this:
+ //
+ // [ESP] -- return address
+ // [ESP + 4] -- first nonreg argument (leftmost lexically)
+ // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
+ // ...
+ unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
- return X86CC != X86ISD::COND_INVALID;
-}
+ // Keep track of the number of integer regs passed so far. This can be either
+ // 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
+ // used).
+ unsigned NumIntRegs = 0;
+ unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
-static bool translateX86CC(SDOperand CC, bool isFP, unsigned &X86CC,
- bool &Flip) {
- return translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, Flip);
+ static const unsigned XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
+ };
+
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
+ unsigned ArgIncrement = 4;
+ unsigned ObjSize = 0;
+ unsigned ObjIntRegs = 0;
+ unsigned ObjXMMRegs = 0;
+
+ HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
+ ObjSize, ObjIntRegs, ObjXMMRegs);
+ if (ObjSize > 4)
+ ArgIncrement = ObjSize;
+
+ unsigned Reg = 0;
+ SDOperand ArgValue;
+ if (ObjIntRegs || ObjXMMRegs) {
+ switch (ObjectVT) {
+ default: assert(0 && "Unhandled argument type!");
+ case MVT::i8:
+ Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
+ X86::GR8RegisterClass);
+ ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i8);
+ break;
+ case MVT::i16:
+ Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
+ X86::GR16RegisterClass);
+ ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i16);
+ break;
+ case MVT::i32:
+ Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
+ X86::GR32RegisterClass);
+ ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
+ break;
+ case MVT::i64:
+ Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
+ X86::GR32RegisterClass);
+ ArgValue = DAG.getCopyFromReg(Root, Reg, MVT::i32);
+ if (ObjIntRegs == 2) {
+ Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
+ SDOperand ArgValue2 = DAG.getCopyFromReg(Root, Reg, MVT::i32);
+ ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
+ }
+ break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
+ ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
+ break;
+ }
+ NumIntRegs += ObjIntRegs;
+ NumXMMRegs += ObjXMMRegs;
+ }
+
+ if (ObjSize) {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ if (ObjSize == 16)
+ ArgOffset = ((ArgOffset + 15) / 16) * 16;
+ // Create the SelectionDAG nodes corresponding to a load from this
+ // parameter.
+ int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
+ SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
+ if (ObjectVT == MVT::i64 && ObjIntRegs) {
+ SDOperand ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
+ DAG.getSrcValue(NULL));
+ ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
+ } else
+ ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN,
+ DAG.getSrcValue(NULL));
+ ArgOffset += ArgIncrement; // Move on to the next argument.
+ }
+
+ ArgValues.push_back(ArgValue);
+ }
+
+ ArgValues.push_back(Root);
+
+ // Make sure the instruction takes 8n+4 bytes, so that the stack stays
+ // 8-byte aligned once the 4-byte retaddr has been pushed.
+ if ((ArgOffset & 7) == 0)
+ ArgOffset += 4;
+
+ VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
+ RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
+ ReturnAddrIndex = 0; // No return address slot generated yet.
+ BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
+ BytesCallerReserves = 0;
+
+ // Finally, inform the code generator which regs we return values in.
+ switch (getValueType(MF.getFunction()->getReturnType())) {
+ default: assert(0 && "Unknown type!");
+ case MVT::isVoid: break;
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ MF.addLiveOut(X86::EAX);
+ break;
+ case MVT::i64:
+ MF.addLiveOut(X86::EAX);
+ MF.addLiveOut(X86::EDX);
+ break;
+ case MVT::f32:
+ case MVT::f64:
+ MF.addLiveOut(X86::ST0);
+ break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ MF.addLiveOut(X86::XMM0);
+ break;
+ }
+
+ // Return the new list of results.
+ std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
+ Op.Val->value_end());
+ return DAG.getNode(ISD::MERGE_VALUES, RetVTs, &ArgValues[0],ArgValues.size());
+}
+
+SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG){
+ SDOperand Chain = Op.getOperand(0);
+ unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+ bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
+ SDOperand Callee = Op.getOperand(4);
+ MVT::ValueType RetVT= Op.Val->getValueType(0);
+ unsigned NumOps = (Op.getNumOperands() - 5) / 2;
+
+ // Count how many bytes are to be pushed on the stack.
+ unsigned NumBytes = 0;
+
+ // Keep track of the number of integer regs passed so far. This can be either
+ // 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
+ // used).
+ unsigned NumIntRegs = 0;
+ unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
+
+ static const unsigned GPRArgRegs[][2] = {
+ { X86::AL, X86::DL },
+ { X86::AX, X86::DX },
+ { X86::EAX, X86::EDX }
+ };
+ static const unsigned XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
+ };
+
+ for (unsigned i = 0; i != NumOps; ++i) {
+ SDOperand Arg = Op.getOperand(5+2*i);
+
+ switch (Arg.getValueType()) {
+ default: assert(0 && "Unknown value type!");
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+ if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
+ ++NumIntRegs;
+ break;
+ }
+#endif
+ // Fall through
+ case MVT::f32:
+ NumBytes += 4;
+ break;
+ case MVT::f64:
+ NumBytes += 8;
+ break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ if (NumXMMRegs < 4)
+ NumXMMRegs++;
+ else {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ NumBytes = ((NumBytes + 15) / 16) * 16;
+ NumBytes += 16;
+ }
+ break;
+ }
+ }
+
+ // Make sure the instruction takes 8n+4 bytes, so that the stack stays
+ // 8-byte aligned once the 4-byte retaddr has been pushed.
+ if ((NumBytes & 7) == 0)
+ NumBytes += 4;
+
+ Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
+
+ // Arguments go on the stack in reverse order, as specified by the ABI.
+ unsigned ArgOffset = 0;
+ NumIntRegs = 0;
+ std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
+ std::vector<SDOperand> MemOpChains;
+ SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
+ for (unsigned i = 0; i != NumOps; ++i) {
+ SDOperand Arg = Op.getOperand(5+2*i);
+
+ switch (Arg.getValueType()) {
+ default: assert(0 && "Unexpected ValueType for argument!");
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+ if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
+ RegsToPass.push_back(
+ std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs],
+ Arg));
+ ++NumIntRegs;
+ break;
+ }
+#endif
+ // Fall through
+ case MVT::f32: {
+ SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
+ Arg, PtrOff, DAG.getSrcValue(NULL)));
+ ArgOffset += 4;
+ break;
+ }
+ case MVT::f64: {
+ SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
+ Arg, PtrOff, DAG.getSrcValue(NULL)));
+ ArgOffset += 8;
+ break;
+ }
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ if (NumXMMRegs < 4) {
+ RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
+ NumXMMRegs++;
+ } else {
+ // XMM arguments have to be aligned on 16-byte boundary.
+ ArgOffset = ((ArgOffset + 15) / 16) * 16;
+ SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
+ Arg, PtrOff, DAG.getSrcValue(NULL)));
+ ArgOffset += 16;
+ }
+ }
+ }
+
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
+
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into registers.
+ SDOperand InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
+ InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
+ else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+
+ std::vector<MVT::ValueType> NodeTys;
+ NodeTys.push_back(MVT::Other); // Returns a chain
+ NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
+ std::vector<SDOperand> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are known live
+ // into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+
+ if (InFlag.Val)
+ Ops.push_back(InFlag);
+
+ // FIXME: Do not generate X86ISD::TAILCALL for now.
+ Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
+ NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ NodeTys.clear();
+ NodeTys.push_back(MVT::Other); // Returns a chain
+ if (RetVT != MVT::Other)
+ NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
+ Ops.clear();
+ Ops.push_back(Chain);
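+ // Fastcc callees pop their own arguments, so the callee-pop operand is
+ // the full NumBytes pushed for this call.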
+ Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
+ Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
+ Ops.push_back(InFlag);
+ Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
+ if (RetVT != MVT::Other)
+ InFlag = Chain.getValue(1);
+
+ std::vector<SDOperand> ResultVals;
+ NodeTys.clear();
+ switch (RetVT) {
+ default: assert(0 && "Unknown value type to return!");
+ case MVT::Other: break;
+ case MVT::i8:
+ Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ NodeTys.push_back(MVT::i8);
+ break;
+ case MVT::i16:
+ Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ NodeTys.push_back(MVT::i16);
+ break;
+ case MVT::i32:
+ if (Op.Val->getValueType(1) == MVT::i32) {
+ Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
+ Chain.getValue(2)).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ NodeTys.push_back(MVT::i32);
+ } else {
+ Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ }
+ NodeTys.push_back(MVT::i32);
+ break;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
+ ResultVals.push_back(Chain.getValue(0));
+ NodeTys.push_back(RetVT);
+ break;
+ case MVT::f32:
+ case MVT::f64: {
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(MVT::f64);
+ Tys.push_back(MVT::Other);
+ Tys.push_back(MVT::Flag);
+ std::vector<SDOperand> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(InFlag);
+ SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
+ &Ops[0], Ops.size());
+ Chain = RetVal.getValue(1);
+ InFlag = RetVal.getValue(2);
+ if (X86ScalarSSE) {
+ // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
+ // shouldn't be necessary except that RFP cannot be live across
+ // multiple blocks. When stackifier is fixed, they can be uncoupled.
+ MachineFunction &MF = DAG.getMachineFunction();
+ int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
+ SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
+ Tys.clear();
+ Tys.push_back(MVT::Other);
+ Ops.clear();
+ Ops.push_back(Chain);
+ Ops.push_back(RetVal);
+ Ops.push_back(StackSlot);
+ Ops.push_back(DAG.getValueType(RetVT));
+ Ops.push_back(InFlag);
+ Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
+ RetVal = DAG.getLoad(RetVT, Chain, StackSlot,
+ DAG.getSrcValue(NULL));
+ Chain = RetVal.getValue(1);
+ }
+
+ if (RetVT == MVT::f32 && !X86ScalarSSE)
+ // FIXME: we would really like to remember that this FP_ROUND
+ // operation is okay to eliminate if we allow excess FP precision.
+ RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
+ ResultVals.push_back(RetVal);
+ NodeTys.push_back(RetVT);
+ break;
+ }
+ }
+
+
+ // If the function returns void, just return the chain.
+ if (ResultVals.empty())
+ return Chain;
+
+ // Otherwise, merge everything together with a MERGE_VALUES node.
+ NodeTys.push_back(MVT::Other);
+ ResultVals.push_back(Chain);
+ SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
+ &ResultVals[0], ResultVals.size());
+ return Res.getValue(Op.ResNo);
+}
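
As a side note on the MVT::i32 case above: when a 64-bit value comes back under the 32-bit convention, two CopyFromReg nodes pull the halves out of EAX and EDX and the caller reassembles them. A minimal standalone sketch of that reassembly (plain C++, not DAG code; the register names are just variables here):

#include <cassert>
#include <cstdint>

int main() {
  // Model of an i64 returned in the EAX:EDX pair (low half in EAX).
  uint32_t EAX = 0x89ABCDEFu, EDX = 0x01234567u;
  uint64_t Ret = ((uint64_t)EDX << 32) | EAX;
  assert(Ret == 0x0123456789ABCDEFull);
  return 0;
}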
+
+SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
+ if (ReturnAddrIndex == 0) {
+ // Set up a frame object for the return address.
+ MachineFunction &MF = DAG.getMachineFunction();
+ if (Subtarget->is64Bit())
+ ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
+ else
+ ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
+ }
+
+ return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
+}
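
The two CreateFixedObject calls above differ only in the word size: assuming the usual x86 call sequence, the return address occupies the one pointer-sized word just below the incoming stack pointer. A tiny sketch (not part of the patch) of that size/offset relationship:

#include <cstdio>

int main() {
  for (int is64Bit = 0; is64Bit <= 1; ++is64Bit) {
    int Size = is64Bit ? 8 : 4;     // pointer width of the target
    int Offset = -Size;             // slot just below the incoming SP
    std::printf("CreateFixedObject(%d, %d)\n", Size, Offset);
  }
  return 0;
}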
+
+
+
+std::pair<SDOperand, SDOperand> X86TargetLowering::
+LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
+ SelectionDAG &DAG) {
+ SDOperand Result;
+ if (Depth) // Depths > 0 not supported yet!
+ Result = DAG.getConstant(0, getPointerTy());
+ else {
+ SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
+ if (!isFrameAddress)
+ // Just load the return address
+ Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI,
+ DAG.getSrcValue(NULL));
+ else
+ Result = DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI,
+ DAG.getConstant(4, getPointerTy()));
+ }
+ return std::make_pair(Result, Chain);
+}
+
+/// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
+/// which corresponds to the condition code.
+static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
+ switch (X86CC) {
+ default: assert(0 && "Unknown X86 conditional code!");
+ case X86ISD::COND_A: return X86::JA;
+ case X86ISD::COND_AE: return X86::JAE;
+ case X86ISD::COND_B: return X86::JB;
+ case X86ISD::COND_BE: return X86::JBE;
+ case X86ISD::COND_E: return X86::JE;
+ case X86ISD::COND_G: return X86::JG;
+ case X86ISD::COND_GE: return X86::JGE;
+ case X86ISD::COND_L: return X86::JL;
+ case X86ISD::COND_LE: return X86::JLE;
+ case X86ISD::COND_NE: return X86::JNE;
+ case X86ISD::COND_NO: return X86::JNO;
+ case X86ISD::COND_NP: return X86::JNP;
+ case X86ISD::COND_NS: return X86::JNS;
+ case X86ISD::COND_O: return X86::JO;
+ case X86ISD::COND_P: return X86::JP;
+ case X86ISD::COND_S: return X86::JS;
+ }
+}
+
+/// translateX86CC - do a one-to-one translation of an ISD::CondCode to the
+/// X86-specific condition code. It returns false if it cannot do a direct
+/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
+/// needed.
+static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
+ unsigned &X86CC, SDOperand &LHS, SDOperand &RHS,
+ SelectionDAG &DAG) {
+ X86CC = X86ISD::COND_INVALID;
+ if (!isFP) {
+ if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
+ if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
+ // X > -1 -> X == 0, jump !sign.
+ RHS = DAG.getConstant(0, RHS.getValueType());
+ X86CC = X86ISD::COND_NS;
+ return true;
+ } else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
+ // X < 0 -> X == 0, jump on sign.
+ X86CC = X86ISD::COND_S;
+ return true;
+ }
+ }
+
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
+ case ISD::SETGT: X86CC = X86ISD::COND_G; break;
+ case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
+ case ISD::SETLT: X86CC = X86ISD::COND_L; break;
+ case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
+ case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
+ case ISD::SETULT: X86CC = X86ISD::COND_B; break;
+ case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
+ case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
+ case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
+ }
+ } else {
+ // On a floating point condition, the flags are set as follows:
+ // ZF PF CF op
+ // 0 | 0 | 0 | X > Y
+ // 0 | 0 | 1 | X < Y
+ // 1 | 0 | 0 | X == Y
+ // 1 | 1 | 1 | unordered
+ bool Flip = false;
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETUEQ:
+ case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
+ case ISD::SETOLT: Flip = true; // Fallthrough
+ case ISD::SETOGT:
+ case ISD::SETGT: X86CC = X86ISD::COND_A; break;
+ case ISD::SETOLE: Flip = true; // Fallthrough
+ case ISD::SETOGE:
+ case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
+ case ISD::SETUGT: Flip = true; // Fallthrough
+ case ISD::SETULT:
+ case ISD::SETLT: X86CC = X86ISD::COND_B; break;
+ case ISD::SETUGE: Flip = true; // Fallthrough
+ case ISD::SETULE:
+ case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
+ case ISD::SETONE:
+ case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
+ case ISD::SETUO: X86CC = X86ISD::COND_P; break;
+ case ISD::SETO: X86CC = X86ISD::COND_NP; break;
+ }
+ if (Flip)
+ std::swap(LHS, RHS);
+ }
+
+ return X86CC != X86ISD::COND_INVALID;
}
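
The flag table and the Flip logic above can be checked with a small standalone model. This is an illustrative sketch only: the Flags struct and fpcmp below are stand-ins for the hardware FP compare, not anything in this file. It verifies that SETOLT(L, R) is lowered as COND_A on a compare with the operands swapped, while SETULT(L, R) is COND_B with no swap.

#include <cassert>
#include <cmath>

struct Flags { bool ZF, PF, CF; };            // per the comment table above

static Flags fpcmp(double L, double R) {      // model of ucomisd L, R
  if (std::isnan(L) || std::isnan(R)) return {true, true, true};  // unordered
  if (L == R) return {true, false, false};
  if (L <  R) return {false, false, true};
  return {false, false, false};               // L > R
}

static bool COND_A(Flags F) { return !F.CF && !F.ZF; }   // "above"
static bool COND_B(Flags F) { return  F.CF; }            // "below"

int main() {
  double vals[] = {-1.5, 0.0, 2.0, NAN};
  for (double L : vals)
    for (double R : vals) {
      bool setolt = !std::isnan(L) && !std::isnan(R) && L < R;
      bool setult =  std::isnan(L) || std::isnan(R) || L < R;
      assert(COND_A(fpcmp(R, L)) == setolt);   // SETOLT: flip, then COND_A
      assert(COND_B(fpcmp(L, R)) == setult);   // SETULT: no flip, COND_B
    }
  return 0;
}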
/// hasFPCMov - is there a floating point cmov for the specific X86 condition
}
}
-MachineBasicBlock *
-X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
- MachineBasicBlock *BB) {
- switch (MI->getOpcode()) {
- default: assert(false && "Unexpected instr type to insert");
- case X86::CMOV_FR32:
- case X86::CMOV_FR64:
- case X86::CMOV_V4F32:
- case X86::CMOV_V2F64:
- case X86::CMOV_V2I64: {
- // To "insert" a SELECT_CC instruction, we actually have to insert the
- // diamond control-flow pattern. The incoming instruction knows the
- // destination vreg to set, the condition code register to branch on, the
- // true/false values to select between, and a branch opcode to use.
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- ilist<MachineBasicBlock>::iterator It = BB;
- ++It;
-
- // thisMBB:
- // ...
- // TrueVal = ...
- // cmpTY ccX, r1, r2
- // bCC copy1MBB
- // fallthrough --> copy0MBB
- MachineBasicBlock *thisMBB = BB;
- MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
- MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
- unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
- BuildMI(BB, Opc, 1).addMBB(sinkMBB);
- MachineFunction *F = BB->getParent();
- F->getBasicBlockList().insert(It, copy0MBB);
- F->getBasicBlockList().insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
- e = BB->succ_end(); i != e; ++i)
- sinkMBB->addSuccessor(*i);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while(!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
- BB->addSuccessor(copy0MBB);
- BB->addSuccessor(sinkMBB);
-
- // copy0MBB:
- // %FalseValue = ...
- // # fallthrough to sinkMBB
- BB = copy0MBB;
-
- // Update machine-CFG edges
- BB->addSuccessor(sinkMBB);
-
- // sinkMBB:
- // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
- // ...
- BB = sinkMBB;
- BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
-
- delete MI; // The pseudo instruction is gone now.
- return BB;
- }
-
- case X86::FP_TO_INT16_IN_MEM:
- case X86::FP_TO_INT32_IN_MEM:
- case X86::FP_TO_INT64_IN_MEM: {
- // Change the floating point control register to use "round towards zero"
- // mode when truncating to an integer value.
- MachineFunction *F = BB->getParent();
- int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
- addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
-
- // Load the old value of the high byte of the control word...
- unsigned OldCW =
- F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
- addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
-
- // Set the high part to be round to zero...
- addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
-
- // Reload the modified control word now...
- addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
-
- // Restore the memory image of control word to original value
- addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
-
- // Get the X86 opcode to use.
- unsigned Opc;
- switch (MI->getOpcode()) {
- default: assert(0 && "illegal opcode!");
- case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
- case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
- case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
- }
-
- X86AddressMode AM;
- MachineOperand &Op = MI->getOperand(0);
- if (Op.isRegister()) {
- AM.BaseType = X86AddressMode::RegBase;
- AM.Base.Reg = Op.getReg();
- } else {
- AM.BaseType = X86AddressMode::FrameIndexBase;
- AM.Base.FrameIndex = Op.getFrameIndex();
- }
- Op = MI->getOperand(1);
- if (Op.isImmediate())
- AM.Scale = Op.getImmedValue();
- Op = MI->getOperand(2);
- if (Op.isImmediate())
- AM.IndexReg = Op.getImmedValue();
- Op = MI->getOperand(3);
- if (Op.isGlobalAddress()) {
- AM.GV = Op.getGlobal();
- } else {
- AM.Disp = Op.getImmedValue();
- }
- addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
-
- // Reload the original control word now.
- addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
-
- delete MI; // The pseudo instruction is gone now.
- return BB;
- }
- }
-}
-
-
-//===----------------------------------------------------------------------===//
-// X86 Custom Lowering Hooks
-//===----------------------------------------------------------------------===//
-
/// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
/// load. For Darwin, external and weak symbols are indirect, loading the value
/// at address GV rather than the value of GV itself. This means that the
(GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
}
+/// WindowsGVRequiresExtraLoad - true if accessing the GV requires an extra
+/// load. For Windows, dllimported variables (not functions!) are indirect,
+/// loading the value at address GV rather than the value of GV itself. This
+/// means that the GlobalAddress must be in the base or index register of the
+/// address, not the GV offset field.
+static bool WindowsGVRequiresExtraLoad(GlobalValue *GV) {
+ return (isa<GlobalVariable>((Value*)GV) && GV->hasDLLImportLinkage());
+}
+
/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the specified range [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
/// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
/// x86 movss wants: the lowest element must be the lowest element of vector 2,
/// with the other elements coming from vector 1 in order.
-static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false) {
+static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false,
+ bool V2IsUndef = false) {
unsigned NumElems = Ops.size();
if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
return false;
for (unsigned i = 1; i < NumElems; ++i) {
SDOperand Arg = Ops[i];
- if (V2IsSplat) {
- if (!isUndefOrEqual(Arg, NumElems))
- return false;
- } else {
- if (!isUndefOrEqual(Arg, i+NumElems))
- return false;
- }
+ if (!(isUndefOrEqual(Arg, i+NumElems) ||
+ (V2IsUndef && isUndefOrInRange(Arg, NumElems, NumElems*2)) ||
+ (V2IsSplat && isUndefOrEqual(Arg, NumElems))))
+ return false;
}
return true;
}
-static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false) {
+static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
+ bool V2IsUndef = false) {
assert(N->getOpcode() == ISD::BUILD_VECTOR);
std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
- return isCommutedMOVL(Ops, V2IsSplat);
+ return isCommutedMOVL(Ops, V2IsSplat, V2IsUndef);
}
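
A standalone model of the relaxed test above (illustrative sketch only; -1 stands in for an UNDEF mask element): element 0 is unconstrained, and every later element i must name V2[i], or any V2 element when V2 is undef, or the splatted V2[0] when V2 is a splat.

#include <cassert>
#include <vector>

static bool isCommutedMOVLModel(const std::vector<int> &M,
                                bool V2IsSplat = false,
                                bool V2IsUndef = false) {
  int N = (int)M.size();
  for (int i = 1; i < N; ++i) {                 // element 0 is unconstrained
    int E = M[i];
    if (E < 0) continue;                        // UNDEF
    if (E == i + N) continue;                   // V2[i], in order
    if (V2IsUndef && E >= N && E < 2 * N) continue;
    if (V2IsSplat && E == N) continue;          // splatted V2[0]
    return false;
  }
  return true;
}

int main() {
  assert( isCommutedMOVLModel({0, 5, 6, 7}));                     // commuted movl
  assert(!isCommutedMOVLModel({0, 5, 6, 3}));                     // 3 is a V1 element
  assert( isCommutedMOVLModel({0, 4, 4, 4}, /*V2IsSplat=*/true));
  return 0;
}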
/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
}
- Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
}
return true;
}
+/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
+/// to an undef.
+static bool isUndefShuffle(SDNode *N) {
+ if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
+ return false;
+
+ SDOperand V1 = N->getOperand(0);
+ SDOperand V2 = N->getOperand(1);
+ SDOperand Mask = N->getOperand(2);
+ unsigned NumElems = Mask.getNumOperands();
+ for (unsigned i = 0; i != NumElems; ++i) {
+ SDOperand Arg = Mask.getOperand(i);
+ if (Arg.getOpcode() != ISD::UNDEF) {
+ unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
+ if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
+ return false;
+ else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
+ return false;
+ }
+ }
+ return true;
+}
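
The rule isUndefShuffle implements can be restated on plain arrays: a shuffle folds to undef when every non-undef mask element selects from an input that is itself undef. A self-contained sketch (function name is hypothetical; -1 models an UNDEF mask element):

#include <cassert>
#include <vector>

static bool isUndefShuffleModel(const std::vector<int> &Mask,
                                bool V1Undef, bool V2Undef) {
  int NumElems = (int)Mask.size();
  for (int M : Mask) {
    if (M < 0) continue;                            // UNDEF mask element
    if (M < NumElems  && !V1Undef) return false;    // selects from V1
    if (M >= NumElems && !V2Undef) return false;    // selects from V2
  }
  return true;
}

int main() {
  assert( isUndefShuffleModel({0, -1, 1, -1}, true,  false));  // only undef V1 used
  assert(!isUndefShuffleModel({0, -1, 5, -1}, true,  false));  // 5 names live V2
  assert( isUndefShuffleModel({-1, -1, -1, -1}, false, false));
  return 0;
}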
+
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 points to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
}
if (Changed)
- Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), MaskVec);
+ Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
+ &MaskVec[0], MaskVec.size());
return Mask;
}
MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
for (unsigned i = 1; i != NumElems; ++i)
MaskVec.push_back(DAG.getConstant(i, BaseVT));
- return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
MaskVec.push_back(DAG.getConstant(i, BaseVT));
MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
}
- return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
}
- return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
/// getZeroVector - Returns a vector of specified type with all zero elements.
bool isFP = MVT::isFloatingPoint(EVT);
SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
std::vector<SDOperand> ZeroVec(NumElems, Zero);
- return DAG.getNode(ISD::BUILD_VECTOR, VT, ZeroVec);
+ return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
}
/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
SDOperand Zero = DAG.getConstant(0, EVT);
std::vector<SDOperand> MaskVec(NumElems, Zero);
MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
- SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG, TargetLowering &TLI) {
if (NumNonZero > 8)
return SDOperand();
if (ThisElt.Val)
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
- DAG.getConstant(i/2, MVT::i32));
+ DAG.getConstant(i/2, TLI.getPointerTy()));
}
}
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG, TargetLowering &TLI) {
if (NumNonZero > 4)
return SDOperand();
First = false;
}
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
- DAG.getConstant(i, MVT::i32));
+ DAG.getConstant(i, TLI.getPointerTy()));
}
}
std::vector<SDOperand> MaskVec;
for (unsigned i = 0; i < NumElems; i++)
MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
- SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
DAG.getNode(ISD::UNDEF, VT), Mask);
}
// If element VT is < 32 bits, convert it to inserts into a zero vector.
if (EVTBits == 8) {
- SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG);
+ SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
+ *this);
if (V.Val) return V;
}
if (EVTBits == 16) {
- SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG);
+ SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
+ *this);
if (V.Val) return V;
}
MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
else
MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
- SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &MaskVec[0], MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
}
bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
+ if (isUndefShuffle(Op.Val))
+ return DAG.getNode(ISD::UNDEF, VT);
+
if (isSplatMask(PermMask.Val)) {
if (NumElems <= 4) return Op;
// Promote it to a v4i32 splat.
ShouldXformToMOVLP(V1.Val, PermMask.Val))
return CommuteVectorShuffle(Op, DAG);
- bool V1IsSplat = isSplatVector(V1.Val) || V1.getOpcode() == ISD::UNDEF;
- bool V2IsSplat = isSplatVector(V2.Val) || V2.getOpcode() == ISD::UNDEF;
- if (V1IsSplat && !V2IsSplat) {
+ bool V1IsSplat = isSplatVector(V1.Val);
+ bool V2IsSplat = isSplatVector(V2.Val);
+ if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
Op = CommuteVectorShuffle(Op, DAG);
V1 = Op.getOperand(0);
V2 = Op.getOperand(1);
PermMask = Op.getOperand(2);
- V2IsSplat = true;
+ std::swap(V1IsSplat, V2IsSplat);
+ std::swap(V1IsUndef, V2IsUndef);
}
- if (isCommutedMOVL(PermMask.Val, V2IsSplat)) {
+ if (isCommutedMOVL(PermMask.Val, V2IsSplat, V2IsUndef)) {
if (V2IsUndef) return V1;
Op = CommuteVectorShuffle(Op, DAG);
V1 = Op.getOperand(0);
MaskVec.push_back(PermMask.getOperand(i));
for (unsigned i = 4; i != 8; ++i)
MaskVec.push_back(DAG.getConstant(i, BaseVT));
- SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &MaskVec[0], MaskVec.size());
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
MaskVec.clear();
for (unsigned i = 0; i != 4; ++i)
MaskVec.push_back(DAG.getConstant(i, BaseVT));
for (unsigned i = 4; i != 8; ++i)
MaskVec.push_back(PermMask.getOperand(i));
- Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
+ Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0],MaskVec.size());
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}
} else {
}
if (NumLo <= 2 && NumHi <= 2) {
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, Mask1));
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &Mask1[0], Mask1.size()));
for (unsigned i = 0; i != NumElems; ++i) {
if (Locs[i].first == -1)
continue;
}
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, Mask2));
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &Mask2[0], Mask2.size()));
}
// Break it into (shuffle shuffle_hi, shuffle_lo).
SDOperand LoShuffle =
DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, LoMask));
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &LoMask[0], LoMask.size()));
SDOperand HiShuffle =
DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, HiMask));
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &HiMask[0], HiMask.size()));
std::vector<SDOperand> MaskOps;
for (unsigned i = 0; i != NumElems; ++i) {
if (Locs[i].first == -1) {
}
}
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskOps));
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &MaskOps[0], MaskOps.size()));
}
return SDOperand();
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Idx == 0)
return Op;
-
// SHUFPS the element to the lowest double word, then movss.
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
- SDOperand IdxNode = DAG.getConstant((Idx < 2) ? Idx : Idx+4,
- MVT::getVectorBaseType(MaskVT));
std::vector<SDOperand> IdxVec;
IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
- SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, IdxVec);
+ SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
Vec, Vec, Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, getPointerTy()));
} else if (MVT::getSizeInBits(VT) == 64) {
SDOperand Vec = Op.getOperand(0);
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
std::vector<SDOperand> IdxVec;
IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
- SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, IdxVec);
+ SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, getPointerTy()));
}
return SDOperand();
for (unsigned i = 1; i <= 3; ++i)
MaskVec.push_back(DAG.getConstant(i, BaseVT));
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec));
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
+ &MaskVec[0], MaskVec.size()));
} else {
// Use two pinsrw instructions to insert a 32 bit value.
Idx <<= 1;
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, getPointerTy()));
}
}
N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
- DAG.getConstant(Idx, MVT::i32));
+ DAG.getConstant(Idx, getPointerTy()));
N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
- DAG.getConstant(Idx+1, MVT::i32));
+ DAG.getConstant(Idx+1, getPointerTy()));
return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
}
}
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
- DAG.getTargetConstantPool(CP->get(), getPointerTy(),
- CP->getAlignment()));
+ DAG.getTargetConstantPool(CP->getConstVal(),
+ getPointerTy(),
+ CP->getAlignment()));
if (Subtarget->isTargetDarwin()) {
// With PIC, the address is actually $g + Offset.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC)
+ if (!Subtarget->is64Bit() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
}
getPointerTy()));
if (Subtarget->isTargetDarwin()) {
// With PIC, the address is actually $g + Offset.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC)
+ if (!Subtarget->is64Bit() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
Result);
// not the GV offset field.
if (getTargetMachine().getRelocationModel() != Reloc::Static &&
DarwinGVRequiresExtraLoad(GV))
- Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
+ Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(),
Result, DAG.getSrcValue(NULL));
+ } else if (Subtarget->isTargetCygwin() || Subtarget->isTargetWindows()) {
+ // FIXME: What about PIC?
+ if (WindowsGVRequiresExtraLoad(GV)) {
+ Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(),
+ Result, DAG.getSrcValue(NULL));
+ }
}
+
return Result;
}
getPointerTy()));
if (Subtarget->isTargetDarwin()) {
// With PIC, the address is actually $g + Offset.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC)
+ if (!Subtarget->is64Bit() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
Result);
SDOperand ShOpLo = Op.getOperand(0);
SDOperand ShOpHi = Op.getOperand(1);
SDOperand ShAmt = Op.getOperand(2);
- SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
- DAG.getConstant(31, MVT::i8))
- : DAG.getConstant(0, MVT::i32);
+ SDOperand Tmp1 = isSRA ?
+ DAG.getNode(ISD::SRA, MVT::i32, ShOpHi, DAG.getConstant(31, MVT::i8)) :
+ DAG.getConstant(0, MVT::i32);
SDOperand Tmp2, Tmp3;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
}
- SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
- ShAmt, DAG.getConstant(32, MVT::i8));
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+ SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
+ DAG.getConstant(32, MVT::i8));
+ SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)};
+ SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
SDOperand Hi, Lo;
SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
- std::vector<MVT::ValueType> Tys;
- Tys.push_back(MVT::i32);
- Tys.push_back(MVT::Flag);
- std::vector<SDOperand> Ops;
+ VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
+ SmallVector<SDOperand, 4> Ops;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
+ Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
InFlag = Hi.getValue(1);
Ops.clear();
Ops.push_back(Tmp1);
Ops.push_back(CC);
Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
+ Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
} else {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
+ Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
InFlag = Lo.getValue(1);
Ops.clear();
Ops.push_back(Tmp1);
Ops.push_back(CC);
Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
+ Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}
- Tys.clear();
- Tys.push_back(MVT::i32);
- Tys.push_back(MVT::i32);
+ VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
Ops.clear();
Ops.push_back(Lo);
Ops.push_back(Hi);
- return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
+ return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}
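
For reference, what the SHL_PARTS expansion above computes: a 64-bit value split into 32-bit (Lo, Hi) halves is shifted left by an amount in 0..63, and the CMOV on the (ShAmt & 32) test picks between the two candidates, mirroring the AND/COND_NE pair above. A hedged C++ sketch of the SHL case (the SRA/SRL cases are symmetric; this is reference semantics, not the DAG code):

#include <cassert>
#include <cstdint>

static void shl_parts(uint32_t Lo, uint32_t Hi, unsigned Amt,
                      uint32_t &OutLo, uint32_t &OutHi) {
  unsigned A = Amt & 31;
  uint32_t Tmp2 = Lo << A;                                // SHL lo, amt
  uint32_t Tmp3 = (Hi << A) | (A ? Lo >> (32 - A) : 0);   // SHLD hi, lo, amt
  if (Amt & 32) { OutHi = Tmp2; OutLo = 0;    }           // amt >= 32 case
  else          { OutHi = Tmp3; OutLo = Tmp2; }
}

int main() {
  const uint64_t V = 0x0123456789ABCDEFull;
  for (unsigned Amt = 0; Amt < 64; ++Amt) {
    uint32_t Lo, Hi;
    shl_parts((uint32_t)V, (uint32_t)(V >> 32), Amt, Lo, Hi);
    assert((((uint64_t)Hi << 32) | Lo) == (V << Amt));
  }
  return 0;
}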
SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(SrcVT));
Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
- Tys, Ops);
+ Tys, &Ops[0], Ops.size());
if (X86ScalarSSE) {
Chain = Result.getValue(1);
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(Op.getValueType()));
Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
+ Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
DAG.getSrcValue(NULL));
}
Ops.push_back(Chain);
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
- Value = DAG.getNode(X86ISD::FLD, Tys, Ops);
+ Value = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
Chain = Value.getValue(1);
SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
Ops.push_back(Chain);
Ops.push_back(Value);
Ops.push_back(StackSlot);
- SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);
+ SDOperand FIST = DAG.getNode(Opc, MVT::Other, &Ops[0], Ops.size());
// Load the result.
return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
- SDOperand Mask
- = DAG.getNode(X86ISD::LOAD_PACK,
- VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(VT);
+ Tys.push_back(MVT::Other);
+ SmallVector<SDOperand, 3> Ops;
+ Ops.push_back(DAG.getEntryNode());
+ Ops.push_back(CPIdx);
+ Ops.push_back(DAG.getSrcValue(NULL));
+ SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
- SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK,
- VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(VT);
+ Tys.push_back(MVT::Other);
+ SmallVector<SDOperand, 3> Ops;
+ Ops.push_back(DAG.getEntryNode());
+ Ops.push_back(CPIdx);
+ Ops.push_back(DAG.getSrcValue(NULL));
+ SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK, Tys, &Ops[0], Ops.size());
return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
}
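
Both LOAD_PACK hunks above fetch a constant-pool mask so that FABS becomes an FAND that clears the sign bit and FNEG an FXOR that toggles it. A plain C++ sketch of the same bit trick for doubles (memcpy stands in for the bitcast; not part of the patch):

#include <cassert>
#include <cstdint>
#include <cstring>

static double bitop(double X, uint64_t Mask, bool isXor) {
  uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof Bits);
  Bits = isXor ? (Bits ^ Mask) : (Bits & Mask);
  std::memcpy(&X, &Bits, sizeof Bits);
  return X;
}

int main() {
  const uint64_t SignBit = 1ULL << 63;
  assert(bitop(-2.5, ~SignBit, false) == 2.5);   // FABS: fand with ~sign
  assert(bitop( 2.5,  SignBit, true)  == -2.5);  // FNEG: fxor with sign
  return 0;
}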
-SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
+SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
+ SDOperand Chain) {
assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
SDOperand Cond;
+ SDOperand Op0 = Op.getOperand(0);
+ SDOperand Op1 = Op.getOperand(1);
SDOperand CC = Op.getOperand(2);
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
- bool Flip;
unsigned X86CC;
- if (translateX86CC(CC, isFP, X86CC, Flip)) {
- if (Flip)
- Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
- Op.getOperand(1), Op.getOperand(0));
- else
- Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
- Op.getOperand(0), Op.getOperand(1));
- return DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), Cond);
- } else {
- assert(isFP && "Illegal integer SetCC!");
- Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
- Op.getOperand(0), Op.getOperand(1));
- std::vector<MVT::ValueType> Tys;
- std::vector<SDOperand> Ops;
- switch (SetCCOpcode) {
- default: assert(false && "Illegal floating point SetCC!");
- case ISD::SETOEQ: { // !PF & ZF
- Tys.push_back(MVT::i8);
- Tys.push_back(MVT::Flag);
- Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
- Ops.push_back(Cond);
- SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
- SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86ISD::COND_E, MVT::i8),
- Tmp1.getValue(1));
- return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
- }
- case ISD::SETUNE: { // PF | !ZF
- Tys.push_back(MVT::i8);
- Tys.push_back(MVT::Flag);
- Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
- Ops.push_back(Cond);
- SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
- SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86ISD::COND_NE, MVT::i8),
- Tmp1.getValue(1));
- return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
- }
- }
+ VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
+ if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
+ Op0, Op1, DAG)) {
+ SDOperand Ops1[] = { Chain, Op0, Op1 };
+ Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops1, 3).getValue(1);
+ SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
+ return DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
+ }
+
+ assert(isFP && "Illegal integer SetCC!");
+
+ SDOperand COps[] = { Chain, Op0, Op1 };
+ Cond = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
+
+ switch (SetCCOpcode) {
+ default: assert(false && "Illegal floating point SetCC!");
+ case ISD::SETOEQ: { // !PF & ZF
+ SDOperand Ops1[] = { DAG.getConstant(X86ISD::COND_NP, MVT::i8), Cond };
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops1, 2);
+ SDOperand Ops2[] = { DAG.getConstant(X86ISD::COND_E, MVT::i8),
+ Tmp1.getValue(1) };
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
+ return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETUNE: { // PF | !ZF
+ SDOperand Ops1[] = { DAG.getConstant(X86ISD::COND_P, MVT::i8), Cond };
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops1, 2);
+ SDOperand Ops2[] = { DAG.getConstant(X86ISD::COND_NE, MVT::i8),
+ Tmp1.getValue(1) };
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
+ return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
+ }
}
}
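
The two composite predicates above can be sanity-checked against the flag table from translateX86CC: SETOEQ is !PF & ZF and SETUNE is PF | !ZF. A standalone model (illustrative only; fpcmp is a stand-in for the hardware compare, not anything in this file):

#include <cassert>
#include <cmath>

struct Flags { bool ZF, PF, CF; };

static Flags fpcmp(double L, double R) {
  if (std::isnan(L) || std::isnan(R)) return {true, true, true};  // unordered
  if (L == R) return {true, false, false};
  return {false, false, L < R};
}

int main() {
  double vals[] = {0.0, 1.0, NAN};
  for (double L : vals)
    for (double R : vals) {
      Flags F = fpcmp(L, R);
      bool oeq = !std::isnan(L) && !std::isnan(R) && L == R;
      assert((!F.PF && F.ZF) == oeq);    // SETOEQ = !PF & ZF
      assert((F.PF || !F.ZF) == !oeq);   // SETUNE = PF | !ZF
    }
  return 0;
}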
SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- bool isFPStack = MVT::isFloatingPoint(VT) && !X86ScalarSSE;
- bool addTest = false;
- SDOperand Op0 = Op.getOperand(0);
- SDOperand Cond, CC;
- if (Op0.getOpcode() == ISD::SETCC)
- Op0 = LowerOperation(Op0, DAG);
+ bool addTest = true;
+ SDOperand Chain = DAG.getEntryNode();
+ SDOperand Cond = Op.getOperand(0);
+ SDOperand CC;
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+
+ if (Cond.getOpcode() == ISD::SETCC)
+ Cond = LowerSETCC(Cond, DAG, Chain);
+
+ if (Cond.getOpcode() == X86ISD::SETCC) {
+ CC = Cond.getOperand(0);
- if (Op0.getOpcode() == X86ISD::SETCC) {
// If condition flag is set by a X86ISD::CMP, then make a copy of it
- // (since flag operand cannot be shared). If the X86ISD::SETCC does not
- // have another use it will be eliminated.
- // If the X86ISD::SETCC has more than one use, then it's probably better
+ // (since flag operand cannot be shared). Use it as the condition setting
+ // operand in place of the X86ISD::SETCC.
+ // If the X86ISD::SETCC has more than one use, then perhaps it's better
// to use a test instead of duplicating the X86ISD::CMP (for register
- // pressure reason).
- unsigned CmpOpc = Op0.getOperand(1).getOpcode();
- if (CmpOpc == X86ISD::CMP || CmpOpc == X86ISD::COMI ||
- CmpOpc == X86ISD::UCOMI) {
- if (!Op0.hasOneUse()) {
- std::vector<MVT::ValueType> Tys;
- for (unsigned i = 0; i < Op0.Val->getNumValues(); ++i)
- Tys.push_back(Op0.Val->getValueType(i));
- std::vector<SDOperand> Ops;
- for (unsigned i = 0; i < Op0.getNumOperands(); ++i)
- Ops.push_back(Op0.getOperand(i));
- Op0 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
- }
-
- CC = Op0.getOperand(0);
- Cond = Op0.getOperand(1);
- // Make a copy as flag result cannot be used by more than one.
- Cond = DAG.getNode(CmpOpc, MVT::Flag,
- Cond.getOperand(0), Cond.getOperand(1));
- addTest =
- isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
- } else
- addTest = true;
- } else
- addTest = true;
+ // pressure reason)?
+ SDOperand Cmp = Cond.getOperand(1);
+ unsigned Opc = Cmp.getOpcode();
+ bool IllegalFPCMov = !X86ScalarSSE &&
+ MVT::isFloatingPoint(Op.getValueType()) &&
+ !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
+ if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
+ !IllegalFPCMov) {
+ SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
+ Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
+ addTest = false;
+ }
+ }
if (addTest) {
CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
- Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
+ SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
+ Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
}
- std::vector<MVT::ValueType> Tys;
- Tys.push_back(Op.getValueType());
- Tys.push_back(MVT::Flag);
- std::vector<SDOperand> Ops;
+ VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
+ SmallVector<SDOperand, 4> Ops;
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
// condition is true.
Ops.push_back(Op.getOperand(2));
Ops.push_back(Op.getOperand(1));
Ops.push_back(CC);
- Ops.push_back(Cond);
- return DAG.getNode(X86ISD::CMOV, Tys, Ops);
+ Ops.push_back(Cond.getValue(1));
+ return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}
SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
- bool addTest = false;
+ bool addTest = true;
+ SDOperand Chain = Op.getOperand(0);
SDOperand Cond = Op.getOperand(1);
SDOperand Dest = Op.getOperand(2);
SDOperand CC;
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+
if (Cond.getOpcode() == ISD::SETCC)
- Cond = LowerOperation(Cond, DAG);
+ Cond = LowerSETCC(Cond, DAG, Chain);
if (Cond.getOpcode() == X86ISD::SETCC) {
+ CC = Cond.getOperand(0);
+
// If condition flag is set by a X86ISD::CMP, then make a copy of it
- // (since flag operand cannot be shared). If the X86ISD::SETCC does not
- // have another use it will be eliminated.
- // If the X86ISD::SETCC has more than one use, then it's probably better
+ // (since flag operand cannot be shared). Use it as the condition setting
+ // operand in place of the X86ISD::SETCC.
+ // If the X86ISD::SETCC has more than one use, then perhaps it's better
// to use a test instead of duplicating the X86ISD::CMP (for register
- // pressure reason).
- unsigned CmpOpc = Cond.getOperand(1).getOpcode();
- if (CmpOpc == X86ISD::CMP || CmpOpc == X86ISD::COMI ||
- CmpOpc == X86ISD::UCOMI) {
- if (!Cond.hasOneUse()) {
- std::vector<MVT::ValueType> Tys;
- for (unsigned i = 0; i < Cond.Val->getNumValues(); ++i)
- Tys.push_back(Cond.Val->getValueType(i));
- std::vector<SDOperand> Ops;
- for (unsigned i = 0; i < Cond.getNumOperands(); ++i)
- Ops.push_back(Cond.getOperand(i));
- Cond = DAG.getNode(X86ISD::SETCC, Tys, Ops);
- }
-
- CC = Cond.getOperand(0);
- Cond = Cond.getOperand(1);
- // Make a copy as flag result cannot be used by more than one.
- Cond = DAG.getNode(CmpOpc, MVT::Flag,
- Cond.getOperand(0), Cond.getOperand(1));
- } else
- addTest = true;
- } else
- addTest = true;
+ // pressure reason)?
+ SDOperand Cmp = Cond.getOperand(1);
+ unsigned Opc = Cmp.getOpcode();
+ if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
+ SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
+ Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
+ addTest = false;
+ }
+ }
if (addTest) {
CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
- Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
+ SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
+ Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
}
return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
- Op.getOperand(0), Op.getOperand(2), CC, Cond);
+ Cond, Op.getOperand(2), CC, Cond.getValue(1));
}
SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
getPointerTy()));
if (Subtarget->isTargetDarwin()) {
// With PIC, the address is actually $g + Offset.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC)
+ if (!Subtarget->is64Bit() &&
+ getTargetMachine().getRelocationModel() == Reloc::PIC_)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
Result);
SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
- if (CallingConv == CallingConv::Fast && EnableFastCC)
+ if (Subtarget->is64Bit())
+ return LowerX86_64CCCCallTo(Op, DAG);
+ else if (CallingConv == CallingConv::Fast && EnableFastCC)
return LowerFastCCCallTo(Op, DAG);
else
return LowerCCCCallTo(Op, DAG);
case 1: // ret void.
return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
- case 2: {
+ case 3: {
MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
- if (MVT::isVector(ArgVT)) {
+ if (MVT::isVector(ArgVT) ||
+ (Subtarget->is64Bit() && MVT::isFloatingPoint(ArgVT))) {
// Integer or FP vector result -> XMM0.
if (DAG.getMachineFunction().liveout_empty())
DAG.getMachineFunction().addLiveOut(X86::XMM0);
Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1),
SDOperand());
} else if (MVT::isInteger(ArgVT)) {
- // Integer result -> EAX
+ // Integer result -> EAX / RAX.
+ // The C calling convention guarantees the return value has been
+ // promoted to at least MVT::i32. The X86-64 ABI doesn't require the
+ // value to be promoted to MVT::i64, so we don't have to extend it to
+ // 64 bits. Return the value in EAX, but mark RAX as liveout.
+ unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
if (DAG.getMachineFunction().liveout_empty())
- DAG.getMachineFunction().addLiveOut(X86::EAX);
+ DAG.getMachineFunction().addLiveOut(Reg);
- Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EAX, Op.getOperand(1),
+ Reg = (ArgVT == MVT::i64) ? X86::RAX : X86::EAX;
+ Copy = DAG.getCopyToReg(Op.getOperand(0), Reg, Op.getOperand(1),
SDOperand());
} else if (!X86ScalarSSE) {
// FP return with fp-stack value.
std::vector<SDOperand> Ops;
Ops.push_back(Op.getOperand(0));
Ops.push_back(Op.getOperand(1));
- Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
+ Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
} else {
// FP return with ScalarSSE (return on fp-stack).
if (DAG.getMachineFunction().liveout_empty())
Ops.push_back(Chain);
Ops.push_back(MemLoc);
Ops.push_back(DAG.getValueType(ArgVT));
- Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
+ Copy = DAG.getNode(X86ISD::FLD, Tys, &Ops[0], Ops.size());
Tys.clear();
Tys.push_back(MVT::Other);
Tys.push_back(MVT::Flag);
Ops.clear();
Ops.push_back(Copy.getValue(1));
Ops.push_back(Copy);
- Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
+ Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, &Ops[0], Ops.size());
}
break;
}
- case 3:
+ case 5: {
+ unsigned Reg1 = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ unsigned Reg2 = Subtarget->is64Bit() ? X86::RDX : X86::EDX;
if (DAG.getMachineFunction().liveout_empty()) {
- DAG.getMachineFunction().addLiveOut(X86::EAX);
- DAG.getMachineFunction().addLiveOut(X86::EDX);
+ DAG.getMachineFunction().addLiveOut(Reg1);
+ DAG.getMachineFunction().addLiveOut(Reg2);
}
- Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EDX, Op.getOperand(2),
+ Copy = DAG.getCopyToReg(Op.getOperand(0), Reg2, Op.getOperand(3),
SDOperand());
- Copy = DAG.getCopyToReg(Copy, X86::EAX,Op.getOperand(1),Copy.getValue(1));
+ Copy = DAG.getCopyToReg(Copy, Reg1, Op.getOperand(1), Copy.getValue(1));
break;
+ }
}
return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
- Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
+ Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
Copy.getValue(1));
}
SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const Function* Fn = MF.getFunction();
+ if (Fn->hasExternalLinkage() &&
+ Subtarget->TargetType == X86Subtarget::isCygwin &&
+ Fn->getName() == "main")
+ MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true);
+
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
- if (CC == CallingConv::Fast && EnableFastCC)
+ if (Subtarget->is64Bit())
+ return LowerX86_64CCCArguments(Op, DAG);
+ else if (CC == CallingConv::Fast && EnableFastCC)
return LowerFastCCArguments(Op, DAG);
else
return LowerCCCArguments(Op, DAG);
bool TwoRepStos = false;
if (ValC) {
unsigned ValReg;
- unsigned Val = ValC->getValue() & 255;
+ uint64_t Val = ValC->getValue() & 255;
// If the value is a constant, then we can potentially use larger sets.
switch (Align & 3) {
case 2: // WORD aligned
AVT = MVT::i16;
- Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
- BytesLeft = I->getValue() % 2;
- Val = (Val << 8) | Val;
ValReg = X86::AX;
+ Val = (Val << 8) | Val;
break;
- case 0: // DWORD aligned
+ case 0: // DWORD aligned
AVT = MVT::i32;
- if (I) {
- Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
- BytesLeft = I->getValue() % 4;
- } else {
- Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
- DAG.getConstant(2, MVT::i8));
- TwoRepStos = true;
- }
+ ValReg = X86::EAX;
Val = (Val << 8) | Val;
Val = (Val << 16) | Val;
- ValReg = X86::EAX;
+ if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) { // QWORD aligned
+ AVT = MVT::i64;
+ ValReg = X86::RAX;
+ Val = (Val << 32) | Val;
+ }
break;
default: // Byte aligned
AVT = MVT::i8;
- Count = Op.getOperand(3);
ValReg = X86::AL;
+ Count = Op.getOperand(3);
break;
}
+ if (AVT > MVT::i8) {
+ if (I) {
+ unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
+ Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
+ BytesLeft = I->getValue() % UBytes;
+ } else {
+ assert(AVT >= MVT::i32 &&
+ "Do not use rep;stos if not at least DWORD aligned");
+ Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
+ Op.getOperand(3), DAG.getConstant(2, MVT::i8));
+ TwoRepStos = true;
+ }
+ }
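A worked example of the arithmetic above (a sketch, not part of the patch): a memset of 15 bytes with DWORD alignment picks AVT = MVT::i32, replicates the fill byte across the wider store unit, and splits the count into a rep;stos part plus a short tail handled by the BytesLeft stores below.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Val = 0xAB;
  Val = (Val << 8)  | Val;               // 0xABAB             (WORD / i16)
  Val = (Val << 16) | Val;               // 0xABABABAB         (DWORD / i32)
  assert(Val == 0xABABABABu);
  uint64_t QVal = (Val << 32) | Val;     // QWORD / i64 on x86-64
  assert(QVal == 0xABABABABABABABABull);

  unsigned Size = 15, UBytes = 4;        // MVT::getSizeInBits(MVT::i32) / 8
  unsigned Count = Size / UBytes;        // 3 -> rep;stosd writes 12 bytes
  unsigned BytesLeft = Size % UBytes;    // 3 -> one i16 store + one i8 store
  assert(Count == 3 && BytesLeft == 3);
  return 0;
}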
+
Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
InFlag);
InFlag = Chain.getValue(1);
InFlag = Chain.getValue(1);
}
- Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
+ Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
+ Count, InFlag);
InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
+ Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
+ Op.getOperand(1), InFlag);
InFlag = Chain.getValue(1);
std::vector<MVT::ValueType> Tys;
Ops.push_back(Chain);
Ops.push_back(DAG.getValueType(AVT));
Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
+ Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
if (TwoRepStos) {
InFlag = Chain.getValue(1);
Count = Op.getOperand(3);
MVT::ValueType CVT = Count.getValueType();
SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
- DAG.getConstant(3, CVT));
- Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
+ DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
+ Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
+ Left, InFlag);
InFlag = Chain.getValue(1);
Tys.clear();
Tys.push_back(MVT::Other);
Ops.push_back(Chain);
Ops.push_back(DAG.getValueType(MVT::i8));
Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
+ Chain = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
} else if (BytesLeft) {
- // Issue stores for the last 1 - 3 bytes.
+ // Issue stores for the last 1 - 7 bytes.
SDOperand Value;
unsigned Val = ValC->getValue() & 255;
unsigned Offset = I->getValue() - BytesLeft;
SDOperand DstAddr = Op.getOperand(1);
MVT::ValueType AddrVT = DstAddr.getValueType();
+ if (BytesLeft >= 4) {
+ Val = (Val << 8) | Val;
+ Val = (Val << 16) | Val;
+ Value = DAG.getConstant(Val, MVT::i32);
+ Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
+ DAG.getNode(ISD::ADD, AddrVT, DstAddr,
+ DAG.getConstant(Offset, AddrVT)),
+ DAG.getSrcValue(NULL));
+ BytesLeft -= 4;
+ Offset += 4;
+ }
if (BytesLeft >= 2) {
Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
BytesLeft -= 2;
Offset += 2;
}
-
if (BytesLeft == 1) {
Value = DAG.getConstant(Val, MVT::i8);
Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
switch (Align & 3) {
case 2: // WORD aligned
AVT = MVT::i16;
- Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
- BytesLeft = I->getValue() % 2;
break;
- case 0: // DWORD aligned
+ case 0: // DWORD aligned
AVT = MVT::i32;
- if (I) {
- Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
- BytesLeft = I->getValue() % 4;
- } else {
- Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
- DAG.getConstant(2, MVT::i8));
- TwoRepMovs = true;
- }
+ if (Subtarget->is64Bit() && ((Align & 0xF) == 0)) // QWORD aligned
+ AVT = MVT::i64;
break;
default: // Byte aligned
AVT = MVT::i8;
break;
}
+ if (AVT > MVT::i8) {
+ if (I) {
+ unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
+ Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
+ BytesLeft = I->getValue() % UBytes;
+ } else {
+ assert(AVT >= MVT::i32 &&
+ "Do not use rep;movs if not at least DWORD aligned");
+ Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
+ Op.getOperand(3), DAG.getConstant(2, MVT::i8));
+ TwoRepMovs = true;
+ }
+ }
+
SDOperand InFlag(0, 0);
- Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
+ Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
+ Count, InFlag);
InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
+ Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
+ Op.getOperand(1), InFlag);
InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
+ Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
+ Op.getOperand(2), InFlag);
InFlag = Chain.getValue(1);
std::vector<MVT::ValueType> Tys;
Ops.push_back(Chain);
Ops.push_back(DAG.getValueType(AVT));
Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
+ Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
if (TwoRepMovs) {
InFlag = Chain.getValue(1);
Count = Op.getOperand(3);
MVT::ValueType CVT = Count.getValueType();
SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
- DAG.getConstant(3, CVT));
- Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
+ DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
+ Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
+ Left, InFlag);
InFlag = Chain.getValue(1);
Tys.clear();
Tys.push_back(MVT::Other);
Ops.push_back(Chain);
Ops.push_back(DAG.getValueType(MVT::i8));
Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
+ Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
} else if (BytesLeft) {
- // Issue loads and stores for the last 1 - 3 bytes.
+ // Issue loads and stores for the last 1 - 7 bytes.
unsigned Offset = I->getValue() - BytesLeft;
SDOperand DstAddr = Op.getOperand(1);
MVT::ValueType DstVT = DstAddr.getValueType();
SDOperand SrcAddr = Op.getOperand(2);
MVT::ValueType SrcVT = SrcAddr.getValueType();
SDOperand Value;
+ if (BytesLeft >= 4) {
+ Value = DAG.getLoad(MVT::i32, Chain,
+ DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
+ DAG.getConstant(Offset, SrcVT)),
+ DAG.getSrcValue(NULL));
+ Chain = Value.getValue(1);
+ Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
+ DAG.getNode(ISD::ADD, DstVT, DstAddr,
+ DAG.getConstant(Offset, DstVT)),
+ DAG.getSrcValue(NULL));
+ BytesLeft -= 4;
+ Offset += 4;
+ }
if (BytesLeft >= 2) {
Value = DAG.getLoad(MVT::i16, Chain,
DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
Tys.push_back(MVT::Flag);
std::vector<SDOperand> Ops;
Ops.push_back(Op.getOperand(0));
- SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
+ SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &Ops[0], Ops.size());
Ops.clear();
Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
Ops.push_back(Ops[1].getValue(1));
Tys[0] = Tys[1] = MVT::i32;
Tys.push_back(MVT::Other);
- return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
+ return DAG.getNode(ISD::MERGE_VALUES, Tys, &Ops[0], Ops.size());
}
SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
- // vastart just stores the address of the VarArgsFrameIndex slot into the
- // memory location argument.
- // FIXME: Replace MVT::i32 with PointerTy
- SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
- return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
- Op.getOperand(1), Op.getOperand(2));
+ if (!Subtarget->is64Bit()) {
+ // vastart just stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
+ return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
+ Op.getOperand(1), Op.getOperand(2));
+ }
+
+ // __va_list_tag:
+ // gp_offset (0 - 6 * 8)
+ // fp_offset (48 - 48 + 8 * 16)
+ // overflow_arg_area (points to parameters passed in memory).
+ // reg_save_area
+ std::vector<SDOperand> MemOps;
+ SDOperand FIN = Op.getOperand(1);
+ // Store gp_offset
+ SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(VarArgsGPOffset, MVT::i32),
+ FIN, Op.getOperand(2));
+ MemOps.push_back(Store);
+
+ // Store fp_offset
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(4, getPointerTy()));
+ Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(VarArgsFPOffset, MVT::i32),
+ FIN, Op.getOperand(2));
+ MemOps.push_back(Store);
+
+ // Store ptr to overflow_arg_area
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(4, getPointerTy()));
+ SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
+ Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
+ OVFIN, FIN, Op.getOperand(2));
+ MemOps.push_back(Store);
+
+ // Store ptr to reg_save_area.
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(8, getPointerTy()));
+ SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
+ Store = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
+ RSFIN, FIN, Op.getOperand(2));
+ MemOps.push_back(Store);
+ return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}
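
The four stores above populate the System V x86-64 va_list record at byte offsets 0, 4, 8 and 16, matching the FIN increments of 4, 4 and 8. A reference sketch of the layout on an LP64 target (the struct name is hypothetical; field comments paraphrase the ABI):

#include <cstddef>
#include <cstdio>

struct VAListTagSketch {
  unsigned gp_offset;        // 0 .. 6 * 8: next general-purpose register slot
  unsigned fp_offset;        // 48 .. 48 + 8 * 16: next XMM register slot
  void *overflow_arg_area;   // next argument passed on the stack
  void *reg_save_area;       // base of the register save area
};

int main() {
  std::printf("%zu %zu %zu %zu\n",
              offsetof(VAListTagSketch, gp_offset),         // 0
              offsetof(VAListTagSketch, fp_offset),         // 4
              offsetof(VAListTagSketch, overflow_arg_area), // 8
              offsetof(VAListTagSketch, reg_save_area));    // 16
  return 0;
}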
SDOperand
CC = ISD::SETNE;
break;
}
- bool Flip;
+
unsigned X86CC;
- translateX86CC(CC, true, X86CC, Flip);
- SDOperand Cond = DAG.getNode(Opc, MVT::Flag, Op.getOperand(Flip?2:1),
- Op.getOperand(Flip?1:2));
- SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
- DAG.getConstant(X86CC, MVT::i8), Cond);
+ SDOperand LHS = Op.getOperand(1);
+ SDOperand RHS = Op.getOperand(2);
+ translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
+
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+ SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
+ SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
+ VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
+ SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
+ SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
}
}
}
-/// LowerOperation - Provide custom lowering hooks for some operations.
-///
-SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
- switch (Op.getOpcode()) {
- default: assert(0 && "Should not custom lower this!");
- case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
- case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
- case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
- case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
- case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
- case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
- case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
- case ISD::SHL_PARTS:
- case ISD::SRA_PARTS:
- case ISD::SRL_PARTS: return LowerShift(Op, DAG);
- case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
- case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
- case ISD::FABS: return LowerFABS(Op, DAG);
- case ISD::FNEG: return LowerFNEG(Op, DAG);
- case ISD::SETCC: return LowerSETCC(Op, DAG);
- case ISD::SELECT: return LowerSELECT(Op, DAG);
- case ISD::BRCOND: return LowerBRCOND(Op, DAG);
- case ISD::JumpTable: return LowerJumpTable(Op, DAG);
- case ISD::CALL: return LowerCALL(Op, DAG);
- case ISD::RET: return LowerRET(Op, DAG);
- case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
- case ISD::MEMSET: return LowerMEMSET(Op, DAG);
- case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
- case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG);
- case ISD::VASTART: return LowerVASTART(Op, DAG);
- case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+/// LowerOperation - Provide custom lowering hooks for some operations.
+///
+SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
+ switch (Op.getOpcode()) {
+ default: assert(0 && "Should not custom lower this!");
+ case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
+ case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
+ case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
+ case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
+ case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
+ case ISD::SHL_PARTS:
+ case ISD::SRA_PARTS:
+ case ISD::SRL_PARTS: return LowerShift(Op, DAG);
+ case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
+ case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
+ case ISD::FABS: return LowerFABS(Op, DAG);
+ case ISD::FNEG: return LowerFNEG(Op, DAG);
+ case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode());
+ case ISD::SELECT: return LowerSELECT(Op, DAG);
+ case ISD::BRCOND: return LowerBRCOND(Op, DAG);
+ case ISD::JumpTable: return LowerJumpTable(Op, DAG);
+ case ISD::CALL: return LowerCALL(Op, DAG);
+ case ISD::RET: return LowerRET(Op, DAG);
+ case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
+ case ISD::MEMSET: return LowerMEMSET(Op, DAG);
+ case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
+ case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG);
+ case ISD::VASTART: return LowerVASTART(Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ }
+}
+
+const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch (Opcode) {
+ default: return NULL;
+ case X86ISD::SHLD: return "X86ISD::SHLD";
+ case X86ISD::SHRD: return "X86ISD::SHRD";
+ case X86ISD::FAND: return "X86ISD::FAND";
+ case X86ISD::FXOR: return "X86ISD::FXOR";
+ case X86ISD::FILD: return "X86ISD::FILD";
+ case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
+ case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
+ case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
+ case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
+ case X86ISD::FLD: return "X86ISD::FLD";
+ case X86ISD::FST: return "X86ISD::FST";
+ case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
+ case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
+ case X86ISD::CALL: return "X86ISD::CALL";
+ case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
+ case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
+ case X86ISD::CMP: return "X86ISD::CMP";
+ case X86ISD::COMI: return "X86ISD::COMI";
+ case X86ISD::UCOMI: return "X86ISD::UCOMI";
+ case X86ISD::SETCC: return "X86ISD::SETCC";
+ case X86ISD::CMOV: return "X86ISD::CMOV";
+ case X86ISD::BRCOND: return "X86ISD::BRCOND";
+ case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
+ case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
+ case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
+ case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK";
+ case X86ISD::LOAD_UA: return "X86ISD::LOAD_UA";
+ case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
+ case X86ISD::Wrapper: return "X86ISD::Wrapper";
+ case X86ISD::S2VEC: return "X86ISD::S2VEC";
+ case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
+ case X86ISD::PINSRW: return "X86ISD::PINSRW";
+ }
+}
+
+/// isLegalAddressImmediate - Return true if the integer value or
+/// GlobalValue can be used as the offset of the target addressing mode.
+bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
+  // X86 addressing modes take a sign-extended 32-bit displacement, even in
+  // 64-bit mode.
+  return V >= -(1LL << 31) && V < (1LL << 31);
+}
+
+bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
+  // In 64-bit mode a GV address is 64 bits wide, but the displacement field
+  // is only 32 bits, so a global is directly addressable only in the small
+  // code model. Mac OS X happens to support only the small PIC code model.
+  // FIXME: better support for other OS's.
+ if (Subtarget->is64Bit() && !Subtarget->isTargetDarwin())
+ return false;
+ if (Subtarget->isTargetDarwin()) {
+ Reloc::Model RModel = getTargetMachine().getRelocationModel();
+ if (RModel == Reloc::Static)
+ return true;
+ else if (RModel == Reloc::DynamicNoPIC)
+ return !DarwinGVRequiresExtraLoad(GV);
+ else
+ return false;
+ } else
+ return true;
+}
+
+/// isShuffleMaskLegal - Targets can use this to indicate that they only
+/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
+/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
+/// are assumed to be legal.
+bool
+X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
+ // Only do shuffles on 128-bit vector types for now.
+ if (MVT::getSizeInBits(VT) == 64) return false;
+ return (Mask.Val->getNumOperands() <= 4 ||
+ isSplatMask(Mask.Val) ||
+ isPSHUFHW_PSHUFLWMask(Mask.Val) ||
+ X86::isUNPCKLMask(Mask.Val) ||
+ X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
+ X86::isUNPCKHMask(Mask.Val));
+}
+
+bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
+ MVT::ValueType EVT,
+ SelectionDAG &DAG) const {
+ unsigned NumElts = BVOps.size();
+ // Only do shuffles on 128-bit vector types for now.
+ if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
+ if (NumElts == 2) return true;
+ if (NumElts == 4) {
+ return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) ||
+ isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
+ }
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// X86 Scheduler Hooks
+//===----------------------------------------------------------------------===//
+
+MachineBasicBlock *
+X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
+ MachineBasicBlock *BB) {
+ switch (MI->getOpcode()) {
+ default: assert(false && "Unexpected instr type to insert");
+ case X86::CMOV_FR32:
+ case X86::CMOV_FR64:
+ case X86::CMOV_V4F32:
+ case X86::CMOV_V2F64:
+ case X86::CMOV_V2I64: {
+ // To "insert" a SELECT_CC instruction, we actually have to insert the
+ // diamond control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ ilist<MachineBasicBlock>::iterator It = BB;
+ ++It;
+
+ // thisMBB:
+ // ...
+ // TrueVal = ...
+ // cmpTY ccX, r1, r2
+  //   jCC sinkMBB
+ // fallthrough --> copy0MBB
+ MachineBasicBlock *thisMBB = BB;
+ MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
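+    // Translate the X86 condition code carried in operand 3 of the pseudo
+    // into the matching conditional-branch opcode.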
+ unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
+ BuildMI(BB, Opc, 1).addMBB(sinkMBB);
+ MachineFunction *F = BB->getParent();
+ F->getBasicBlockList().insert(It, copy0MBB);
+ F->getBasicBlockList().insert(It, sinkMBB);
+ // Update machine-CFG edges by first adding all successors of the current
+ // block to the new block which will contain the Phi node for the select.
+    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
+ e = BB->succ_end(); i != e; ++i)
+ sinkMBB->addSuccessor(*i);
+ // Next, remove all successors of the current block, and add the true
+ // and fallthrough blocks as its successors.
+    while (!BB->succ_empty())
+ BB->removeSuccessor(BB->succ_begin());
+ BB->addSuccessor(copy0MBB);
+ BB->addSuccessor(sinkMBB);
+
+ // copy0MBB:
+ // %FalseValue = ...
+ // # fallthrough to sinkMBB
+ BB = copy0MBB;
+
+ // Update machine-CFG edges
+ BB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
+ // ...
+ BB = sinkMBB;
+ BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
+ .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
+
+ delete MI; // The pseudo instruction is gone now.
+ return BB;
}
-}
-const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default: return NULL;
- case X86ISD::SHLD: return "X86ISD::SHLD";
- case X86ISD::SHRD: return "X86ISD::SHRD";
- case X86ISD::FAND: return "X86ISD::FAND";
- case X86ISD::FXOR: return "X86ISD::FXOR";
- case X86ISD::FILD: return "X86ISD::FILD";
- case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
- case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
- case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
- case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
- case X86ISD::FLD: return "X86ISD::FLD";
- case X86ISD::FST: return "X86ISD::FST";
- case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
- case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
- case X86ISD::CALL: return "X86ISD::CALL";
- case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
- case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
- case X86ISD::CMP: return "X86ISD::CMP";
- case X86ISD::TEST: return "X86ISD::TEST";
- case X86ISD::COMI: return "X86ISD::COMI";
- case X86ISD::UCOMI: return "X86ISD::UCOMI";
- case X86ISD::SETCC: return "X86ISD::SETCC";
- case X86ISD::CMOV: return "X86ISD::CMOV";
- case X86ISD::BRCOND: return "X86ISD::BRCOND";
- case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
- case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
- case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
- case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK";
- case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
- case X86ISD::Wrapper: return "X86ISD::Wrapper";
- case X86ISD::S2VEC: return "X86ISD::S2VEC";
- case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
- case X86ISD::PINSRW: return "X86ISD::PINSRW";
+ case X86::FP_TO_INT16_IN_MEM:
+ case X86::FP_TO_INT32_IN_MEM:
+ case X86::FP_TO_INT64_IN_MEM: {
+ // Change the floating point control register to use "round towards zero"
+ // mode when truncating to an integer value.
+ MachineFunction *F = BB->getParent();
+ int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
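+    // FNSTCW stores the current 16-bit x87 control word into the new slot.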
+ addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
+
+    // Load the old value of the control word so it can be restored later...
+ unsigned OldCW =
+ F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
+ addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
+
+    // Store a control word that rounds toward zero (truncation), as required
+    // for C-style FP-to-integer conversion.
+    addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
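+    // 0xC7F = exception mask bits set (low byte) with the rounding-control
+    // field (bits 10-11) set to 11b, i.e. round toward zero.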
+
+ // Reload the modified control word now...
+ addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
+
+ // Restore the memory image of control word to original value
+ addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
+
+ // Get the X86 opcode to use.
+ unsigned Opc;
+ switch (MI->getOpcode()) {
+ default: assert(0 && "illegal opcode!");
+ case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
+ case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
+ case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
+ }
+
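+    // Operands 0-3 of the pseudo encode the x86 address to store to:
+    // base (register or frame index), scale, index, and displacement
+    // (immediate or global address).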
+ X86AddressMode AM;
+ MachineOperand &Op = MI->getOperand(0);
+ if (Op.isRegister()) {
+ AM.BaseType = X86AddressMode::RegBase;
+ AM.Base.Reg = Op.getReg();
+ } else {
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = Op.getFrameIndex();
+ }
+ Op = MI->getOperand(1);
+ if (Op.isImmediate())
+ AM.Scale = Op.getImmedValue();
+ Op = MI->getOperand(2);
+ if (Op.isImmediate())
+ AM.IndexReg = Op.getImmedValue();
+ Op = MI->getOperand(3);
+ if (Op.isGlobalAddress()) {
+ AM.GV = Op.getGlobal();
+ } else {
+ AM.Disp = Op.getImmedValue();
+ }
+ addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
+
+ // Reload the original control word now.
+ addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
+
+ delete MI; // The pseudo instruction is gone now.
+ return BB;
+ }
}
}
+//===----------------------------------------------------------------------===//
+// X86 Optimization Hooks
+//===----------------------------------------------------------------------===//
+
void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
uint64_t Mask,
uint64_t &KnownZero,
}
}
+/// getShuffleScalarElt - Returns the scalar element that will make up the ith
+/// element of the result of the vector shuffle.
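+/// For example, with 4-element operands, i==5 names element 1 of the second
+/// operand, since mask values >= NumElems index into operand 1.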
+static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
+ MVT::ValueType VT = N->getValueType(0);
+ SDOperand PermMask = N->getOperand(2);
+ unsigned NumElems = PermMask.getNumOperands();
+ SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
+ i %= NumElems;
+ if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ return (i == 0)
+ ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
+ } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
+ SDOperand Idx = PermMask.getOperand(i);
+ if (Idx.getOpcode() == ISD::UNDEF)
+ return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
+    return getShuffleScalarElt(V.Val, cast<ConstantSDNode>(Idx)->getValue(),
+                               DAG);
+ }
+ return SDOperand();
+}
+
+/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
+/// node is a GlobalAddress + an offset.
+static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
+  if (N->getOpcode() == X86ISD::Wrapper) {
+    if (GlobalAddressSDNode *GASD =
+          dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
+      GA = GASD->getGlobal();
+      return true;
+    }
+ } else if (N->getOpcode() == ISD::ADD) {
+ SDOperand N1 = N->getOperand(0);
+ SDOperand N2 = N->getOperand(1);
+ if (isGAPlusOffset(N1.Val, GA, Offset)) {
+ ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
+ if (V) {
+ Offset += V->getSignExtended();
+ return true;
+ }
+ } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
+ ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
+ if (V) {
+ Offset += V->getSignExtended();
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/// isConsecutiveLoad - Returns true if N is loading from an address of Base
+/// + Dist * Size.
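+/// Both loads must hang off the same chain (operand 0); operand 1 is the
+/// address, either a frame index or a (global + offset).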
+static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
+ MachineFrameInfo *MFI) {
+ if (N->getOperand(0).Val != Base->getOperand(0).Val)
+ return false;
+
+ SDOperand Loc = N->getOperand(1);
+ SDOperand BaseLoc = Base->getOperand(1);
+ if (Loc.getOpcode() == ISD::FrameIndex) {
+ if (BaseLoc.getOpcode() != ISD::FrameIndex)
+ return false;
+    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
+    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
+ int FS = MFI->getObjectSize(FI);
+ int BFS = MFI->getObjectSize(BFI);
+ if (FS != BFS || FS != Size) return false;
+ return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
+ } else {
+ GlobalValue *GV1 = NULL;
+ GlobalValue *GV2 = NULL;
+ int64_t Offset1 = 0;
+ int64_t Offset2 = 0;
+ bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
+ bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
+ if (isGA1 && isGA2 && GV1 == GV2)
+ return Offset1 == (Offset2 + Dist*Size);
+ }
+
+ return false;
+}
+
+static bool isBaseAlignment16(SDNode *Base, MachineFrameInfo *MFI,
+ const X86Subtarget *Subtarget) {
+  GlobalValue *GV = NULL;
+  int64_t Offset = 0;
+ if (isGAPlusOffset(Base, GV, Offset))
+ return (GV->getAlignment() >= 16 && (Offset % 16) == 0);
+ else {
+ assert(Base->getOpcode() == ISD::FrameIndex && "Unexpected base node!");
+    int BFI = cast<FrameIndexSDNode>(Base)->getIndex();
+ if (BFI < 0)
+ // Fixed objects do not specify alignment, however the offsets are known.
+ return ((Subtarget->getStackAlignment() % 16) == 0 &&
+ (MFI->getObjectOffset(BFI) % 16) == 0);
+ else
+ return MFI->getObjectAlignment(BFI) >= 16;
+ }
+ return false;
+}
+
+
+/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
+/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
+/// if the load addresses are consecutive, non-overlapping, and in the right
+/// order.
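+/// For example, a <4 x float> shuffle whose elements resolve to loads from
+/// p, p+4, p+8, p+12 (in that order) becomes a single 128-bit load from p.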
+static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MVT::ValueType VT = N->getValueType(0);
+ MVT::ValueType EVT = MVT::getVectorBaseType(VT);
+ SDOperand PermMask = N->getOperand(2);
+ int NumElems = (int)PermMask.getNumOperands();
+ SDNode *Base = NULL;
+ for (int i = 0; i < NumElems; ++i) {
+ SDOperand Idx = PermMask.getOperand(i);
+ if (Idx.getOpcode() == ISD::UNDEF) {
+ if (!Base) return SDOperand();
+ } else {
+ SDOperand Arg =
+ getShuffleScalarElt(N, cast<ConstantSDNode>(Idx)->getValue(), DAG);
+ if (!Arg.Val || Arg.getOpcode() != ISD::LOAD)
+ return SDOperand();
+ if (!Base)
+ Base = Arg.Val;
+ else if (!isConsecutiveLoad(Arg.Val, Base,
+ i, MVT::getSizeInBits(EVT)/8,MFI))
+ return SDOperand();
+ }
+ }
+
+ bool isAlign16 = isBaseAlignment16(Base->getOperand(1).Val, MFI, Subtarget);
+ if (isAlign16)
+ return DAG.getLoad(VT, Base->getOperand(0), Base->getOperand(1),
+ Base->getOperand(2));
+ else {
+    // Not 16-byte aligned: fall back to movups, which permits unaligned
+    // 128-bit loads.
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(MVT::v4f32);
+ Tys.push_back(MVT::Other);
+ SmallVector<SDOperand, 3> Ops;
+ Ops.push_back(Base->getOperand(0));
+ Ops.push_back(Base->getOperand(1));
+ Ops.push_back(Base->getOperand(2));
+ return DAG.getNode(ISD::BIT_CONVERT, VT,
+ DAG.getNode(X86ISD::LOAD_UA, Tys, &Ops[0], Ops.size()));
+ }
+}
+
+SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::VECTOR_SHUFFLE:
+ return PerformShuffleCombine(N, DAG, Subtarget);
+ }
+
+ return SDOperand();
+}
+
+//===----------------------------------------------------------------------===//
+// X86 Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+X86TargetLowering::ConstraintType
+X86TargetLowering::getConstraintType(char ConstraintLetter) const {
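+  // Each of these single-letter GCC x86 constraints names a register class;
+  // see getRegClassForInlineAsmConstraint for the concrete registers.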
+ switch (ConstraintLetter) {
+ case 'A':
+ case 'r':
+ case 'R':
+ case 'l':
+ case 'q':
+ case 'Q':
+ case 'x':
+ case 'Y':
+ return C_RegisterClass;
+ default: return TargetLowering::getConstraintType(ConstraintLetter);
+ }
+}
+
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
// FIXME: not handling fp-stack yet!
// FIXME: not handling MMX registers yet ('y' constraint).
switch (Constraint[0]) { // GCC X86 Constraint Letters
- default: break; // Unknown constriant letter
+ default: break; // Unknown constraint letter
+ case 'A': // EAX/EDX
+ if (VT == MVT::i32 || VT == MVT::i64)
+ return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
+ break;
case 'r': // GENERAL_REGS
case 'R': // LEGACY_REGS
if (VT == MVT::i32)
return std::vector<unsigned>();
}
-/// isLegalAddressImmediate - Return true if the integer value or
-/// GlobalValue can be used as the offset of the target addressing mode.
-bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
- // X86 allows a sign-extended 32-bit immediate field.
- return (V > -(1LL << 32) && V < (1LL << 32)-1);
-}
-
-bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
- if (Subtarget->isTargetDarwin()) {
- Reloc::Model RModel = getTargetMachine().getRelocationModel();
- if (RModel == Reloc::Static)
- return true;
- else if (RModel == Reloc::DynamicNoPIC)
- return !DarwinGVRequiresExtraLoad(GV);
- else
- return false;
- } else
- return true;
-}
-
-/// isShuffleMaskLegal - Targets can use this to indicate that they only
-/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
-/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
-/// are assumed to be legal.
-bool
-X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
- // Only do shuffles on 128-bit vector types for now.
- if (MVT::getSizeInBits(VT) == 64) return false;
- return (Mask.Val->getNumOperands() <= 4 ||
- isSplatMask(Mask.Val) ||
- isPSHUFHW_PSHUFLWMask(Mask.Val) ||
- X86::isUNPCKLMask(Mask.Val) ||
- X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
- X86::isUNPCKHMask(Mask.Val));
-}
-
-bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
- MVT::ValueType EVT,
- SelectionDAG &DAG) const {
- unsigned NumElts = BVOps.size();
- // Only do shuffles on 128-bit vector types for now.
- if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
- if (NumElts == 2) return true;
- if (NumElts == 4) {
- return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) ||
- isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
+std::pair<unsigned, const TargetRegisterClass*>
+X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT::ValueType VT) const {
+ // Use the default implementation in TargetLowering to convert the register
+ // constraint into a member of a register class.
+ std::pair<unsigned, const TargetRegisterClass*> Res;
+ Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+
+ // Not found? Bail out.
+ if (Res.second == 0) return Res;
+
+ // Otherwise, check to see if this is a register class of the wrong value
+ // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
+ // turn into {ax},{dx}.
+ if (Res.second->hasType(VT))
+ return Res; // Correct type already, nothing to do.
+
+ // All of the single-register GCC register classes map their values onto
+ // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
+ // really want an 8-bit or 32-bit register, map to the appropriate register
+ // class and return the appropriate register.
+ if (Res.second != X86::GR16RegisterClass)
+ return Res;
+
+ if (VT == MVT::i8) {
+ unsigned DestReg = 0;
+ switch (Res.first) {
+ default: break;
+ case X86::AX: DestReg = X86::AL; break;
+ case X86::DX: DestReg = X86::DL; break;
+ case X86::CX: DestReg = X86::CL; break;
+ case X86::BX: DestReg = X86::BL; break;
+ }
+ if (DestReg) {
+ Res.first = DestReg;
+      Res.second = X86::GR8RegisterClass;
+ }
+ } else if (VT == MVT::i32) {
+ unsigned DestReg = 0;
+ switch (Res.first) {
+ default: break;
+ case X86::AX: DestReg = X86::EAX; break;
+ case X86::DX: DestReg = X86::EDX; break;
+ case X86::CX: DestReg = X86::ECX; break;
+ case X86::BX: DestReg = X86::EBX; break;
+ case X86::SI: DestReg = X86::ESI; break;
+ case X86::DI: DestReg = X86::EDI; break;
+ case X86::BP: DestReg = X86::EBP; break;
+ case X86::SP: DestReg = X86::ESP; break;
+ }
+ if (DestReg) {
+ Res.first = DestReg;
+      Res.second = X86::GR32RegisterClass;
+ }
+ } else if (VT == MVT::i64) {
+ unsigned DestReg = 0;
+ switch (Res.first) {
+ default: break;
+ case X86::AX: DestReg = X86::RAX; break;
+ case X86::DX: DestReg = X86::RDX; break;
+ case X86::CX: DestReg = X86::RCX; break;
+ case X86::BX: DestReg = X86::RBX; break;
+ case X86::SI: DestReg = X86::RSI; break;
+ case X86::DI: DestReg = X86::RDI; break;
+ case X86::BP: DestReg = X86::RBP; break;
+ case X86::SP: DestReg = X86::RSP; break;
+ }
+ if (DestReg) {
+ Res.first = DestReg;
+      Res.second = X86::GR64RegisterClass;
+ }
}
- return false;
+
+ return Res;
}
+