#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
-// FIXME: temporary.
-static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
- cl::desc("Enable fastcc on X86"));
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
setUseUnderscoreLongJmp(true);
}
- // Add legal addressing mode scale values.
- addLegalAddressScale(8);
- addLegalAddressScale(4);
- addLegalAddressScale(2);
- // Enter the ones which require both scale + index last. These are more
- // expensive.
- addLegalAddressScale(9);
- addLegalAddressScale(5);
- addLegalAddressScale(3);
-
// Set up the register classes.
addRegisterClass(MVT::i8, X86::GR8RegisterClass);
addRegisterClass(MVT::i16, X86::GR16RegisterClass);
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
!Subtarget->isTargetCygMing())
setOperationAction(ISD::LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+ if (Subtarget->is64Bit()) {
+ // FIXME: Verify
+ setExceptionPointerRegister(X86::RAX);
+ setExceptionSelectorRegister(X86::RDX);
+ } else {
+ setExceptionPointerRegister(X86::EAX);
+ setExceptionSelectorRegister(X86::EDX);
+ }
+
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
-
- // Use the default implementation.
setOperationAction(ISD::VAARG , MVT::Other, Expand);
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
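+ // On X86-64 a va_list is a structure, so VACOPY must copy the whole object
+ // and is custom lowered; on 32-bit targets a va_list is a simple pointer and
+ // the default expansion (a plain pointer copy) is sufficient.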
+ if (Subtarget->is64Bit())
+ setOperationAction(ISD::VACOPY , MVT::Other, Custom);
+ else
+ setOperationAction(ISD::VACOPY , MVT::Other, Expand);
+
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
if (Subtarget->is64Bit())
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
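+ // Windows-like targets (Cygwin/MinGW) must touch each page as the stack
+ // grows, so dynamic allocas are custom lowered there to a probing alloca
+ // call instead of a simple stack-pointer adjustment.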
+ if (Subtarget->isTargetCygMing())
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+ else
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
if (X86ScalarSSE) {
// Set up the FP register classes.
addLegalFPImmediate(+0.0); // xorps / xorpd
} else {
// Set up the FP register classes.
- addRegisterClass(MVT::f64, X86::RFPRegisterClass);
+ addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+ addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
+ setOperationAction(ISD::UNDEF, MVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FP_ROUND, MVT::f32, Expand);
if (!UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
}
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
addLegalFPImmediate(+0.0); // FLD0
addLegalFPImmediate(+1.0); // FLD1
addLegalFPImmediate(-0.0); // FLD0/FCHS
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
- for (unsigned VT = (unsigned)MVT::Vector + 1;
- VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
+ for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
}
if (Subtarget->hasMMX()) {
addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
+ addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);
    // FIXME: add MMX packed arithmetic
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Expand);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
+
+ setOperationAction(ISD::ADD, MVT::v8i8, Legal);
+ setOperationAction(ISD::ADD, MVT::v4i16, Legal);
+ setOperationAction(ISD::ADD, MVT::v2i32, Legal);
+ setOperationAction(ISD::ADD, MVT::v1i64, Legal);
+
+ setOperationAction(ISD::SUB, MVT::v8i8, Legal);
+ setOperationAction(ISD::SUB, MVT::v4i16, Legal);
+ setOperationAction(ISD::SUB, MVT::v2i32, Legal);
+
+ setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
+ setOperationAction(ISD::MUL, MVT::v4i16, Legal);
+
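+ // The 64-bit MMX logical and load operations are only selected for v1i64;
+ // the narrower MMX types are promoted (bitconverted) to v1i64 so a single
+ // pattern handles all of them.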
+ setOperationAction(ISD::AND, MVT::v8i8, Promote);
+ AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v1i64);
+ setOperationAction(ISD::AND, MVT::v4i16, Promote);
+ AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
+ setOperationAction(ISD::AND, MVT::v2i32, Promote);
+ AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
+ setOperationAction(ISD::AND, MVT::v1i64, Legal);
+
+ setOperationAction(ISD::OR, MVT::v8i8, Promote);
+ AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v1i64);
+ setOperationAction(ISD::OR, MVT::v4i16, Promote);
+ AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v1i64);
+ setOperationAction(ISD::OR, MVT::v2i32, Promote);
+ AddPromotedToType (ISD::OR, MVT::v2i32, MVT::v1i64);
+ setOperationAction(ISD::OR, MVT::v1i64, Legal);
+
+ setOperationAction(ISD::XOR, MVT::v8i8, Promote);
+ AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v1i64);
+ setOperationAction(ISD::XOR, MVT::v4i16, Promote);
+ AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
+ setOperationAction(ISD::XOR, MVT::v2i32, Promote);
+ AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
+ setOperationAction(ISD::XOR, MVT::v1i64, Legal);
+
+ setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
+ AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v1i64);
+ setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
+ AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
+ setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
+ AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
+ setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
+
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
+
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
+
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
}
if (Subtarget->hasSSE1()) {
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
+ setOperationAction(ISD::FABS, MVT::v4f32, Custom);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::ADD, MVT::v16i8, Legal);
setOperationAction(ISD::ADD, MVT::v8i16, Legal);
setOperationAction(ISD::ADD, MVT::v4i32, Legal);
+ setOperationAction(ISD::ADD, MVT::v2i64, Legal);
setOperationAction(ISD::SUB, MVT::v16i8, Legal);
setOperationAction(ISD::SUB, MVT::v8i16, Legal);
setOperationAction(ISD::SUB, MVT::v4i32, Legal);
+ setOperationAction(ISD::SUB, MVT::v2i64, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
setOperationAction(ISD::FADD, MVT::v2f64, Legal);
setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
+ setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
+ setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
-/// GetRetValueLocs - If we are returning a set of values with the specified
-/// value types, determine the set of registers each one will land in. This
-/// sets one element of the ResultRegs array for each element in the VTs array.
-static void GetRetValueLocs(const MVT::ValueType *VTs, unsigned NumVTs,
- unsigned *ResultRegs,
- const X86Subtarget *Subtarget,
- unsigned CallingConv) {
- if (NumVTs == 0) return;
+#include "X86GenCallingConv.inc"
+
+/// LowerRET - Lower an ISD::RET node.
+SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
+ assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
+
+ SmallVector<CCValAssign, 16> RVLocs;
+ unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
+ bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
+ CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);
- if (NumVTs == 2) {
- ResultRegs[0] = VTs[0] == MVT::i64 ? X86::RAX : X86::EAX;
- ResultRegs[1] = VTs[1] == MVT::i64 ? X86::RDX : X86::EDX;
- return;
+
+ // If this is the first return lowered for this function, add the regs to the
+ // liveout set for the function.
+ if (DAG.getMachineFunction().liveout_empty()) {
+ for (unsigned i = 0; i != RVLocs.size(); ++i)
+ if (RVLocs[i].isRegLoc())
+ DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
}
- // Otherwise, NumVTs is 1.
- MVT::ValueType ArgVT = VTs[0];
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand Flag;
- unsigned Reg;
- switch (ArgVT) {
- case MVT::i8: Reg = X86::AL; break;
- case MVT::i16: Reg = X86::AX; break;
- case MVT::i32: Reg = X86::EAX; break;
- case MVT::i64: Reg = X86::RAX; break;
- case MVT::f32:
- case MVT::f64:
- if (Subtarget->is64Bit())
- Reg = X86::XMM0; // FP values in X86-64 go in XMM0.
- else
- Reg = X86::ST0; // FP values in X86-32 go in ST0.
- break;
- default:
- assert(MVT::isVector(ArgVT) && "Unknown return value type!");
- Reg = X86::XMM0; // Int/FP vector result -> XMM0.
- break;
+ // Copy the result values into the output registers.
+ if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
+ RVLocs[0].getLocReg() != X86::ST0) {
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+ Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1),
+ Flag);
+ Flag = Chain.getValue(1);
+ }
+ } else {
+ // We need to handle a destination of ST0 specially, because it isn't really
+ // a register.
+ SDOperand Value = Op.getOperand(1);
+
+ // If this is an FP return with ScalarSSE, we need to move the value from
+ // an XMM register onto the fp-stack.
+ if (X86ScalarSSE) {
+ SDOperand MemLoc;
+
+ // If this is a load into a scalarsse value, don't store the loaded value
+ // back to the stack, only to reload it: just replace the scalar-sse load.
+ if (ISD::isNON_EXTLoad(Value.Val) &&
+ (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
+ Chain = Value.getOperand(0);
+ MemLoc = Value.getOperand(1);
+ } else {
+ // Spill the value to memory and reload it into top of stack.
+ unsigned Size = MVT::getSizeInBits(RVLocs[0].getValVT())/8;
+ MachineFunction &MF = DAG.getMachineFunction();
+ int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
+ MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
+ Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
+ }
+ SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
+ SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
+ Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
+ Chain = Value.getValue(1);
+ }
+
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDOperand Ops[] = { Chain, Value };
+ Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
+ Flag = Chain.getValue(1);
}
- ResultRegs[0] = Reg;
+
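+ // The amount for the callee to pop is encoded as the immediate operand of
+ // the RET instruction, hence the i16 constant.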
+ SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
+ if (Flag.Val)
+ return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
+ else
+ return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
}
+
/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered.
SDNode *X86TargetLowering::
LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall,
unsigned CallingConv, SelectionDAG &DAG) {
- SmallVector<SDOperand, 8> ResultVals;
-
- // We support returning up to two registers.
- MVT::ValueType VTs[2];
- unsigned DestRegs[2];
- unsigned NumRegs = TheCall->getNumValues() - 1;
- assert(NumRegs <= 2 && "Can only return up to two regs!");
- for (unsigned i = 0; i != NumRegs; ++i)
- VTs[i] = TheCall->getValueType(i);
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
+ CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
+ CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
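+ // AnalyzeCallResult runs RetCC_X86 over the call's result types and fills
+ // RVLocs with the register each result comes back in.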
+
- // Determine which register each value should be copied into.
- GetRetValueLocs(VTs, NumRegs, DestRegs, Subtarget, CallingConv);
+ SmallVector<SDOperand, 8> ResultVals;
// Copy all of the result registers out of their specified physreg.
- if (NumRegs != 1 || DestRegs[0] != X86::ST0) {
- for (unsigned i = 0; i != NumRegs; ++i) {
- Chain = DAG.getCopyFromReg(Chain, DestRegs[i], VTs[i],
- InFlag).getValue(1);
+ if (RVLocs.size() != 1 || RVLocs[0].getLocReg() != X86::ST0) {
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
+ RVLocs[i].getValVT(), InFlag).getValue(1);
InFlag = Chain.getValue(2);
ResultVals.push_back(Chain.getValue(0));
}
// before the fp stackifier runs.
// Copy ST0 into an RFP register with FP_GET_RESULT.
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
+ SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
SDOperand GROps[] = { Chain, InFlag };
SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
Chain = RetVal.getValue(1);
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
SDOperand Ops[] = {
- Chain, RetVal, StackSlot, DAG.getValueType(VTs[0]), InFlag
+ Chain, RetVal, StackSlot, DAG.getValueType(RVLocs[0].getValVT()), InFlag
};
Chain = DAG.getNode(X86ISD::FST, MVT::Other, Ops, 5);
- RetVal = DAG.getLoad(VTs[0], Chain, StackSlot, NULL, 0);
+ RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
Chain = RetVal.getValue(1);
}
-
- if (VTs[0] == MVT::f32 && !X86ScalarSSE)
- // FIXME: we would really like to remember that this FP_ROUND
- // operation is okay to eliminate if we allow excess FP precision.
- RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
ResultVals.push_back(RetVal);
}
return VReg;
}
-/// HowToPassArgument - Returns how an formal argument of the specified type
-/// should be passed. If it is through stack, returns the size of the stack
-/// slot; if it is through integer or XMM register, returns the number of
-/// integer or XMM registers are needed.
-static void
-HowToPassCallArgument(MVT::ValueType ObjectVT,
- bool ArgInReg,
- unsigned NumIntRegs, unsigned NumXMMRegs,
- unsigned MaxNumIntRegs,
- unsigned &ObjSize, unsigned &ObjIntRegs,
- unsigned &ObjXMMRegs,
- bool AllowVectors = true) {
- ObjSize = 0;
- ObjIntRegs = 0;
- ObjXMMRegs = 0;
-
- if (MaxNumIntRegs>3) {
- // We don't have too much registers on ia32! :)
- MaxNumIntRegs = 3;
- }
-
- switch (ObjectVT) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i8:
- if (ArgInReg && (NumIntRegs < MaxNumIntRegs))
- ObjIntRegs = 1;
- else
- ObjSize = 1;
- break;
- case MVT::i16:
- if (ArgInReg && (NumIntRegs < MaxNumIntRegs))
- ObjIntRegs = 1;
- else
- ObjSize = 2;
- break;
- case MVT::i32:
- if (ArgInReg && (NumIntRegs < MaxNumIntRegs))
- ObjIntRegs = 1;
- else
- ObjSize = 4;
- break;
- case MVT::i64:
- if (ArgInReg && (NumIntRegs+2 <= MaxNumIntRegs)) {
- ObjIntRegs = 2;
- } else if (ArgInReg && (NumIntRegs+1 <= MaxNumIntRegs)) {
- ObjIntRegs = 1;
- ObjSize = 4;
- } else
- ObjSize = 8;
- case MVT::f32:
- ObjSize = 4;
- break;
- case MVT::f64:
- ObjSize = 8;
- break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- if (AllowVectors) {
- if (NumXMMRegs < 4)
- ObjXMMRegs = 1;
- else
- ObjSize = 16;
- break;
- } else
- assert(0 && "Unhandled argument type [vector]!");
- }
-}
-
SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
bool isStdCall) {
unsigned NumArgs = Op.Val->getNumValues() - 1;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
- SmallVector<SDOperand, 8> ArgValues;
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- // Add DAG nodes to load the arguments... On entry to a function on the X86,
- // the stack frame looks like this:
- //
- // [ESP] -- return address
- // [ESP + 4] -- first argument (leftmost lexically)
- // [ESP + 8] -- second argument, if first argument is <= 4 bytes in size
- // ...
- //
- unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
- unsigned NumSRetBytes= 0; // How much bytes on stack used for struct return
- unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
- unsigned NumIntRegs = 0; // Integer regs used for parameter passing
-
- static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
- };
- static const unsigned GPRArgRegs[][3] = {
- { X86::AL, X86::DL, X86::CL },
- { X86::AX, X86::DX, X86::CX },
- { X86::EAX, X86::EDX, X86::ECX }
- };
- static const TargetRegisterClass* GPRClasses[3] = {
- X86::GR8RegisterClass, X86::GR16RegisterClass, X86::GR32RegisterClass
- };
-
- // Handle regparm attribute
- SmallVector<bool, 8> ArgInRegs(NumArgs, false);
- SmallVector<bool, 8> SRetArgs(NumArgs, false);
- if (!isVarArg) {
- for (unsigned i = 0; i<NumArgs; ++i) {
- unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3+i))->getValue();
- ArgInRegs[i] = (Flags >> 1) & 1;
- SRetArgs[i] = (Flags >> 2) & 1;
- }
- }
-
- for (unsigned i = 0; i < NumArgs; ++i) {
- MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
- unsigned ArgIncrement = 4;
- unsigned ObjSize = 0;
- unsigned ObjXMMRegs = 0;
- unsigned ObjIntRegs = 0;
- unsigned Reg = 0;
- SDOperand ArgValue;
-
- HowToPassCallArgument(ObjectVT,
- ArgInRegs[i],
- NumIntRegs, NumXMMRegs, 3,
- ObjSize, ObjIntRegs, ObjXMMRegs,
- !isStdCall);
-
- if (ObjSize > 4)
- ArgIncrement = ObjSize;
-
- if (ObjIntRegs || ObjXMMRegs) {
- switch (ObjectVT) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32: {
- unsigned RegToUse = GPRArgRegs[ObjectVT-MVT::i8][NumIntRegs];
- Reg = AddLiveIn(MF, RegToUse, GPRClasses[ObjectVT-MVT::i8]);
- ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
- break;
- }
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- assert(!isStdCall && "Unhandled argument type!");
- Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
- ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
- break;
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
+
+ SmallVector<SDOperand, 8> ArgValues;
+ unsigned LastVal = ~0U;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
+ // places.
+ assert(VA.getValNo() != LastVal &&
+ "Don't support value assigned to multiple locs yet");
+ LastVal = VA.getValNo();
+
+ if (VA.isRegLoc()) {
+ MVT::ValueType RegVT = VA.getLocVT();
+ TargetRegisterClass *RC;
+ if (RegVT == MVT::i32)
+ RC = X86::GR32RegisterClass;
+ else {
+ assert(MVT::isVector(RegVT));
+ RC = X86::VR128RegisterClass;
}
- NumIntRegs += ObjIntRegs;
- NumXMMRegs += ObjXMMRegs;
- }
- if (ObjSize) {
- // XMM arguments have to be aligned on 16-byte boundary.
- if (ObjSize == 16)
- ArgOffset = ((ArgOffset + 15) / 16) * 16;
- // Create the SelectionDAG nodes corresponding to a load from this
- // parameter.
- int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
- ArgOffset += ArgIncrement; // Move on to the next argument.
- if (SRetArgs[i])
- NumSRetBytes += ArgIncrement;
+ unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
+ SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
+
+ // If this is an 8 or 16-bit value, it is really passed promoted to 32
+ // bits. Insert an assert[sz]ext to capture this, then truncate to the
+ // right size.
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+
+ if (VA.getLocInfo() != CCValAssign::Full)
+ ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
+
+ ArgValues.push_back(ArgValue);
+ } else {
+ assert(VA.isMemLoc());
+
+ // Create the nodes corresponding to a load from this parameter slot.
+ int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+ VA.getLocMemOffset());
+ SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
+ ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
}
-
- ArgValues.push_back(ArgValue);
}
+
+ unsigned StackSize = CCInfo.getNextStackOffset();
ArgValues.push_back(Root);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg)
- VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
+ VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
if (isStdCall && !isVarArg) {
- BytesToPopOnReturn = ArgOffset; // Callee pops everything..
+ BytesToPopOnReturn = StackSize; // Callee pops everything.
BytesCallerReserves = 0;
} else {
- BytesToPopOnReturn = NumSRetBytes; // Callee pops hidden struct pointer.
- BytesCallerReserves = ArgOffset;
+ BytesToPopOnReturn = 0; // Callee pops nothing.
+
+ // If this is an sret function, the return should pop the hidden pointer.
+ if (NumArgs &&
+ (cast<ConstantSDNode>(Op.getOperand(3))->getValue() &
+ ISD::ParamFlags::StructReturn))
+ BytesToPopOnReturn = 4;
+
+ BytesCallerReserves = StackSize;
}
RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
ReturnAddrIndex = 0; // No return address slot generated yet.
-
- MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);
+ MF.getInfo<X86MachineFunctionInfo>()
+ ->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
- &ArgValues[0], ArgValues.size());
+ &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
SDOperand X86TargetLowering::LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG,
SDOperand Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
- static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
- };
- static const unsigned GPR32ArgRegs[] = {
- X86::EAX, X86::EDX, X86::ECX
- };
-
- // Count how many bytes are to be pushed on the stack.
- unsigned NumBytes = 0;
- // Keep track of the number of integer regs passed so far.
- unsigned NumIntRegs = 0;
- // Keep track of the number of XMM regs passed so far.
- unsigned NumXMMRegs = 0;
- // How much bytes on stack used for struct return
- unsigned NumSRetBytes= 0;
-
- // Handle regparm attribute
- SmallVector<bool, 8> ArgInRegs(NumOps, false);
- SmallVector<bool, 8> SRetArgs(NumOps, false);
- for (unsigned i = 0; i<NumOps; ++i) {
- unsigned Flags =
- dyn_cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue();
- ArgInRegs[i] = (Flags >> 1) & 1;
- SRetArgs[i] = (Flags >> 2) & 1;
- }
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
- // Calculate stack frame size
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
- unsigned ArgIncrement = 4;
- unsigned ObjSize = 0;
- unsigned ObjIntRegs = 0;
- unsigned ObjXMMRegs = 0;
-
- HowToPassCallArgument(Arg.getValueType(),
- ArgInRegs[i],
- NumIntRegs, NumXMMRegs, 3,
- ObjSize, ObjIntRegs, ObjXMMRegs,
- CC != CallingConv::X86_StdCall);
- if (ObjSize > 4)
- ArgIncrement = ObjSize;
-
- NumIntRegs += ObjIntRegs;
- NumXMMRegs += ObjXMMRegs;
- if (ObjSize) {
- // XMM arguments have to be aligned on 16-byte boundary.
- if (ObjSize == 16)
- NumBytes = ((NumBytes + 15) / 16) * 16;
- NumBytes += ArgIncrement;
- }
- }
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
- // Arguments go on the stack in reverse order, as specified by the ABI.
- unsigned ArgOffset = 0;
- NumXMMRegs = 0;
- NumIntRegs = 0;
SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
SmallVector<SDOperand, 8> MemOpChains;
- SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
- unsigned ArgIncrement = 4;
- unsigned ObjSize = 0;
- unsigned ObjIntRegs = 0;
- unsigned ObjXMMRegs = 0;
-
- HowToPassCallArgument(Arg.getValueType(),
- ArgInRegs[i],
- NumIntRegs, NumXMMRegs, 3,
- ObjSize, ObjIntRegs, ObjXMMRegs,
- CC != CallingConv::X86_StdCall);
-
- if (ObjSize > 4)
- ArgIncrement = ObjSize;
-
- if (Arg.getValueType() == MVT::i8 || Arg.getValueType() == MVT::i16) {
- // Promote the integer to 32 bits. If the input type is signed use a
- // sign extend, otherwise use a zero extend.
- unsigned Flags = cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue();
-
- unsigned ExtOp = (Flags & 1) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- Arg = DAG.getNode(ExtOp, MVT::i32, Arg);
- }
- if (ObjIntRegs || ObjXMMRegs) {
- switch (Arg.getValueType()) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i32:
- RegsToPass.push_back(std::make_pair(GPR32ArgRegs[NumIntRegs], Arg));
- break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
- break;
- }
+ SDOperand StackPtr;
- NumIntRegs += ObjIntRegs;
- NumXMMRegs += ObjXMMRegs;
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
+ break;
}
- if (ObjSize) {
- // XMM arguments have to be aligned on 16-byte boundary.
- if (ObjSize == 16)
- ArgOffset = ((ArgOffset + 15) / 16) * 16;
-
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
+
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ } else {
+ assert(VA.isMemLoc());
+ if (StackPtr.Val == 0)
+ StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
+ SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
-
- ArgOffset += ArgIncrement; // Move on to the next argument.
- if (SRetArgs[i])
- NumSRetBytes += ArgIncrement;
}
}
- // Sanity check: we haven't seen NumSRetBytes > 4
- assert((NumSRetBytes<=4) &&
- "Too much space for struct-return pointer requested");
-
+ // If the first argument is an sret pointer, remember it.
+ bool isSRet = NumOps &&
+ (cast<ConstantSDNode>(Op.getOperand(6))->getValue() &
+ ISD::ParamFlags::StructReturn);
+
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
&MemOpChains[0], MemOpChains.size());
if (CC == CallingConv::X86_StdCall) {
if (isVarArg)
- NumBytesForCalleeToPush = NumSRetBytes;
+ NumBytesForCalleeToPush = isSRet ? 4 : 0;
else
NumBytesForCalleeToPush = NumBytes;
} else {
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
- NumBytesForCalleeToPush = NumSRetBytes;
+ NumBytesForCalleeToPush = isSRet ? 4 : 0;
}
NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
//===----------------------------------------------------------------------===//
-// X86-64 C Calling Convention implementation
+// FastCall Calling Convention implementation
//===----------------------------------------------------------------------===//
-
-/// HowToPassX86_64CCCArgument - Returns how an formal argument of the specified
-/// type should be passed. If it is through stack, returns the size of the stack
-/// slot; if it is through integer or XMM register, returns the number of
-/// integer or XMM registers are needed.
-static void
-HowToPassX86_64CCCArgument(MVT::ValueType ObjectVT,
- unsigned NumIntRegs, unsigned NumXMMRegs,
- unsigned &ObjSize, unsigned &ObjIntRegs,
- unsigned &ObjXMMRegs) {
- ObjSize = 0;
- ObjIntRegs = 0;
- ObjXMMRegs = 0;
-
- switch (ObjectVT) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::i64:
- if (NumIntRegs < 6)
- ObjIntRegs = 1;
- else {
- switch (ObjectVT) {
- default: break;
- case MVT::i8: ObjSize = 1; break;
- case MVT::i16: ObjSize = 2; break;
- case MVT::i32: ObjSize = 4; break;
- case MVT::i64: ObjSize = 8; break;
- }
- }
- break;
- case MVT::f32:
- case MVT::f64:
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- if (NumXMMRegs < 8)
- ObjXMMRegs = 1;
- else {
- switch (ObjectVT) {
- default: break;
- case MVT::f32: ObjSize = 4; break;
- case MVT::f64: ObjSize = 8; break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64: ObjSize = 16; break;
- }
- break;
- }
- }
-}
-
+//
+// The X86 'fastcall' calling convention passes up to two integer arguments in
+// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
+// requires that the callee pop its arguments off the stack (allowing proper
+// tail calls), and has the same return value conventions as the C calling
+// convention.
+//
+// This calling convention always arranges for the callee pop value to be 8n+4
+// bytes, which is needed for tail recursion elimination and stack alignment
+// reasons.
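+//
+// For example (assuming the rule above, on non-Windows targets), a fastcall
+// function taking four i32 arguments receives the first two in ECX/EDX and
+// the other two on the stack; the 8 stack bytes are then padded to 12, so the
+// callee-pop amount is of the form 8n+4.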
SDOperand
-X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
- unsigned NumArgs = Op.Val->getNumValues() - 1;
+X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- SmallVector<SDOperand, 8> ArgValues;
-
- // Add DAG nodes to load the arguments... On entry to a function on the X86,
- // the stack frame looks like this:
- //
- // [RSP] -- return address
- // [RSP + 8] -- first nonreg argument (leftmost lexically)
- // [RSP +16] -- second nonreg argument, if 1st argument is <= 8 bytes in size
- // ...
- //
- unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
- unsigned NumIntRegs = 0; // Int regs used for parameter passing.
- unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
-
- static const unsigned GPR8ArgRegs[] = {
- X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
- };
- static const unsigned GPR16ArgRegs[] = {
- X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
- };
- static const unsigned GPR32ArgRegs[] = {
- X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
- };
- static const unsigned GPR64ArgRegs[] = {
- X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
- };
- static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
- };
- for (unsigned i = 0; i < NumArgs; ++i) {
- MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
- unsigned ArgIncrement = 8;
- unsigned ObjSize = 0;
- unsigned ObjIntRegs = 0;
- unsigned ObjXMMRegs = 0;
-
- // FIXME: __int128 and long double support?
- HowToPassX86_64CCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
- ObjSize, ObjIntRegs, ObjXMMRegs);
- if (ObjSize > 8)
- ArgIncrement = ObjSize;
-
- unsigned Reg = 0;
- SDOperand ArgValue;
- if (ObjIntRegs || ObjXMMRegs) {
- switch (ObjectVT) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::i64: {
- TargetRegisterClass *RC = NULL;
- switch (ObjectVT) {
- default: break;
- case MVT::i8:
- RC = X86::GR8RegisterClass;
- Reg = GPR8ArgRegs[NumIntRegs];
- break;
- case MVT::i16:
- RC = X86::GR16RegisterClass;
- Reg = GPR16ArgRegs[NumIntRegs];
- break;
- case MVT::i32:
- RC = X86::GR32RegisterClass;
- Reg = GPR32ArgRegs[NumIntRegs];
- break;
- case MVT::i64:
- RC = X86::GR64RegisterClass;
- Reg = GPR64ArgRegs[NumIntRegs];
- break;
- }
- Reg = AddLiveIn(MF, Reg, RC);
- ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
- break;
- }
- case MVT::f32:
- case MVT::f64:
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64: {
- TargetRegisterClass *RC= (ObjectVT == MVT::f32) ?
- X86::FR32RegisterClass : ((ObjectVT == MVT::f64) ?
- X86::FR64RegisterClass : X86::VR128RegisterClass);
- Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], RC);
- ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
- break;
- }
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);
+
+ SmallVector<SDOperand, 8> ArgValues;
+ unsigned LastVal = ~0U;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
+ // places.
+ assert(VA.getValNo() != LastVal &&
+ "Don't support value assigned to multiple locs yet");
+ LastVal = VA.getValNo();
+
+ if (VA.isRegLoc()) {
+ MVT::ValueType RegVT = VA.getLocVT();
+ TargetRegisterClass *RC;
+ if (RegVT == MVT::i32)
+ RC = X86::GR32RegisterClass;
+ else {
+ assert(MVT::isVector(RegVT));
+ RC = X86::VR128RegisterClass;
}
- NumIntRegs += ObjIntRegs;
- NumXMMRegs += ObjXMMRegs;
- } else if (ObjSize) {
- // XMM arguments have to be aligned on 16-byte boundary.
- if (ObjSize == 16)
- ArgOffset = ((ArgOffset + 15) / 16) * 16;
- // Create the SelectionDAG nodes corresponding to a load from this
- // parameter.
- int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
+
+ unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
+ SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
+
+ // If this is an 8 or 16-bit value, it is really passed promoted to 32
+ // bits. Insert an assert[sz]ext to capture this, then truncate to the
+ // right size.
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+
+ if (VA.getLocInfo() != CCValAssign::Full)
+ ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
+
+ ArgValues.push_back(ArgValue);
+ } else {
+ assert(VA.isMemLoc());
+
+ // Create the nodes corresponding to a load from this parameter slot.
+ int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+ VA.getLocMemOffset());
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
- ArgOffset += ArgIncrement; // Move on to the next argument.
+ ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
}
-
- ArgValues.push_back(ArgValue);
}
+
+ ArgValues.push_back(Root);
- // If the function takes variable number of arguments, make a frame index for
- // the start of the first vararg value... for expansion of llvm.va_start.
- if (isVarArg) {
- // For X86-64, if there are vararg parameters that are passed via
- // registers, then we must store them to their spots on the stack so they
- // may be loaded by deferencing the result of va_next.
- VarArgsGPOffset = NumIntRegs * 8;
- VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
- VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
- RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
-
- // Store the integer parameter registers.
- SmallVector<SDOperand, 8> MemOps;
- SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
- SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
- DAG.getConstant(VarArgsGPOffset, getPointerTy()));
- for (; NumIntRegs != 6; ++NumIntRegs) {
- unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
- X86::GR64RegisterClass);
- SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
- SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(8, getPointerTy()));
- }
+ unsigned StackSize = CCInfo.getNextStackOffset();
- // Now store the XMM (fp + vector) parameter registers.
- FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
- DAG.getConstant(VarArgsFPOffset, getPointerTy()));
- for (; NumXMMRegs != 8; ++NumXMMRegs) {
- unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
- X86::VR128RegisterClass);
- SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
- SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
- DAG.getConstant(16, getPointerTy()));
- }
- if (!MemOps.empty())
- Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
- &MemOps[0], MemOps.size());
+ if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
+ // Make sure the stack size is 8n+4 bytes so that the start of the
+ // arguments and the stack after the return address has been pushed stay
+ // aligned.
+ if ((StackSize & 7) == 0)
+ StackSize += 4;
}
- ArgValues.push_back(Root);
+ VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
+ RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
+ ReturnAddrIndex = 0; // No return address slot generated yet.
+ BytesToPopOnReturn = StackSize; // Callee pops all stack arguments.
+ BytesCallerReserves = 0;
- ReturnAddrIndex = 0; // No return address slot generated yet.
- BytesToPopOnReturn = 0; // Callee pops nothing.
- BytesCallerReserves = ArgOffset;
+ MF.getInfo<X86MachineFunctionInfo>()
+ ->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
- &ArgValues[0], ArgValues.size());
+ &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
-SDOperand
-X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
- unsigned CallingConv) {
+SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
+ unsigned CC) {
SDOperand Chain = Op.getOperand(0);
- bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
- MVT::ValueType RetVT= Op.Val->getValueType(0);
- unsigned NumOps = (Op.getNumOperands() - 5) / 2;
- // Count how many bytes are to be pushed on the stack.
- unsigned NumBytes = 0;
- unsigned NumIntRegs = 0; // Int regs used for parameter passing.
- unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
-
- static const unsigned GPR8ArgRegs[] = {
- X86::DIL, X86::SIL, X86::DL, X86::CL, X86::R8B, X86::R9B
- };
- static const unsigned GPR16ArgRegs[] = {
- X86::DI, X86::SI, X86::DX, X86::CX, X86::R8W, X86::R9W
- };
- static const unsigned GPR32ArgRegs[] = {
- X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
- };
- static const unsigned GPR64ArgRegs[] = {
- X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
- };
- static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
- };
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
- MVT::ValueType ArgVT = Arg.getValueType();
-
- switch (ArgVT) {
- default: assert(0 && "Unknown value type!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::i64:
- if (NumIntRegs < 6)
- ++NumIntRegs;
- else
- NumBytes += 8;
- break;
- case MVT::f32:
- case MVT::f64:
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- if (NumXMMRegs < 8)
- NumXMMRegs++;
- else if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
- NumBytes += 8;
- else {
- // XMM arguments have to be aligned on 16-byte boundary.
- NumBytes = ((NumBytes + 15) / 16) * 16;
- NumBytes += 16;
- }
- break;
- }
+ if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
+ // Make sure the stack size is 8n+4 bytes so that the start of the
+ // arguments and the stack after the return address has been pushed stay
+ // aligned.
+ if ((NumBytes & 7) == 0)
+ NumBytes += 4;
}
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
-
- // Arguments go on the stack in reverse order, as specified by the ABI.
- unsigned ArgOffset = 0;
- NumIntRegs = 0;
- NumXMMRegs = 0;
+
SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
SmallVector<SDOperand, 8> MemOpChains;
- SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
- MVT::ValueType ArgVT = Arg.getValueType();
-
- switch (ArgVT) {
- default: assert(0 && "Unexpected ValueType for argument!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- case MVT::i64:
- if (NumIntRegs < 6) {
- unsigned Reg = 0;
- switch (ArgVT) {
- default: break;
- case MVT::i8: Reg = GPR8ArgRegs[NumIntRegs]; break;
- case MVT::i16: Reg = GPR16ArgRegs[NumIntRegs]; break;
- case MVT::i32: Reg = GPR32ArgRegs[NumIntRegs]; break;
- case MVT::i64: Reg = GPR64ArgRegs[NumIntRegs]; break;
- }
- RegsToPass.push_back(std::make_pair(Reg, Arg));
- ++NumIntRegs;
- } else {
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += 8;
- }
- break;
- case MVT::f32:
- case MVT::f64:
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- if (NumXMMRegs < 8) {
- RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
- NumXMMRegs++;
- } else {
- if (ArgVT != MVT::f32 && ArgVT != MVT::f64) {
- // XMM arguments have to be aligned on 16-byte boundary.
- ArgOffset = ((ArgOffset + 15) / 16) * 16;
- }
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
- ArgOffset += 8;
- else
- ArgOffset += 16;
- }
+
+ SDOperand StackPtr;
+
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ } else {
+ assert(VA.isMemLoc());
+ if (StackPtr.Val == 0)
+ StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
+ SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
}
}
InFlag = Chain.getValue(1);
}
- if (isVarArg) {
- // From AMD64 ABI document:
- // For calls that may call functions that use varargs or stdargs
- // (prototype-less calls or calls to functions containing ellipsis (...) in
- // the declaration) %al is used as hidden argument to specify the number
- // of SSE registers used. The contents of %al do not need to match exactly
- // the number of registers, but must be an ubound on the number of SSE
- // registers used and is in the range 0 - 8 inclusive.
- Chain = DAG.getCopyToReg(Chain, X86::AL,
- DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
- InFlag = Chain.getValue(1);
- }
-
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+ // ELF/PIC requires the GOT pointer to be in the EBX register before making
+ // a function call via the PLT.
+ if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+ Subtarget->isPICStyleGOT()) {
+ Chain = DAG.getCopyToReg(Chain, X86::EBX,
+ DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
+ InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDOperand, 8> Ops;
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
+ // Add an implicit use of the GOT pointer in EBX.
+ if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+ Subtarget->isPICStyleGOT())
+ Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
+
if (InFlag.Val)
Ops.push_back(InFlag);
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
- Ops.push_back(DAG.getConstant(0, getPointerTy()));
+ Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
Ops.push_back(InFlag);
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
- if (RetVT != MVT::Other)
- InFlag = Chain.getValue(1);
-
- SmallVector<SDOperand, 8> ResultVals;
- switch (RetVT) {
- default: assert(0 && "Unknown value type to return!");
- case MVT::Other:
- NodeTys = DAG.getVTList(MVT::Other);
- break;
- case MVT::i8:
- Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i8, MVT::Other);
- break;
- case MVT::i16:
- Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i16, MVT::Other);
- break;
- case MVT::i32:
- Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i32, MVT::Other);
- break;
- case MVT::i64:
- if (Op.Val->getValueType(1) == MVT::i64) {
- // FIXME: __int128 support?
- Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- Chain = DAG.getCopyFromReg(Chain, X86::RDX, MVT::i64,
- Chain.getValue(2)).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
- } else {
- Chain = DAG.getCopyFromReg(Chain, X86::RAX, MVT::i64, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i64, MVT::Other);
- }
- break;
- case MVT::f32:
- case MVT::f64:
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- // FIXME: long double support?
- Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(RetVT, MVT::Other);
- break;
- }
+ InFlag = Chain.getValue(1);
- // Merge everything together with a MERGE_VALUES node.
- ResultVals.push_back(Chain);
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
- &ResultVals[0], ResultVals.size());
- return Res.getValue(Op.ResNo);
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}
+
//===----------------------------------------------------------------------===//
-// Fast & FastCall Calling Convention implementation
-//===----------------------------------------------------------------------===//
-//
-// The X86 'fast' calling convention passes up to two integer arguments in
-// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
-// and requires that the callee pop its arguments off the stack (allowing proper
-// tail calls), and has the same return value conventions as C calling convs.
-//
-// This calling convention always arranges for the callee pop value to be 8n+4
-// bytes, which is needed for tail recursion elimination and stack alignment
-// reasons.
-//
-// Note that this can be enhanced in the future to pass fp vals in registers
-// (when we have a global fp allocator) and do other tricks.
-//
+// X86-64 C Calling Convention implementation
//===----------------------------------------------------------------------===//
-// The X86 'fastcall' calling convention passes up to two integer arguments in
-// registers (an appropriate portion of ECX/EDX), passes arguments in C order,
-// and requires that the callee pop its arguments off the stack (allowing proper
-// tail calls), and has the same return value conventions as C calling convs.
-//
-// This calling convention always arranges for the callee pop value to be 8n+4
-// bytes, which is needed for tail recursion elimination and stack alignment
-// reasons.
-
SDOperand
-X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG,
- bool isFastCall) {
- unsigned NumArgs = Op.Val->getNumValues()-1;
+X86TargetLowering::LowerX86_64CCCArguments(SDOperand Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
- SmallVector<SDOperand, 8> ArgValues;
-
- // Add DAG nodes to load the arguments... On entry to a function the stack
- // frame looks like this:
- //
- // [ESP] -- return address
- // [ESP + 4] -- first nonreg argument (leftmost lexically)
- // [ESP + 8] -- second nonreg argument, if 1st argument is <= 4 bytes in size
- // ...
- unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
-
- // Keep track of the number of integer regs passed so far. This can be either
- // 0 (neither EAX/ECX or EDX used), 1 (EAX/ECX is used) or 2 (EAX/ECX and EDX
- // are both used).
- unsigned NumIntRegs = 0;
- unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
-
- static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
- };
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- static const unsigned GPRArgRegs[][2][2] = {
- {{ X86::AL, X86::DL }, { X86::CL, X86::DL }},
- {{ X86::AX, X86::DX }, { X86::CX, X86::DX }},
- {{ X86::EAX, X86::EDX }, { X86::ECX, X86::EDX }}
+ static const unsigned GPR64ArgRegs[] = {
+ X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
-
- static const TargetRegisterClass* GPRClasses[3] = {
- X86::GR8RegisterClass, X86::GR16RegisterClass, X86::GR32RegisterClass
+ static const unsigned XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
- unsigned GPRInd = (isFastCall ? 1 : 0);
- for (unsigned i = 0; i < NumArgs; ++i) {
- MVT::ValueType ObjectVT = Op.getValue(i).getValueType();
- unsigned ArgIncrement = 4;
- unsigned ObjSize = 0;
- unsigned ObjXMMRegs = 0;
- unsigned ObjIntRegs = 0;
- unsigned Reg = 0;
- SDOperand ArgValue;
-
- HowToPassCallArgument(ObjectVT,
- true, // Use as much registers as possible
- NumIntRegs, NumXMMRegs,
- (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS),
- ObjSize, ObjIntRegs, ObjXMMRegs,
- !isFastCall);
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);
+
+ SmallVector<SDOperand, 8> ArgValues;
+ unsigned LastVal = ~0U;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
+ // places.
+ assert(VA.getValNo() != LastVal &&
+ "Don't support value assigned to multiple locs yet");
+ LastVal = VA.getValNo();
- if (ObjSize > 4)
- ArgIncrement = ObjSize;
-
- if (ObjIntRegs || ObjXMMRegs) {
- switch (ObjectVT) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32: {
- unsigned RegToUse = GPRArgRegs[ObjectVT-MVT::i8][GPRInd][NumIntRegs];
- Reg = AddLiveIn(MF, RegToUse, GPRClasses[ObjectVT-MVT::i8]);
- ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
- break;
- }
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64: {
- assert(!isFastCall && "Unhandled argument type!");
- Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
- ArgValue = DAG.getCopyFromReg(Root, Reg, ObjectVT);
- break;
- }
+ if (VA.isRegLoc()) {
+ MVT::ValueType RegVT = VA.getLocVT();
+ TargetRegisterClass *RC;
+ if (RegVT == MVT::i32)
+ RC = X86::GR32RegisterClass;
+ else if (RegVT == MVT::i64)
+ RC = X86::GR64RegisterClass;
+ else if (RegVT == MVT::f32)
+ RC = X86::FR32RegisterClass;
+ else if (RegVT == MVT::f64)
+ RC = X86::FR64RegisterClass;
+ else {
+ assert(MVT::isVector(RegVT));
+ if (MVT::getSizeInBits(RegVT) == 64) {
+ RC = X86::GR64RegisterClass; // MMX values are passed in GPRs.
+ RegVT = MVT::i64;
+ } else
+ RC = X86::VR128RegisterClass;
}
- NumIntRegs += ObjIntRegs;
- NumXMMRegs += ObjXMMRegs;
- }
- if (ObjSize) {
- // XMM arguments have to be aligned on 16-byte boundary.
- if (ObjSize == 16)
- ArgOffset = ((ArgOffset + 15) / 16) * 16;
- // Create the SelectionDAG nodes corresponding to a load from this
- // parameter.
- int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValue = DAG.getLoad(Op.Val->getValueType(i), Root, FIN, NULL, 0);
+
+ unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
+ SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
- ArgOffset += ArgIncrement; // Move on to the next argument.
+ // If this is an 8 or 16-bit value, it is really passed promoted to 32
+ // bits. Insert an assert[sz]ext to capture this, then truncate to the
+ // right size.
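+ // E.g. an i8 argument arrives in the low byte of a 32-bit register;
+ // AssertZext records that the upper 24 bits are zero, so the truncate
+ // below is known to be lossless.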
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+
+ if (VA.getLocInfo() != CCValAssign::Full)
+ ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
+
+ // Handle MMX values passed in GPRs.
+ if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
+ MVT::getSizeInBits(RegVT) == 64)
+ ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
+
+ ArgValues.push_back(ArgValue);
+ } else {
+ assert(VA.isMemLoc());
+
+ // Create the nodes corresponding to a load from this parameter slot.
+ int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+ VA.getLocMemOffset());
+ SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
+ ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
}
-
- ArgValues.push_back(ArgValue);
}
+
+ unsigned StackSize = CCInfo.getNextStackOffset();
+
+ // If the function takes variable number of arguments, make a frame index for
+ // the start of the first vararg value... for expansion of llvm.va_start.
+ if (isVarArg) {
+ unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
+ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+
+ // For X86-64, if there are vararg parameters that are passed via
+ // registers, then we must store them to their spots on the stack so they
+ // may be loaded by dereferencing the result of va_next.
+ VarArgsGPOffset = NumIntRegs * 8;
+ VarArgsFPOffset = 6 * 8 + NumXMMRegs * 16;
+ VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
+ RegSaveFrameIndex = MFI->CreateStackObject(6 * 8 + 8 * 16, 16);
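+ // The register save area is laid out as (16-byte aligned, 176 bytes):
+ //   bytes 0..47:   RDI, RSI, RDX, RCX, R8, R9 (8 bytes each)
+ //   bytes 48..175: XMM0..XMM7 (16 bytes each)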
- ArgValues.push_back(Root);
+ // Store the integer parameter registers.
+ SmallVector<SDOperand, 8> MemOps;
+ SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
+ SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
+ DAG.getConstant(VarArgsGPOffset, getPointerTy()));
+ for (; NumIntRegs != 6; ++NumIntRegs) {
+ unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
+ X86::GR64RegisterClass);
+ SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
+ SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(8, getPointerTy()));
+ }
- // Make sure the instruction takes 8n+4 bytes to make sure the start of the
- // arguments and the arguments after the retaddr has been pushed are aligned.
- if ((ArgOffset & 7) == 0)
- ArgOffset += 4;
+ // Now store the XMM (fp + vector) parameter registers.
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
+ DAG.getConstant(VarArgsFPOffset, getPointerTy()));
+ for (; NumXMMRegs != 8; ++NumXMMRegs) {
+ unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
+ X86::VR128RegisterClass);
+ SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
+ SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
+ MemOps.push_back(Store);
+ FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
+ DAG.getConstant(16, getPointerTy()));
+ }
+ if (!MemOps.empty())
+ Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOps[0], MemOps.size());
+ }
- VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
- RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
- ReturnAddrIndex = 0; // No return address slot generated yet.
- BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
- BytesCallerReserves = 0;
+ ArgValues.push_back(Root);
- MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);
-
- // Finally, inform the code generator which regs we return values in.
- switch (getValueType(MF.getFunction()->getReturnType())) {
- default: assert(0 && "Unknown type!");
- case MVT::isVoid: break;
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- MF.addLiveOut(X86::EAX);
- break;
- case MVT::i64:
- MF.addLiveOut(X86::EAX);
- MF.addLiveOut(X86::EDX);
- break;
- case MVT::f32:
- case MVT::f64:
- MF.addLiveOut(X86::ST0);
- break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- assert(!isFastCall && "Unknown result type");
- MF.addLiveOut(X86::XMM0);
- break;
- }
+ ReturnAddrIndex = 0; // No return address slot generated yet.
+ BytesToPopOnReturn = 0; // Callee pops nothing.
+ BytesCallerReserves = StackSize;
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
- &ArgValues[0], ArgValues.size());
+ &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
-SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
- unsigned CC) {
+SDOperand
+X86TargetLowering::LowerX86_64CCCCallTo(SDOperand Op, SelectionDAG &DAG,
+ unsigned CC) {
SDOperand Chain = Op.getOperand(0);
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
- MVT::ValueType RetVT= Op.Val->getValueType(0);
- unsigned NumOps = (Op.getNumOperands() - 5) / 2;
-
- // Count how many bytes are to be pushed on the stack.
- unsigned NumBytes = 0;
-
- // Keep track of the number of integer regs passed so far. This can be either
- // 0 (neither EAX/ECX or EDX used), 1 (EAX/ECX is used) or 2 (EAX/ECX and EDX
- // are both used).
- unsigned NumIntRegs = 0;
- unsigned NumXMMRegs = 0; // XMM regs used for parameter passing.
-
- static const unsigned GPRArgRegs[][2][2] = {
- {{ X86::AL, X86::DL }, { X86::CL, X86::DL }},
- {{ X86::AX, X86::DX }, { X86::CX, X86::DX }},
- {{ X86::EAX, X86::EDX }, { X86::ECX, X86::EDX }}
- };
- static const unsigned XMMArgRegs[] = {
- X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
- };
-
- bool isFastCall = CC == CallingConv::X86_FastCall;
- unsigned GPRInd = isFastCall ? 1 : 0;
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
-
- switch (Arg.getValueType()) {
- default: assert(0 && "Unknown value type!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32: {
- unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS);
- if (NumIntRegs < MaxNumIntRegs) {
- ++NumIntRegs;
- break;
- }
- } // Fall through
- case MVT::f32:
- NumBytes += 4;
- break;
- case MVT::f64:
- NumBytes += 8;
- break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- assert(!isFastCall && "Unknown value type!");
- if (NumXMMRegs < 4)
- NumXMMRegs++;
- else {
- // XMM arguments have to be aligned on 16-byte boundary.
- NumBytes = ((NumBytes + 15) / 16) * 16;
- NumBytes += 16;
- }
- break;
- }
- }
-
- // Make sure the instruction takes 8n+4 bytes to make sure the start of the
- // arguments and the arguments after the retaddr has been pushed are aligned.
- if ((NumBytes & 7) == 0)
- NumBytes += 4;
-
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
- // Arguments go on the stack in reverse order, as specified by the ABI.
- unsigned ArgOffset = 0;
- NumIntRegs = 0;
SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
SmallVector<SDOperand, 8> MemOpChains;
- SDOperand StackPtr = DAG.getRegister(X86StackPtr, getPointerTy());
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
-
- switch (Arg.getValueType()) {
- default: assert(0 && "Unexpected ValueType for argument!");
- case MVT::i8:
- case MVT::i16:
- case MVT::i32: {
- unsigned MaxNumIntRegs = (isFastCall ? 2 : FASTCC_NUM_INT_ARGS_INREGS);
- if (NumIntRegs < MaxNumIntRegs) {
- unsigned RegToUse =
- GPRArgRegs[Arg.getValueType()-MVT::i8][GPRInd][NumIntRegs];
- RegsToPass.push_back(std::make_pair(RegToUse, Arg));
- ++NumIntRegs;
- break;
- }
- } // Fall through
- case MVT::f32: {
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += 4;
+
+ SDOperand StackPtr;
+
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
break;
}
- case MVT::f64: {
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
+
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ } else {
+ assert(VA.isMemLoc());
+ if (StackPtr.Val == 0)
+ StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
+ SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += 8;
- break;
- }
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- assert(!isFastCall && "Unexpected ValueType for argument!");
- if (NumXMMRegs < 4) {
- RegsToPass.push_back(std::make_pair(XMMArgRegs[NumXMMRegs], Arg));
- NumXMMRegs++;
- } else {
- // XMM arguments have to be aligned on 16-byte boundary.
- ArgOffset = ((ArgOffset + 15) / 16) * 16;
- SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- ArgOffset += 16;
- }
- break;
}
}
-
+
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
&MemOpChains[0], MemOpChains.size());
InFlag = Chain.getValue(1);
}
+ if (isVarArg) {
+ // From AMD64 ABI document:
+ // For calls that may call functions that use varargs or stdargs
+ // (prototype-less calls or calls to functions containing ellipsis (...) in
+ // the declaration) %al is used as hidden argument to specify the number
+ // of SSE registers used. The contents of %al do not need to match exactly
+ // the number of registers, but must be an upper bound on the number of SSE
+ // registers used and is in the range 0 - 8 inclusive.
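+ // For example, a call to printf("%f\n", x) passes x in XMM0, so %al is
+ // set to 1 before the call.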
+
+ // Count the number of XMM registers allocated.
+ static const unsigned XMMArgRegs[] = {
+ X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
+ X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
+ };
+ unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+
+ Chain = DAG.getCopyToReg(Chain, X86::AL,
+ DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
// If the callee is a GlobalAddress node (quite common, every direct call is)
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
- if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
- getTargetMachine(), true))
+ if (getTargetMachine().getCodeModel() != CodeModel::Large
+ && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
+ getTargetMachine(), true))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
-
- // ELF / PIC requires GOT in the EBX register before function calls via PLT
- // GOT pointer.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT()) {
- Chain = DAG.getCopyToReg(Chain, X86::EBX,
- DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
- InFlag);
- InFlag = Chain.getValue(1);
- }
+ if (getTargetMachine().getCodeModel() != CodeModel::Large)
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
- // Add an implicit use GOT pointer in EBX.
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
- Subtarget->isPICStyleGOT())
- Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
-
if (InFlag.Val)
Ops.push_back(InFlag);
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
- Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
+ Ops.push_back(DAG.getConstant(0, getPointerTy()));
Ops.push_back(InFlag);
- Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
- if (RetVT != MVT::Other)
- InFlag = Chain.getValue(1);
-
- SmallVector<SDOperand, 8> ResultVals;
- switch (RetVT) {
- default: assert(0 && "Unknown value type to return!");
- case MVT::Other:
- NodeTys = DAG.getVTList(MVT::Other);
- break;
- case MVT::i8:
- Chain = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i8, MVT::Other);
- break;
- case MVT::i16:
- Chain = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i16, MVT::Other);
- break;
- case MVT::i32:
- if (Op.Val->getValueType(1) == MVT::i32) {
- Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- Chain = DAG.getCopyFromReg(Chain, X86::EDX, MVT::i32,
- Chain.getValue(2)).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
- } else {
- Chain = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(MVT::i32, MVT::Other);
- }
- break;
- case MVT::v16i8:
- case MVT::v8i16:
- case MVT::v4i32:
- case MVT::v2i64:
- case MVT::v4f32:
- case MVT::v2f64:
- if (isFastCall) {
- assert(0 && "Unknown value type to return!");
- } else {
- Chain = DAG.getCopyFromReg(Chain, X86::XMM0, RetVT, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- NodeTys = DAG.getVTList(RetVT, MVT::Other);
- }
- break;
- case MVT::f32:
- case MVT::f64: {
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
- SmallVector<SDOperand, 8> Ops;
- Ops.push_back(Chain);
- Ops.push_back(InFlag);
- SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys,
- &Ops[0], Ops.size());
- Chain = RetVal.getValue(1);
- InFlag = RetVal.getValue(2);
- if (X86ScalarSSE) {
- // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
- // shouldn't be necessary except that RFP cannot be live across
- // multiple blocks. When stackifier is fixed, they can be uncoupled.
- MachineFunction &MF = DAG.getMachineFunction();
- int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
- SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
- Tys = DAG.getVTList(MVT::Other);
- Ops.clear();
- Ops.push_back(Chain);
- Ops.push_back(RetVal);
- Ops.push_back(StackSlot);
- Ops.push_back(DAG.getValueType(RetVT));
- Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
- RetVal = DAG.getLoad(RetVT, Chain, StackSlot, NULL, 0);
- Chain = RetVal.getValue(1);
- }
+ Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
+}
- if (RetVT == MVT::f32 && !X86ScalarSSE)
- // FIXME: we would really like to remember that this FP_ROUND
- // operation is okay to eliminate if we allow excess FP precision.
- RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
- ResultVals.push_back(RetVal);
- NodeTys = DAG.getVTList(RetVT, MVT::Other);
- break;
- }
- }
+//===----------------------------------------------------------------------===//
+// Other Lowering Hooks
+//===----------------------------------------------------------------------===//
- // Merge everything together with a MERGE_VALUES node.
- ResultVals.push_back(Chain);
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
- &ResultVals[0], ResultVals.size());
- return Res.getValue(Op.ResNo);
-}
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
if (ReturnAddrIndex == 0) {
return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
}
-/// isCommutedSHUFP - Returns true if the shuffle mask is except
+/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
assert(N->getOpcode() == ISD::BUILD_VECTOR);
unsigned NumElems = N->getNumOperands();
- if (NumElems != 4 && NumElems != 8 && NumElems != 16)
+ if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
return false;
for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
return true;
}
+/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
+/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
+/// <2, 2, 3, 3>
+bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
+ assert(N->getOpcode() == ISD::BUILD_VECTOR);
+
+ unsigned NumElems = N->getNumOperands();
+ if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
+ return false;
+
+ for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
+ SDOperand BitI = N->getOperand(i);
+ SDOperand BitI1 = N->getOperand(i + 1);
+
+ if (!isUndefOrEqual(BitI, j))
+ return false;
+ if (!isUndefOrEqual(BitI1, j))
+ return false;
+ }
+
+ return true;
+}
+
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
return HasHi;
}
+/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies an identity operation on the LHS or RHS.
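+/// E.g. with 4 elements, mask <0, 1, 2, 3> is an identity on the LHS and
+/// <4, 5, 6, 7> (with RHS = true) is an identity on the RHS.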
+static bool isIdentityMask(SDNode *N, bool RHS = false) {
+ unsigned NumElems = N->getNumOperands();
+ for (unsigned i = 0; i < NumElems; ++i)
+ if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
+ return false;
+ return true;
+}
+
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
MVT::ValueType MaskVT = Mask.getValueType();
- MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
unsigned NumElems = Mask.getNumOperands();
SmallVector<SDOperand, 8> MaskVec;
/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
- if (N->getOpcode() != ISD::BUILD_VECTOR)
+ if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
SDOperand V1 = N->getOperand(0);
return true;
}
+/// isZeroNode - Returns true if Elt is a constant zero or a floating point
+/// constant +0.0.
+static inline bool isZeroNode(SDOperand Elt) {
+ return ((isa<ConstantSDNode>(Elt) &&
+ cast<ConstantSDNode>(Elt)->getValue() == 0) ||
+ (isa<ConstantFPSDNode>(Elt) &&
+ cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
+}
+
+/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
+/// to a zero vector.
+static bool isZeroShuffle(SDNode *N) {
+ if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
+ return false;
+
+ SDOperand V1 = N->getOperand(0);
+ SDOperand V2 = N->getOperand(1);
+ SDOperand Mask = N->getOperand(2);
+ unsigned NumElems = Mask.getNumOperands();
+ for (unsigned i = 0; i != NumElems; ++i) {
+ SDOperand Arg = Mask.getOperand(i);
+ if (Arg.getOpcode() != ISD::UNDEF) {
+ unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
+ if (Idx < NumElems) {
+ unsigned Opc = V1.Val->getOpcode();
+ if (Opc == ISD::UNDEF)
+ continue;
+ if (Opc != ISD::BUILD_VECTOR ||
+ !isZeroNode(V1.Val->getOperand(Idx)))
+ return false;
+ } else if (Idx >= NumElems) {
+ unsigned Opc = V2.Val->getOpcode();
+ if (Opc == ISD::UNDEF)
+ continue;
+ if (Opc != ISD::BUILD_VECTOR ||
+ !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/// getZeroVector - Returns a vector of specified type with all zero elements.
+///
+static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
+ assert(MVT::isVector(VT) && "Expected a vector type");
+ unsigned NumElems = MVT::getVectorNumElements(VT);
+ MVT::ValueType EVT = MVT::getVectorElementType(VT);
+ bool isFP = MVT::isFloatingPoint(EVT);
+ SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
+ SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero);
+ return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
+}
+
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 points to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
MaskVec.push_back(DAG.getConstant(i, BaseVT));
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
unsigned Half = NumElems/2;
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i != Half; ++i) {
return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
-/// getZeroVector - Returns a vector of specified type with all zero elements.
-///
-static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
- assert(MVT::isVector(VT) && "Expected a vector type");
- unsigned NumElems = getVectorNumElements(VT);
- MVT::ValueType EVT = MVT::getVectorBaseType(VT);
- bool isFP = MVT::isFloatingPoint(EVT);
- SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
- SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero);
- return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
-}
-
/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}
-/// isZeroNode - Returns true if Elt is a constant zero or a floating point
-/// constant +0.0.
-static inline bool isZeroNode(SDOperand Elt) {
- return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getValue() == 0) ||
- (isa<ConstantFPSDNode>(Elt) &&
- cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
-}
-
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
-/// vector and zero or undef vector.
+/// vector and a zero or undef vector.
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
unsigned NumElems, unsigned Idx,
bool isZero, SelectionDAG &DAG) {
SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
SDOperand Zero = DAG.getConstant(0, EVT);
SmallVector<SDOperand, 8> MaskVec(NumElems, Zero);
MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}
-/// LowerBuildVectorv16i8 - Custom lower build_vector of v8i16.
+/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
return Op;
MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EVT = MVT::getVectorBaseType(VT);
+ MVT::ValueType EVT = MVT::getVectorElementType(VT);
unsigned EVTBits = MVT::getSizeInBits(EVT);
unsigned NumElems = Op.getNumOperands();
}
}
- if (NumNonZero == 0)
- // Must be a mix of zero and undef. Return a zero vector.
- return getZeroVector(VT, DAG);
+ if (NumNonZero == 0) {
+ if (NumZero == 0)
+ // All-undef vector. Return an UNDEF node.
+ return DAG.getNode(ISD::UNDEF, VT);
+ else
+ // A mix of zero and undef. Return a zero vector.
+ return getZeroVector(VT, DAG);
+ }
// Splat is obviously ok. Let legalizer expand it to a shuffle.
if (Values.size() == 1)
Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
DAG);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i < NumElems; i++)
MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
}
}
- // Let legalizer expand 2-wide build_vector's.
+ // Let legalizer expand 2-wide build_vectors.
if (EVTBits == 64)
return SDOperand();
// If element VT is < 32 bits, convert it to inserts into a zero vector.
- if (EVTBits == 8) {
+ if (EVTBits == 8 && NumElems == 16) {
SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
*this);
if (V.Val) return V;
}
- if (EVTBits == 16) {
+ if (EVTBits == 16 && NumElems == 8) {
SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
*this);
if (V.Val) return V;
if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
return V[0];
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
bool Reverse = (NonZeros & 0x3) == 2;
for (unsigned i = 0; i < 2; ++i)
if (isUndefShuffle(Op.Val))
return DAG.getNode(ISD::UNDEF, VT);
+ if (isZeroShuffle(Op.Val))
+ return getZeroVector(VT, DAG);
+
+ if (isIdentityMask(PermMask.Val))
+ return V1;
+ else if (isIdentityMask(PermMask.Val, true))
+ return V2;
+
if (isSplatMask(PermMask.Val)) {
if (NumElems <= 4) return Op;
// Promote it to a v4i32 splat.
}
if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
+ X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
X86::isUNPCKLMask(PermMask.Val) ||
X86::isUNPCKHMask(PermMask.Val))
return Op;
// Commute is back and try unpck* again.
Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
+ X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
X86::isUNPCKLMask(PermMask.Val) ||
X86::isUNPCKHMask(PermMask.Val))
return Op;
return Op;
}
- if (X86::isSHUFPMask(PermMask.Val))
+ if (X86::isSHUFPMask(PermMask.Val) &&
+ MVT::getSizeInBits(VT) != 64) // Don't do this for MMX.
return Op;
// Handle v8i16 shuffle high / low shuffle node pair.
if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i != 4; ++i)
MaskVec.push_back(PermMask.getOperand(i));
}
}
- if (NumElems == 4) {
+ if (NumElems == 4 &&
+ // Don't do this for MMX.
+ MVT::getSizeInBits(VT) != 64) {
MVT::ValueType MaskVT = PermMask.getValueType();
- MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
SmallVector<std::pair<int, int>, 8> Locs;
Locs.reserve(NumElems);
SmallVector<SDOperand, 8> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
// SHUFPS the element to the lowest double word, then movss.
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
- IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
+ IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
- IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
+ IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
// Transform it so it match pinsrw which expects a 16-bit value in a GR32
// as its second argument.
MVT::ValueType VT = Op.getValueType();
- MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(VT);
SDOperand N0 = Op.getOperand(0);
SDOperand N1 = Op.getOperand(1);
SDOperand N2 = Op.getOperand(2);
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
- N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
+ N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(),getPointerTy());
return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
} else if (MVT::getSizeInBits(BaseVT) == 32) {
unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
// Use a movss.
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
MaskVec.push_back(DAG.getConstant(4, BaseVT));
for (unsigned i = 1; i <= 3; ++i)
return Result;
}
+// Lower ISD::GlobalTLSAddress using the "general dynamic" model
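+// The emitted sequence is roughly:
+//   leal symbol@TLSGD(,%ebx,1), %eax
+//   call ___tls_get_addr
+// with the resulting address returned in %eax.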
+static SDOperand
+LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
+ const MVT::ValueType PtrVT) {
+ SDOperand InFlag;
+ SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
+ DAG.getNode(X86ISD::GlobalBaseReg,
+ PtrVT), InFlag);
+ InFlag = Chain.getValue(1);
+
+ // emit leal symbol@TLSGD(,%ebx,1), %eax
+ SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
+ SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ GA->getValueType(0),
+ GA->getOffset());
+ SDOperand Ops[] = { Chain, TGA, InFlag };
+ SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
+ InFlag = Result.getValue(2);
+ Chain = Result.getValue(1);
+
+ // call ___tls_get_addr. This function receives its argument in
+ // the register EAX.
+ Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
+ InFlag = Chain.getValue(1);
+
+ NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDOperand Ops1[] = { Chain,
+ DAG.getTargetExternalSymbol("___tls_get_addr",
+ PtrVT),
+ DAG.getRegister(X86::EAX, PtrVT),
+ DAG.getRegister(X86::EBX, PtrVT),
+ InFlag };
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
+ InFlag = Chain.getValue(1);
+
+ return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
+}
+
+// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
+// "local exec" model.
+static SDOperand
+LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
+ const MVT::ValueType PtrVT) {
+ // Get the Thread Pointer
+ SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
+ // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
+ // exec)
+ SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ GA->getValueType(0),
+ GA->getOffset());
+ SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);
+
+ if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
+ Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0);
+
+ // The address of the thread local variable is the add of the thread
+ // pointer with the offset of the variable.
+ return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
+}
+
+SDOperand
+X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
+ // TODO: implement the "local dynamic" model
+ // TODO: implement the "initial exec"model for pic executables
+ assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
+ "TLS not implemented for non-ELF and 64-bit targets");
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+ // If the relocation model is PIC, use the "General Dynamic" TLS model;
+ // otherwise use the "Local Exec" TLS model.
+ if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
+ else
+ return LowerToTLSExecModel(GA, DAG, getPointerTy());
+}
+
SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
if (X86ScalarSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
else
- Tys = DAG.getVTList(MVT::f64, MVT::Other);
+ Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
SmallVector<SDOperand, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(StackSlot);
if (X86ScalarSSE) {
assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other);
+ SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDOperand Ops[] = {
Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
};
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
- const Type *OpNTy = MVT::getTypeForValueType(VT);
+ MVT::ValueType EltVT = VT;
+ if (MVT::isVector(VT))
+ EltVT = MVT::getVectorElementType(VT);
+ const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
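+ // The mask clears only the sign bit (e.g. 0x7FFFFFFFFFFFFFFF for f64),
+ // so ANDing a value with it implements fabs.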
- if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ if (EltVT == MVT::f64) {
+ Constant *C = ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)));
+ CV.push_back(C);
+ CV.push_back(C);
} else {
- CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ Constant *C = ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)));
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
- const Type *OpNTy = MVT::getTypeForValueType(VT);
+ MVT::ValueType EltVT = VT;
+ if (MVT::isVector(VT))
+ EltVT = MVT::getVectorElementType(VT);
+ const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
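+ // The mask has only the sign bit set (e.g. 0x8000000000000000 for f64),
+ // so XORing a value with it flips the sign.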
- if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ if (EltVT == MVT::f64) {
+ Constant *C = ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63));
+ CV.push_back(C);
+ CV.push_back(C);
} else {
- CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ Constant *C = ConstantFP::get(OpNTy, BitsToFloat(1U << 31));
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
default:
assert(0 && "Unsupported calling convention");
case CallingConv::Fast:
- if (EnableFastCC)
- return LowerFastCCCallTo(Op, DAG, CallingConv);
+ // TODO: Implement fastcc
// Falls through
case CallingConv::C:
case CallingConv::X86_StdCall:
}
}
-SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
- assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
-
- // Support up returning up to two registers.
- MVT::ValueType VTs[2];
- unsigned DestRegs[2];
- unsigned NumRegs = Op.getNumOperands() / 2;
- assert(NumRegs <= 2 && "Can only return up to two regs!");
-
- for (unsigned i = 0; i != NumRegs; ++i)
- VTs[i] = Op.getOperand(i*2+1).getValueType();
-
- // Determine which register each value should be copied into.
- GetRetValueLocs(VTs, NumRegs, DestRegs, Subtarget,
- DAG.getMachineFunction().getFunction()->getCallingConv());
-
- // If this is the first return lowered for this function, add the regs to the
- // liveout set for the function.
- if (DAG.getMachineFunction().liveout_empty()) {
- for (unsigned i = 0; i != NumRegs; ++i)
- DAG.getMachineFunction().addLiveOut(DestRegs[i]);
- }
+
+// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
+// Calls to _alloca are needed to probe the stack when allocating more than 4k
+// bytes in one go. Touching the stack at 4K increments is necessary to ensure
+// that the guard pages used by the OS virtual memory manager are allocated in
+// the correct sequence.
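+// The emitted sequence is roughly:
+//   movl %size, %eax
+//   call _alloca
+// where _alloca probes and adjusts %esp, which is then read back as the
+// result.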
+SDOperand
+X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
+ SelectionDAG &DAG) {
+ assert(Subtarget->isTargetCygMing() &&
+ "This should be used only on Cygwin/Mingw targets");
+ // Get the inputs.
SDOperand Chain = Op.getOperand(0);
+ SDOperand Size = Op.getOperand(1);
+ // FIXME: Ensure alignment here
+
SDOperand Flag;
- // Copy the result values into the output registers.
- if (NumRegs != 1 || DestRegs[0] != X86::ST0) {
- for (unsigned i = 0; i != NumRegs; ++i) {
- Chain = DAG.getCopyToReg(Chain, DestRegs[i], Op.getOperand(i*2+1), Flag);
- Flag = Chain.getValue(1);
- }
- } else {
- // We need to handle a destination of ST0 specially, because it isn't really
- // a register.
- SDOperand Value = Op.getOperand(1);
-
- // If this is an FP return with ScalarSSE, we need to move the value from
- // an XMM register onto the fp-stack.
- if (X86ScalarSSE) {
- SDOperand MemLoc;
-
- // If this is a load into a scalarsse value, don't store the loaded value
- // back to the stack, only to reload it: just replace the scalar-sse load.
- if (ISD::isNON_EXTLoad(Value.Val) &&
- (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
- Chain = Value.getOperand(0);
- MemLoc = Value.getOperand(1);
- } else {
- // Spill the value to memory and reload it into top of stack.
- unsigned Size = MVT::getSizeInBits(VTs[0])/8;
- MachineFunction &MF = DAG.getMachineFunction();
- int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
- MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
- Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
- }
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other);
- SDOperand Ops[] = { Chain, MemLoc, DAG.getValueType(VTs[0]) };
- Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
- Chain = Value.getValue(1);
- }
-
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDOperand Ops[] = { Chain, Value };
- Chain = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops, 2);
- Flag = Chain.getValue(1);
- }
+ MVT::ValueType IntPtr = getPointerTy();
+ MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+
+ Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
+ Flag = Chain.getValue(1);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDOperand Ops[] = { Chain,
+ DAG.getTargetExternalSymbol("_alloca", IntPtr),
+ DAG.getRegister(X86::EAX, IntPtr),
+ Flag };
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
+ Flag = Chain.getValue(1);
+
+ Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
- SDOperand BytesToPop = DAG.getConstant(getBytesToPopOnReturn(), MVT::i16);
- if (Flag.Val)
- return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop, Flag);
- else
- return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Chain, BytesToPop);
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(SPTy);
+ Tys.push_back(MVT::Other);
+ SDOperand Ops1[2] = { Chain.getValue(0), Chain };
+ return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
}
SDOperand
if (Fn->hasExternalLinkage() &&
Subtarget->isTargetCygMing() &&
Fn->getName() == "main")
- MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true);
+ MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true);
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Subtarget->is64Bit())
default:
assert(0 && "Unsupported calling convention");
case CallingConv::Fast:
- if (EnableFastCC) {
- return LowerFastCCArguments(Op, DAG);
- }
+ // TODO: implement fastcc.
+
// Falls through
case CallingConv::C:
return LowerCCCArguments(Op, DAG);
case CallingConv::X86_StdCall:
- MF.getInfo<X86FunctionInfo>()->setDecorationStyle(StdCall);
+ MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall);
return LowerCCCArguments(Op, DAG, true);
case CallingConv::X86_FastCall:
- MF.getInfo<X86FunctionInfo>()->setDecorationStyle(FastCall);
- return LowerFastCCArguments(Op, DAG, true);
+ MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall);
+ return LowerFastCCArguments(Op, DAG);
}
}
TargetLowering::ArgListEntry Entry;
Entry.Node = Op.getOperand(1);
Entry.Ty = IntPtrTy;
- Entry.isSigned = false;
- Entry.isInReg = false;
- Entry.isSRet = false;
Args.push_back(Entry);
// Extend the unsigned i8 argument to be an int value for the call.
Entry.Node = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
Entry.Ty = IntPtrTy;
- Entry.isSigned = false;
- Entry.isInReg = false;
- Entry.isSRet = false;
Args.push_back(Entry);
Entry.Node = Op.getOperand(3);
Args.push_back(Entry);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = getTargetData()->getIntPtrType();
- Entry.isSigned = false;
- Entry.isInReg = false;
- Entry.isSRet = false;
Entry.Node = Op.getOperand(1); Args.push_back(Entry);
Entry.Node = Op.getOperand(2); Args.push_back(Entry);
Entry.Node = Op.getOperand(3); Args.push_back(Entry);
return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
}
+SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
+ // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
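+ // That is 24 bytes (gp_offset and fp_offset packed into the first i64,
+ // then overflow_arg_area and reg_save_area), copied below as three i64
+ // load/store pairs.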
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand DstPtr = Op.getOperand(1);
+ SDOperand SrcPtr = Op.getOperand(2);
+ SrcValueSDNode *DstSV = cast<SrcValueSDNode>(Op.getOperand(3));
+ SrcValueSDNode *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4));
+
+ SrcPtr = DAG.getLoad(getPointerTy(), Chain, SrcPtr,
+ SrcSV->getValue(), SrcSV->getOffset());
+ Chain = SrcPtr.getValue(1);
+ for (unsigned i = 0; i < 3; ++i) {
+ SDOperand Val = DAG.getLoad(MVT::i64, Chain, SrcPtr,
+ SrcSV->getValue(), SrcSV->getOffset());
+ Chain = Val.getValue(1);
+ Chain = DAG.getStore(Chain, Val, DstPtr,
+ DstSV->getValue(), DstSV->getOffset());
+ if (i == 2)
+ break;
+ SrcPtr = DAG.getNode(ISD::ADD, getPointerTy(), SrcPtr,
+ DAG.getConstant(8, getPointerTy()));
+ DstPtr = DAG.getNode(ISD::ADD, getPointerTy(), DstPtr,
+ DAG.getConstant(8, getPointerTy()));
+ }
+ return Chain;
+}
+
SDOperand
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
case ISD::SHL_PARTS:
case ISD::SRA_PARTS:
case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLCECOUNTER(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
+ case ISD::VACOPY: return LowerVACOPY(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
}
return SDOperand();
}
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
+ case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
+ case X86ISD::FRCP: return "X86ISD::FRCP";
+ case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
+ case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
}
}
-/// isLegalAddressImmediate - Return true if the integer value or
-/// GlobalValue can be used as the offset of the target addressing mode.
-bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
- // X86 allows a sign-extended 32-bit immediate field.
- return (V > -(1LL << 32) && V < (1LL << 32)-1);
-}
-
-bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
- // In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement
- // field unless we are in small code model.
- if (Subtarget->is64Bit() &&
- getTargetMachine().getCodeModel() != CodeModel::Small)
+// isLegalAddressingMode - Return true if the addressing mode represented
+// by AM is legal for this target, for a load/store of the specified type.
+bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ const Type *Ty) const {
+ // X86 supports extremely general addressing modes.
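+ // These have the form base + scale*index + disp [+ global], e.g.
+ // movl 4(%ebx,%esi,8), %eax.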
+
+ // X86 allows a sign-extended 32-bit immediate field as a displacement.
+ if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
+ return false;
+
+ if (AM.BaseGV) {
+ // X86-64 only supports addr of globals in small code model.
+ if (Subtarget->is64Bit() &&
+ getTargetMachine().getCodeModel() != CodeModel::Small)
+ return false;
+
+ // We can only fold this if we don't need a load either.
+ if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
+ return false;
+ }
+
+ switch (AM.Scale) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ // These scales always work.
+ break;
+ case 3:
+ case 5:
+ case 9:
+ // These scales are formed with basereg+scalereg. Only accept if there is
+ // no basereg yet.
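+ // E.g. a scale of 5 is encoded as reg + 4*reg (leal (%eax,%eax,4)),
+ // which itself occupies the base register slot.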
+ if (AM.HasBaseReg)
+ return false;
+ break;
+ default: // Other stuff never works.
return false;
+ }
- return (!Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false));
+ return true;
}
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
// Only do shuffles on 128-bit vector types for now.
if (MVT::getSizeInBits(VT) == 64) return false;
return (Mask.Val->getNumOperands() <= 4 ||
+ isIdentityMask(Mask.Val) ||
+ isIdentityMask(Mask.Val, true) ||
isSplatMask(Mask.Val) ||
isPSHUFHW_PSHUFLWMask(Mask.Val) ||
X86::isUNPCKLMask(Mask.Val) ||
+ X86::isUNPCKHMask(Mask.Val) ||
X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
- X86::isUNPCKHMask(Mask.Val));
+ X86::isUNPCKH_v_undef_Mask(Mask.Val));
}
bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
return BB;
}
- case X86::FP_TO_INT16_IN_MEM:
- case X86::FP_TO_INT32_IN_MEM:
- case X86::FP_TO_INT64_IN_MEM: {
+ case X86::FP32_TO_INT16_IN_MEM:
+ case X86::FP32_TO_INT32_IN_MEM:
+ case X86::FP32_TO_INT64_IN_MEM:
+ case X86::FP64_TO_INT16_IN_MEM:
+ case X86::FP64_TO_INT32_IN_MEM:
+ case X86::FP64_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
unsigned Opc;
switch (MI->getOpcode()) {
default: assert(0 && "illegal opcode!");
- case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
- case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
- case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
+ case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
+ case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
+ case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
+ case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
+ case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
+ case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
}
X86AddressMode AM;
uint64_t Mask,
uint64_t &KnownZero,
uint64_t &KnownOne,
+ const SelectionDAG &DAG,
unsigned Depth) const {
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
i %= NumElems;
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
return (i == 0)
- ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
+ ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
} else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF)
- return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
+ return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
}
return SDOperand();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT::ValueType VT = N->getValueType(0);
- MVT::ValueType EVT = MVT::getVectorBaseType(VT);
+ MVT::ValueType EVT = MVT::getVectorElementType(VT);
SDOperand PermMask = N->getOperand(2);
int NumElems = (int)PermMask.getNumOperands();
SDNode *Base = NULL;
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
-X86TargetLowering::getConstraintType(char ConstraintLetter) const {
- switch (ConstraintLetter) {
- case 'A':
- case 'r':
- case 'R':
- case 'l':
- case 'q':
- case 'Q':
- case 'x':
- case 'Y':
- return C_RegisterClass;
- default: return TargetLowering::getConstraintType(ConstraintLetter);
+X86TargetLowering::getConstraintType(const std::string &Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'A':
+ case 'r':
+ case 'R':
+ case 'l':
+ case 'q':
+ case 'Q':
+ case 'x':
+ case 'Y':
+ return C_RegisterClass;
+ default:
+ break;
+ }
}
+ return TargetLowering::getConstraintType(Constraint);
}
/// isOperandValidForConstraint - Return the specified operand (possibly
isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
switch (Constraint) {
default: break;
- case 'i':
+ case 'I':
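+ // GCC x86 'I': a constant in [0, 31], e.g. a shift count.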
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getValue() <= 31)
+ return DAG.getTargetConstant(C->getValue(), Op.getValueType());
+ }
+ return SDOperand(0,0);
+ case 'N':
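+ // GCC x86 'N': an unsigned 8-bit constant in [0, 255], e.g. an I/O port
+ // number for in/out.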
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (C->getValue() <= 255)
+ return DAG.getTargetConstant(C->getValue(), Op.getValueType());
+ }
+ return SDOperand(0,0);
+ case 'i': {
// Literal immediates are always ok.
- if (isa<ConstantSDNode>(Op)) return Op;
+ if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op))
+ return DAG.getTargetConstant(CST->getValue(), Op.getValueType());
- // If we are in non-pic codegen mode, we allow the address of a global to
- // be used with 'i'.
- if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ // If we are in non-pic codegen mode, we allow the address of a global (with
+ // an optional displacement) to be used with 'i'.
+ GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
+ int64_t Offset = 0;
+
+ // Match either (GA) or (GA+C)
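+ // e.g. "i"(&gv) or "i"(&gv + 4) used as an inline-asm immediate operand.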
+ if (GA) {
+ Offset = GA->getOffset();
+ } else if (Op.getOpcode() == ISD::ADD) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
+ if (C && GA) {
+ Offset = GA->getOffset()+C->getValue();
+ } else {
+ // Also accept the commuted form (C + GA).
+ C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
+ GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
+ if (C && GA)
+ Offset = GA->getOffset()+C->getValue();
+ else
+ C = 0, GA = 0;
+ }
+ }
+
+ if (GA) {
+ // If addressing this global requires a load (e.g. in PIC mode), we can't
+ // match.
+ if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
+ false))
return SDOperand(0, 0);
- if (GA->getOpcode() != ISD::TargetGlobalAddress)
- Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
- GA->getOffset());
+ Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
+ Offset);
return Op;
}
// Otherwise, not valid for this mode.
return SDOperand(0, 0);
}
+ }
return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}
-
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
- // FIXME: not handling MMX registers yet ('y' constraint).
switch (Constraint[0]) { // GCC X86 Constraint Letters
default: break; // Unknown constraint letter
case 'A': // EAX/EDX
if (VT == MVT::i32 || VT == MVT::i64)
return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
break;
- case 'r': // GENERAL_REGS
- case 'R': // LEGACY_REGS
- if (VT == MVT::i64 && Subtarget->is64Bit())
- return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
- X86::RSI, X86::RDI, X86::RBP, X86::RSP,
- X86::R8, X86::R9, X86::R10, X86::R11,
- X86::R12, X86::R13, X86::R14, X86::R15, 0);
- if (VT == MVT::i32)
- return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
- X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
- else if (VT == MVT::i16)
- return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
- X86::SI, X86::DI, X86::BP, X86::SP, 0);
- else if (VT == MVT::i8)
- return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
- break;
- case 'l': // INDEX_REGS
- if (VT == MVT::i32)
- return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
- X86::ESI, X86::EDI, X86::EBP, 0);
- else if (VT == MVT::i16)
- return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
- X86::SI, X86::DI, X86::BP, 0);
- else if (VT == MVT::i8)
- return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::DL, 0);
- break;
case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
case 'Q': // Q_REGS
if (VT == MVT::i32)
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
- case 'x': // SSE_REGS if SSE1 allowed
- if (Subtarget->hasSSE1())
- return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
- 0);
- return std::vector<unsigned>();
- case 'Y': // SSE_REGS if SSE2 allowed
- if (Subtarget->hasSSE2())
- return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
- 0);
- return std::vector<unsigned>();
}
}
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
+ // First, see if this is a constraint that directly corresponds to an LLVM
+ // register class.
+ if (Constraint.size() == 1) {
+ // GCC Constraint Letters
+ switch (Constraint[0]) {
+ default: break;
+ case 'r': // GENERAL_REGS
+ case 'R': // LEGACY_REGS
+ case 'l': // INDEX_REGS
+ if (VT == MVT::i64 && Subtarget->is64Bit())
+ return std::make_pair(0U, X86::GR64RegisterClass);
+ if (VT == MVT::i32)
+ return std::make_pair(0U, X86::GR32RegisterClass);
+ else if (VT == MVT::i16)
+ return std::make_pair(0U, X86::GR16RegisterClass);
+ else if (VT == MVT::i8)
+ return std::make_pair(0U, X86::GR8RegisterClass);
+ break;
+ case 'y': // MMX_REGS if MMX allowed.
+ if (!Subtarget->hasMMX()) break;
+ return std::make_pair(0U, X86::VR64RegisterClass);
+ break;
+ case 'Y': // SSE_REGS if SSE2 allowed
+ if (!Subtarget->hasSSE2()) break;
+ // FALL THROUGH.
+ case 'x': // SSE_REGS if SSE1 allowed
+ if (!Subtarget->hasSSE1()) break;
+
+ switch (VT) {
+ default: break;
+ // Scalar SSE types.
+ case MVT::f32:
+ case MVT::i32:
+ return std::make_pair(0U, X86::FR32RegisterClass);
+ case MVT::f64:
+ case MVT::i64:
+ return std::make_pair(0U, X86::FR64RegisterClass);
+ // Vector types.
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return std::make_pair(0U, X86::VR128RegisterClass);
+ }
+ break;
+ }
+ }
+
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;