#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
!Subtarget->isTargetCygMing())
setOperationAction(ISD::LABEL, MVT::Other, Expand);
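+ // Expand the exception-address and EH-selector nodes; the legalizer will
+ // lower them to copies from the fixed registers chosen below.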
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
+ setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
+ setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
+ if (Subtarget->is64Bit()) {
+ // FIXME: Verify
+ setExceptionPointerRegister(X86::RAX);
+ setExceptionSelectorRegister(X86::RDX);
+ } else {
+ setExceptionPointerRegister(X86::EAX);
+ setExceptionSelectorRegister(X86::EDX);
+ }
+
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAARG , MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
if (Subtarget->is64Bit())
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
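+ // On Cygwin/Mingw a dynamic alloca must be lowered to an _alloca call so
+ // the stack is probed a page at a time; see LowerDYNAMIC_STACKALLOC below.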
+ if (Subtarget->isTargetCygMing())
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+ else
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
if (X86ScalarSSE) {
// Set up the FP register classes.
addLegalFPImmediate(+0.0); // xorps / xorpd
} else {
// Set up the FP register classes.
- addRegisterClass(MVT::f64, X86::RFPRegisterClass);
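+ // x87 values live in size-specific register classes so that f32 and f64
+ // are tracked separately on the FP stack.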
+ addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+ addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
+ setOperationAction(ISD::UNDEF, MVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FP_ROUND, MVT::f32, Expand);
if (!UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
}
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
addLegalFPImmediate(+0.0); // FLD0
addLegalFPImmediate(+1.0); // FLD1
addLegalFPImmediate(-0.0); // FLD0/FCHS
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
- for (unsigned VT = (unsigned)MVT::Vector + 1;
- VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
+ for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
}
if (Subtarget->hasMMX()) {
setOperationAction(ISD::ADD, MVT::v8i8, Legal);
setOperationAction(ISD::ADD, MVT::v4i16, Legal);
setOperationAction(ISD::ADD, MVT::v2i32, Legal);
+ setOperationAction(ISD::ADD, MVT::v1i64, Legal);
setOperationAction(ISD::SUB, MVT::v8i8, Legal);
setOperationAction(ISD::SUB, MVT::v4i16, Legal);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
}
if (Subtarget->hasSSE1()) {
setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
+ setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
+ setOperationAction(ISD::FABS, MVT::v4f32, Custom);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
+ setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
+ setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
+ setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
- CCState CCInfo(CC, getTargetMachine(), RVLocs);
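+ // CCState needs to know whether the function is variadic, since
+ // calling-convention rules can differ for varargs.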
+ bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);
MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
Chain = DAG.getStore(Op.getOperand(0), Value, MemLoc, NULL, 0);
}
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other);
+ SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other);
SDOperand Ops[] = {Chain, MemLoc, DAG.getValueType(RVLocs[0].getValVT())};
Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
Chain = Value.getValue(1);
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallingConv, getTargetMachine(), RVLocs);
+ bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0;
+ CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
// before the fp stackifier runs.
// Copy ST0 into an RFP register with FP_GET_RESULT.
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
+ SDVTList Tys = DAG.getVTList(RVLocs[0].getValVT(), MVT::Other, MVT::Flag);
SDOperand GROps[] = { Chain, InFlag };
SDOperand RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, GROps, 2);
Chain = RetVal.getValue(1);
RetVal = DAG.getLoad(RVLocs[0].getValVT(), Chain, StackSlot, NULL, 0);
Chain = RetVal.getValue(1);
}
-
- if (RVLocs[0].getValVT() == MVT::f32 && !X86ScalarSSE)
- // FIXME: we would really like to remember that this FP_ROUND
- // operation is okay to eliminate if we allow excess FP precision.
- RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
ResultVals.push_back(RetVal);
}
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(MF.getFunction()->getCallingConv(), getTargetMachine(),
- ArgLocs);
+ CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
SmallVector<SDOperand, 8> ArgValues;
RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
ReturnAddrIndex = 0; // No return address slot generated yet.
- MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);
+ MF.getInfo<X86MachineFunctionInfo>()
+ ->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, getTargetMachine(), ArgLocs);
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
// Get a count of how many bytes are to be pushed on the stack.
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(MF.getFunction()->getCallingConv(), getTargetMachine(),
- ArgLocs);
+ CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_FastCall);
SmallVector<SDOperand, 8> ArgValues;
BytesToPopOnReturn = StackSize; // Callee pops all stack arguments.
BytesCallerReserves = 0;
- MF.getInfo<X86FunctionInfo>()->setBytesToPopOnReturn(BytesToPopOnReturn);
+ MF.getInfo<X86MachineFunctionInfo>()
+ ->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
unsigned CC) {
SDOperand Chain = Op.getOperand(0);
bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, getTargetMachine(), ArgLocs);
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_FastCall);
// Get a count of how many bytes are to be pushed on the stack.
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(MF.getFunction()->getCallingConv(), getTargetMachine(),
- ArgLocs);
+ CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);
SmallVector<SDOperand, 8> ArgValues;
RC = X86::FR64RegisterClass;
else {
assert(MVT::isVector(RegVT));
- RC = X86::VR128RegisterClass;
+ if (MVT::getSizeInBits(RegVT) == 64) {
+ RC = X86::GR64RegisterClass; // MMX values are passed in GPRs.
+ RegVT = MVT::i64;
+ } else
+ RC = X86::VR128RegisterClass;
}
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
if (VA.getLocInfo() != CCValAssign::Full)
ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
+ // Handle MMX values passed in GPRs.
+ if (RegVT != VA.getLocVT() && RC == X86::GR64RegisterClass &&
+ MVT::getSizeInBits(RegVT) == 64)
+ ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
+
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CC, getTargetMachine(), ArgLocs);
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);
// Get a count of how many bytes are to be pushed on the stack.
// We should use an extra load for direct calls to dllimported functions in
// non-JIT mode.
if (getTargetMachine().getCodeModel() != CodeModel::Large
- && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
- getTargetMachine(), true))
+ && !Subtarget->GVRequiresExtraLoad(G->getGlobal(),
+ getTargetMachine(), true))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
if (getTargetMachine().getCodeModel() != CodeModel::Large)
return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
}
-/// isCommutedSHUFP - Returns true if the shuffle mask is except
+/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
assert(N->getOpcode() == ISD::BUILD_VECTOR);
unsigned NumElems = N->getNumOperands();
- if (NumElems != 4 && NumElems != 8 && NumElems != 16)
+ if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
return false;
for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
return true;
}
+/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
+/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
+/// <2, 2, 3, 3>
+bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
+ assert(N->getOpcode() == ISD::BUILD_VECTOR);
+
+ unsigned NumElems = N->getNumOperands();
+ if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
+ return false;
+
+ for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
+ SDOperand BitI = N->getOperand(i);
+ SDOperand BitI1 = N->getOperand(i + 1);
+
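+ // Both elements of each pair must select the same element from the upper
+ // half of the source, e.g. <2, 2, 3, 3> for a 4-element vector.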
+ if (!isUndefOrEqual(BitI, j))
+ return false;
+ if (!isUndefOrEqual(BitI1, j))
+ return false;
+ }
+
+ return true;
+}
+
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
return HasHi;
}
+/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies an identity operation on the LHS or RHS.
+static bool isIdentityMask(SDNode *N, bool RHS = false) {
+ unsigned NumElems = N->getNumOperands();
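+ // An LHS identity selects element i from V1 (mask value i); an RHS
+ // identity selects element i from V2 (mask value NumElems + i).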
+ for (unsigned i = 0; i < NumElems; ++i)
+ if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
+ return false;
+ return true;
+}
+
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
MVT::ValueType MaskVT = Mask.getValueType();
- MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
unsigned NumElems = Mask.getNumOperands();
SmallVector<SDOperand, 8> MaskVec;
/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
- if (N->getOpcode() != ISD::BUILD_VECTOR)
+ if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
SDOperand V1 = N->getOperand(0);
return true;
}
+/// isZeroNode - Returns true if Elt is a constant zero or a floating point
+/// constant +0.0.
+static inline bool isZeroNode(SDOperand Elt) {
+ return ((isa<ConstantSDNode>(Elt) &&
+ cast<ConstantSDNode>(Elt)->getValue() == 0) ||
+ (isa<ConstantFPSDNode>(Elt) &&
+ cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
+}
+
+/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
+/// to a zero vector.
+static bool isZeroShuffle(SDNode *N) {
+ if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
+ return false;
+
+ SDOperand V1 = N->getOperand(0);
+ SDOperand V2 = N->getOperand(1);
+ SDOperand Mask = N->getOperand(2);
+ unsigned NumElems = Mask.getNumOperands();
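+ // Every non-undef mask element must select a known-zero element of a
+ // BUILD_VECTOR from whichever input vector it references.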
+ for (unsigned i = 0; i != NumElems; ++i) {
+ SDOperand Arg = Mask.getOperand(i);
+ if (Arg.getOpcode() != ISD::UNDEF) {
+ unsigned Idx = cast<ConstantSDNode>(Arg)->getValue();
+ if (Idx < NumElems) {
+ unsigned Opc = V1.Val->getOpcode();
+ if (Opc == ISD::UNDEF)
+ continue;
+ if (Opc != ISD::BUILD_VECTOR ||
+ !isZeroNode(V1.Val->getOperand(Idx)))
+ return false;
+ } else if (Idx >= NumElems) {
+ unsigned Opc = V2.Val->getOpcode();
+ if (Opc == ISD::UNDEF)
+ continue;
+ if (Opc != ISD::BUILD_VECTOR ||
+ !isZeroNode(V2.Val->getOperand(Idx - NumElems)))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/// getZeroVector - Returns a vector of specified type with all zero elements.
+///
+static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
+ assert(MVT::isVector(VT) && "Expected a vector type");
+ unsigned NumElems = MVT::getVectorNumElements(VT);
+ MVT::ValueType EVT = MVT::getVectorElementType(VT);
+ bool isFP = MVT::isFloatingPoint(EVT);
+ SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
+ SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero);
+ return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
+}
+
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
MaskVec.push_back(DAG.getConstant(i, BaseVT));
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
unsigned Half = NumElems/2;
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i != Half; ++i) {
return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}
-/// getZeroVector - Returns a vector of specified type with all zero elements.
-///
-static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
- assert(MVT::isVector(VT) && "Expected a vector type");
- unsigned NumElems = getVectorNumElements(VT);
- MVT::ValueType EVT = MVT::getVectorBaseType(VT);
- bool isFP = MVT::isFloatingPoint(EVT);
- SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
- SmallVector<SDOperand, 8> ZeroVec(NumElems, Zero);
- return DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroVec[0], ZeroVec.size());
-}
-
/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}
-/// isZeroNode - Returns true if Elt is a constant zero or a floating point
-/// constant +0.0.
-static inline bool isZeroNode(SDOperand Elt) {
- return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getValue() == 0) ||
- (isa<ConstantFPSDNode>(Elt) &&
- cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
-}
-
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
-/// vector and zero or undef vector.
+/// vector and a zero or undef vector.
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
unsigned NumElems, unsigned Idx,
bool isZero, SelectionDAG &DAG) {
SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
SDOperand Zero = DAG.getConstant(0, EVT);
SmallVector<SDOperand, 8> MaskVec(NumElems, Zero);
MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
return Op;
MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EVT = MVT::getVectorBaseType(VT);
+ MVT::ValueType EVT = MVT::getVectorElementType(VT);
unsigned EVTBits = MVT::getSizeInBits(EVT);
unsigned NumElems = Op.getNumOperands();
}
}
- if (NumNonZero == 0)
- // Must be a mix of zero and undef. Return a zero vector.
- return getZeroVector(VT, DAG);
+ if (NumNonZero == 0) {
+ if (NumZero == 0)
+ // All elements are undef. Return an UNDEF node.
+ return DAG.getNode(ISD::UNDEF, VT);
+ else
+ // A mix of zero and undef. Return a zero vector.
+ return getZeroVector(VT, DAG);
+ }
// Splat is obviously ok. Let legalizer expand it to a shuffle.
if (Values.size() == 1)
Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
DAG);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i < NumElems; i++)
MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
}
}
- // Let legalizer expand 2-wide build_vector's.
+ // Let legalizer expand 2-wide build_vectors.
if (EVTBits == 64)
return SDOperand();
if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
return V[0];
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
bool Reverse = (NonZeros & 0x3) == 2;
for (unsigned i = 0; i < 2; ++i)
if (isUndefShuffle(Op.Val))
return DAG.getNode(ISD::UNDEF, VT);
+ if (isZeroShuffle(Op.Val))
+ return getZeroVector(VT, DAG);
+
+ if (isIdentityMask(PermMask.Val))
+ return V1;
+ else if (isIdentityMask(PermMask.Val, true))
+ return V2;
+
if (isSplatMask(PermMask.Val)) {
if (NumElems <= 4) return Op;
// Promote it to a v4i32 splat.
}
if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
+ X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
X86::isUNPCKLMask(PermMask.Val) ||
X86::isUNPCKHMask(PermMask.Val))
return Op;
// Commute it back and try unpck* again.
Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
+ X86::isUNPCKH_v_undef_Mask(PermMask.Val) ||
X86::isUNPCKLMask(PermMask.Val) ||
X86::isUNPCKHMask(PermMask.Val))
return Op;
return Op;
}
- if (X86::isSHUFPMask(PermMask.Val))
+ if (X86::isSHUFPMask(PermMask.Val) &&
+ MVT::getSizeInBits(VT) != 64) // Don't do this for MMX.
return Op;
// Handle v8i16 shuffle high / low shuffle node pair.
if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i != 4; ++i)
MaskVec.push_back(PermMask.getOperand(i));
}
}
- if (NumElems == 4) {
+ if (NumElems == 4 &&
+ // Don't do this for MMX.
+ MVT::getSizeInBits(VT) != 64) {
MVT::ValueType MaskVT = PermMask.getValueType();
- MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
SmallVector<std::pair<int, int>, 8> Locs;
Locs.reserve(NumElems);
SmallVector<SDOperand, 8> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
// SHUFPS the element to the lowest double word, then movss.
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
- IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
+ IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
- IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
+ IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
// Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
// as its second argument.
MVT::ValueType VT = Op.getValueType();
- MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(VT);
SDOperand N0 = Op.getOperand(0);
SDOperand N1 = Op.getOperand(1);
SDOperand N2 = Op.getOperand(2);
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
- N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
+ N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), getPointerTy());
return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
} else if (MVT::getSizeInBits(BaseVT) == 32) {
unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
// Use a movss.
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
- MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
+ MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
SmallVector<SDOperand, 8> MaskVec;
MaskVec.push_back(DAG.getConstant(4, BaseVT));
for (unsigned i = 1; i <= 3; ++i)
return Result;
}
+// Lower ISD::GlobalTLSAddress using the "general dynamic" model
+static SDOperand
+LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
+ const MVT::ValueType PtrVT) {
+ SDOperand InFlag;
+ SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
+ DAG.getNode(X86ISD::GlobalBaseReg,
+ PtrVT), InFlag);
+ InFlag = Chain.getValue(1);
+
+ // emit leal symbol@TLSGD(,%ebx,1), %eax
+ SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
+ SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ GA->getValueType(0),
+ GA->getOffset());
+ SDOperand Ops[] = { Chain, TGA, InFlag };
+ SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
+ InFlag = Result.getValue(2);
+ Chain = Result.getValue(1);
+
+ // call ___tls_get_addr. This function receives its argument in
+ // the register EAX.
+ Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
+ InFlag = Chain.getValue(1);
+
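+ // Build the call node directly: the chain, the callee symbol, and the
+ // EAX / EBX register operands, which keep those registers live across
+ // the call.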
+ NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDOperand Ops1[] = { Chain,
+ DAG.getTargetExternalSymbol("___tls_get_addr",
+ PtrVT),
+ DAG.getRegister(X86::EAX, PtrVT),
+ DAG.getRegister(X86::EBX, PtrVT),
+ InFlag };
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
+ InFlag = Chain.getValue(1);
+
+ return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
+}
+
+// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
+// "local exec" model.
+static SDOperand
+LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
+ const MVT::ValueType PtrVT) {
+ // Get the Thread Pointer
+ SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
+ // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
+ // exec)
+ SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ GA->getValueType(0),
+ GA->getOffset());
+ SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);
+
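+ // In the initial exec model the offset is only known at link time, so
+ // load it from the GOT entry created for the @indntpoff relocation.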
+ if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
+ Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, NULL, 0);
+
+ // The address of the thread-local variable is the sum of the thread
+ // pointer and the offset of the variable.
+ return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
+}
+
+SDOperand
+X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
+ // TODO: implement the "local dynamic" model
+ // TODO: implement the "initial exec" model for PIC executables
+ assert(!Subtarget->is64Bit() && Subtarget->isTargetELF() &&
+ "TLS not implemented for non-ELF and 64-bit targets");
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+ // If the relocation model is PIC, use the "General Dynamic" TLS model;
+ // otherwise use the "Local Exec" TLS model.
+ if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ return LowerToTLSGeneralDynamicModel(GA, DAG, getPointerTy());
+ else
+ return LowerToTLSExecModel(GA, DAG, getPointerTy());
+}
+
SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
if (X86ScalarSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
else
- Tys = DAG.getVTList(MVT::f64, MVT::Other);
+ Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
SmallVector<SDOperand, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(StackSlot);
if (X86ScalarSSE) {
assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
- SDVTList Tys = DAG.getVTList(MVT::f64, MVT::Other);
+ SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDOperand Ops[] = {
Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
};
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
- const Type *OpNTy = MVT::getTypeForValueType(VT);
+ MVT::ValueType EltVT = VT;
+ if (MVT::isVector(VT))
+ EltVT = MVT::getVectorElementType(VT);
+ const Type *OpNTy = MVT::getTypeForValueType(EltVT);
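+ // Build a mask with every bit set except the sign bit, replicated into
+ // each element; ANDing with it clears the sign of every lane.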
std::vector<Constant*> CV;
- if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ if (EltVT == MVT::f64) {
+ Constant *C = ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)));
+ CV.push_back(C);
+ CV.push_back(C);
} else {
- CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ Constant *C = ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)));
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
MVT::ValueType VT = Op.getValueType();
- const Type *OpNTy = MVT::getTypeForValueType(VT);
+ MVT::ValueType EltVT = VT;
+ if (MVT::isVector(VT))
+ EltVT = MVT::getVectorElementType(VT);
+ const Type *OpNTy = MVT::getTypeForValueType(EltVT);
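+ // Build a mask with only the sign bit set in each element; XORing with
+ // it flips the sign of every lane.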
std::vector<Constant*> CV;
- if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ if (EltVT == MVT::f64) {
+ Constant *C = ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63));
+ CV.push_back(C);
+ CV.push_back(C);
} else {
- CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
- CV.push_back(ConstantFP::get(OpNTy, 0.0));
+ Constant *C = ConstantFP::get(OpNTy, BitsToFloat(1U << 31));
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
+ CV.push_back(C);
}
Constant *CS = ConstantStruct::get(CV);
SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
}
}
+
+// Lower dynamic stack allocation to an _alloca call for Cygwin/Mingw targets.
+// Calls to _alloca are needed to probe the stack when allocating more than 4K
+// bytes in one go. Touching the stack at 4K increments is necessary to ensure
+// that the guard pages used by the OS virtual memory manager are allocated in
+// the correct sequence.
+SDOperand
+X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
+ SelectionDAG &DAG) {
+ assert(Subtarget->isTargetCygMing() &&
+ "This should be used only on Cygwin/Mingw targets");
+
+ // Get the inputs.
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand Size = Op.getOperand(1);
+ // FIXME: Ensure alignment here
+
+ SDOperand Flag;
+
+ MVT::ValueType IntPtr = getPointerTy();
+ MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+
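+ // _alloca takes the allocation size in EAX and adjusts the stack pointer
+ // itself, touching each page so guard pages are committed in order.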
+ Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
+ Flag = Chain.getValue(1);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDOperand Ops[] = { Chain,
+ DAG.getTargetExternalSymbol("_alloca", IntPtr),
+ DAG.getRegister(X86::EAX, IntPtr),
+ Flag };
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
+ Flag = Chain.getValue(1);
+
+ Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
+
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(SPTy);
+ Tys.push_back(MVT::Other);
+ SDOperand Ops1[2] = { Chain.getValue(0), Chain };
+ return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops1, 2);
+}
+
SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
if (Fn->hasExternalLinkage() &&
Subtarget->isTargetCygMing() &&
Fn->getName() == "main")
- MF.getInfo<X86FunctionInfo>()->setForceFramePointer(true);
+ MF.getInfo<X86MachineFunctionInfo>()->setForceFramePointer(true);
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Subtarget->is64Bit())
case CallingConv::C:
return LowerCCCArguments(Op, DAG);
case CallingConv::X86_StdCall:
- MF.getInfo<X86FunctionInfo>()->setDecorationStyle(StdCall);
+ MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(StdCall);
return LowerCCCArguments(Op, DAG, true);
case CallingConv::X86_FastCall:
- MF.getInfo<X86FunctionInfo>()->setDecorationStyle(FastCall);
+ MF.getInfo<X86MachineFunctionInfo>()->setDecorationStyle(FastCall);
return LowerFastCCArguments(Op, DAG);
}
}
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
case ISD::SHL_PARTS:
case ISD::SRA_PARTS:
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
}
return SDOperand();
}
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMIN: return "X86ISD::FMIN";
+ case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
+ case X86ISD::FRCP: return "X86ISD::FRCP";
+ case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
+ case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
}
}
-/// isLegalAddressImmediate - Return true if the integer value can be used
-/// as the offset of the target addressing mode for load / store of the
-/// given type.
-bool X86TargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{
- // X86 allows a sign-extended 32-bit immediate field.
- return (V > -(1LL << 32) && V < (1LL << 32)-1);
-}
-
-/// isLegalAddressImmediate - Return true if the GlobalValue can be used as
-/// the offset of the target addressing mode.
-bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
- // In 64-bit mode, GV is 64-bit so it won't fit in the 32-bit displacement
- // field unless we are in small code model.
- if (Subtarget->is64Bit() &&
- getTargetMachine().getCodeModel() != CodeModel::Small)
+// isLegalAddressingMode - Return true if the addressing mode represented
+// by AM is legal for this target, for a load/store of the specified type.
+bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ const Type *Ty) const {
+ // X86 supports extremely general addressing modes.
+
+ // X86 allows a sign-extended 32-bit immediate field as a displacement.
+ if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
return false;
- return (!Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false));
-}
-
-/// isLegalAddressScale - Return true if the integer value can be used as the
-/// scale of the target addressing mode for load / store of the given type.
-bool X86TargetLowering::isLegalAddressScale(int64_t S, const Type *Ty) const {
- switch (S) {
- default:
+ if (AM.BaseGV) {
+ // X86-64 only supports addresses of globals in the small code model.
+ if (Subtarget->is64Bit() &&
+ getTargetMachine().getCodeModel() != CodeModel::Small)
+ return false;
+
+ // We can only fold this if we don't need a load either.
+ if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
+ return false;
+ }
+
+ switch (AM.Scale) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ // These scales always work.
+ break;
+ case 3:
+ case 5:
+ case 9:
+ // These scales are formed with basereg+scalereg. Only accept if there is
+ // no basereg yet.
+ if (AM.HasBaseReg)
+ return false;
+ break;
+ default: // Other stuff never works.
return false;
- case 2: case 4: case 8:
- return true;
- // FIXME: These require both scale + index last and thus more expensive.
- // How to tell LSR to try for 2, 4, 8 first?
- case 3: case 5: case 9:
- return true;
}
+
+ return true;
}
-/// isLegalAddressScaleAndImm - Return true if S works for IsLegalAddressScale
-/// and V works for isLegalAddressImmediate _and_ both can be applied
-/// simultaneously to the same instruction.
-bool X86TargetLowering::isLegalAddressScaleAndImm(int64_t S, int64_t V,
- const Type* Ty) const {
- return isLegalAddressScale(S, Ty) && isLegalAddressImmediate(V, Ty);
-}
-
-/// isLegalAddressScaleAndImm - Return true if S works for IsLegalAddressScale
-/// and GV works for isLegalAddressImmediate _and_ both can be applied
-/// simultaneously to the same instruction.
-bool X86TargetLowering::isLegalAddressScaleAndImm(int64_t S, GlobalValue *GV,
- const Type* Ty) const {
- return isLegalAddressScale(S, Ty) && isLegalAddressImmediate(GV);
-}
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
// Only do shuffles on 128-bit vector types for now.
if (MVT::getSizeInBits(VT) == 64) return false;
return (Mask.Val->getNumOperands() <= 4 ||
+ isIdentityMask(Mask.Val) ||
+ isIdentityMask(Mask.Val, true) ||
isSplatMask(Mask.Val) ||
isPSHUFHW_PSHUFLWMask(Mask.Val) ||
X86::isUNPCKLMask(Mask.Val) ||
+ X86::isUNPCKHMask(Mask.Val) ||
X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
- X86::isUNPCKHMask(Mask.Val));
+ X86::isUNPCKH_v_undef_Mask(Mask.Val));
}
bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
return BB;
}
- case X86::FP_TO_INT16_IN_MEM:
- case X86::FP_TO_INT32_IN_MEM:
- case X86::FP_TO_INT64_IN_MEM: {
+ case X86::FP32_TO_INT16_IN_MEM:
+ case X86::FP32_TO_INT32_IN_MEM:
+ case X86::FP32_TO_INT64_IN_MEM:
+ case X86::FP64_TO_INT16_IN_MEM:
+ case X86::FP64_TO_INT32_IN_MEM:
+ case X86::FP64_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
unsigned Opc;
switch (MI->getOpcode()) {
default: assert(0 && "illegal opcode!");
- case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
- case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
- case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
+ case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
+ case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
+ case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
+ case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
+ case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
+ case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
}
X86AddressMode AM;
uint64_t Mask,
uint64_t &KnownZero,
uint64_t &KnownOne,
+ const SelectionDAG &DAG,
unsigned Depth) const {
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
i %= NumElems;
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
return (i == 0)
- ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
+ ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
} else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF)
- return DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(VT));
+ return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
}
return SDOperand();
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
MVT::ValueType VT = N->getValueType(0);
- MVT::ValueType EVT = MVT::getVectorBaseType(VT);
+ MVT::ValueType EVT = MVT::getVectorElementType(VT);
SDOperand PermMask = N->getOperand(2);
int NumElems = (int)PermMask.getNumOperands();
SDNode *Base = NULL;
case 'I':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getValue() <= 31)
- return Op;
+ return DAG.getTargetConstant(C->getValue(), Op.getValueType());
}
return SDOperand(0,0);
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getValue() <= 255)
- return Op;
+ return DAG.getTargetConstant(C->getValue(), Op.getValueType());
}
return SDOperand(0,0);
- case 'i':
+ case 'i': {
// Literal immediates are always ok.
- if (isa<ConstantSDNode>(Op)) return Op;
+ if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op))
+ return DAG.getTargetConstant(CST->getValue(), Op.getValueType());
- // If we are in non-pic codegen mode, we allow the address of a global to
- // be used with 'i'.
- if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
+ // If we are in non-pic codegen mode, we allow the address of a global (with
+ // an optional displacement) to be used with 'i'.
+ GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
+ int64_t Offset = 0;
+
+ // Match either (GA) or (GA+C)
+ if (GA) {
+ Offset = GA->getOffset();
+ } else if (Op.getOpcode() == ISD::ADD) {
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
+ if (C && GA) {
+ Offset = GA->getOffset()+C->getValue();
+ } else {
+ // Try the commuted form, (C+GA).
+ C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
+ GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
+ if (C && GA)
+ Offset = GA->getOffset()+C->getValue();
+ else
+ C = 0, GA = 0;
+ }
+ }
+
+ if (GA) {
+ // If addressing this global requires a load (e.g. in PIC mode), we can't
+ // match.
+ if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
+ false))
return SDOperand(0, 0);
- if (GA->getOpcode() != ISD::TargetGlobalAddress)
- Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
- GA->getOffset());
+ Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
+ Offset);
return Op;
}
// Otherwise, not valid for this mode.
return SDOperand(0, 0);
}
+ }
return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
}
-
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
- // FIXME: not handling MMX registers yet ('y' constraint).
switch (Constraint[0]) { // GCC X86 Constraint Letters
default: break; // Unknown constraint letter
case 'A': // EAX/EDX
if (VT == MVT::i32 || VT == MVT::i64)
return make_vector<unsigned>(X86::EAX, X86::EDX, 0);
break;
- case 'r': // GENERAL_REGS
- case 'R': // LEGACY_REGS
- if (VT == MVT::i64 && Subtarget->is64Bit())
- return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
- X86::RSI, X86::RDI, X86::RBP, X86::RSP,
- X86::R8, X86::R9, X86::R10, X86::R11,
- X86::R12, X86::R13, X86::R14, X86::R15, 0);
- if (VT == MVT::i32)
- return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
- X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
- else if (VT == MVT::i16)
- return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
- X86::SI, X86::DI, X86::BP, X86::SP, 0);
- else if (VT == MVT::i8)
- return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
- break;
- case 'l': // INDEX_REGS
- if (VT == MVT::i32)
- return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
- X86::ESI, X86::EDI, X86::EBP, 0);
- else if (VT == MVT::i16)
- return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
- X86::SI, X86::DI, X86::BP, 0);
- else if (VT == MVT::i8)
- return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::DL, 0);
- break;
case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
case 'Q': // Q_REGS
if (VT == MVT::i32)
else if (VT == MVT::i8)
return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
- case 'x': // SSE_REGS if SSE1 allowed
- if (Subtarget->hasSSE1())
- return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
- 0);
- return std::vector<unsigned>();
- case 'Y': // SSE_REGS if SSE2 allowed
- if (Subtarget->hasSSE2())
- return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
- X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
- 0);
- return std::vector<unsigned>();
}
}
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT::ValueType VT) const {
+ // First, see if this is a constraint that directly corresponds to an LLVM
+ // register class.
+ if (Constraint.size() == 1) {
+ // GCC Constraint Letters
+ switch (Constraint[0]) {
+ default: break;
+ case 'r': // GENERAL_REGS
+ case 'R': // LEGACY_REGS
+ case 'l': // INDEX_REGS
+ if (VT == MVT::i64 && Subtarget->is64Bit())
+ return std::make_pair(0U, X86::GR64RegisterClass);
+ if (VT == MVT::i32)
+ return std::make_pair(0U, X86::GR32RegisterClass);
+ else if (VT == MVT::i16)
+ return std::make_pair(0U, X86::GR16RegisterClass);
+ else if (VT == MVT::i8)
+ return std::make_pair(0U, X86::GR8RegisterClass);
+ break;
+ case 'y': // MMX_REGS if MMX allowed.
+ if (!Subtarget->hasMMX()) break;
+ return std::make_pair(0U, X86::VR64RegisterClass);
+ break;
+ case 'Y': // SSE_REGS if SSE2 allowed
+ if (!Subtarget->hasSSE2()) break;
+ // FALL THROUGH.
+ case 'x': // SSE_REGS if SSE1 allowed
+ if (!Subtarget->hasSSE1()) break;
+
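+ // Pick the register class from the operand type: scalar SSE values go
+ // in FR32/FR64, full vectors in VR128.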
+ switch (VT) {
+ default: break;
+ // Scalar SSE types.
+ case MVT::f32:
+ case MVT::i32:
+ return std::make_pair(0U, X86::FR32RegisterClass);
+ case MVT::f64:
+ case MVT::i64:
+ return std::make_pair(0U, X86::FR64RegisterClass);
+ // Vector types.
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return std::make_pair(0U, X86::VR128RegisterClass);
+ }
+ break;
+ }
+ }
+
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;