//===----------------------------------------------------------------------===//
#include "X86.h"
-#include "X86CodeEmitter.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ParameterAttributes.h"
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
- X86ScalarSSE = Subtarget->hasSSE2();
+ X86ScalarSSEf64 = Subtarget->hasSSE2();
+ X86ScalarSSEf32 = Subtarget->hasSSE1();
X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
+
RegInfo = TM.getRegisterInfo();
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
} else {
- if (X86ScalarSSE)
+ if (X86ScalarSSEf64)
// If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
else
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
// SSE has no i16 to fp conversion, only i32
- if (X86ScalarSSE)
+ if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
- else {
+ // f32 and f64 cases are Legal, f80 case is not
+ setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
+ } else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
- if (!Subtarget->is64Bit()) {
- // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
- setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
- setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
- }
+  // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
+  // are Legal, f80 is custom lowered.
+ setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
+ setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
- if (X86ScalarSSE) {
+ if (X86ScalarSSEf32) {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
+ // f32 and f64 cases are Legal, f80 case is not
+ setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
} else {
- if (X86ScalarSSE && !Subtarget->hasSSE3())
+ if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
- if (!X86ScalarSSE) {
+ if (!X86ScalarSSEf64) {
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
}
+ // Scalar integer multiply, multiply-high, divide, and remainder are
+ // lowered to use operations that produce two results, to match the
+ // available instructions. This exposes the two-result form to trivial
+ // CSE, which is able to combine x/y and x%y into a single instruction,
+ // for example. The single-result multiply instructions are introduced
+  // in X86ISelDAGToDAG.cpp, after CSE, for uses where the high part
+ // is not needed.
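+  // For example, with this lowering x/y becomes value 0 of an ISD::SDIVREM
+  // node and x%y becomes value 1 of the same node; CSE merges the two nodes,
+  // so a single IDIV ends up serving both uses.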
+ setOperationAction(ISD::MUL , MVT::i8 , Expand);
+ setOperationAction(ISD::MULHS , MVT::i8 , Expand);
+ setOperationAction(ISD::MULHU , MVT::i8 , Expand);
+ setOperationAction(ISD::SDIV , MVT::i8 , Expand);
+ setOperationAction(ISD::UDIV , MVT::i8 , Expand);
+ setOperationAction(ISD::SREM , MVT::i8 , Expand);
+ setOperationAction(ISD::UREM , MVT::i8 , Expand);
+ setOperationAction(ISD::MUL , MVT::i16 , Expand);
+ setOperationAction(ISD::MULHS , MVT::i16 , Expand);
+ setOperationAction(ISD::MULHU , MVT::i16 , Expand);
+ setOperationAction(ISD::SDIV , MVT::i16 , Expand);
+ setOperationAction(ISD::UDIV , MVT::i16 , Expand);
+ setOperationAction(ISD::SREM , MVT::i16 , Expand);
+ setOperationAction(ISD::UREM , MVT::i16 , Expand);
+ setOperationAction(ISD::MUL , MVT::i32 , Expand);
+ setOperationAction(ISD::MULHS , MVT::i32 , Expand);
+ setOperationAction(ISD::MULHU , MVT::i32 , Expand);
+ setOperationAction(ISD::SDIV , MVT::i32 , Expand);
+ setOperationAction(ISD::UDIV , MVT::i32 , Expand);
+ setOperationAction(ISD::SREM , MVT::i32 , Expand);
+ setOperationAction(ISD::UREM , MVT::i32 , Expand);
+ setOperationAction(ISD::MUL , MVT::i64 , Expand);
+ setOperationAction(ISD::MULHS , MVT::i64 , Expand);
+ setOperationAction(ISD::MULHU , MVT::i64 , Expand);
+ setOperationAction(ISD::SDIV , MVT::i64 , Expand);
+ setOperationAction(ISD::UDIV , MVT::i64 , Expand);
+ setOperationAction(ISD::SREM , MVT::i64 , Expand);
+ setOperationAction(ISD::UREM , MVT::i64 , Expand);
+
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
setOperationAction(ISD::BR_CC , MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
if (Subtarget->is64Bit())
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SELECT , MVT::f32 , Custom);
setOperationAction(ISD::SELECT , MVT::f64 , Custom);
+ setOperationAction(ISD::SELECT , MVT::f80 , Custom);
setOperationAction(ISD::SETCC , MVT::i8 , Custom);
setOperationAction(ISD::SETCC , MVT::i16 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::f32 , Custom);
setOperationAction(ISD::SETCC , MVT::f64 , Custom);
+ setOperationAction(ISD::SETCC , MVT::f80 , Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::SELECT , MVT::i64 , Custom);
setOperationAction(ISD::SETCC , MVT::i64 , Custom);
setOperationAction(ISD::MEMSET , MVT::Other, Custom);
setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
- // We don't have line number support yet.
+ // Use the default ISD::LOCATION expansion.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
- setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
// FIXME - use subtarget debug flags
if (!Subtarget->isTargetDarwin() &&
!Subtarget->isTargetELF() &&
setExceptionPointerRegister(X86::EAX);
setExceptionSelectorRegister(X86::EDX);
}
+ setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
- setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
- setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
- setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+ setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
else
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
- if (X86ScalarSSE) {
+ if (X86ScalarSSEf64) {
+ // f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
addRegisterClass(MVT::f64, X86::FR64RegisterClass);
// cases we handle.
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
- addLegalFPImmediate(+0.0); // xorps / xorpd
+ addLegalFPImmediate(APFloat(+0.0)); // xorpd
+ addLegalFPImmediate(APFloat(+0.0f)); // xorps
+
+ // Conversions to long double (in X87) go through memory.
+ setConvertAction(MVT::f32, MVT::f80, Expand);
+ setConvertAction(MVT::f64, MVT::f80, Expand);
+
+ // Conversions from long double (in X87) go through memory.
+ setConvertAction(MVT::f80, MVT::f32, Expand);
+ setConvertAction(MVT::f80, MVT::f64, Expand);
+ } else if (X86ScalarSSEf32) {
+ // Use SSE for f32, x87 for f64.
+ // Set up the FP register classes.
+ addRegisterClass(MVT::f32, X86::FR32RegisterClass);
+ addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+
+ // Use ANDPS to simulate FABS.
+ setOperationAction(ISD::FABS , MVT::f32, Custom);
+
+    // Use XORPS to simulate FNEG.
+ setOperationAction(ISD::FNEG , MVT::f32, Custom);
+
+ setOperationAction(ISD::UNDEF, MVT::f64, Expand);
+
+ // Use ANDPS and ORPS to simulate FCOPYSIGN.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+
+ // We don't support sin/cos/fmod
+ setOperationAction(ISD::FSIN , MVT::f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::f32, Expand);
+ setOperationAction(ISD::FREM , MVT::f32, Expand);
+
+ // Expand FP immediates into loads from the stack, except for the special
+ // cases we handle.
+ setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
+ addLegalFPImmediate(APFloat(+0.0f)); // xorps
+ addLegalFPImmediate(APFloat(+0.0)); // FLD0
+ addLegalFPImmediate(APFloat(+1.0)); // FLD1
+ addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
+ addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
+
+ // SSE->x87 conversions go through memory.
+ setConvertAction(MVT::f32, MVT::f64, Expand);
+ setConvertAction(MVT::f32, MVT::f80, Expand);
+
+ // x87->SSE truncations need to go through memory.
+ setConvertAction(MVT::f80, MVT::f32, Expand);
+ setConvertAction(MVT::f64, MVT::f32, Expand);
+ // And x87->x87 truncations also.
+ setConvertAction(MVT::f80, MVT::f64, Expand);
+
+ if (!UnsafeFPMath) {
+ setOperationAction(ISD::FSIN , MVT::f64 , Expand);
+ setOperationAction(ISD::FCOS , MVT::f64 , Expand);
+ }
} else {
+ // f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
setOperationAction(ISD::UNDEF, MVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
- setOperationAction(ISD::FP_ROUND, MVT::f32, Expand);
+
+ // Floating truncations need to go through memory.
+ setConvertAction(MVT::f80, MVT::f32, Expand);
+ setConvertAction(MVT::f64, MVT::f32, Expand);
+ setConvertAction(MVT::f80, MVT::f64, Expand);
if (!UnsafeFPMath) {
setOperationAction(ISD::FSIN , MVT::f64 , Expand);
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
- addLegalFPImmediate(+0.0); // FLD0
- addLegalFPImmediate(+1.0); // FLD1
- addLegalFPImmediate(-0.0); // FLD0/FCHS
- addLegalFPImmediate(-1.0); // FLD1/FCHS
+ addLegalFPImmediate(APFloat(+0.0)); // FLD0
+ addLegalFPImmediate(APFloat(+1.0)); // FLD1
+ addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
+ addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
+ addLegalFPImmediate(APFloat(+0.0f)); // FLD0
+ addLegalFPImmediate(APFloat(+1.0f)); // FLD1
+ addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
+ addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
+ }
+
+ // Long double always uses X87.
+ addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
+ setOperationAction(ISD::UNDEF, MVT::f80, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
+ if (!UnsafeFPMath) {
+ setOperationAction(ISD::FSIN , MVT::f80 , Expand);
+ setOperationAction(ISD::FCOS , MVT::f80 , Expand);
}
// First set operation action for all vector types to expand. Then we
setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
}
if (Subtarget->hasMMX()) {
//===----------------------------------------------------------------------===//
#include "X86GenCallingConv.inc"
+
+/// GetPossiblePreceedingTailCall - Get the preceding X86ISD::TAILCALL node if
+/// it exists, skipping a possible ISD::TokenFactor.
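+/// For example, both of these chain shapes yield the TAILCALL node:
+///   (X86ISD::TAILCALL ...)
+///   (ISD::TokenFactor (X86ISD::TAILCALL ...), ...)
+/// Any other chain is returned unchanged.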
+static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain) {
+  if (Chain.getOpcode() == X86ISD::TAILCALL) {
+    return Chain;
+  } else if (Chain.getOpcode() == ISD::TokenFactor) {
+    if (Chain.getNumOperands() &&
+        Chain.getOperand(0).getOpcode() == X86ISD::TAILCALL)
+      return Chain.getOperand(0);
+  }
+  return Chain;
+}
/// LowerRET - Lower an ISD::RET node.
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
CCInfo.AnalyzeReturn(Op.Val, RetCC_X86);
-
-
+
// If this is the first return lowered for this function, add the regs to the
// liveout set for the function.
if (DAG.getMachineFunction().liveout_empty()) {
if (RVLocs[i].isRegLoc())
DAG.getMachineFunction().addLiveOut(RVLocs[i].getLocReg());
}
-
SDOperand Chain = Op.getOperand(0);
- SDOperand Flag;
+ // Handle tail call return.
+ Chain = GetPossiblePreceedingTailCall(Chain);
+ if (Chain.getOpcode() == X86ISD::TAILCALL) {
+ SDOperand TailCall = Chain;
+ SDOperand TargetAddress = TailCall.getOperand(1);
+ SDOperand StackAdjustment = TailCall.getOperand(2);
+    assert(((TargetAddress.getOpcode() == ISD::Register &&
+             (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX ||
+              cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
+            TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
+            TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
+           "Expecting a global address, external symbol, or register");
+    assert(StackAdjustment.getOpcode() == ISD::Constant &&
+           "Expecting a const value");
+
+ SmallVector<SDOperand,8> Operands;
+ Operands.push_back(Chain.getOperand(0));
+ Operands.push_back(TargetAddress);
+ Operands.push_back(StackAdjustment);
+ // Copy registers used by the call. Last operand is a flag so it is not
+ // copied.
+    for (unsigned i = 3; i < TailCall.getNumOperands()-1; ++i) {
+ Operands.push_back(Chain.getOperand(i));
+ }
+    return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
+                       Operands.size());
+ }
+
+ // Regular return.
+ SDOperand Flag;
+
// Copy the result values into the output registers.
if (RVLocs.size() != 1 || !RVLocs[0].isRegLoc() ||
RVLocs[0].getLocReg() != X86::ST0) {
// If this is an FP return with ScalarSSE, we need to move the value from
// an XMM register onto the fp-stack.
- if (X86ScalarSSE) {
+    if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
+        (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
SDOperand MemLoc;
-
+
// If this is a load into a scalarsse value, don't store the loaded value
// back to the stack, only to reload it: just replace the scalar-sse load.
if (ISD::isNON_EXTLoad(Value.Val) &&
// If we are using ScalarSSE, store ST(0) to the stack and reload it into
// an XMM register.
- if (X86ScalarSSE) {
+ if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
+ (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
// FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
//===----------------------------------------------------------------------===//
-// C & StdCall Calling Convention implementation
+// C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
// StdCall calling convention seems to be standard for many Windows' API
// routines and elsewhere. It differs from the C calling convention just a
// little: callee should clean up the stack, not caller. Symbols should also
// be decorated in some fancy way :) It doesn't support any vector arguments.
+// For info on the fast calling convention see the Fast Calling Convention
+// (tail call) implementation below (LowerX86_TailCallTo).
/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
return VReg;
}
+// Align stack arguments according to the platform alignment needed for tail
+// calls.
+unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG& DAG);
+
+SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
+ const CCValAssign &VA,
+ MachineFrameInfo *MFI,
+ SDOperand Root, unsigned i) {
+ // Create the nodes corresponding to a load from this parameter slot.
+ int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+ VA.getLocMemOffset());
+ SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
+
+ unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
+
+ if (Flags & ISD::ParamFlags::ByVal)
+ return FIN;
+ else
+ return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
+}
+
SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
bool isStdCall) {
unsigned NumArgs = Op.Val->getNumValues() - 1;
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
-
+ unsigned CC = MF.getFunction()->getCallingConv();
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ CCState CCInfo(CC, isVarArg,
getTargetMachine(), ArgLocs);
- CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
-
+ // Check for possible tail call calling convention.
+ if (CC == CallingConv::Fast && PerformTailCallOpt)
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_TailCall);
+ else
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_32_C);
+
SmallVector<SDOperand, 8> ArgValues;
unsigned LastVal = ~0U;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
+ ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
}
}
unsigned StackSize = CCInfo.getNextStackOffset();
+  // Align the stack specially for tail calls.
+  if (CC == CallingConv::Fast)
+    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
ArgValues.push_back(Root);
if (isVarArg)
VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
- if (isStdCall && !isVarArg) {
+ // Tail call calling convention (CallingConv::Fast) does not support varargs.
+  assert(!(isVarArg && CC == CallingConv::Fast) &&
+         "CallingConv::Fast does not support varargs.");
+
+  if (isStdCall && !isVarArg &&
+      ((CC == CallingConv::Fast && PerformTailCallOpt) ||
+       CC != CallingConv::Fast)) {
BytesToPopOnReturn = StackSize; // Callee pops everything..
BytesCallerReserves = 0;
} else {
BytesCallerReserves = StackSize;
}
-
+
RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
- ReturnAddrIndex = 0; // No return address slot generated yet.
- MF.getInfo<X86MachineFunctionInfo>()
- ->setBytesToPopOnReturn(BytesToPopOnReturn);
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
unsigned CC) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
-
+
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
- CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
+  if (CC == CallingConv::Fast && PerformTailCallOpt)
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
+ else
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_C);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
+  if (CC == CallingConv::Fast)
+    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
}
}
if (InFlag.Val)
Ops.push_back(InFlag);
-
- Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
- NodeTys, &Ops[0], Ops.size());
+
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush = 0;
- if (CC == CallingConv::X86_StdCall) {
+ if (CC == CallingConv::X86_StdCall ||
+ (CC == CallingConv::Fast && PerformTailCallOpt)) {
if (isVarArg)
NumBytesForCalleeToPush = isSRet ? 4 : 0;
else
NumBytesForCalleeToPush = NumBytes;
+    assert(!(isVarArg && CC == CallingConv::Fast) &&
+ "CallingConv::Fast does not support varargs.");
} else {
    // If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
+ ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
}
}
if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
// Make sure the instruction takes 8n+4 bytes to make sure the start of the
- // arguments and the arguments after the retaddr has been pushed are aligned.
+ // arguments and the arguments after the retaddr has been pushed are
+ // aligned.
if ((StackSize & 7) == 0)
StackSize += 4;
}
VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
RegSaveFrameIndex = 0xAAAAAAA; // X86-64 only.
- ReturnAddrIndex = 0; // No return address slot generated yet.
BytesToPopOnReturn = StackSize; // Callee pops all stack arguments.
BytesCallerReserves = 0;
- MF.getInfo<X86MachineFunctionInfo>()
- ->setBytesToPopOnReturn(BytesToPopOnReturn);
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
&ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
+SDOperand
+X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
+ const SDOperand &StackPtr,
+ const CCValAssign &VA,
+ SDOperand Chain,
+ SDOperand Arg) {
+ SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
+ unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
+ if (Flags & ISD::ParamFlags::ByVal) {
+ unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
+ ISD::ParamFlags::ByValAlignOffs);
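+    // E.g. a hypothetical byval argument with 16-byte alignment carries
+    // log2(16) == 4 in its ByValAlign bits, so Align decodes to 1 << 4 == 16.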
+
+ unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
+ ISD::ParamFlags::ByValSizeOffs;
+
+ SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
+ SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
+
+ return DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, PtrOff, Arg, SizeNode,
+ AlignNode);
+ } else {
+ return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
+ }
+}
+
SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
unsigned CC) {
SDOperand Chain = Op.getOperand(0);
if (!Subtarget->isTargetCygMing() && !Subtarget->isTargetWindows()) {
// Make sure the instruction takes 8n+4 bytes to make sure the start of the
- // arguments and the arguments after the retaddr has been pushed are aligned.
+ // arguments and the arguments after the retaddr has been pushed are
+ // aligned.
if ((NumBytes & 7) == 0)
NumBytes += 4;
}
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
}
}
if (InFlag.Val)
Ops.push_back(InFlag);
- // FIXME: Do not generate X86ISD::TAILCALL for now.
- Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
+  assert(!isTailCall && "no tail call here");
+ Chain = DAG.getNode(X86ISD::CALL,
NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
}
+//===----------------------------------------------------------------------===//
+// Fast Calling Convention (tail call) implementation
+//===----------------------------------------------------------------------===//
+
+// Like the StdCall calling convention, the callee cleans up the arguments,
+// except that ECX is reserved for storing the address of the tail-called
+// function. Only 2 registers are free for argument passing (inreg). Tail call
+// optimization is performed provided:
+// * tailcallopt is enabled
+// * caller/callee are fastcc
+// * elf/pic is disabled OR
+// * elf/pic enabled + callee is in module + callee has
+//   visibility protected or hidden
+// To ensure the stack is aligned according to the platform ABI, pass
+// tail-call-align-stack. This makes sure that the argument delta is always a
+// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
+// for example.)
+// If a tail-called function (the callee) has more arguments than the caller,
+// the caller needs to make sure that there is room to move the RETADDR to.
+// This is achieved by reserving an area the size of the argument delta right
+// after the original RETADDR, but before the saved frame pointer or the
+// spilled registers,
+// e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
+// stack layout:
+// arg1
+// arg2
+// RETADDR
+// [ new RETADDR
+// move area ]
+// (possible EBP)
+// ESI
+// EDI
+// local1 ..
+
+/// GetAlignedArgumentStackSize - Round up the stack size to be of the form
+/// e.g. 16n + 12 for a 16-byte stack alignment requirement.
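+/// Worked example (32-bit: StackAlignment 16, SlotSize 4): a StackSize of 20
+/// becomes 28 and one of 30 becomes 44; both have the form 16n + 12, so the
+/// arguments plus the 4-byte return address keep the stack 16-byte aligned.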
+unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
+ SelectionDAG& DAG) {
+ if (PerformTailCallOpt) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const TargetMachine &TM = MF.getTarget();
+ const TargetFrameInfo &TFI = *TM.getFrameInfo();
+ unsigned StackAlignment = TFI.getStackAlignment();
+ uint64_t AlignMask = StackAlignment - 1;
+ int64_t Offset = StackSize;
+ unsigned SlotSize = Subtarget->is64Bit() ? 8 : 4;
+    if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
+      // Number smaller than 12 so just add the difference.
+      Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
+    } else {
+      // Mask out lower bits, add stackalignment once plus the 12 bytes.
+      Offset = ((~AlignMask) & Offset) + StackAlignment +
+               (StackAlignment - SlotSize);
+ }
+ StackSize = Offset;
+ }
+ return StackSize;
+}
+
+/// IsEligibleForTailCallOptimization - Check to see whether the next
+/// instruction following the call is a return. A function is eligible if
+/// caller/callee calling conventions match, currently only fastcc supports
+/// tail calls, and the function CALL is immediately followed by a RET.
+bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
+ SDOperand Ret,
+ SelectionDAG& DAG) const {
+ bool IsEligible = false;
+
+  // Check whether the CALL node immediately precedes the RET node and whether
+  // the return uses the result of the node or is a void return.
+  if ((Ret.getNumOperands() == 1 &&
+       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
+        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
+      (Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
+       Ret.getOperand(1) == SDOperand(Call.Val,0))) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ unsigned CallerCC = MF.getFunction()->getCallingConv();
+ unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
+ if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
+ SDOperand Callee = Call.getOperand(4);
+ // On elf/pic %ebx needs to be livein.
+      if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
+          Subtarget->isPICStyleGOT()) {
+        // Can only do local tail calls with PIC.
+        GlobalValue *GV = 0;
+        GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
+        if (G != 0 &&
+            (GV = G->getGlobal()) &&
+            (GV->hasHiddenVisibility() || GV->hasProtectedVisibility()))
+          IsEligible = true;
+      } else {
+        IsEligible = true;
+ }
+ }
+ }
+ return IsEligible;
+}
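+
+// A sketch of an eligible call at the LLVM IR level (assuming -tailcallopt
+// is enabled, the caller is also fastcc, and the callee is either non-PIC or
+// has hidden/protected visibility):
+//   %res = tail call fastcc i32 @callee(i32 %a)
+//   ret i32 %res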
+
+SDOperand X86TargetLowering::LowerX86_TailCallTo(SDOperand Op,
+ SelectionDAG &DAG,
+ unsigned CC) {
+ SDOperand Chain = Op.getOperand(0);
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+ bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
+ SDOperand Callee = Op.getOperand(4);
+ bool is64Bit = Subtarget->is64Bit();
+
+ assert(isTailCall && PerformTailCallOpt && "Should only emit tail calls.");
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+ if (is64Bit)
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
+ else
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_32_TailCall);
+
+ // Lower arguments at fp - stackoffset + fpdiff.
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ unsigned NumBytesToBePushed =
+ GetAlignedArgumentStackSize(CCInfo.getNextStackOffset(), DAG);
+
+ unsigned NumBytesCallerPushed =
+ MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
+ int FPDiff = NumBytesCallerPushed - NumBytesToBePushed;
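+  // E.g. if the caller pushed 16 bytes of arguments and the callee needs 32,
+  // FPDiff is -16: the return address must move 16 bytes further down to
+  // make room for the extra arguments.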
+
+  // Set the delta of movement of the returnaddr stackslot, but only if the
+  // new delta is smaller than the one already recorded (a more negative
+  // delta means the return address has to move further).
+ if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
+ MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
+
+ // Adjust the ret address stack slot.
+ if (FPDiff) {
+ MVT::ValueType VT = is64Bit ? MVT::i64 : MVT::i32;
+ SDOperand RetAddrFrIdx = getReturnAddressFrameIndex(DAG);
+    RetAddrFrIdx = DAG.getLoad(VT, DAG.getEntryNode(), RetAddrFrIdx, NULL, 0);
+ // Emit a store of the saved ret value to the new location.
+ int SlotSize = is64Bit ? 8 : 4;
+ int NewReturnAddrFI =
+ MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
+ SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
+    Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, NULL, 0);
+ }
+
+  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytesToBePushed,
+                                                      getPointerTy()));
+
+ SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
+ SmallVector<SDOperand, 8> MemOpChains;
+ SmallVector<SDOperand, 8> MemOpChains2;
+ SDOperand FramePtr, StackPtr;
+ SDOperand PtrOff;
+ SDOperand FIN;
+ int FI = 0;
+
+  // Walk the register/memloc assignments, inserting copies/loads. Lower the
+  // arguments first to the stack slots where they would be placed by a
+  // normal (non-tail) function call.
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ SDOperand Arg = Op.getOperand(5+2*VA.getValNo());
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ } else {
+ assert(VA.isMemLoc());
+ if (StackPtr.Val == 0)
+ StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
+ }
+ }
+
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
+
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into registers.
+ SDOperand InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
+ InFlag);
+ InFlag = Chain.getValue(1);
+ }
+ InFlag = SDOperand();
+  // Copy from the caller's stack slots to the stack slots of the tail-called
+  // function. This is needed because lowering the arguments directly to
+  // their final stack slots could overwrite arguments that have not been
+  // read yet.
+  // TODO: To make this more efficient (sometimes saving a store/load) we
+  // could analyse the arguments and emit this store/load/store sequence only
+  // for arguments which would be overwritten otherwise.
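+  // Hypothetical example: if caller(a, b) tail-calls callee(b, a), storing b
+  // straight into a's incoming slot would clobber a before it is read; the
+  // intermediate load below avoids that.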
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (!VA.isRegLoc()) {
+ SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
+ unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
+
+ // Get source stack slot.
+ SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ // Create frame index.
+ int32_t Offset = VA.getLocMemOffset()+FPDiff;
+ uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
+ FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
+ FIN = DAG.getFrameIndex(FI, MVT::i32);
+ if (Flags & ISD::ParamFlags::ByVal) {
+ unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
+ ISD::ParamFlags::ByValAlignOffs);
+
+ unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
+ ISD::ParamFlags::ByValSizeOffs;
+
+ SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
+ SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
+ // Copy relative to framepointer.
+ MemOpChains2.push_back(DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, FIN,
+ PtrOff, SizeNode, AlignNode));
+ } else {
+ SDOperand LoadedArg = DAG.getLoad(VA.getValVT(), Chain, PtrOff, NULL,0);
+ // Store relative to framepointer.
+ MemOpChains2.push_back(DAG.getStore(Chain, LoadedArg, FIN, NULL, 0));
+ }
+ }
+ }
+
+ if (!MemOpChains2.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+                        &MemOpChains2[0], MemOpChains2.size());
+
+  // ELF / PIC requires the GOT pointer in the EBX register before function
+  // calls via the PLT.
+  // This does not work with tail calls, since EBX is not restored correctly
+  // by the tail caller. TODO: true at least for x86; verify for x86-64.
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ // We should use extra load for direct calls to dllimported functions in
+ // non-JIT mode.
+ if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
+ getTargetMachine(), true))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+ else {
+ assert(Callee.getOpcode() == ISD::LOAD &&
+ "Function destination must be loaded into virtual register");
+    unsigned Reg = is64Bit ? X86::R9 : X86::ECX;
+
+    Chain = DAG.getCopyToReg(Chain,
+                             DAG.getRegister(Reg, getPointerTy()),
+                             Callee, InFlag);
+    Callee = DAG.getRegister(Reg, getPointerTy());
+    // Add the register as a live-out.
+    DAG.getMachineFunction().addLiveOut(Reg);
+ }
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SmallVector<SDOperand, 8> Ops;
+
+ Ops.push_back(Chain);
+ Ops.push_back(DAG.getConstant(NumBytesToBePushed, getPointerTy()));
+ Ops.push_back(DAG.getConstant(0, getPointerTy()));
+ if (InFlag.Val)
+ Ops.push_back(InFlag);
+ Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
+ InFlag = Chain.getValue(1);
+
+ // Returns a chain & a flag for retval copy to use.
+ NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ Ops.clear();
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
+ // Add argument registers to the end of the list so that they are known live
+ // into the call.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+ if (InFlag.Val)
+ Ops.push_back(InFlag);
+ assert(InFlag.Val &&
+ "Flag must be set. Depend on flag being set in LowerRET");
+ Chain = DAG.getNode(X86ISD::TAILCALL,
+ Op.Val->getVTList(), &Ops[0], Ops.size());
+
+ return SDOperand(Chain.Val, Op.ResNo);
+}
//===----------------------------------------------------------------------===//
// X86-64 C Calling Convention implementation
MachineFrameInfo *MFI = MF.getFrameInfo();
SDOperand Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+  unsigned CC = MF.getFunction()->getCallingConv();
static const unsigned GPR64ArgRegs[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(MF.getFunction()->getCallingConv(), isVarArg,
+ CCState CCInfo(CC, isVarArg,
getTargetMachine(), ArgLocs);
- CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);
+ if (CC == CallingConv::Fast && PerformTailCallOpt)
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_TailCall);
+ else
+ CCInfo.AnalyzeFormalArguments(Op.Val, CC_X86_64_C);
SmallVector<SDOperand, 8> ArgValues;
unsigned LastVal = ~0U;
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
+ ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
}
}
unsigned StackSize = CCInfo.getNextStackOffset();
+  if (CC == CallingConv::Fast)
+    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
+    assert(CC != CallingConv::Fast &&
+           "Var args not supported with calling convention fastcc");
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 6);
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
}
ArgValues.push_back(Root);
-
- ReturnAddrIndex = 0; // No return address slot generated yet.
- BytesToPopOnReturn = 0; // Callee pops nothing.
- BytesCallerReserves = StackSize;
+  // The tail call convention (fastcc) requires the callee to pop arguments.
+  if (CC == CallingConv::Fast && PerformTailCallOpt) {
+ BytesToPopOnReturn = StackSize; // Callee pops everything.
+ BytesCallerReserves = 0;
+ } else {
+ BytesToPopOnReturn = 0; // Callee pops nothing.
+ BytesCallerReserves = StackSize;
+ }
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
unsigned CC) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
SDOperand Callee = Op.getOperand(4);
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
- CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);
+  if (CC == CallingConv::Fast)
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_TailCall);
+ else
+ CCInfo.AnalyzeCallOperands(Op.Val, CC_X86_64_C);
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
+ if (CC == CallingConv::Fast)
+    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
+
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
}
}
}
if (isVarArg) {
+    assert(CC != CallingConv::Fast &&
+           "Var args not supported with calling convention fastcc");
+
// From AMD64 ABI document:
// For calls that may call functions that use varargs or stdargs
// (prototype-less calls or calls to functions containing ellipsis (...) in
if (InFlag.Val)
Ops.push_back(InFlag);
- // FIXME: Do not generate X86ISD::TAILCALL for now.
- Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
+ Chain = DAG.getNode(X86ISD::CALL,
NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
-
+  int NumBytesForCalleeToPush = 0;
+  if (CC == CallingConv::Fast)
+    NumBytesForCalleeToPush = NumBytes; // Callee pops everything.
+  else
+    NumBytesForCalleeToPush = 0;        // Callee pops nothing.
// Returns a flag for retval copy to use.
NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
Ops.clear();
Ops.push_back(Chain);
Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
- Ops.push_back(DAG.getConstant(0, getPointerTy()));
+ Ops.push_back(DAG.getConstant(NumBytesForCalleeToPush, getPointerTy()));
Ops.push_back(InFlag);
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ int ReturnAddrIndex = FuncInfo->getRAIndex();
+
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
- MachineFunction &MF = DAG.getMachineFunction();
if (Subtarget->is64Bit())
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(8, -8);
else
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
+
+ FuncInfo->setRAIndex(ReturnAddrIndex);
}
return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
// X < 0 -> X == 0, jump on sign.
X86CC = X86::COND_S;
return true;
+ } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
+ // X < 1 -> X <= 0
+ RHS = DAG.getConstant(0, RHS.getValueType());
+ X86CC = X86::COND_LE;
+ return true;
}
}
bool X86::isPSHUFDMask(SDNode *N) {
assert(N->getOpcode() == ISD::BUILD_VECTOR);
- if (N->getNumOperands() != 4)
+ if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
return false;
// Check if the value doesn't reference the second vector.
SDOperand Arg = N->getOperand(i);
if (Arg.getOpcode() == ISD::UNDEF) continue;
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
- if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
+ if (cast<ConstantSDNode>(Arg)->getValue() >= e)
return false;
}
return ((isa<ConstantSDNode>(Elt) &&
cast<ConstantSDNode>(Elt)->getValue() == 0) ||
(isa<ConstantFPSDNode>(Elt) &&
- cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
+ cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}
/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
// If VT is integer, try PSHUF* first, then SHUFP*.
if (MVT::isInteger(VT)) {
- if (X86::isPSHUFDMask(PermMask.Val) ||
+ // MMX doesn't have PSHUFD; it does have PSHUFW. While it's theoretically
+ // possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
+ if (((MVT::getSizeInBits(VT) != 64 || NumElems == 4) &&
+ X86::isPSHUFDMask(PermMask.Val)) ||
X86::isPSHUFHWMask(PermMask.Val) ||
X86::isPSHUFLWMask(PermMask.Val)) {
if (V2.getOpcode() != ISD::UNDEF)
// SHUFPS the element to the lowest double word, then movss.
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
- IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+    IdxVec.push_back(DAG.getConstant(Idx,
+                                     MVT::getVectorElementType(MaskVT)));
+    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
+                                 MVT::getVectorElementType(MaskVT)));
+    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
+                                 MVT::getVectorElementType(MaskVT)));
+    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
+                                 MVT::getVectorElementType(MaskVT)));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
- IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+    IdxVec.push_back(DAG.getNode(ISD::UNDEF,
+                                 MVT::getVectorElementType(MaskVT)));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
DAG.getConstant(32, MVT::i8));
- SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)};
- SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
+ SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::i32,
+ AndNode, DAG.getConstant(0, MVT::i8));
SDOperand Hi, Lo;
SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
-
+ unsigned Opc = X86ISD::CMOV;
VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
SmallVector<SDOperand, 4> Ops;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
- InFlag = Hi.getValue(1);
+ Ops.push_back(Cond);
+ Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
Ops.clear();
Ops.push_back(Tmp3);
Ops.push_back(Tmp1);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
+ Ops.push_back(Cond);
+ Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
} else {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
- InFlag = Lo.getValue(1);
+ Ops.push_back(Cond);
+ Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
Ops.clear();
Ops.push_back(Tmp3);
Ops.push_back(Tmp1);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
+ Ops.push_back(Cond);
+ Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
}
VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
StackSlot, NULL, 0);
+ // These are really Legal; caller falls through into that case.
+  if (SrcVT == MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32)
+    return Result;
+  if (SrcVT == MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64)
+    return Result;
+  if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
+      Subtarget->is64Bit())
+ return Result;
+
// Build the FILD
SDVTList Tys;
- if (X86ScalarSSE)
+ bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getValueType() == MVT::f64);
+ if (useSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
else
Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
Ops.push_back(Chain);
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(SrcVT));
- Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
+ Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
Tys, &Ops[0], Ops.size());
- if (X86ScalarSSE) {
+ if (useSSE) {
Chain = Result.getValue(1);
SDOperand InFlag = Result.getValue(2);
"Unknown FP_TO_SINT to lower!");
// We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
// stack slot.
+ SDOperand Result;
MachineFunction &MF = DAG.getMachineFunction();
unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
+ // These are really Legal.
+ if (Op.getValueType() == MVT::i32 &&
+ X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32)
+ return Result;
+ if (Op.getValueType() == MVT::i32 &&
+ X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)
+ return Result;
+ if (Subtarget->is64Bit() &&
+ Op.getValueType() == MVT::i64 &&
+ Op.getOperand(0).getValueType() != MVT::f80)
+ return Result;
+
unsigned Opc;
switch (Op.getValueType()) {
default: assert(0 && "Invalid FP_TO_SINT to lower!");
SDOperand Chain = DAG.getEntryNode();
SDOperand Value = Op.getOperand(0);
- if (X86ScalarSSE) {
+ if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) {
assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63))));
CV.push_back(C);
CV.push_back(C);
} else {
- Constant *C = ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31))));
CV.push_back(C);
CV.push_back(C);
CV.push_back(C);
const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63)));
CV.push_back(C);
CV.push_back(C);
} else {
- Constant *C = ConstantFP::get(OpNTy, BitsToFloat(1U << 31));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31)));
CV.push_back(C);
CV.push_back(C);
CV.push_back(C);
if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
SrcVT = VT;
+ SrcTy = MVT::getTypeForValueType(SrcVT);
}
// First get the sign bit of second operand.
std::vector<Constant*> CV;
if (SrcVT == MVT::f64) {
- CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(1ULL << 63)));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
} else {
- CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(1U << 31)));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
}
Constant *C = ConstantVector::get(CV);
SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
// Clear first operand sign bit.
CV.clear();
if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(~(1ULL << 63))));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63)))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
} else {
- CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(~(1U << 31))));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31)))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
}
C = ConstantVector::get(CV);
CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
}
-SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG,
- SDOperand Chain) {
+SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
SDOperand Cond;
SDOperand Op0 = Op.getOperand(0);
SDOperand Op1 = Op.getOperand(1);
SDOperand CC = Op.getOperand(2);
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
- const MVT::ValueType *VTs1 = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
- const MVT::ValueType *VTs2 = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
unsigned X86CC;
if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
Op0, Op1, DAG)) {
- SDOperand Ops1[] = { Chain, Op0, Op1 };
- Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, Ops1, 3).getValue(1);
- SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
- return DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
+ Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
+ return DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
}
assert(isFP && "Illegal integer SetCC!");
- SDOperand COps[] = { Chain, Op0, Op1 };
- Cond = DAG.getNode(X86ISD::CMP, VTs1, 2, COps, 3).getValue(1);
-
+ Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
switch (SetCCOpcode) {
default: assert(false && "Illegal floating point SetCC!");
case ISD::SETOEQ: { // !PF & ZF
- SDOperand Ops1[] = { DAG.getConstant(X86::COND_NP, MVT::i8), Cond };
- SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
- SDOperand Ops2[] = { DAG.getConstant(X86::COND_E, MVT::i8),
- Tmp1.getValue(1) };
- SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86::COND_E, MVT::i8), Cond);
return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
}
case ISD::SETUNE: { // PF | !ZF
- SDOperand Ops1[] = { DAG.getConstant(X86::COND_P, MVT::i8), Cond };
- SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops1, 2);
- SDOperand Ops2[] = { DAG.getConstant(X86::COND_NE, MVT::i8),
- Tmp1.getValue(1) };
- SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, VTs2, 2, Ops2, 2);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86::COND_P, MVT::i8), Cond);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
}
}
}
+
SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
bool addTest = true;
- SDOperand Chain = DAG.getEntryNode();
SDOperand Cond = Op.getOperand(0);
SDOperand CC;
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
if (Cond.getOpcode() == ISD::SETCC)
- Cond = LowerSETCC(Cond, DAG, Chain);
+ Cond = LowerSETCC(Cond, DAG);
+  // If the condition flag is set by an X86ISD::CMP, then use it as the
+  // condition-setting operand in place of the X86ISD::SETCC.
if (Cond.getOpcode() == X86ISD::SETCC) {
CC = Cond.getOperand(0);
- // If condition flag is set by a X86ISD::CMP, then make a copy of it
- // (since flag operand cannot be shared). Use it as the condition setting
- // operand in place of the X86ISD::SETCC.
- // If the X86ISD::SETCC has more than one use, then perhaps it's better
- // to use a test instead of duplicating the X86ISD::CMP (for register
- // pressure reason)?
SDOperand Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- bool IllegalFPCMov = !X86ScalarSSE &&
- MVT::isFloatingPoint(Op.getValueType()) &&
- !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
- if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
- !IllegalFPCMov) {
- SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
- Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
+    MVT::ValueType VT = Op.getValueType();
+    bool IllegalFPCMov = false;
+    if ((VT == MVT::f32 && !X86ScalarSSEf32) ||
+        (VT == MVT::f64 && !X86ScalarSSEf64))
+      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
+ if ((Opc == X86ISD::CMP ||
+ Opc == X86ISD::COMI ||
+ Opc == X86ISD::UCOMI) && !IllegalFPCMov) {
+ Cond = Cmp;
addTest = false;
}
}
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
- Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
+    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
+                       DAG.getConstant(0, MVT::i8));
}
- VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
+ MVT::Flag);
SmallVector<SDOperand, 4> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // the condition is true.
Ops.push_back(Op.getOperand(2));
Ops.push_back(Op.getOperand(1));
Ops.push_back(CC);
- Ops.push_back(Cond.getValue(1));
+ Ops.push_back(Cond);
return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}
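// Sketch of the resulting DAG (illustrative): a 'select i1 %c, i32 %t,
// i32 %f' with no reusable flags lowers to roughly
//   (X86ISD::CMOV %f, %t, COND_NE, (X86ISD::CMP %c, 0))
// with the false value first, matching the operand order pushed above.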
SDOperand Cond = Op.getOperand(1);
SDOperand Dest = Op.getOperand(2);
SDOperand CC;
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
if (Cond.getOpcode() == ISD::SETCC)
- Cond = LowerSETCC(Cond, DAG, Chain);
+ Cond = LowerSETCC(Cond, DAG);
+  // If the condition flag is set by an X86ISD::CMP, then use it as the
+  // condition setting operand in place of the X86ISD::SETCC.
if (Cond.getOpcode() == X86ISD::SETCC) {
CC = Cond.getOperand(0);
- // If condition flag is set by a X86ISD::CMP, then make a copy of it
- // (since flag operand cannot be shared). Use it as the condition setting
- // operand in place of the X86ISD::SETCC.
- // If the X86ISD::SETCC has more than one use, then perhaps it's better
- // to use a test instead of duplicating the X86ISD::CMP (for register
- // pressure reason)?
SDOperand Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) {
- SDOperand Ops[] = { Chain, Cmp.getOperand(1), Cmp.getOperand(2) };
- Cond = DAG.getNode(Opc, VTs, 2, Ops, 3);
+ if (Opc == X86ISD::CMP ||
+ Opc == X86ISD::COMI ||
+ Opc == X86ISD::UCOMI) {
+ Cond = Cmp;
addTest = false;
}
}
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- SDOperand Ops[] = { Chain, Cond, DAG.getConstant(0, MVT::i8) };
- Cond = DAG.getNode(X86ISD::CMP, VTs, 2, Ops, 3);
+    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Cond,
+                       DAG.getConstant(0, MVT::i8));
}
return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
- Cond, Op.getOperand(2), CC, Cond.getValue(1));
+                     Chain, Dest, CC, Cond);
}
SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
- unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
-
- if (Subtarget->is64Bit())
- return LowerX86_64CCCCallTo(Op, DAG, CallingConv);
+ unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
+ bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0;
+
+  if (Subtarget->is64Bit()) {
+    if (CallingConv == CallingConv::Fast && isTailCall && PerformTailCallOpt)
+      return LowerX86_TailCallTo(Op, DAG, CallingConv);
+    else
+      return LowerX86_64CCCCallTo(Op, DAG, CallingConv);
+  }
else
switch (CallingConv) {
default:
assert(0 && "Unsupported calling convention");
case CallingConv::Fast:
- // TODO: Implement fastcc
- // Falls through
+ if (isTailCall && PerformTailCallOpt)
+ return LowerX86_TailCallTo(Op, DAG, CallingConv);
+ else
+        return LowerCCCCallTo(Op, DAG, CallingConv);
case CallingConv::C:
case CallingConv::X86_StdCall:
return LowerCCCCallTo(Op, DAG, CallingConv);
default:
assert(0 && "Unsupported calling convention");
case CallingConv::Fast:
- // TODO: implement fastcc.
-
+    return LowerCCCArguments(Op, DAG, true);
-    // Falls through
case CallingConv::C:
return LowerCCCArguments(Op, DAG);
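// Usage note (hypothetical IR, assuming the -tailcallopt flag sets
// PerformTailCallOpt): a call such as
//   %r = tail call fastcc i32 @callee(i32 %x)
// now takes the LowerX86_TailCallTo path above; without the flag, fastcc
// calls and arguments fall back to the plain C calling convention lowering.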
if (Align == 0) Align = 1;
ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
- // If not DWORD aligned, call memset if size is less than the threshold.
- // It knows how to align to the right boundary first.
+  // If not DWORD aligned, or if the size is above the threshold, call memset.
+  // The libc version is likely to be faster for these cases: it can use the
+  // address value and run-time information about the CPU.
if ((Align & 3) != 0 ||
- (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
+ (I && I->getValue() > Subtarget->getMinRepStrSizeThreshold())) {
MVT::ValueType IntPtr = getPointerTy();
const Type *IntPtrTy = getTargetData()->getIntPtrType();
TargetLowering::ArgListTy Args;
}
SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
- SDOperand Chain = Op.getOperand(0);
- unsigned Align =
- (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
+ SDOperand ChainOp = Op.getOperand(0);
+ SDOperand DestOp = Op.getOperand(1);
+ SDOperand SourceOp = Op.getOperand(2);
+ SDOperand CountOp = Op.getOperand(3);
+ SDOperand AlignOp = Op.getOperand(4);
+ unsigned Align = (unsigned)cast<ConstantSDNode>(AlignOp)->getValue();
if (Align == 0) Align = 1;
- ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
- // If not DWORD aligned, call memcpy if size is less than the threshold.
- // It knows how to align to the right boundary first.
- if ((Align & 3) != 0 ||
- (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
- MVT::ValueType IntPtr = getPointerTy();
- TargetLowering::ArgListTy Args;
- TargetLowering::ArgListEntry Entry;
- Entry.Ty = getTargetData()->getIntPtrType();
- Entry.Node = Op.getOperand(1); Args.push_back(Entry);
- Entry.Node = Op.getOperand(2); Args.push_back(Entry);
- Entry.Node = Op.getOperand(3); Args.push_back(Entry);
- std::pair<SDOperand,SDOperand> CallResult =
+  // The libc version is likely to be faster for the following cases: it can
+  // use the address value and run-time information about the CPU. With glibc
+  // 2.6.1 on a Core 2, copying an array of 100M longs was 30% faster.
+
+ // If not DWORD aligned, call memcpy.
+ if ((Align & 3) != 0)
+ return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
+
+ // If size is unknown, call memcpy.
+ ConstantSDNode *I = dyn_cast<ConstantSDNode>(CountOp);
+ if (!I)
+ return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
+
+ // If size is more than the threshold, call memcpy.
+ unsigned Size = I->getValue();
+ if (Size > Subtarget->getMinRepStrSizeThreshold())
+ return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
+
+ return LowerMEMCPYInline(ChainOp, DestOp, SourceOp, Size, Align, DAG);
+}
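// Dispatch summary (illustrative; the threshold value here is an assumption):
// with getMinRepStrSizeThreshold() == 128, a DWORD-aligned 64-byte copy of
// known size is inlined as rep movs, while a misaligned copy, a copy of
// unknown length, or a 4096-byte copy all go through the libc call above.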
+
+SDOperand X86TargetLowering::LowerMEMCPYCall(SDOperand Chain,
+ SDOperand Dest,
+ SDOperand Source,
+ SDOperand Count,
+ SelectionDAG &DAG) {
+ MVT::ValueType IntPtr = getPointerTy();
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Ty = getTargetData()->getIntPtrType();
+ Entry.Node = Dest; Args.push_back(Entry);
+ Entry.Node = Source; Args.push_back(Entry);
+ Entry.Node = Count; Args.push_back(Entry);
+ std::pair<SDOperand,SDOperand> CallResult =
LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
- return CallResult.second;
- }
+ return CallResult.second;
+}
+
+SDOperand X86TargetLowering::LowerMEMCPYInline(SDOperand Chain,
+ SDOperand Dest,
+ SDOperand Source,
+ unsigned Size,
+ unsigned Align,
+ SelectionDAG &DAG) {
MVT::ValueType AVT;
- SDOperand Count;
unsigned BytesLeft = 0;
- bool TwoRepMovs = false;
switch (Align & 3) {
case 2: // WORD aligned
AVT = MVT::i16;
break;
default: // Byte aligned
AVT = MVT::i8;
- Count = Op.getOperand(3);
break;
}
- if (AVT > MVT::i8) {
- if (I) {
- unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
- Count = DAG.getConstant(I->getValue() / UBytes, getPointerTy());
- BytesLeft = I->getValue() % UBytes;
- } else {
- assert(AVT >= MVT::i32 &&
- "Do not use rep;movs if not at least DWORD aligned");
- Count = DAG.getNode(ISD::SRL, Op.getOperand(3).getValueType(),
- Op.getOperand(3), DAG.getConstant(2, MVT::i8));
- TwoRepMovs = true;
- }
- }
+ unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
+ SDOperand Count = DAG.getConstant(Size / UBytes, getPointerTy());
+ BytesLeft = Size % UBytes;
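+  // Worked example (illustrative): Size = 13 with WORD alignment selects
+  // AVT = MVT::i16, so UBytes = 2, Count = 13 / 2 = 6 units moved by
+  // rep movs, and BytesLeft = 13 % 2 = 1 byte finished by the trailing
+  // load/store code below.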
SDOperand InFlag(0, 0);
Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
Count, InFlag);
InFlag = Chain.getValue(1);
Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
- Op.getOperand(1), InFlag);
+ Dest, InFlag);
InFlag = Chain.getValue(1);
Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
- Op.getOperand(2), InFlag);
+ Source, InFlag);
InFlag = Chain.getValue(1);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
Ops.push_back(InFlag);
Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
- if (TwoRepMovs) {
- InFlag = Chain.getValue(1);
- Count = Op.getOperand(3);
- MVT::ValueType CVT = Count.getValueType();
- SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
- DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
- Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
- Left, InFlag);
- InFlag = Chain.getValue(1);
- Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- Ops.clear();
- Ops.push_back(Chain);
- Ops.push_back(DAG.getValueType(MVT::i8));
- Ops.push_back(InFlag);
- Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
- } else if (BytesLeft) {
+ if (BytesLeft) {
// Issue loads and stores for the last 1 - 7 bytes.
- unsigned Offset = I->getValue() - BytesLeft;
- SDOperand DstAddr = Op.getOperand(1);
+ unsigned Offset = Size - BytesLeft;
+ SDOperand DstAddr = Dest;
MVT::ValueType DstVT = DstAddr.getValueType();
- SDOperand SrcAddr = Op.getOperand(2);
+ SDOperand SrcAddr = Source;
MVT::ValueType SrcVT = SrcAddr.getValueType();
SDOperand Value;
if (BytesLeft >= 4) {
SDOperand TheOp = Op.getOperand(0);
SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheOp, 1);
if (Subtarget->is64Bit()) {
- SDOperand Copy1 = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
+ SDOperand Copy1 =
+ DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
SDOperand Copy2 = DAG.getCopyFromReg(Copy1.getValue(1), X86::RDX,
MVT::i64, Copy1.getValue(2));
SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, Copy2,
SDOperand RHS = Op.getOperand(2);
translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
- SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
- SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
- VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
- SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
- SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
+ SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
+ SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
}
}
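// Sketch (illustrative): for the SSE comi/ucomi intrinsics this emits
//   (any_extend (X86ISD::SETCC cc, (X86ISD::COMI/UCOMI lhs, rhs)))
// mirroring a comiss/ucomiss followed by a setcc that reads EFLAGS.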
Function *Func = (Function *)
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
unsigned CC = Func->getCallingConv();
- unsigned char NestReg;
+ unsigned NestReg;
switch (CC) {
default:
case CallingConv::X86_StdCall: {
// Pass 'nest' parameter in ECX.
// Must be kept in sync with X86CallingConv.td
- NestReg = N86::ECX;
+ NestReg = X86::ECX;
// Check that ECX wasn't needed by an 'inreg' parameter.
const FunctionType *FTy = Func->getFunctionType();
case CallingConv::X86_FastCall:
// Pass 'nest' parameter in EAX.
// Must be kept in sync with X86CallingConv.td
- NestReg = N86::EAX;
+ NestReg = X86::EAX;
break;
}
+ const X86InstrInfo *TII =
+ ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
+
SDOperand OutChains[4];
SDOperand Addr, Disp;
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
- const unsigned char MOV32ri = 0xB8;
- const unsigned char JMP = 0xE9;
-
- OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|NestReg, MVT::i8),
+ unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
+  unsigned char N86Reg =
+    ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
+ OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Trmp, TrmpSV->getValue(), TrmpSV->getOffset());
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
TrmpSV->getOffset() + 1, false, 1);
+ unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
                              TrmpSV->getValue(), TrmpSV->getOffset() + 5);
  Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
  OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
TrmpSV->getOffset() + 6, false, 1);
- return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4);
+ SDOperand Ops[] =
+ { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
+ return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
}
}
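// Resulting trampoline layout (derived from the stores above):
//   offset 0:  0xB8+reg   MOV32ri, encoding the 'nest' register number
//   offset 1:  <nest>     32-bit immediate: the nest parameter value
//   offset 5:  0xE9       JMP rel32
//   offset 6:  <disp>     32-bit displacement to the real function
// ten bytes in total, matching the constant 10 used when computing Disp.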
case ISD::FABS: return LowerFABS(Op, DAG);
case ISD::FNEG: return LowerFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
- case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode());
+ case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
+ case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
}
}
case X86::FP32_TO_INT64_IN_MEM:
case X86::FP64_TO_INT16_IN_MEM:
case X86::FP64_TO_INT32_IN_MEM:
- case X86::FP64_TO_INT64_IN_MEM: {
+ case X86::FP64_TO_INT64_IN_MEM:
+ case X86::FP80_TO_INT16_IN_MEM:
+ case X86::FP80_TO_INT32_IN_MEM:
+ case X86::FP80_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
+ case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
+ case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
+ case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
}
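// Rough shape of the expansion (illustrative; most of the surrounding
// custom-inserter code is elided here):
//   fnstcw <slot>          ; save the FP control word
//   orw    $0xC00, <slot>  ; RC bits = 11b: round toward zero (truncate)
//   fldcw  <slot>          ; install truncating rounding mode
//   fistp  <dst>           ; the IST_Fp*m* opcode selected above
//   fldcw  <saved>         ; restore the original control word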
X86AddressMode AM;
i %= NumElems;
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
return (i == 0)
- ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
+ ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
} else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
SDOperand Idx = PermMask.getOperand(i);
if (Idx.getOpcode() == ISD::UNDEF)
return TargetLowering::getConstraintType(Constraint);
}
-/// isOperandValidForConstraint - Return the specified operand (possibly
-/// modified) if the specified SDOperand is valid for the specified target
-/// constraint letter, otherwise return null.
-SDOperand X86TargetLowering::
-isOperandValidForConstraint(SDOperand Op, char Constraint, SelectionDAG &DAG) {
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
+ char Constraint,
+ std::vector<SDOperand>&Ops,
+ SelectionDAG &DAG) {
+ SDOperand Result(0, 0);
+
switch (Constraint) {
default: break;
case 'I':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- if (C->getValue() <= 31)
- return DAG.getTargetConstant(C->getValue(), Op.getValueType());
+ if (C->getValue() <= 31) {
+ Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
+ break;
+ }
}
- return SDOperand(0,0);
+ return;
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- if (C->getValue() <= 255)
- return DAG.getTargetConstant(C->getValue(), Op.getValueType());
+ if (C->getValue() <= 255) {
+ Result = DAG.getTargetConstant(C->getValue(), Op.getValueType());
+ break;
+ }
}
- return SDOperand(0,0);
+ return;
case 'i': {
// Literal immediates are always ok.
- if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op))
- return DAG.getTargetConstant(CST->getValue(), Op.getValueType());
+ if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
+ Result = DAG.getTargetConstant(CST->getValue(), Op.getValueType());
+ break;
+ }
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
// match.
if (Subtarget->GVRequiresExtraLoad(GA->getGlobal(), getTargetMachine(),
false))
- return SDOperand(0, 0);
+ return;
Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
Offset);
- return Op;
+ Result = Op;
+ break;
}
// Otherwise, not valid for this mode.
- return SDOperand(0, 0);
+ return;
+ }
}
+
+ if (Result.Val) {
+ Ops.push_back(Result);
+ return;
}
- return TargetLowering::isOperandValidForConstraint(Op, Constraint, DAG);
+ return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
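// Usage sketch (GNU inline asm; illustrative of the constraints above):
//   int x = 1;
//   asm ("shll %1, %0" : "+r" (x) : "I" (3));   // 'I': immediate 0..31
//   unsigned char v = 0;
//   asm volatile ("outb %0, %1" : : "a" (v), "N" (0x80)); // 'N': 0..255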
std::vector<unsigned> X86TargetLowering::
else if (VT == MVT::i16)
return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
else if (VT == MVT::i8)
- return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::DL, 0);
+ return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
break;
}
}
// GCC calls "st(0)" just plain "st".
if (StringsEqualNoCase("{st}", Constraint)) {
Res.first = X86::ST0;
- Res.second = X86::RSTRegisterClass;
+ Res.second = X86::RFP80RegisterClass;
}
return Res;