#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
// Given an array type, recursively traverse the elements.
if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
const Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
+ uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
return;
}
+ // Interpret void as zero return values.
+ if (Ty == Type::VoidTy)
+ return;
// Base case: we can get an MVT for this LLVM IR type.
ValueVTs.push_back(TLI.getValueType(Ty));
if (Offsets)
/// this value and returns the result as a ValueVTs value. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
- SDValue getCopyFromRegs(SelectionDAG &DAG,
+ SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
SDValue &Chain, SDValue *Flag) const;
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
- void getCopyToRegs(SDValue Val, SelectionDAG &DAG,
+ void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
SDValue &Chain, SDValue *Flag) const;
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
- /// operand list. This adds the code marker and includes the number of
- /// values added into it.
- void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
- std::vector<SDValue> &Ops) const;
+ /// operand list. This adds the code marker, matching input operand index
+ /// (if applicable), and includes the number of values added into it.
+ void AddInlineAsmOperands(unsigned Code,
+ bool HasMatching, unsigned MatchingIdx,
+ SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};
}
if (isa<PHINode>(I)) return true;
BasicBlock *BB = I->getParent();
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
- // FIXME: Remove switchinst special case.
- isa<SwitchInst>(*UI))
+ if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
return true;
return false;
}
}
void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
+ SelectionDAG &DAG,
bool EnableFastISel) {
Fn = &fn;
MF = &mf;
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
AI->getAlignment());
// Create Machine PHI nodes for LLVM PHI nodes, lowering them as
// appropriate.
PHINode *PN;
- for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){
- if (PN->use_empty()) continue;
+ DebugLoc DL;
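+ // As we scan the block, remember the most recent source location seen in
+ // any debug intrinsic so that the machine PHI nodes built below carry it.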
+ for (BasicBlock::iterator
+ I = BB->begin(), E = BB->end(); I != E; ++I) {
+ if (CallInst *CI = dyn_cast<CallInst>(I)) {
+ if (Function *F = CI->getCalledFunction()) {
+ switch (F->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::dbg_stoppoint: {
+ DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
+
+ if (DIDescriptor::ValidDebugInfo(SPI->getContext(),
+ CodeGenOpt::Default)) {
+ DICompileUnit CU(cast<GlobalVariable>(SPI->getContext()));
+ unsigned idx = MF->getOrCreateDebugLocID(CU.getGV(),
+ SPI->getLine(),
+ SPI->getColumn());
+ DL = DebugLoc::get(idx);
+ }
+
+ break;
+ }
+ case Intrinsic::dbg_func_start: {
+ DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
+ Value *SP = FSI->getSubprogram();
+
+ if (DIDescriptor::ValidDebugInfo(SP, CodeGenOpt::Default)) {
+ DISubprogram Subprogram(cast<GlobalVariable>(SP));
+ DICompileUnit CU(Subprogram.getCompileUnit());
+ unsigned Line = Subprogram.getLineNumber();
+ DL = DebugLoc::get(MF->getOrCreateDebugLocID(CU.getGV(),
+ Line, 0));
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ PN = dyn_cast<PHINode>(I);
+ if (!PN || PN->use_empty()) continue;
unsigned PHIReg = ValueMap[PN];
assert(PHIReg && "PHI node does not have an assigned virtual register!");
unsigned NumRegisters = TLI.getNumRegisters(VT);
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
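// A single LLVM PHI may be lowered to several machine PHIs when its value
// is expanded or split across multiple registers.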
for (unsigned i = 0; i != NumRegisters; ++i)
- BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i);
+ BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
PHIReg += NumRegisters;
}
}
/// larger than ValueVT, then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
-static SDValue getCopyFromParts(SelectionDAG &DAG, const SDValue *Parts,
+static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
+ const SDValue *Parts,
unsigned NumParts, MVT PartVT, MVT ValueVT,
ISD::NodeType AssertOp = ISD::DELETED_NODE) {
assert(NumParts > 0 && "No parts to assemble!");
if (NumParts > 1) {
// Assemble the value from multiple parts.
- if (!ValueVT.isVector()) {
+ if (!ValueVT.isVector() && ValueVT.isInteger()) {
unsigned PartBits = PartVT.getSizeInBits();
unsigned ValueBits = ValueVT.getSizeInBits();
ValueVT : MVT::getIntegerVT(RoundBits);
SDValue Lo, Hi;
- MVT HalfVT = ValueVT.isInteger() ?
- MVT::getIntegerVT(RoundBits/2) :
- MVT::getFloatingPointVT(RoundBits/2);
+ MVT HalfVT = MVT::getIntegerVT(RoundBits/2);
if (RoundParts > 2) {
- Lo = getCopyFromParts(DAG, Parts, RoundParts/2, PartVT, HalfVT);
- Hi = getCopyFromParts(DAG, Parts+RoundParts/2, RoundParts/2,
+ Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
+ Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
PartVT, HalfVT);
} else {
- Lo = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
- HalfVT, Parts[0]);
- Hi = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
- HalfVT, Parts[1]);
+ Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
+ Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
}
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Val = DAG.getNode(ISD::BUILD_PAIR, DAG.getCurDebugLoc(), RoundVT, Lo, Hi);
+ Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
if (RoundParts < NumParts) {
// Assemble the trailing non-power-of-2 part.
unsigned OddParts = NumParts - RoundParts;
MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
- Hi = getCopyFromParts(DAG, Parts+RoundParts, OddParts, PartVT, OddVT);
+ Hi = getCopyFromParts(DAG, dl,
+ Parts+RoundParts, OddParts, PartVT, OddVT);
// Combine the round and odd parts.
Lo = Val;
if (TLI.isBigEndian())
std::swap(Lo, Hi);
MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
- Hi = DAG.getNode(ISD::ANY_EXTEND, DAG.getCurDebugLoc(), TotalVT, Hi);
- Hi = DAG.getNode(ISD::SHL, DAG.getCurDebugLoc(), TotalVT, Hi,
+ Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
+ Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
DAG.getConstant(Lo.getValueType().getSizeInBits(),
- TLI.getShiftAmountTy()));
- Lo = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(), TotalVT, Lo);
- Val = DAG.getNode(ISD::OR, DAG.getCurDebugLoc(), TotalVT, Lo, Hi);
+ TLI.getPointerTy()));
+ Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
+ Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
}
- } else {
+ } else if (ValueVT.isVector()) {
// Handle a multi-element vector.
MVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
// If the register was not expanded, truncate or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
- Ops[i] = getCopyFromParts(DAG, &Parts[i], 1,
+ Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
PartVT, IntermediateVT);
} else if (NumParts > 0) {
// If the intermediate type was expanded, build the intermediate operands
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
- Ops[i] = getCopyFromParts(DAG, &Parts[i * Factor], Factor,
+ Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
PartVT, IntermediateVT);
}
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
// operands.
Val = DAG.getNode(IntermediateVT.isVector() ?
- ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR,
- DAG.getCurDebugLoc(),
+ ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
ValueVT, &Ops[0], NumIntermediates);
+ } else if (PartVT.isFloatingPoint()) {
+ // FP split into multiple FP parts (for ppcf128)
+ assert(ValueVT == MVT(MVT::ppcf128) && PartVT == MVT(MVT::f64) &&
+ "Unexpected split");
+ SDValue Lo, Hi;
+ Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[0]);
+ Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[1]);
+ if (TLI.isBigEndian())
+ std::swap(Lo, Hi);
+ Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
+ } else {
+ // FP split into integer parts (soft fp)
+ assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
+ !PartVT.isVector() && "Unexpected split");
+ MVT IntVT = MVT::getIntegerVT(ValueVT.getSizeInBits());
+ Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
}
}
if (PartVT.isVector()) {
assert(ValueVT.isVector() && "Unknown vector conversion!");
- return DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(), ValueVT, Val);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
}
if (ValueVT.isVector()) {
assert(ValueVT.getVectorElementType() == PartVT &&
ValueVT.getVectorNumElements() == 1 &&
"Only trivial scalar-to-vector conversions should get here!");
- return DAG.getNode(ISD::BUILD_VECTOR, DAG.getCurDebugLoc(), ValueVT, Val);
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
}
if (PartVT.isInteger() &&
// indicate whether the truncated bits will always be
// zero or sign-extended.
if (AssertOp != ISD::DELETED_NODE)
- Val = DAG.getNode(AssertOp, DAG.getCurDebugLoc(), PartVT, Val,
+ Val = DAG.getNode(AssertOp, dl, PartVT, Val,
DAG.getValueType(ValueVT));
- return DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), ValueVT, Val);
+ return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
} else {
- return DAG.getNode(ISD::ANY_EXTEND, DAG.getCurDebugLoc(), ValueVT, Val);
+ return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
}
}
if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
if (ValueVT.bitsLT(Val.getValueType()))
// FP_ROUND's are always exact here.
- return DAG.getNode(ISD::FP_ROUND, DAG.getCurDebugLoc(), ValueVT, Val,
+ return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
DAG.getIntPtrConstant(1));
- return DAG.getNode(ISD::FP_EXTEND, DAG.getCurDebugLoc(), ValueVT, Val);
+ return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
}
if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
- return DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(), ValueVT, Val);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
assert(0 && "Unknown mismatch!");
return SDValue();
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
-static void getCopyToParts(SelectionDAG &DAG, SDValue Val,
+static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
SDValue *Parts, unsigned NumParts, MVT PartVT,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
MVT PtrVT = TLI.getPointerTy();
MVT ValueVT = Val.getValueType();
unsigned PartBits = PartVT.getSizeInBits();
+ unsigned OrigNumParts = NumParts;
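+ // Remember the original part count; NumParts is adjusted below, but the
+ // final big-endian fix-up must reverse every part that was written.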
assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
if (!NumParts)
// If the parts cover more bits than the value has, promote the value.
if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
assert(NumParts == 1 && "Do not know what to promote to!");
- Val = DAG.getNode(ISD::FP_EXTEND, DAG.getCurDebugLoc(), PartVT, Val);
+ Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
} else if (PartVT.isInteger() && ValueVT.isInteger()) {
ValueVT = MVT::getIntegerVT(NumParts * PartBits);
- Val = DAG.getNode(ExtendKind, DAG.getCurDebugLoc(), ValueVT, Val);
+ Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
} else {
assert(0 && "Unknown mismatch!");
}
} else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size.
assert(NumParts == 1 && PartVT != ValueVT);
- Val = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(), PartVT, Val);
+ Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover fewer bits than the value has, truncate the value.
if (PartVT.isInteger() && ValueVT.isInteger()) {
ValueVT = MVT::getIntegerVT(NumParts * PartBits);
- Val = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), ValueVT, Val);
+ Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
} else {
assert(0 && "Unknown mismatch!");
}
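// Handle a non-power-of-2 part count: peel off the trailing odd parts with
// a shift and copy them out recursively, leaving a power-of-two count for
// the bisection loop below.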
unsigned RoundParts = 1 << Log2_32(NumParts);
unsigned RoundBits = RoundParts * PartBits;
unsigned OddParts = NumParts - RoundParts;
- SDValue OddVal = DAG.getNode(ISD::SRL, DAG.getCurDebugLoc(), ValueVT, Val,
+ SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
DAG.getConstant(RoundBits,
- TLI.getShiftAmountTy()));
- getCopyToParts(DAG, OddVal, Parts + RoundParts, OddParts, PartVT);
+ TLI.getPointerTy()));
+ getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
if (TLI.isBigEndian())
// The odd parts were reversed by getCopyToParts - unreverse them.
std::reverse(Parts + RoundParts, Parts + NumParts);
NumParts = RoundParts;
ValueVT = MVT::getIntegerVT(NumParts * PartBits);
- Val = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), ValueVT, Val);
+ Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
}
// The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT.
- Parts[0] = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::getIntegerVT(ValueVT.getSizeInBits()),
Val);
for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
SDValue &Part0 = Parts[i];
SDValue &Part1 = Parts[i+StepSize/2];
- Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DAG.getCurDebugLoc(),
+ Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
ThisVT, Part0,
DAG.getConstant(1, PtrVT));
- Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DAG.getCurDebugLoc(),
+ Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
ThisVT, Part0,
DAG.getConstant(0, PtrVT));
if (ThisBits == PartBits && ThisVT != PartVT) {
- Part0 = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
PartVT, Part0);
- Part1 = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
PartVT, Part1);
}
}
}
if (TLI.isBigEndian())
- std::reverse(Parts, Parts + NumParts);
+ std::reverse(Parts, Parts + OrigNumParts);
return;
}
if (NumParts == 1) {
if (PartVT != ValueVT) {
if (PartVT.isVector()) {
- Val = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(), PartVT, Val);
+ Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
} else {
assert(ValueVT.getVectorElementType() == PartVT &&
ValueVT.getVectorNumElements() == 1 &&
"Only trivial vector-to-scalar conversions should get here!");
- Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DAG.getCurDebugLoc(),
+ Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
PartVT, Val,
DAG.getConstant(0, PtrVT));
}
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i)
if (IntermediateVT.isVector())
- Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DAG.getCurDebugLoc(),
+ Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
IntermediateVT, Val,
DAG.getConstant(i * (NumElements / NumIntermediates),
PtrVT));
else
- Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DAG.getCurDebugLoc(),
+ Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
IntermediateVT, Val,
DAG.getConstant(i, PtrVT));
// If the register was not expanded, promote or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
- getCopyToParts(DAG, Ops[i], &Parts[i], 1, PartVT);
+ getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
} else if (NumParts > 0) {
// If the intermediate type was expanded, split the value into
// legal parts.
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
- getCopyToParts(DAG, Ops[i], &Parts[i * Factor], Factor, PartVT);
+ getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
}
}
PendingLoads.clear();
PendingExports.clear();
DAG.clear();
+ CurDebugLoc = DebugLoc::getUnknownLoc();
}
/// getRoot - Return the current virtual root of the Selection DAG,
}
// Otherwise, we have to make a token factor node.
- SDValue Root = DAG.getNode(ISD::TokenFactor, DAG.getCurDebugLoc(), MVT::Other,
+ SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
&PendingLoads[0], PendingLoads.size());
PendingLoads.clear();
DAG.setRoot(Root);
PendingExports.push_back(Root);
}
- Root = DAG.getNode(ISD::TokenFactor, DAG.getCurDebugLoc(), MVT::Other,
+ Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
&PendingExports[0],
PendingExports.size());
PendingExports.clear();
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return N = DAG.getConstantFP(*CFP, VT);
- if (isa<UndefValue>(C) && !isa<VectorType>(V->getType()) &&
- !V->getType()->isAggregateType())
- return N = DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(), VT);
+ if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
+ return N = DAG.getUNDEF(VT);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
visit(CE->getOpcode(), *CE);
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
Constants.push_back(SDValue(Val, i));
}
- return DAG.getMergeValues(&Constants[0], Constants.size());
+ return DAG.getMergeValues(&Constants[0], Constants.size(),
+ getCurDebugLoc());
}
if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
for (unsigned i = 0; i != NumElts; ++i) {
MVT EltVT = ValueVTs[i];
if (isa<UndefValue>(C))
- Constants[i] = DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(), EltVT);
+ Constants[i] = DAG.getUNDEF(EltVT);
else if (EltVT.isFloatingPoint())
Constants[i] = DAG.getConstantFP(0, EltVT);
else
Constants[i] = DAG.getConstant(0, EltVT);
}
- return DAG.getMergeValues(&Constants[0], NumElts);
+ return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
}
const VectorType *VecTy = cast<VectorType>(V->getType());
for (unsigned i = 0; i != NumElements; ++i)
Ops.push_back(getValue(CP->getOperand(i)));
} else {
- assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
- "Unknown vector constant!");
+ assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
MVT EltVT = TLI.getValueType(VecTy->getElementType());
SDValue Op;
- if (isa<UndefValue>(C))
- Op = DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(), EltVT);
- else if (EltVT.isFloatingPoint())
+ if (EltVT.isFloatingPoint())
Op = DAG.getConstantFP(0, EltVT);
else
Op = DAG.getConstant(0, EltVT);
}
// Create a BUILD_VECTOR node.
- return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, DAG.getCurDebugLoc(),
+ return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
VT, &Ops[0], Ops.size());
}
RegsForValue RFV(TLI, InReg, V->getType());
SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, Chain, NULL);
+ return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
}
void SelectionDAGLowering::visitRet(ReturnInst &I) {
if (I.getNumOperands() == 0) {
- DAG.setRoot(DAG.getNode(ISD::RET, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
MVT::Other, getControlRoot()));
return;
}
for (unsigned j = 0, f = NumValues; j != f; ++j) {
MVT VT = ValueVTs[j];
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+ const Function *F = I.getParent()->getParent();
+ if (F->paramHasAttr(0, Attribute::SExt))
+ ExtendKind = ISD::SIGN_EXTEND;
+ else if (F->paramHasAttr(0, Attribute::ZExt))
+ ExtendKind = ISD::ZERO_EXTEND;
+
// FIXME: C calling convention requires the return type to be promoted to
// at least 32-bit. But this is not necessary for non-C calling
- // conventions.
- if (VT.isInteger()) {
+ // conventions. The frontend should mark functions whose return values
+ // require promoting with signext or zeroext attributes.
+ if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
MVT MinVT = TLI.getRegisterType(MVT::i32);
if (VT.bitsLT(MinVT))
VT = MinVT;
unsigned NumParts = TLI.getNumRegisters(VT);
MVT PartVT = TLI.getRegisterType(VT);
SmallVector<SDValue, 4> Parts(NumParts);
- ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
- const Function *F = I.getParent()->getParent();
- if (F->paramHasAttr(0, Attribute::SExt))
- ExtendKind = ISD::SIGN_EXTEND;
- else if (F->paramHasAttr(0, Attribute::ZExt))
- ExtendKind = ISD::ZERO_EXTEND;
-
- getCopyToParts(DAG, SDValue(RetOp.getNode(), RetOp.getResNo() + j),
+ getCopyToParts(DAG, getCurDebugLoc(),
+ SDValue(RetOp.getNode(), RetOp.getResNo() + j),
&Parts[0], NumParts, PartVT, ExtendKind);
// 'inreg' on function refers to return value
}
}
}
- DAG.setRoot(DAG.getNode(ISD::RET, DAG.getCurDebugLoc(), MVT::Other,
+ DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
&NewValues[0], NewValues.size()));
}
+/// CopyToExportRegsIfNeeded - If the given value has virtual registers
+/// created for it, emit nodes to copy the value into the virtual
+/// registers.
+void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
+ if (!V->use_empty()) {
+ DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
+ if (VMI != FuncInfo.ValueMap.end())
+ CopyValueToVirtualRegister(V, VMI->second);
+ }
+}
+
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
// If this is not a fall-through branch, emit the branch.
if (Succ0MBB != NextBlock)
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Succ0MBB)));
return;
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
SDValue Cond;
SDValue CondLHS = getValue(CB.CmpLHS);
+ DebugLoc dl = getCurDebugLoc();
// Build the setcc now.
if (CB.CmpMHS == NULL) {
Cond = CondLHS;
else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
SDValue True = DAG.getConstant(1, CondLHS.getValueType());
- Cond = DAG.getNode(ISD::XOR, DAG.getCurDebugLoc(),
- CondLHS.getValueType(), CondLHS, True);
+ Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
} else
- Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
+ Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
} else {
assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
MVT VT = CmpOp.getValueType();
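// If the low end of the range is the minimum signed value, the subtraction
// is unnecessary: a single signed compare against High suffices.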
if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
- Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE);
+ Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
+ ISD::SETLE);
} else {
- SDValue SUB = DAG.getNode(ISD::SUB, DAG.getCurDebugLoc(),
+ SDValue SUB = DAG.getNode(ISD::SUB, dl,
VT, CmpOp, DAG.getConstant(Low, VT));
- Cond = DAG.getSetCC(MVT::i1, SUB,
+ Cond = DAG.getSetCC(dl, MVT::i1, SUB,
DAG.getConstant(High-Low, VT), ISD::SETULE);
}
}
if (CB.TrueBB == NextBlock) {
std::swap(CB.TrueBB, CB.FalseBB);
SDValue True = DAG.getConstant(1, Cond.getValueType());
- Cond = DAG.getNode(ISD::XOR, DAG.getCurDebugLoc(),
- Cond.getValueType(), Cond, True);
+ Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
}
- SDValue BrCond = DAG.getNode(ISD::BRCOND, DAG.getCurDebugLoc(),
+ SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
MVT::Other, getControlRoot(), Cond,
DAG.getBasicBlock(CB.TrueBB));
if (CB.FalseBB == NextBlock)
DAG.setRoot(BrCond);
else
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(), MVT::Other, BrCond,
+ DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
DAG.getBasicBlock(CB.FalseBB)));
}
}
// Emit the code for the jump table
assert(JT.Reg != -1U && "Should lower JT Header first!");
MVT PTy = TLI.getPointerTy();
- SDValue Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy);
+ SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
+ JT.Reg, PTy);
SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
- DAG.setRoot(DAG.getNode(ISD::BR_JT, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
MVT::Other, Index.getValue(1),
Table, Index));
}
// difference between smallest and largest cases.
SDValue SwitchOp = getValue(JTH.SValue);
MVT VT = SwitchOp.getValueType();
- SDValue SUB = DAG.getNode(ISD::SUB, DAG.getCurDebugLoc(), VT, SwitchOp,
+ SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
DAG.getConstant(JTH.First, VT));
// The SDNode we just created, which holds the value being switched on minus
// This value may be smaller or larger than the target's pointer type, and
// therefore require extension or truncating.
if (VT.bitsGT(TLI.getPointerTy()))
- SwitchOp = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(),
+ SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
TLI.getPointerTy(), SUB);
else
- SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(),
+ SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(), SUB);
unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
- SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), JumpTableReg, SwitchOp);
+ SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
+ JumpTableReg, SwitchOp);
JT.Reg = JumpTableReg;
// Emit the range check for the jump table, and branch to the default block
// for the switch statement if the value being switched on exceeds the largest
// case in the switch.
- SDValue CMP = DAG.getSetCC(TLI.getSetCCResultType(SUB.getValueType()), SUB,
+ SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(SUB.getValueType()), SUB,
DAG.getConstant(JTH.Last-JTH.First,VT),
ISD::SETUGT);
if (++BBI != CurMBB->getParent()->end())
NextBlock = BBI;
- SDValue BrCond = DAG.getNode(ISD::BRCOND, DAG.getCurDebugLoc(),
+ SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, CopyTo, CMP,
DAG.getBasicBlock(JT.Default));
if (JT.MBB == NextBlock)
DAG.setRoot(BrCond);
else
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(), MVT::Other, BrCond,
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
DAG.getBasicBlock(JT.MBB)));
}
// Subtract the minimum value
SDValue SwitchOp = getValue(B.SValue);
MVT VT = SwitchOp.getValueType();
- SDValue SUB = DAG.getNode(ISD::SUB, DAG.getCurDebugLoc(), VT, SwitchOp,
+ SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
DAG.getConstant(B.First, VT));
// Check range
- SDValue RangeCmp = DAG.getSetCC(TLI.getSetCCResultType(SUB.getValueType()), SUB,
- DAG.getConstant(B.Range, VT),
+ SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(SUB.getValueType()),
+ SUB, DAG.getConstant(B.Range, VT),
ISD::SETUGT);
SDValue ShiftOp;
- if (VT.bitsGT(TLI.getShiftAmountTy()))
- ShiftOp = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(),
- TLI.getShiftAmountTy(), SUB);
+ if (VT.bitsGT(TLI.getPointerTy()))
+ ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
+ TLI.getPointerTy(), SUB);
else
- ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(),
- TLI.getShiftAmountTy(), SUB);
+ ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
+ TLI.getPointerTy(), SUB);
- B.Reg = FuncInfo.MakeReg(TLI.getShiftAmountTy());
- SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), B.Reg, ShiftOp);
+ B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
+ SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
+ B.Reg, ShiftOp);
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
CurMBB->addSuccessor(B.Default);
CurMBB->addSuccessor(MBB);
- SDValue BrRange = DAG.getNode(ISD::BRCOND, DAG.getCurDebugLoc(),
+ SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, CopyTo, RangeCmp,
DAG.getBasicBlock(B.Default));
if (MBB == NextBlock)
DAG.setRoot(BrRange);
else
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(), MVT::Other, CopyTo,
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
DAG.getBasicBlock(MBB)));
}
unsigned Reg,
BitTestCase &B) {
// Make desired shift
- SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), Reg,
- TLI.getShiftAmountTy());
- SDValue SwitchVal = DAG.getNode(ISD::SHL, DAG.getCurDebugLoc(),
+ SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
+ TLI.getPointerTy());
+ SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
TLI.getPointerTy(),
DAG.getConstant(1, TLI.getPointerTy()),
ShiftOp);
// Emit bit tests and jumps
- SDValue AndOp = DAG.getNode(ISD::AND, DAG.getCurDebugLoc(),
+ SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
TLI.getPointerTy(), SwitchVal,
DAG.getConstant(B.Mask, TLI.getPointerTy()));
- SDValue AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp.getValueType()),
+ SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(AndOp.getValueType()),
AndOp, DAG.getConstant(0, TLI.getPointerTy()),
ISD::SETNE);
CurMBB->addSuccessor(B.TargetBB);
CurMBB->addSuccessor(NextMBB);
- SDValue BrAnd = DAG.getNode(ISD::BRCOND, DAG.getCurDebugLoc(),
+ SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, getControlRoot(),
AndCmp, DAG.getBasicBlock(B.TargetBB));
if (NextMBB == NextBlock)
DAG.setRoot(BrAnd);
else
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(), MVT::Other, BrAnd,
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
DAG.getBasicBlock(NextMBB)));
}
// If the value of the invoke is used outside of its defining block, make it
// available as a virtual register.
- if (!I.use_empty()) {
- DenseMap<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(&I);
- if (VMI != FuncInfo.ValueMap.end())
- CopyValueToVirtualRegister(&I, VMI->second);
- }
+ CopyToExportRegsIfNeeded(&I);
// Update successor info
CurMBB->addSuccessor(Return);
CurMBB->addSuccessor(LandingPad);
// Drop into normal successor.
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Return)));
}
if (I != E-1) {
FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
CurMF->insert(BBI, FallThrough);
+
+ // Put SV in a virtual register to make it available from the new blocks.
+ ExportFromCurrentBlock(SV);
} else {
// If the last case doesn't match, go to the default block.
FallThrough = Default;
TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
CurMF->insert(BBI, TrueBB);
WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
+
+ // Put SV in a virtual register to make it available from the new blocks.
+ ExportFromCurrentBlock(SV);
}
// Similar to the optimization above, if the Value being switched on is
FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
CurMF->insert(BBI, FalseBB);
WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
+
+ // Put SV in a virtual register to make it available from the new blocks.
+ ExportFromCurrentBlock(SV);
}
// Create a CaseBlock record representing a conditional branch to
// inserting any additional MBBs necessary to represent the switch.
MachineFunction *CurMF = CurMBB->getParent();
+ // If the target does not have a legal shift left, do not emit bit tests at all.
+ if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
+ return false;
+
size_t numCmps = 0;
for (CaseItr I = CR.Range.first, E = CR.Range.second;
I!=E; ++I) {
BTC.push_back(BitTestCase(CasesBits[i].Mask,
CaseBB,
CasesBits[i].BB));
+
+ // Put SV in a virtual register to make it available from the new blocks.
+ ExportFromCurrentBlock(SV);
}
BitTestBlock BTB(lowBound, cmpRange, SV,
// If this is not a fall-through branch, emit the branch.
CurMBB->addSuccessor(Default);
if (Default != NextBlock)
- DAG.setRoot(DAG.getNode(ISD::BR, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Default)));
return;
Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
if (CV == CNZ) {
SDValue Op2 = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FNEG, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
Op2.getValueType(), Op2));
return;
}
if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
SDValue Op2 = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FNEG, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
Op2.getValueType(), Op2));
return;
}
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(OpCode, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
Op1.getValueType(), Op1, Op2));
}
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
- if (!isa<VectorType>(I.getType())) {
- if (TLI.getShiftAmountTy().bitsLT(Op2.getValueType()))
- Op2 = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(),
+ if (!isa<VectorType>(I.getType()) &&
+ Op2.getValueType() != TLI.getShiftAmountTy()) {
+ // If the operand is smaller than the shift count type, promote it.
+ if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
+ Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
TLI.getShiftAmountTy(), Op2);
- else if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
- Op2 = DAG.getNode(ISD::ANY_EXTEND, DAG.getCurDebugLoc(),
+ // If the operand is larger than the shift count type but the shift
+ // count type has enough bits to represent any shift value, truncate
+ // it now. This is a common case and it exposes the truncate to
+ // optimization early.
+ else if (TLI.getShiftAmountTy().getSizeInBits() >=
+ Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
+ Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
TLI.getShiftAmountTy(), Op2);
- }
-
- setValue(&I, DAG.getNode(Opcode, DAG.getCurDebugLoc(),
+ // Otherwise we'll need to temporarily settle for some other
+ // convenient type; type legalization will make adjustments as
+ // needed.
+ else if (TLI.getPointerTy().bitsLT(Op2.getValueType()))
+ Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
+ TLI.getPointerTy(), Op2);
+ else if (TLI.getPointerTy().bitsGT(Op2.getValueType()))
+ Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
+ TLI.getPointerTy(), Op2);
+ }
+
+ setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
Op1.getValueType(), Op1, Op2));
}
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
- setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
+ setValue(&I, DAG.getSetCC(getCurDebugLoc(), MVT::i1, Op1, Op2, Opcode));
}
void SelectionDAGLowering::visitFCmp(User &I) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Condition = getFCmpCondCode(predicate);
- setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
+ setValue(&I, DAG.getSetCC(getCurDebugLoc(), MVT::i1, Op1, Op2, Condition));
}
void SelectionDAGLowering::visitVICmp(User &I) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
ISD::CondCode Opcode = getICmpCondCode(predicate);
- setValue(&I, DAG.getVSetCC(Op1.getValueType(), Op1, Op2, Opcode));
+ setValue(&I, DAG.getVSetCC(getCurDebugLoc(), Op1.getValueType(),
+ Op1, Op2, Opcode));
}
void SelectionDAGLowering::visitVFCmp(User &I) {
ISD::CondCode Condition = getFCmpCondCode(predicate);
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getVSetCC(DestVT, Op1, Op2, Condition));
+ setValue(&I, DAG.getVSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGLowering::visitSelect(User &I) {
SDValue FalseVal = getValue(I.getOperand(2));
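// Aggregate selects are lowered one constituent value at a time; the
// pieces are rejoined with MERGE_VALUES below.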
for (unsigned i = 0; i != NumValues; ++i)
- Values[i] = DAG.getNode(ISD::SELECT, DAG.getCurDebugLoc(),
+ Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
TrueVal.getValueType(), Cond,
SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
- setValue(&I, DAG.getNode(ISD::MERGE_VALUES, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
DAG.getVTList(&ValueVTs[0], NumValues),
&Values[0], NumValues));
}
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitZExt(User &I) {
// ZExt also can't be a cast to bool for the same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitSExt(User &I) {
// SExt also can't be a cast to bool for the same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitFPTrunc(User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::FP_ROUND, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
DestVT, N, DAG.getIntPtrConstant(0)));
}
// FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::FP_EXTEND, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitFPToUI(User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitFPToSI(User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitUIToFP(User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitSIToFP(User &I){
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
MVT DestVT = TLI.getValueType(I.getType());
- setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGLowering::visitPtrToInt(User &I) {
MVT DestVT = TLI.getValueType(I.getType());
SDValue Result;
if (DestVT.bitsLT(SrcVT))
- Result = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), DestVT, N);
+ Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
else
// Note: ZERO_EXTEND can handle cases where the sizes are equal too
- Result = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(), DestVT, N);
+ Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
setValue(&I, Result);
}
MVT SrcVT = N.getValueType();
MVT DestVT = TLI.getValueType(I.getType());
if (DestVT.bitsLT(SrcVT))
- setValue(&I, DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), DestVT, N));
+ setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
else
// Note: ZERO_EXTEND can handle cases where the sizes are equal too
- setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
DestVT, N));
}
// BitCast assures us that source and destination are the same size so this
// is either a BIT_CONVERT or a no-op.
if (DestVT != N.getValueType())
- setValue(&I, DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
DestVT, N)); // convert types
else
setValue(&I, N); // noop cast.
void SelectionDAGLowering::visitInsertElement(User &I) {
SDValue InVec = getValue(I.getOperand(0));
SDValue InVal = getValue(I.getOperand(1));
- SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(),
+ SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(),
getValue(I.getOperand(2)));
- setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
TLI.getValueType(I.getType()),
InVec, InVal, InIdx));
}
void SelectionDAGLowering::visitExtractElement(User &I) {
SDValue InVec = getValue(I.getOperand(0));
- SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(),
+ SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(),
getValue(I.getOperand(1)));
- setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
TLI.getValueType(I.getType()), InVec, InIdx));
}
// Utility for visitShuffleVector - Returns true if the mask is a sequential
// mask starting at SIndx and increasing to the element length (undefs are
// allowed).
-static bool SequentialMask(SDValue Mask, unsigned SIndx) {
- unsigned MaskNumElts = Mask.getNumOperands();
- for (unsigned i = 0; i != MaskNumElts; ++i) {
- if (Mask.getOperand(i).getOpcode() != ISD::UNDEF) {
- unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getZExtValue();
- if (Idx != i + SIndx)
- return false;
- }
- }
+static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
+ unsigned MaskNumElts = Mask.size();
+ for (unsigned i = 0; i != MaskNumElts; ++i)
+ if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
+ return false;
return true;
}
void SelectionDAGLowering::visitShuffleVector(User &I) {
+ SmallVector<int, 8> Mask;
SDValue Src1 = getValue(I.getOperand(0));
SDValue Src2 = getValue(I.getOperand(1));
- SDValue Mask = getValue(I.getOperand(2));
+ // Convert the ConstantVector mask operand into an array of ints, with -1
+ // representing undef values.
+ SmallVector<Constant*, 8> MaskElts;
+ cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
+ unsigned MaskNumElts = MaskElts.size();
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ if (isa<UndefValue>(MaskElts[i]))
+ Mask.push_back(-1);
+ else
+ Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
+ }
+
MVT VT = TLI.getValueType(I.getType());
MVT SrcVT = Src1.getValueType();
- int MaskNumElts = Mask.getNumOperands();
- int SrcNumElts = SrcVT.getVectorNumElements();
+ unsigned SrcNumElts = SrcVT.getVectorNumElements();
if (SrcNumElts == MaskNumElts) {
- setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, DAG.getCurDebugLoc(),
- VT, Src1, Src2, Mask));
+ setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
+ &Mask[0]));
return;
}
// Normalize the shuffle vector since mask and vector length don't match.
- MVT MaskEltVT = Mask.getValueType().getVectorElementType();
-
if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
// Mask is longer than the source vectors and is a multiple of the source
// vectors. We can use concatenate vector to make the mask and vectors
// lengths match.
if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
// The shuffle is concatenating two vectors together.
- setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
VT, Src1, Src2));
return;
}
// Pad both vectors with undefs to make them the same length as the mask.
unsigned NumConcat = MaskNumElts / SrcNumElts;
- SDValue UndefVal = DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(), SrcVT);
+ bool Src1U = Src1.getOpcode() == ISD::UNDEF;
+ bool Src2U = Src2.getOpcode() == ISD::UNDEF;
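+ // Note whether either input is entirely undef so the CONCAT below can be
+ // skipped in favor of a plain undef of the wider type.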
+ SDValue UndefVal = DAG.getUNDEF(SrcVT);
- SDValue* MOps1 = new SDValue[NumConcat];
- SDValue* MOps2 = new SDValue[NumConcat];
+ SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
+ SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
MOps1[0] = Src1;
MOps2[0] = Src2;
- for (unsigned i = 1; i != NumConcat; ++i) {
- MOps1[i] = UndefVal;
- MOps2[i] = UndefVal;
- }
- Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DAG.getCurDebugLoc(),
- VT, MOps1, NumConcat);
- Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DAG.getCurDebugLoc(),
- VT, MOps2, NumConcat);
-
- delete [] MOps1;
- delete [] MOps2;
+
+ Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
+ getCurDebugLoc(), VT,
+ &MOps1[0], NumConcat);
+ Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
+ getCurDebugLoc(), VT,
+ &MOps2[0], NumConcat);
// Readjust mask for new input vector length.
- SmallVector<SDValue, 8> MappedOps;
- for (int i = 0; i != MaskNumElts; ++i) {
- if (Mask.getOperand(i).getOpcode() == ISD::UNDEF) {
- MappedOps.push_back(Mask.getOperand(i));
- } else {
- int Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getZExtValue();
- if (Idx < SrcNumElts)
- MappedOps.push_back(DAG.getConstant(Idx, MaskEltVT));
- else
- MappedOps.push_back(DAG.getConstant(Idx + MaskNumElts - SrcNumElts,
- MaskEltVT));
- }
+ SmallVector<int, 8> MappedOps;
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ int Idx = Mask[i];
+ if (Idx < (int)SrcNumElts)
+ MappedOps.push_back(Idx);
+ else
+ MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
}
- Mask = DAG.getNode(ISD::BUILD_VECTOR, DAG.getCurDebugLoc(),
- Mask.getValueType(),
- &MappedOps[0], MappedOps.size());
-
- setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, DAG.getCurDebugLoc(),
- VT, Src1, Src2, Mask));
+ setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
+ &MappedOps[0]));
return;
}
if (SrcNumElts > MaskNumElts) {
- // Resulting vector is shorter than the incoming vector.
- if (SrcNumElts == MaskNumElts && SequentialMask(Mask,0)) {
- // Shuffle extracts 1st vector.
- setValue(&I, Src1);
- return;
- }
-
- if (SrcNumElts == MaskNumElts && SequentialMask(Mask,MaskNumElts)) {
- // Shuffle extracts 2nd vector.
- setValue(&I, Src2);
- return;
- }
-
// Analyze the access pattern of the vector to see if we can extract
// two subvectors and do the shuffle. The analysis is done by calculating
// the range of elements the mask accesses on both vectors.
int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
int MaxRange[2] = {-1, -1};
- for (int i = 0; i != MaskNumElts; ++i) {
- SDValue Arg = Mask.getOperand(i);
- if (Arg.getOpcode() != ISD::UNDEF) {
- assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
- int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
- int Input = 0;
- if (Idx >= SrcNumElts) {
- Input = 1;
- Idx -= SrcNumElts;
- }
- if (Idx > MaxRange[Input])
- MaxRange[Input] = Idx;
- if (Idx < MinRange[Input])
- MinRange[Input] = Idx;
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ int Idx = Mask[i];
+ int Input = 0;
+ if (Idx < 0)
+ continue;
+
+ if (Idx >= (int)SrcNumElts) {
+ Input = 1;
+ Idx -= SrcNumElts;
}
+ if (Idx > MaxRange[Input])
+ MaxRange[Input] = Idx;
+ if (Idx < MinRange[Input])
+ MinRange[Input] = Idx;
}
// Check if the access is smaller than the vector size and can we find
int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot Extract.
int StartIdx[2]; // StartIdx to extract from
for (int Input=0; Input < 2; ++Input) {
- if (MinRange[Input] == SrcNumElts+1 && MaxRange[Input] == -1) {
+ if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
RangeUse[Input] = 0; // Unused
StartIdx[Input] = 0;
- } else if (MaxRange[Input] - MinRange[Input] < MaskNumElts) {
+ } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
// Fits within range but we should see if we can find a good
// start index that is a multiple of the mask length.
- if (MaxRange[Input] < MaskNumElts) {
+ if (MaxRange[Input] < (int)MaskNumElts) {
RangeUse[Input] = 1; // Extract from beginning of the vector
StartIdx[Input] = 0;
} else {
StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
- if (MaxRange[Input] - StartIdx[Input] < MaskNumElts &&
+ if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
StartIdx[Input] + MaskNumElts < SrcNumElts)
RangeUse[Input] = 1; // Extract from a multiple of the mask length.
}
}
if (RangeUse[0] == 0 && RangeUse[1] == 0) {
- setValue(&I, DAG.getNode(ISD::UNDEF,
- DAG.getCurDebugLoc(), VT)); // Vectors are not used.
+ setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
return;
}
else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
for (int Input=0; Input < 2; ++Input) {
SDValue& Src = Input == 0 ? Src1 : Src2;
if (RangeUse[Input] == 0) {
- Src = DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(), VT);
+ Src = DAG.getUNDEF(VT);
} else {
- Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DAG.getCurDebugLoc(), VT,
+ Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
Src, DAG.getIntPtrConstant(StartIdx[Input]));
}
}
// Calculate new mask.
- SmallVector<SDValue, 8> MappedOps;
- for (int i = 0; i != MaskNumElts; ++i) {
- SDValue Arg = Mask.getOperand(i);
- if (Arg.getOpcode() == ISD::UNDEF) {
- MappedOps.push_back(Arg);
- } else {
- int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
- if (Idx < SrcNumElts)
- MappedOps.push_back(DAG.getConstant(Idx - StartIdx[0], MaskEltVT));
- else {
- Idx = Idx - SrcNumElts - StartIdx[1] + MaskNumElts;
- MappedOps.push_back(DAG.getConstant(Idx, MaskEltVT));
- }
- }
+ SmallVector<int, 8> MappedOps;
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ int Idx = Mask[i];
+ if (Idx < 0)
+ MappedOps.push_back(Idx);
+ else if (Idx < (int)SrcNumElts)
+ MappedOps.push_back(Idx - StartIdx[0]);
+ else
+ MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
}
- Mask = DAG.getNode(ISD::BUILD_VECTOR, DAG.getCurDebugLoc(),
- Mask.getValueType(),
- &MappedOps[0], MappedOps.size());
- setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, DAG.getCurDebugLoc(),
- VT, Src1, Src2, Mask));
+ setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
+ &MappedOps[0]));
return;
}
}
MVT EltVT = VT.getVectorElementType();
MVT PtrVT = TLI.getPointerTy();
SmallVector<SDValue,8> Ops;
- for (int i = 0; i != MaskNumElts; ++i) {
- SDValue Arg = Mask.getOperand(i);
- if (Arg.getOpcode() == ISD::UNDEF) {
- Ops.push_back(DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(), EltVT));
+ for (unsigned i = 0; i != MaskNumElts; ++i) {
+ if (Mask[i] < 0) {
+ Ops.push_back(DAG.getUNDEF(EltVT));
} else {
- assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
- int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
- if (Idx < SrcNumElts)
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DAG.getCurDebugLoc(),
+ int Idx = Mask[i];
+ if (Idx < (int)SrcNumElts)
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
else
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DAG.getCurDebugLoc(),
- EltVT, Src2,
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
+ EltVT, Src2,
DAG.getConstant(Idx - SrcNumElts, PtrVT)));
}
}
- setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
VT, &Ops[0], Ops.size()));
}
unsigned i = 0;
// Copy the beginning value(s) from the original aggregate.
for (; i != LinearIndex; ++i)
- Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(),
- AggValueVTs[i]) :
+ Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
SDValue(Agg.getNode(), Agg.getResNo() + i);
// Copy values from the inserted value(s).
for (; i != LinearIndex + NumValValues; ++i)
- Values[i] = FromUndef ? DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(),
- AggValueVTs[i]) :
+ Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
// Copy remaining value(s) from the original aggregate.
for (; i != NumAggValues; ++i)
- Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(),
- AggValueVTs[i]) :
+ Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
SDValue(Agg.getNode(), Agg.getResNo() + i);
- setValue(&I, DAG.getNode(ISD::MERGE_VALUES, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
DAG.getVTList(&AggValueVTs[0], NumAggValues),
&Values[0], NumAggValues));
}
for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
Values[i - LinearIndex] =
OutOfUndef ?
- DAG.getNode(ISD::UNDEF, DAG.getCurDebugLoc(),
- Agg.getNode()->getValueType(Agg.getResNo() + i)) :
+ DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
SDValue(Agg.getNode(), Agg.getResNo() + i);
- setValue(&I, DAG.getNode(ISD::MERGE_VALUES, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
DAG.getVTList(&ValValueVTs[0], NumValValues),
&Values[0], NumValValues));
}
if (Field) {
// N = N + Offset
uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
- N = DAG.getNode(ISD::ADD, DAG.getCurDebugLoc(), N.getValueType(), N,
+ N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
DAG.getIntPtrConstant(Offset));
}
Ty = StTy->getElementType(Field);
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
- TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
- N = DAG.getNode(ISD::ADD, DAG.getCurDebugLoc(), N.getValueType(), N,
- DAG.getIntPtrConstant(Offs));
+ TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ SDValue OffsVal;
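+ // Offs was computed in 64 bits; on a target with narrower pointers,
+ // materialize it as an i64 constant and truncate to the pointer width.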
+ unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
+ if (PtrBits < 64) {
+ OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
+ TLI.getPointerTy(),
+ DAG.getConstant(Offs, MVT::i64));
+ } else
+ OffsVal = DAG.getIntPtrConstant(Offs);
+ N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
+ OffsVal);
continue;
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD->getTypePaddedSize(Ty);
+ uint64_t ElementSize = TD->getTypeAllocSize(Ty);
SDValue IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
// it.
if (IdxN.getValueType().bitsLT(N.getValueType()))
- IdxN = DAG.getNode(ISD::SIGN_EXTEND, DAG.getCurDebugLoc(),
+ IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
N.getValueType(), IdxN);
else if (IdxN.getValueType().bitsGT(N.getValueType()))
- IdxN = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(),
+ IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
N.getValueType(), IdxN);
// If this is a multiply by a power of two, turn it into a shl
if (ElementSize != 1) {
if (isPowerOf2_64(ElementSize)) {
unsigned Amt = Log2_64(ElementSize);
- IdxN = DAG.getNode(ISD::SHL, DAG.getCurDebugLoc(),
+ IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
N.getValueType(), IdxN,
- DAG.getConstant(Amt, TLI.getShiftAmountTy()));
+ DAG.getConstant(Amt, TLI.getPointerTy()));
} else {
SDValue Scale = DAG.getIntPtrConstant(ElementSize);
- IdxN = DAG.getNode(ISD::MUL, DAG.getCurDebugLoc(),
+ IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
N.getValueType(), IdxN, Scale);
}
}
- N = DAG.getNode(ISD::ADD, DAG.getCurDebugLoc(),
+ N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
N.getValueType(), N, IdxN);
}
}
return; // getValue will auto-populate this.
const Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
I.getAlignment());
SDValue AllocSize = getValue(I.getArraySize());
+
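+ // Scale the array size by the element size in the size operand's own type;
+ // the product is truncated or extended to the pointer type just below.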
+ AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
+ AllocSize,
+ DAG.getConstant(TySize, AllocSize.getValueType()));
+
MVT IntPtr = TLI.getPointerTy();
if (IntPtr.bitsLT(AllocSize.getValueType()))
- AllocSize = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(),
+ AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
IntPtr, AllocSize);
else if (IntPtr.bitsGT(AllocSize.getValueType()))
- AllocSize = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(),
+ AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
IntPtr, AllocSize);
- AllocSize = DAG.getNode(ISD::MUL, DAG.getCurDebugLoc(), IntPtr, AllocSize,
- DAG.getIntPtrConstant(TySize));
-
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the requested alignment is greater
// than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
// Round the size of the allocation up to the stack alignment size
// by adding SA-1 to the size.
- AllocSize = DAG.getNode(ISD::ADD, DAG.getCurDebugLoc(),
+ AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
AllocSize.getValueType(), AllocSize,
DAG.getIntPtrConstant(StackAlign-1));
// Mask out the low bits for alignment purposes.
- AllocSize = DAG.getNode(ISD::AND, DAG.getCurDebugLoc(),
+ AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
AllocSize.getValueType(), AllocSize,
DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
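+ // e.g. with StackAlign == 16, a request of 20 bytes becomes
+ // (20 + 15) & ~15 == 32, a multiple of the stack alignment.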
SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
- const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
- MVT::Other);
- SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, DAG.getCurDebugLoc(),
- VTs, 2, Ops, 3);
+ SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
+ SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
+ VTs, Ops, 3);
setValue(&I, DSA);
DAG.setRoot(DSA.getValue(1));
SmallVector<SDValue, 4> Chains(NumValues);
MVT PtrVT = Ptr.getValueType();
for (unsigned i = 0; i != NumValues; ++i) {
- SDValue L = DAG.getLoad(ValueVTs[i], DAG.getCurDebugLoc(), Root,
- DAG.getNode(ISD::ADD, DAG.getCurDebugLoc(),
+ SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
+ DAG.getNode(ISD::ADD, getCurDebugLoc(),
PtrVT, Ptr,
DAG.getConstant(Offsets[i], PtrVT)),
SV, Offsets[i],
}
if (!ConstantMemory) {
- SDValue Chain = DAG.getNode(ISD::TokenFactor, DAG.getCurDebugLoc(),
+ SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
MVT::Other,
&Chains[0], NumValues);
if (isVolatile)
PendingLoads.push_back(Chain);
}
- setValue(&I, DAG.getNode(ISD::MERGE_VALUES, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
DAG.getVTList(&ValueVTs[0], NumValues),
&Values[0], NumValues));
}
bool isVolatile = I.isVolatile();
unsigned Alignment = I.getAlignment();
for (unsigned i = 0; i != NumValues; ++i)
- Chains[i] = DAG.getStore(Root, DAG.getCurDebugLoc(),
+ Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
SDValue(Src.getNode(), Src.getResNo() + i),
- DAG.getNode(ISD::ADD, DAG.getCurDebugLoc(),
+ DAG.getNode(ISD::ADD, getCurDebugLoc(),
PtrVT, Ptr,
DAG.getConstant(Offsets[i], PtrVT)),
PtrV, Offsets[i],
isVolatile, Alignment);
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
MVT::Other, &Chains[0], NumValues));
}
Ops.push_back(Op);
}
- std::vector<MVT> VTs;
+ std::vector<MVT> VTArray;
if (I.getType() != Type::VoidTy) {
MVT VT = TLI.getValueType(I.getType());
if (VT.isVector()) {
}
assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
- VTs.push_back(VT);
+ VTArray.push_back(VT);
}
if (HasChain)
- VTs.push_back(MVT::Other);
+ VTArray.push_back(MVT::Other);
- const MVT *VTList = DAG.getNodeValueTypes(VTs);
+ SDVTList VTs = DAG.getVTList(&VTArray[0], VTArray.size());
// Create the node.
SDValue Result;
if (IsTgtIntrinsic) {
// This is target intrinsic that touches memory
- Result = DAG.getMemIntrinsicNode(Info.opc, DAG.getCurDebugLoc(),
- VTList, VTs.size(),
- &Ops[0], Ops.size(),
+ Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
+ VTs, &Ops[0], Ops.size(),
Info.memVT, Info.ptrVal, Info.offset,
Info.align, Info.vol,
Info.readMem, Info.writeMem);
}
else if (!HasChain)
- Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DAG.getCurDebugLoc(),
- VTList, VTs.size(),
- &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
+ VTs, &Ops[0], Ops.size());
else if (I.getType() != Type::VoidTy)
- Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DAG.getCurDebugLoc(),
- VTList, VTs.size(),
- &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
+ VTs, &Ops[0], Ops.size());
else
- Result = DAG.getNode(ISD::INTRINSIC_VOID, DAG.getCurDebugLoc(),
- VTList, VTs.size(),
- &Ops[0], Ops.size());
+ Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
+ VTs, &Ops[0], Ops.size());
if (HasChain) {
SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
if (I.getType() != Type::VoidTy) {
if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
MVT VT = TLI.getValueType(PTy);
- Result = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(), VT, Result);
+ Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
}
setValue(&I, Result);
}
///
/// where Op is the hexadecimal representation of the floating point value.
static SDValue
-GetSignificand(SelectionDAG &DAG, SDValue Op) {
- SDValue t1 = DAG.getNode(ISD::AND, DAG.getCurDebugLoc(), MVT::i32, Op,
+GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
+ SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x007fffff, MVT::i32));
- SDValue t2 = DAG.getNode(ISD::OR, DAG.getCurDebugLoc(), MVT::i32, t1,
+ SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
DAG.getConstant(0x3f800000, MVT::i32));
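+ // e.g. for 3.0f (0x40400000) the mask keeps 0x00400000 and the OR yields
+ // 0x3fc00000, i.e. 1.5f: the significand scaled into [1.0, 2.0).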
- return DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(), MVT::f32, t2);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
}
/// GetExponent - Get the exponent:
///
/// where Op is the hexadecimal representation of the floating point value.
static SDValue
-GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI) {
- SDValue t0 = DAG.getNode(ISD::AND, DAG.getCurDebugLoc(), MVT::i32, Op,
+GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
+ DebugLoc dl) {
+ SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
DAG.getConstant(0x7f800000, MVT::i32));
- SDValue t1 = DAG.getNode(ISD::SRL, DAG.getCurDebugLoc(), MVT::i32, t0,
- DAG.getConstant(23, TLI.getShiftAmountTy()));
- SDValue t2 = DAG.getNode(ISD::SUB, DAG.getCurDebugLoc(), MVT::i32, t1,
+ SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
+ DAG.getConstant(23, TLI.getPointerTy()));
+ SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
DAG.getConstant(127, MVT::i32));
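+ // e.g. for 2.0f (0x40000000): (0x40000000 & 0x7f800000) >> 23 == 128,
+ // and 128 - 127 == 1, the unbiased exponent.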
- return DAG.getNode(ISD::SINT_TO_FP, MVT::f32, t2);
+ return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}
/// getF32Constant - Get 32-bit floating point constant.
SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
SDValue Root = getRoot();
SDValue L =
- DAG.getAtomic(Op, DAG.getCurDebugLoc(),
+ DAG.getAtomic(Op, getCurDebugLoc(),
getValue(I.getOperand(2)).getValueType().getSimpleVT(),
Root,
getValue(I.getOperand(1)),
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
- MVT ValueVTs[] = { Op1.getValueType(), MVT::i1 };
- SDValue Ops[] = { Op1, Op2 };
-
- SDValue Result = DAG.getNode(Op, DAG.getCurDebugLoc(),
- DAG.getVTList(&ValueVTs[0], 2), &Ops[0], 2);
+ SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
+ SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
setValue(&I, Result);
return 0;
void
SelectionDAGLowering::visitExp(CallInst &I) {
SDValue result;
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
// IntegerPartOfX <<= 23;
IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
- DAG.getConstant(23, TLI.getShiftAmountTy()));
+ DAG.getConstant(23, TLI.getPointerTy()));
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000));
- SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
+ SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::i32, t13);
// Add the exponent into the result in integer domain.
void
SelectionDAGLowering::visitLog(CallInst &I) {
SDValue result;
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Scale the exponent by log(2) [0.69314718f].
- SDValue Exp = GetExponent(DAG, Op1, TLI);
+ SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
getF32Constant(DAG, 0x3f317218));
// Get the significand and build it into a floating-point number with
// exponent of 1.
- SDValue X = GetSignificand(DAG, Op1);
+ SDValue X = GetSignificand(DAG, Op1, dl);
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f949a29));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, LogOfMantissa);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3fdef31a));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, LogOfMantissa);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
getF32Constant(DAG, 0x4006dcab));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, LogOfMantissa);
}
} else {
void
SelectionDAGLowering::visitLog2(CallInst &I) {
SDValue result;
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Get the exponent.
- SDValue LogOfExponent = GetExponent(DAG, Op1, TLI);
+ SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
// Get the significand and build it into a floating-point number with
// exponent of 1.
- SDValue X = GetSignificand(DAG, Op1);
+ SDValue X = GetSignificand(DAG, Op1, dl);
// Different possible minimax approximations of significand in
// floating-point for various degrees of accuracy over [1,2].
SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3fd6633d));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, Log2ofMantissa);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
getF32Constant(DAG, 0x4020d29c));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, Log2ofMantissa);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
getF32Constant(DAG, 0x4042902c));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, Log2ofMantissa);
}
} else {
void
SelectionDAGLowering::visitLog10(CallInst &I) {
SDValue result;
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Scale the exponent by log10(2) [0.30102999f].
- SDValue Exp = GetExponent(DAG, Op1, TLI);
+ SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
getF32Constant(DAG, 0x3e9a209a));
// Get the significand and build it into a floating-point number with
// exponent of 1.
- SDValue X = GetSignificand(DAG, Op1);
+ SDValue X = GetSignificand(DAG, Op1, dl);
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
getF32Constant(DAG, 0x3f011300));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, Log10ofMantissa);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f25f7c3));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, Log10ofMantissa);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
getF32Constant(DAG, 0x3f57ce70));
- result = DAG.getNode(ISD::FADD, dl,
+ result = DAG.getNode(ISD::FADD, dl,
MVT::f32, LogOfExponent, Log10ofMantissa);
}
} else {
void
SelectionDAGLowering::visitExp2(CallInst &I) {
SDValue result;
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
// IntegerPartOfX <<= 23;
IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
- DAG.getConstant(23, TLI.getShiftAmountTy()));
+ DAG.getConstant(23, TLI.getPointerTy()));
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::f32, TwoToFractionalPartOfX);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12:
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::f32, TwoToFractionalPartOfX);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::f32, TwoToFractionalPartOfX);
}
} else {
SelectionDAGLowering::visitPow(CallInst &I) {
SDValue result;
Value *Val = I.getOperand(1);
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
bool IsExp10 = false;
if (getValue(Val).getValueType() == MVT::f32 &&
// IntegerPartOfX <<= 23;
IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
- DAG.getConstant(23, TLI.getShiftAmountTy()));
+ DAG.getConstant(23, TLI.getPointerTy()));
if (LimitFloatPrecision <= 6) {
// For floating-point precision of 6:
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::f32, TwoToFractionalPartOfX);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18:
SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
- result = DAG.getNode(ISD::BIT_CONVERT, dl,
+ result = DAG.getNode(ISD::BIT_CONVERT, dl,
MVT::f32, TwoToFractionalPartOfX);
}
} else {
/// otherwise lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
- DebugLoc dl = DAG.getCurDebugLoc();
+ DebugLoc dl = getCurDebugLoc();
switch (Intrinsic) {
default:
// By default, turn this into a target intrinsic node.
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false,
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
SDValue Op2 = getValue(I.getOperand(2));
SDValue Op3 = getValue(I.getOperand(3));
unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemset(getRoot(), Op1, Op2, Op3, Align,
+ DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
I.getOperand(1), 0));
return 0;
}
Size = C->getZExtValue();
if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
AliasAnalysis::NoAlias) {
- DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false,
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
- DAG.setRoot(DAG.getMemmove(getRoot(), Op1, Op2, Op3, Align,
+ DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
I.getOperand(1), 0, I.getOperand(2), 0));
return 0;
}
case Intrinsic::dbg_stoppoint: {
- DwarfWriter *DW = DAG.getDwarfWriter();
DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
- if (DW && DW->ValidDebugInfo(SPI.getContext())) {
- DAG.setRoot(DAG.getDbgStopPoint(getRoot(),
- SPI.getLine(),
- SPI.getColumn(),
- SPI.getContext()));
+ if (DIDescriptor::ValidDebugInfo(SPI.getContext(), OptLevel)) {
+ MachineFunction &MF = DAG.getMachineFunction();
DICompileUnit CU(cast<GlobalVariable>(SPI.getContext()));
- unsigned SrcFile = DW->RecordSource(CU.getDirectory(), CU.getFilename());
- unsigned idx = DAG.getMachineFunction().
- getOrCreateDebugLocID(SrcFile,
- SPI.getLine(),
- SPI.getColumn());
- DAG.setCurDebugLoc(DebugLoc::get(idx));
+ DebugLoc Loc = DebugLoc::get(MF.getOrCreateDebugLocID(CU.getGV(),
+ SPI.getLine(), SPI.getColumn()));
+ setCurDebugLoc(Loc);
+
+ if (OptLevel == CodeGenOpt::None)
+ DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
+ SPI.getLine(),
+ SPI.getColumn(),
+ SPI.getContext()));
}
return 0;
}
case Intrinsic::dbg_region_start: {
DwarfWriter *DW = DAG.getDwarfWriter();
DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
- if (DW && DW->ValidDebugInfo(RSI.getContext())) {
+
+ if (DIDescriptor::ValidDebugInfo(RSI.getContext(), OptLevel) &&
+ DW && DW->ShouldEmitDwarfDebug()) {
unsigned LabelID =
DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
- DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
+ DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
+ getRoot(), LabelID));
}
return 0;
case Intrinsic::dbg_region_end: {
DwarfWriter *DW = DAG.getDwarfWriter();
DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
- if (DW && DW->ValidDebugInfo(REI.getContext())) {
- unsigned LabelID =
- DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
- DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
+
+ if (DIDescriptor::ValidDebugInfo(REI.getContext(), OptLevel) &&
+ DW && DW->ShouldEmitDwarfDebug()) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ DISubprogram Subprogram(cast<GlobalVariable>(REI.getContext()));
+
+ if (Subprogram.isNull() || Subprogram.describes(MF.getFunction())) {
+ unsigned LabelID =
+ DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
+ DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
+ getRoot(), LabelID));
+ } else {
+ // This is the end of an inlined function. Debugging information for
+ // inlined functions is not handled yet (only supported by FastISel).
+ if (OptLevel == CodeGenOpt::None) {
+ unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
+ if (ID != 0)
+ // The returned ID is 0 if this is an unbalanced "end of inlined
+ // scope". This could happen if the optimizer eats dbg intrinsics or
+ // "beginning of inlined scope" is not recognized due to missing
+ // location info. In such cases, ignore this region.end.
+ DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
+ getRoot(), ID));
+ }
+ }
}
return 0;
}
case Intrinsic::dbg_func_start: {
DwarfWriter *DW = DAG.getDwarfWriter();
- if (!DW) return 0;
DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
Value *SP = FSI.getSubprogram();
- if (SP && DW->ValidDebugInfo(SP)) {
+ if (!DIDescriptor::ValidDebugInfo(SP, OptLevel))
+ return 0;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ if (OptLevel == CodeGenOpt::None) {
+ // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is what
+ // (most?) gdb expects.
+ DebugLoc PrevLoc = CurDebugLoc;
+ DISubprogram Subprogram(cast<GlobalVariable>(SP));
+ DICompileUnit CompileUnit = Subprogram.getCompileUnit();
+
+ if (!Subprogram.describes(MF.getFunction())) {
+ // This is the beginning of an inlined function.
+
+ // If llvm.dbg.func.start is seen in a new block before any
+ // llvm.dbg.stoppoint intrinsic then the location info is unknown.
+ // FIXME: Why is DebugLoc reset at the beginning of each block?
+ if (PrevLoc.isUnknown())
+ return 0;
+
+ // Record the source line.
+ unsigned Line = Subprogram.getLineNumber();
+ setCurDebugLoc(DebugLoc::get(
+ MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
+
+ if (DW && DW->ShouldEmitDwarfDebug()) {
+ DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
+ unsigned LabelID = DW->RecordInlinedFnStart(Subprogram,
+ DICompileUnit(PrevLocTpl.CompileUnit),
+ PrevLocTpl.Line,
+ PrevLocTpl.Col);
+ DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
+ getRoot(), LabelID));
+ }
+ } else {
+ // Record the source line.
+ unsigned Line = Subprogram.getLineNumber();
+ MF.setDefaultDebugLoc(DebugLoc::get(
+ MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
+ if (DW && DW->ShouldEmitDwarfDebug()) {
+ // llvm.dbg.func_start also defines the beginning of the function scope.
+ DW->RecordRegionStart(cast<GlobalVariable>(FSI.getSubprogram()));
+ }
+ }
+ } else {
+ DISubprogram Subprogram(cast<GlobalVariable>(SP));
+
+ std::string SPName;
+ Subprogram.getLinkageName(SPName);
+ if (!SPName.empty()
+ && strcmp(SPName.c_str(), MF.getFunction()->getNameStart())) {
+ // This is the beginning of an inlined function. Debugging information
+ // for inlined functions is not handled yet (only supported by FastISel).
+ return 0;
+ }
+
// llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
// what (most?) gdb expects.
- DISubprogram Subprogram(cast<GlobalVariable>(SP));
DICompileUnit CompileUnit = Subprogram.getCompileUnit();
- unsigned SrcFile = DW->RecordSource(CompileUnit.getDirectory(),
- CompileUnit.getFilename());
+
// Record the source line but do not create a label for the normal
// function start. It will be emitted at asm emission time. However,
// create a label if this is the beginning of an inlined function.
unsigned Line = Subprogram.getLineNumber();
- unsigned LabelID =
- DW->RecordSourceLine(Line, 0, SrcFile);
- if (DW->getRecordSourceLineCount() != 1)
- DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
- DAG.setCurDebugLoc(DebugLoc::get(DAG.getMachineFunction().
- getOrCreateDebugLocID(SrcFile, Line, 0)));
+ setCurDebugLoc(DebugLoc::get(
+ MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
+ // FIXME - Start a new region because llvm.dbg.func_start also defines
+ // the beginning of the function scope.
}
return 0;
}
case Intrinsic::dbg_declare: {
- DwarfWriter *DW = DAG.getDwarfWriter();
- DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
- Value *Variable = DI.getVariable();
- if (DW && DW->ValidDebugInfo(Variable))
- DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
- getValue(DI.getAddress()), getValue(Variable)));
+ if (OptLevel == CodeGenOpt::None) {
+ DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
+ Value *Variable = DI.getVariable();
+ if (DIDescriptor::ValidDebugInfo(Variable, OptLevel))
+ DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
+ getValue(DI.getAddress()), getValue(Variable)));
+ } else {
+ // FIXME: Do something sensible here when we support debug declare.
+ }
return 0;
}
-
case Intrinsic::eh_exception: {
- if (!CurMBB->isLandingPad()) {
- // FIXME: Mark exception register as live in. Hack for PR1508.
- unsigned Reg = TLI.getExceptionAddressRegister();
- if (Reg) CurMBB->addLiveIn(Reg);
- }
// Insert the EXCEPTIONADDR instruction.
+ assert(CurMBB->isLandingPad() && "Call to eh.exception not in landing pad!");
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
SDValue Ops[1];
Ops[0] = DAG.getRoot();
}
MVT DestVT = TLI.getValueType(I.getType());
Value* Op1 = I.getOperand(1);
- setValue(&I, DAG.getConvertRndSat(DestVT, getValue(Op1),
+ setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
DAG.getValueType(DestVT),
DAG.getValueType(getValue(Op1).getValueType()),
getValue(I.getOperand(2)),
case Intrinsic::readcyclecounter: {
SDValue Op = getRoot();
SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
- DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
- &Op, 1);
+ DAG.getVTList(MVT::i64, MVT::Other),
+ &Op, 1);
setValue(&I, Tmp);
DAG.setRoot(Tmp.getValue(1));
return 0;
case Intrinsic::stacksave: {
SDValue Op = getRoot();
SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
- DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1);
+ DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
setValue(&I, Tmp);
DAG.setRoot(Tmp.getValue(1));
return 0;
SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
// Store the stack protector onto the stack.
- SDValue Result = DAG.getStore(getRoot(), DAG.getCurDebugLoc(), Src, FIN,
+ SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
PseudoSourceValue::getFixedStack(FI),
0, true);
setValue(&I, Result);
Ops[5] = DAG.getSrcValue(F);
SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
- DAG.getNodeValueTypes(TLI.getPointerTy(),
- MVT::Other), 2,
- Ops, 6);
+ DAG.getVTList(TLI.getPointerTy(), MVT::Other),
+ Ops, 6);
setValue(&I, Tmp);
DAG.setRoot(Tmp.getValue(1));
case Intrinsic::atomic_cmp_swap: {
SDValue Root = getRoot();
SDValue L =
- DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, DAG.getCurDebugLoc(),
+ DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
getValue(I.getOperand(2)).getValueType().getSimpleVT(),
Root,
getValue(I.getOperand(1)),
// Both PendingLoads and PendingExports must be flushed here;
// this call might not return.
(void)getRoot();
- DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getControlRoot(), BeginLabel));
+ DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
+ getControlRoot(), BeginLabel));
}
std::pair<SDValue,SDValue> Result =
CS.paramHasAttr(0, Attribute::InReg),
CS.getCallingConv(),
IsTailCall && PerformTailCallOpt,
- Callee, Args, DAG, DAG.getCurDebugLoc());
+ Callee, Args, DAG, getCurDebugLoc());
if (CS.getType() != Type::VoidTy)
setValue(CS.getInstruction(), Result.first);
DAG.setRoot(Result.second);
// Insert a label at the end of the invoke call to mark the try range. This
// can be used to detect deletion of the invoke via the MachineModuleInfo.
EndLabel = MMI->NextLabelID();
- DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getRoot(), EndLabel));
+ DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
+ getRoot(), EndLabel));
// Inform MachineModuleInfo of range.
MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
+ const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
+ if (II) {
+ if (unsigned IID = II->getIntrinsicID(F)) {
+ RenameFn = visitIntrinsicCall(I, IID);
+ if (!RenameFn)
+ return;
+ }
+ }
if (unsigned IID = F->getIntrinsicID()) {
RenameFn = visitIntrinsicCall(I, IID);
if (!RenameFn)
I.getType() == I.getOperand(2)->getType()) {
SDValue LHS = getValue(I.getOperand(1));
SDValue RHS = getValue(I.getOperand(2));
- setValue(&I, DAG.getNode(ISD::FCOPYSIGN, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
LHS.getValueType(), LHS, RHS));
return;
}
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType()) {
SDValue Tmp = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FABS, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType()) {
SDValue Tmp = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FSIN, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
I.getOperand(1)->getType()->isFloatingPoint() &&
I.getType() == I.getOperand(1)->getType()) {
SDValue Tmp = getValue(I.getOperand(1));
- setValue(&I, DAG.getNode(ISD::FCOS, DAG.getCurDebugLoc(),
+ setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
/// this value and returns the result as a ValueVT value. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
-SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
+SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
SDValue &Chain,
SDValue *Flag) const {
// Assemble the legal parts into the final values.
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue P;
if (Flag == 0)
- P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT);
+ P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
else {
- P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT, *Flag);
+ P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
*Flag = P.getValue(2);
}
Chain = P.getValue(1);
isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
else if (NumSignBits > RegSize-8)
isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
- else if (NumZeroBits >= RegSize-9)
+ else if (NumZeroBits >= RegSize-8)
isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
else if (NumSignBits > RegSize-16)
isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
- else if (NumZeroBits >= RegSize-17)
+ else if (NumZeroBits >= RegSize-16)
isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
else if (NumSignBits > RegSize-32)
isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
- else if (NumZeroBits >= RegSize-33)
+ else if (NumZeroBits >= RegSize-32)
isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
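+ // e.g. with a 32-bit register, at least 24 known zero bits guarantee the
+ // value fits in the low 8 bits, so an AssertZext to i8 is safe.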
if (FromVT != MVT::Other) {
- P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext,
- DAG.getCurDebugLoc(),
+ P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
RegisterVT, P, DAG.getValueType(FromVT));
}
Parts[i] = P;
}
- Values[Value] = getCopyFromParts(DAG, Parts.begin(), NumRegs, RegisterVT,
- ValueVT);
+ Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
+ NumRegs, RegisterVT, ValueVT);
Part += NumRegs;
Parts.clear();
}
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getCurDebugLoc(),
+ return DAG.getNode(ISD::MERGE_VALUES, dl,
DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
&Values[0], ValueVTs.size());
}
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
-void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
+void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
SDValue &Chain, SDValue *Flag) const {
// Get the list of the values's legal parts.
unsigned NumRegs = Regs.size();
unsigned NumParts = TLI->getNumRegisters(ValueVT);
MVT RegisterVT = RegVTs[Value];
- getCopyToParts(DAG, Val.getValue(Val.getResNo() + Value),
+ getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
&Parts[Part], NumParts, RegisterVT);
Part += NumParts;
}
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue Part;
if (Flag == 0)
- Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i]);
+ Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
else {
- Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i], *Flag);
+ Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
*Flag = Part.getValue(1);
}
Chains[i] = Part.getValue(0);
// = op c3, ..., f2
Chain = Chains[NumRegs-1];
else
- Chain = DAG.getNode(ISD::TokenFactor, DAG.getCurDebugLoc(),
- MVT::Other, &Chains[0], NumRegs);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
}
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker and includes the number of
/// values added into it.
-void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
+void RegsForValue::AddInlineAsmOperands(unsigned Code,
+ bool HasMatching, unsigned MatchingIdx,
+ SelectionDAG &DAG,
std::vector<SDValue> &Ops) const {
MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
- Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy));
+ assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
+ unsigned Flag = Code | (Regs.size() << 3);
+ if (HasMatching)
+ Flag |= 0x80000000 | (MatchingIdx << 16);
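+ // The flag word thus packs: bits 0-2 the operand code, bits 3-15 the
+ // register count, bit 31 the matching marker, bits 16-30 the matched index.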
+ Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
MVT RegisterVT = RegVTs[Value];
namespace llvm {
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
-struct VISIBILITY_HIDDEN SDISelAsmOperandInfo :
+class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
public TargetLowering::AsmOperandInfo {
+public:
/// CallOperand - If this is the result output operand or a clobber
/// this is null, otherwise it is the incoming operand to the CallInst.
/// This gets modified as the asm is processed.
// vector types).
MVT RegVT = *PhysReg.second->vt_begin();
if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
- OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
} else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
// into i64, which can be passed with two i32 values on a 32-bit
// machine.
RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
- OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT;
}
// If this is a constraint for a specific physical register, like {r17},
// assign it now.
- if (PhysReg.first) {
+ if (unsigned AssignedReg = PhysReg.first) {
+ const TargetRegisterClass *RC = PhysReg.second;
if (OpInfo.ConstraintVT == MVT::Other)
- ValueVT = *PhysReg.second->vt_begin();
+ ValueVT = *RC->vt_begin();
// Get the actual register value type. This is important, because the user
// may have asked for (e.g.) the AX register in i32 type. We need to
// remember that AX is actually i16 to get the right extension.
- RegVT = *PhysReg.second->vt_begin();
+ RegVT = *RC->vt_begin();
// This is an explicit reference to a physical register.
- Regs.push_back(PhysReg.first);
+ Regs.push_back(AssignedReg);
// If this is an expanded reference, add the rest of the regs to Regs.
if (NumRegs != 1) {
- TargetRegisterClass::iterator I = PhysReg.second->begin();
- for (; *I != PhysReg.first; ++I)
- assert(I != PhysReg.second->end() && "Didn't find reg!");
+ TargetRegisterClass::iterator I = RC->begin();
+ for (; *I != AssignedReg; ++I)
+ assert(I != RC->end() && "Didn't find reg!");
// Already added the first reg.
--NumRegs; ++I;
for (; NumRegs; --NumRegs, ++I) {
- assert(I != PhysReg.second->end() && "Ran out of registers to allocate!");
+ assert(I != RC->end() && "Ran out of registers to allocate!");
Regs.push_back(*I);
}
}
// Otherwise, if this was a reference to an LLVM register class, create vregs
// for this reference.
- std::vector<unsigned> RegClassRegs;
- const TargetRegisterClass *RC = PhysReg.second;
- if (RC) {
- // If this is a tied register, our regalloc doesn't know how to maintain
- // the constraint, so we have to pick a register to pin the input/output to.
- // If it isn't a matched constraint, go ahead and create vreg and let the
- // regalloc do its thing.
- if (!OpInfo.hasMatchingInput()) {
- RegVT = *PhysReg.second->vt_begin();
- if (OpInfo.ConstraintVT == MVT::Other)
- ValueVT = RegVT;
-
- // Create the appropriate number of virtual registers.
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
- for (; NumRegs; --NumRegs)
- Regs.push_back(RegInfo.createVirtualRegister(PhysReg.second));
-
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
- return;
- }
+ if (const TargetRegisterClass *RC = PhysReg.second) {
+ RegVT = *RC->vt_begin();
+ if (OpInfo.ConstraintVT == MVT::Other)
+ ValueVT = RegVT;
- // Otherwise, we can't allocate it. Let the code below figure out how to
- // maintain these constraints.
- RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end());
+ // Create the appropriate number of virtual registers.
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ for (; NumRegs; --NumRegs)
+ Regs.push_back(RegInfo.createVirtualRegister(RC));
- } else {
- // This is a reference to a register class that doesn't directly correspond
- // to an LLVM register class. Allocate NumRegs consecutive, available,
- // registers from the class.
- RegClassRegs = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
- OpInfo.ConstraintVT);
+ OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
+ return;
}
+
+ // This is a reference to a register class that doesn't directly correspond
+ // to an LLVM register class. Allocate NumRegs consecutive, available
+ // registers from the class.
+ std::vector<unsigned> RegClassRegs
+ = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
+ OpInfo.ConstraintVT);
const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
unsigned NumAllocated = 0;
// Check to see if this register is allocatable (i.e. don't give out the
// stack pointer).
- if (RC == 0) {
- RC = isAllocatableRegister(Reg, MF, TLI, TRI);
- if (!RC) { // Couldn't allocate this register.
- // Reset NumAllocated to make sure we return consecutive registers.
- NumAllocated = 0;
- continue;
- }
+ const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
+ if (!RC) { // Couldn't allocate this register.
+ // Reset NumAllocated to make sure we return consecutive registers.
+ NumAllocated = 0;
+ continue;
}
// Okay, this register is good, we can use it.
if (CType == TargetLowering::C_Memory)
return true;
}
+
+ // Indirect operand accesses access memory.
+ if (CI.isIndirect)
+ return true;
}
return false;
/// ConstraintOperands - Information about all of the constraints.
std::vector<SDISelAsmOperandInfo> ConstraintOperands;
- SDValue Chain = getRoot();
- SDValue Flag;
-
std::set<unsigned> OutputRegs, InputRegs;
// Do a prepass over the constraints, canonicalizing them, and building up the
ConstraintInfos = IA->ParseConstraints();
bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
+
+ SDValue Chain, Flag;
+
+ // We won't need to flush pending loads if this asm doesn't touch
+ // memory and is nonvolatile.
+ if (hasMemory || IA->hasSideEffects())
+ Chain = getRoot();
+ else
+ Chain = DAG.getRoot();
unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
unsigned ResNo = 0; // ResNo - The result number of the next output.
Input.ConstraintVT.isInteger()) ||
(OpInfo.ConstraintVT.getSizeInBits() !=
Input.ConstraintVT.getSizeInBits())) {
- cerr << "Unsupported asm: input constraint with a matching output "
- << "constraint of incompatible type!\n";
+ cerr << "llvm: error: Unsupported asm: input constraint with a "
+ << "matching output constraint of incompatible type!\n";
exit(1);
}
Input.ConstraintVT = OpInfo.ConstraintVT;
// Otherwise, create a stack slot and emit a store to it before the
// asm.
const Type *Ty = OpVal->getType();
- uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
- Chain = DAG.getStore(Chain, DAG.getCurDebugLoc(),
+ Chain = DAG.getStore(Chain, getCurDebugLoc(),
OpInfo.CallOperand, StackSlot, NULL, 0);
OpInfo.CallOperand = StackSlot;
}
// Copy the output from the appropriate register. Find a register that
// we can use.
if (OpInfo.AssignedRegs.Regs.empty()) {
- cerr << "Couldn't allocate output reg for constraint '"
+ cerr << "llvm: error: Couldn't allocate output reg for constraint '"
<< OpInfo.ConstraintCode << "'!\n";
exit(1);
}
OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
6 /* EARLYCLOBBER REGDEF */ :
2 /* REGDEF */ ,
+ false,
+ 0,
DAG, AsmNodeOperands);
break;
}
unsigned CurOp = 2; // The first operand.
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
- unsigned NumOps =
+ unsigned OpFlag =
cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
- assert(((NumOps & 7) == 2 /*REGDEF*/ ||
- (NumOps & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
- (NumOps & 7) == 4 /*MEM*/) &&
+ assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
+ (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
+ (OpFlag & 7) == 4 /*MEM*/) &&
"Skipped past definitions?");
- CurOp += (NumOps>>3)+1;
+ CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
}
- unsigned NumOps =
+ unsigned OpFlag =
cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
- if ((NumOps & 7) == 2 /*REGDEF*/
- || (NumOps & 7) == 6 /* EARLYCLOBBER REGDEF */) {
- // Add NumOps>>3 registers to MatchedRegs.
+ if ((OpFlag & 7) == 2 /*REGDEF*/
+ || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
+ // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
+ assert(!OpInfo.isIndirect &&
+ "Don't know how to handle tied indirect register inputs yet!");
RegsForValue MatchedRegs;
MatchedRegs.TLI = &TLI;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
- MatchedRegs.RegVTs.push_back(AsmNodeOperands[CurOp+1].getValueType());
- for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
- unsigned Reg =
- cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
- MatchedRegs.Regs.push_back(Reg);
- }
+ MVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
+ MatchedRegs.RegVTs.push_back(RegVT);
+ MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
+ for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
+ i != e; ++i)
+ MatchedRegs.Regs.
+ push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
// Use the produced MatchedRegs object to
- MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, &Flag);
- MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
+ MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
+ Chain, &Flag);
+ MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
+ true, OpInfo.getMatchedOperand(),
+ DAG, AsmNodeOperands);
break;
} else {
- assert(((NumOps & 7) == 4) && "Unknown matching constraint!");
- assert((NumOps >> 3) == 1 && "Unexpected number of operands");
+ assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
+ assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
+ "Unexpected number of operands");
// Add information to the INLINEASM node to know about this input.
- AsmNodeOperands.push_back(DAG.getTargetConstant(NumOps,
+ // See InlineAsm.h isUseOperandTiedToDef.
+ OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
+ AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
TLI.getPointerTy()));
AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
break;
TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
hasMemory, Ops, DAG);
if (Ops.empty()) {
- cerr << "Invalid operand for inline asm constraint '"
+ cerr << "llvm: error: Invalid operand for inline asm constraint '"
<< OpInfo.ConstraintCode << "'!\n";
exit(1);
}
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty()) {
- cerr << "Couldn't allocate output reg for constraint '"
+ cerr << "llvm: error: Couldn't allocate output reg for constraint '"
<< OpInfo.ConstraintCode << "'!\n";
exit(1);
}
- OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, Chain, &Flag);
+ OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
+ Chain, &Flag);
- OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/,
+ OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
DAG, AsmNodeOperands);
break;
}
// allocator is aware that the physreg got clobbered.
if (!OpInfo.AssignedRegs.Regs.empty())
OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
- DAG, AsmNodeOperands);
+ false, 0, DAG,AsmNodeOperands);
break;
}
}
AsmNodeOperands[0] = Chain;
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
- Chain = DAG.getNode(ISD::INLINEASM, DAG.getCurDebugLoc(),
- DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
+ Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
+ DAG.getVTList(MVT::Other, MVT::Flag),
&AsmNodeOperands[0], AsmNodeOperands.size());
Flag = Chain.getValue(1);
// If this asm returns a register value, copy the result from that register
// and set it as the value of the call.
if (!RetValRegs.Regs.empty()) {
- SDValue Val = RetValRegs.getCopyFromRegs(DAG, Chain, &Flag);
+ SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
+ Chain, &Flag);
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
// not have the same VT as was expected. Convert it to the right type
// with bit_convert.
if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
- Val = DAG.getNode(ISD::BIT_CONVERT, DAG.getCurDebugLoc(),
+ Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
ResultType, Val);
} else if (ResultType != Val.getValueType() &&
// If a result value was tied to an input value, the computed result may
// have a wider width than the expected result. Extract the relevant
// portion.
- Val = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), ResultType, Val);
+ Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
}
assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
}
setValue(CS.getInstruction(), Val);
+ // Don't need to use this as a chain in this case.
+ if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
+ return;
}
std::vector<std::pair<SDValue, Value*> > StoresToEmit;
for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
Value *Ptr = IndirectStoresToEmit[i].second;
- SDValue OutVal = OutRegs.getCopyFromRegs(DAG, Chain, &Flag);
+ SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
+ Chain, &Flag);
StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
// Emit the non-flagged stores from the physregs.
SmallVector<SDValue, 8> OutChains;
for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
- OutChains.push_back(DAG.getStore(Chain, DAG.getCurDebugLoc(),
+ OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
StoresToEmit[i].first,
getValue(StoresToEmit[i].second),
StoresToEmit[i].second, 0));
if (!OutChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DAG.getCurDebugLoc(), MVT::Other,
+ Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
&OutChains[0], OutChains.size());
DAG.setRoot(Chain);
}
void SelectionDAGLowering::visitMalloc(MallocInst &I) {
SDValue Src = getValue(I.getOperand(0));
+ // Scale up by the type size in the original i32 type width. Various
+ // mid-level optimizers may make assumptions about demanded bits etc. from
+ // the i32-ness of the original operation: we do not want to promote to i64
+ // and then multiply on 64-bit targets.
+ // FIXME: Malloc inst should go away: PR715.
+ uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
+ if (ElementSize != 1)
+ Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
+ Src, DAG.getConstant(ElementSize, Src.getValueType()));
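+ // e.g. for a malloc of double with an i32 count %n, ElementSize is 8 and
+ // Src becomes (mul i32 %n, 8) before the intptr_t conversion below.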
+
MVT IntPtr = TLI.getPointerTy();
if (IntPtr.bitsLT(Src.getValueType()))
- Src = DAG.getNode(ISD::TRUNCATE, DAG.getCurDebugLoc(), IntPtr, Src);
+ Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
else if (IntPtr.bitsGT(Src.getValueType()))
- Src = DAG.getNode(ISD::ZERO_EXTEND, DAG.getCurDebugLoc(), IntPtr, Src);
-
- // Scale the source by the type size.
- uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
- Src = DAG.getNode(ISD::MUL, DAG.getCurDebugLoc(), Src.getValueType(),
- Src, DAG.getIntPtrConstant(ElementSize));
+ Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
CallingConv::C, PerformTailCallOpt,
DAG.getExternalSymbol("malloc", IntPtr),
- Args, DAG, DAG.getCurDebugLoc());
+ Args, DAG, getCurDebugLoc());
setValue(&I, Result.first); // Pointers always fit in registers
DAG.setRoot(Result.second);
}
TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
CallingConv::C, PerformTailCallOpt,
DAG.getExternalSymbol("free", IntPtr), Args, DAG,
- DAG.getCurDebugLoc());
+ getCurDebugLoc());
DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitVAStart(CallInst &I) {
- DAG.setRoot(DAG.getNode(ISD::VASTART, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
MVT::Other, getRoot(),
getValue(I.getOperand(1)),
DAG.getSrcValue(I.getOperand(1))));
}
void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
- SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
- getValue(I.getOperand(0)),
- DAG.getSrcValue(I.getOperand(0)));
+ SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
+ getRoot(), getValue(I.getOperand(0)),
+ DAG.getSrcValue(I.getOperand(0)));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
void SelectionDAGLowering::visitVAEnd(CallInst &I) {
- DAG.setRoot(DAG.getNode(ISD::VAEND, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
MVT::Other, getRoot(),
getValue(I.getOperand(1)),
DAG.getSrcValue(I.getOperand(1))));
}
void SelectionDAGLowering::visitVACopy(CallInst &I) {
- DAG.setRoot(DAG.getNode(ISD::VACOPY, DAG.getCurDebugLoc(),
+ DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
MVT::Other, getRoot(),
getValue(I.getOperand(1)),
getValue(I.getOperand(2)),
const PointerType *Ty = cast<PointerType>(I->getType());
const Type *ElementTy = Ty->getElementType();
unsigned FrameAlign = getByValTypeAlignment(ElementTy);
- unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
+ unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
if (F.getParamAlignment(j))
else if (F.paramHasAttr(Idx, Attribute::ZExt))
AssertOp = ISD::AssertZext;
- ArgValues.push_back(getCopyFromParts(DAG, &Parts[0], NumParts, PartVT, VT,
- AssertOp));
+ ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
+ PartVT, VT, AssertOp));
}
}
assert(i == NumArgRegs && "Argument register count mismatch!");
const PointerType *Ty = cast<PointerType>(Args[i].Ty);
const Type *ElementTy = Ty->getElementType();
unsigned FrameAlign = getByValTypeAlignment(ElementTy);
- unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
+ unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
if (Args[i].Alignment)
else if (Args[i].isZExt)
ExtendKind = ISD::ZERO_EXTEND;
- getCopyToParts(DAG, Op, &Parts[0], NumParts, PartVT, ExtendKind);
+ getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
for (unsigned i = 0; i != NumParts; ++i) {
// if it isn't first piece, alignment must be 1
for (; RegNo != RegNoEnd; ++RegNo)
Results.push_back(Res.getValue(RegNo));
SDValue ReturnValue =
- getCopyFromParts(DAG, &Results[0], NumRegs, RegisterVT, VT,
+ getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
AssertOp);
ReturnValues.push_back(ReturnValue);
}
RegsForValue RFV(TLI, Reg, V->getType());
SDValue Chain = DAG.getEntryNode();
- RFV.getCopyToRegs(Op, DAG, Chain, 0);
+ RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
PendingExports.push_back(Chain);
}
Function &F = *LLVMBB->getParent();
SDValue OldRoot = SDL->DAG.getRoot();
SmallVector<SDValue, 16> Args;
- TLI.LowerArguments(F, SDL->DAG, Args, SDL->DAG.getCurDebugLoc());
+ TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());
unsigned a = 0;
for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
ComputeValueVTs(TLI, AI->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (!AI->use_empty()) {
- SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues));
+ SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues,
+ SDL->getCurDebugLoc()));
// If this argument is live outside of the entry block, insert a copy from
// whereever we got it to the vreg that other BB's will reference it as.
- DenseMap<const Value*, unsigned>::iterator VMI=FuncInfo->ValueMap.find(AI);
- if (VMI != FuncInfo->ValueMap.end()) {
- SDL->CopyValueToVirtualRegister(AI, VMI->second);
- }
+ SDL->CopyToExportRegsIfNeeded(AI);
}
a += NumValues;
}