#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/CodeGen/MachineDebugInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
const Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
unsigned Align =
- std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignmentPref(Ty),
AI->getAlignment());
- // If the alignment of the value is smaller than the size of the
- // value, and if the size of the value is particularly small
- // (<= 8 bytes), round up to the size of the value for potentially
- // better performance.
- //
- // FIXME: This could be made better with a preferred alignment hook in
- // TargetData. It serves primarily to 8-byte align doubles for X86.
- if (Align < TySize && TySize <= 8) Align = TySize;
TySize *= CUI->getZExtValue(); // Get total allocated size.
if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
StaticAllocaMap[AI] =
void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
- void visitIntBinary(User &I, unsigned IntOp, unsigned VecOp);
- void visitFPBinary(User &I, unsigned FPOp, unsigned VecOp);
+ void visitScalarBinary(User &I, unsigned OpCode);
+ void visitVectorBinary(User &I, unsigned OpCode);
+ void visitEitherBinary(User &I, unsigned ScalarOp, unsigned VectorOp);
void visitShift(User &I, unsigned Opcode);
void visitAdd(User &I) {
- if (I.getType()->isFloatingPoint())
- visitFPBinary(I, ISD::FADD, ISD::VADD);
+ // Dispatch on the result type: packed (vector) adds use the VADD node,
+ // scalar floating-point uses FADD, and everything else uses integer ADD.
+ if (isa<PackedType>(I.getType()))
+ visitVectorBinary(I, ISD::VADD);
+ else if (I.getType()->isFloatingPoint())
+ visitScalarBinary(I, ISD::FADD);
else
- visitIntBinary(I, ISD::ADD, ISD::VADD);
+ visitScalarBinary(I, ISD::ADD);
}
void visitSub(User &I);
void visitMul(User &I) {
- if (I.getType()->isFloatingPoint())
- visitFPBinary(I, ISD::FMUL, ISD::VMUL);
+ // Type dispatch: VMUL for packed types, FMUL for scalar floating point,
+ // MUL for integers.
+ if (isa<PackedType>(I.getType()))
+ visitVectorBinary(I, ISD::VMUL);
+ else if (I.getType()->isFloatingPoint())
+ visitScalarBinary(I, ISD::FMUL);
else
- visitIntBinary(I, ISD::MUL, ISD::VMUL);
+ visitScalarBinary(I, ISD::MUL);
}
- void visitURem(User &I) { visitIntBinary(I, ISD::UREM, 0); }
- void visitSRem(User &I) { visitIntBinary(I, ISD::SREM, 0); }
- void visitFRem(User &I) { visitFPBinary (I, ISD::FREM, 0); }
- void visitUDiv(User &I) { visitIntBinary(I, ISD::UDIV, ISD::VUDIV); }
- void visitSDiv(User &I) { visitIntBinary(I, ISD::SDIV, ISD::VSDIV); }
- void visitFDiv(User &I) { visitFPBinary (I, ISD::FDIV, ISD::VSDIV); }
- void visitAnd(User &I) { visitIntBinary(I, ISD::AND, ISD::VAND); }
- void visitOr (User &I) { visitIntBinary(I, ISD::OR, ISD::VOR); }
- void visitXor(User &I) { visitIntBinary(I, ISD::XOR, ISD::VXOR); }
- void visitShl(User &I) { visitShift(I, ISD::SHL); }
+ // The remainder forms have no vector variant and always lower as scalars.
+ void visitURem(User &I) { visitScalarBinary(I, ISD::UREM); }
+ void visitSRem(User &I) { visitScalarBinary(I, ISD::SREM); }
+ void visitFRem(User &I) { visitScalarBinary(I, ISD::FREM); }
+ // Divides and bitwise ops pick the scalar or vector opcode by result type.
+ // NOTE(review): visitFDiv passes ISD::VSDIV as its vector opcode (the old
+ // code did the same) — presumably no VFDIV node exists yet; confirm.
+ void visitUDiv(User &I) { visitEitherBinary(I, ISD::UDIV, ISD::VUDIV); }
+ void visitSDiv(User &I) { visitEitherBinary(I, ISD::SDIV, ISD::VSDIV); }
+ void visitFDiv(User &I) { visitEitherBinary(I, ISD::FDIV, ISD::VSDIV); }
+ void visitAnd (User &I) { visitEitherBinary(I, ISD::AND, ISD::VAND ); }
+ void visitOr (User &I) { visitEitherBinary(I, ISD::OR, ISD::VOR ); }
+ void visitXor (User &I) { visitEitherBinary(I, ISD::XOR, ISD::VXOR ); }
+ void visitShl (User &I) { visitShift(I, ISD::SHL); }
void visitLShr(User &I) { visitShift(I, ISD::SRL); }
void visitAShr(User &I) { visitShift(I, ISD::SRA); }
void visitICmp(User &I);
void visitVAArg(VAArgInst &I);
void visitVAEnd(CallInst &I);
void visitVACopy(CallInst &I);
- void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);
void visitMemIntrinsic(CallInst &I, unsigned Op);
return N = DAG.getNode(ISD::VBUILD_VECTOR,MVT::Vector,&Ops[0],Ops.size());
} else {
// Canonicalize all constant ints to be unsigned.
- return N = DAG.getConstant(cast<ConstantIntegral>(C)->getZExtValue(),VT);
+ return N = DAG.getConstant(cast<ConstantInt>(C)->getZExtValue(),VT);
}
}
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
const BasicBlock *BB = CurBB->getBasicBlock();
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cond))
- if ((II->getIntrinsicID() == Intrinsic::isunordered_f32 ||
- II->getIntrinsicID() == Intrinsic::isunordered_f64) &&
- // The operands of the setcc have to be in this block. We don't know
- // how to export them from some other block. If this is the first
- // block of the sequence, no exporting is needed.
- (CurBB == CurMBB ||
- (isExportableFromCurrentBlock(II->getOperand(1), BB) &&
- isExportableFromCurrentBlock(II->getOperand(2), BB)))) {
- SelectionDAGISel::CaseBlock CB(ISD::SETUO, II->getOperand(1),
- II->getOperand(2), TBB, FBB, CurBB);
- SwitchCases.push_back(CB);
- return;
- }
-
-
// If the leaf of the tree is a comparison, merge the condition into
// the caseblock.
if ((isa<ICmpInst>(Cond) || isa<FCmpInst>(Cond)) &&
}
// Create a CaseBlock record representing this branch.
- SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantBool::getTrue(),
+ SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
TBB, FBB, CurBB);
SwitchCases.push_back(CB);
return;
}
// Create a CaseBlock record representing this branch.
- SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantBool::getTrue(),
+ SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
Succ0MBB, Succ1MBB, CurMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
// Build the setcc now, fold "(X == true)" to X and "(X == false)" to !X to
// handle common cases produced by branch lowering.
- if (CB.CmpRHS == ConstantBool::getTrue() && CB.CC == ISD::SETEQ)
+ if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
Cond = CondLHS;
- else if (CB.CmpRHS == ConstantBool::getFalse() && CB.CC == ISD::SETEQ) {
+ else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
SDOperand True = DAG.getConstant(1, CondLHS.getValueType());
Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
} else
if ((TLI.isOperationLegal(ISD::BR_JT, MVT::Other) ||
TLI.isOperationLegal(ISD::BRIND, MVT::Other)) &&
Cases.size() > 5) {
- uint64_t First =cast<ConstantIntegral>(Cases.front().first)->getZExtValue();
- uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getZExtValue();
+ uint64_t First =cast<ConstantInt>(Cases.front().first)->getZExtValue();
+ uint64_t Last = cast<ConstantInt>(Cases.back().first)->getZExtValue();
double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
if (Density >= 0.3125) {
std::vector<MachineBasicBlock*> DestBBs;
uint64_t TEI = First;
for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
- if (cast<ConstantIntegral>(ii->first)->getZExtValue() == TEI) {
+ if (cast<ConstantInt>(ii->first)->getZExtValue() == TEI) {
DestBBs.push_back(ii->second);
++ii;
} else {
// rather than creating a leaf node for it.
if ((LHSR.second - LHSR.first) == 1 &&
LHSR.first->first == CR.GE &&
- cast<ConstantIntegral>(C)->getZExtValue() ==
- (cast<ConstantIntegral>(CR.GE)->getZExtValue() + 1ULL)) {
+ cast<ConstantInt>(C)->getZExtValue() ==
+ (cast<ConstantInt>(CR.GE)->getZExtValue() + 1ULL)) {
TrueBB = LHSR.first->second;
} else {
TrueBB = new MachineBasicBlock(LLVMBB);
// is CR.LT - 1, then we can branch directly to the target block for
// the current Case Value, rather than emitting a RHS leaf node for it.
if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
- cast<ConstantIntegral>(RHSR.first->first)->getZExtValue() ==
- (cast<ConstantIntegral>(CR.LT)->getZExtValue() - 1ULL)) {
+ cast<ConstantInt>(RHSR.first->first)->getZExtValue() ==
+ (cast<ConstantInt>(CR.LT)->getZExtValue() - 1ULL)) {
FalseBB = RHSR.first->second;
} else {
FalseBB = new MachineBasicBlock(LLVMBB);
void SelectionDAGLowering::visitSub(User &I) {
// -0.0 - X --> fneg
- if (I.getType()->isFloatingPoint()) {
+ const Type *Ty = I.getType();
+ // Packed subtracts go straight to the vector node; the FNEG fold below
+ // applies only to scalar floating point.
+ if (isa<PackedType>(Ty)) {
+ visitVectorBinary(I, ISD::VSUB);
+ } else if (Ty->isFloatingPoint()) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
if (CFP->isExactlyValue(-0.0)) {
SDOperand Op2 = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
return;
}
- visitFPBinary(I, ISD::FSUB, ISD::VSUB);
+ visitScalarBinary(I, ISD::FSUB);
} else
- visitIntBinary(I, ISD::SUB, ISD::VSUB);
+ visitScalarBinary(I, ISD::SUB);
}
-void
-SelectionDAGLowering::visitIntBinary(User &I, unsigned IntOp, unsigned VecOp) {
- const Type *Ty = I.getType();
+void SelectionDAGLowering::visitScalarBinary(User &I, unsigned OpCode) {
+ // Emit a plain two-operand node of the operand's own value type. Callers
+ // are responsible for routing packed (vector) operations to
+ // visitVectorBinary instead.
SDOperand Op1 = getValue(I.getOperand(0));
SDOperand Op2 = getValue(I.getOperand(1));
-
- if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
- SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
- SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
- setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
- } else {
- setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
- }
+
+ setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2));
}
-void
-SelectionDAGLowering::visitFPBinary(User &I, unsigned FPOp, unsigned VecOp) {
- const Type *Ty = I.getType();
- SDOperand Op1 = getValue(I.getOperand(0));
- SDOperand Op2 = getValue(I.getOperand(1));
+void
+SelectionDAGLowering::visitVectorBinary(User &I, unsigned OpCode) {
+ // Build an MVT::Vector node carrying both operands plus the element count
+ // and element value type, which untyped vector nodes need explicitly.
+ assert(isa<PackedType>(I.getType()));
+ const PackedType *Ty = cast<PackedType>(I.getType());
+ SDOperand Typ = DAG.getValueType(TLI.getValueType(Ty->getElementType()));
+
+ setValue(&I, DAG.getNode(OpCode, MVT::Vector,
+ getValue(I.getOperand(0)),
+ getValue(I.getOperand(1)),
+ DAG.getConstant(Ty->getNumElements(), MVT::i32),
+ Typ));
+}
- if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
- SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
- SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
- setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
- } else {
- setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
- }
+void SelectionDAGLowering::visitEitherBinary(User &I, unsigned ScalarOp,
+ unsigned VectorOp) {
+ // Shared dispatcher: packed result types take the vector opcode, all
+ // other types take the scalar one.
+ if (isa<PackedType>(I.getType()))
+ visitVectorBinary(I, VectorOp);
+ else
+ visitScalarBinary(I, ScalarOp);
}
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
- TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getZExtValue();
+ TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
continue;
}
const Type *Ty = I.getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
- unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
- I.getAlignment());
+ unsigned Align =
+ std::max((unsigned)TLI.getTargetData()->getTypeAlignmentPref(Ty),
+ I.getAlignment());
SDOperand AllocSize = getValue(I.getArraySize());
MVT::ValueType IntPtr = TLI.getPointerTy();
case Intrinsic::vastart: visitVAStart(I); return 0;
case Intrinsic::vaend: visitVAEnd(I); return 0;
case Intrinsic::vacopy: visitVACopy(I); return 0;
- case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
- case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
+ case Intrinsic::returnaddress:
+ setValue(&I, DAG.getNode(ISD::RETURNADDR, TLI.getPointerTy(),
+ getValue(I.getOperand(1))));
+ return 0;
+ case Intrinsic::frameaddress:
+ setValue(&I, DAG.getNode(ISD::FRAMEADDR, TLI.getPointerTy(),
+ getValue(I.getOperand(1))));
+ return 0;
case Intrinsic::setjmp:
return "_setjmp"+!TLI.usesUnderscoreSetJmp();
break;
return 0;
case Intrinsic::dbg_stoppoint: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
- if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
+ if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) {
SDOperand Ops[5];
Ops[0] = getRoot();
Ops[1] = getValue(SPI.getLineValue());
Ops[2] = getValue(SPI.getColumnValue());
- DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
+ DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext());
assert(DD && "Not a debug information descriptor");
CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
return 0;
}
case Intrinsic::dbg_region_start: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
- if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
- unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
- DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(),
+ if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) {
+ unsigned LabelID = MMI->RecordRegionStart(RSI.getContext());
+ DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(),
DAG.getConstant(LabelID, MVT::i32)));
}
return 0;
}
case Intrinsic::dbg_region_end: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
- if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
- unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
- DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
+ if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) {
+ unsigned LabelID = MMI->RecordRegionEnd(REI.getContext());
+ DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other,
getRoot(), DAG.getConstant(LabelID, MVT::i32)));
}
return 0;
}
case Intrinsic::dbg_func_start: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
- if (DebugInfo && FSI.getSubprogram() &&
- DebugInfo->Verify(FSI.getSubprogram())) {
- unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
- DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
+ if (MMI && FSI.getSubprogram() &&
+ MMI->Verify(FSI.getSubprogram())) {
+ unsigned LabelID = MMI->RecordRegionStart(FSI.getSubprogram());
+ DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other,
getRoot(), DAG.getConstant(LabelID, MVT::i32)));
}
return 0;
}
case Intrinsic::dbg_declare: {
- MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
+ MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
- if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
+ if (MMI && DI.getVariable() && MMI->Verify(DI.getVariable())) {
SDOperand AddressOp = getValue(DI.getAddress());
if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
- DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
+ MMI->RecordVariable(DI.getVariable(), FI->getIndex());
}
return 0;
}
- case Intrinsic::isunordered_f32:
- case Intrinsic::isunordered_f64:
- setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
- getValue(I.getOperand(2)), ISD::SETUO));
- return 0;
-
case Intrinsic::sqrt_f32:
case Intrinsic::sqrt_f64:
setValue(&I, DAG.getNode(ISD::FSQRT,
SDOperand ArgNode = getValue(Arg);
Entry.Node = ArgNode; Entry.Ty = Arg->getType();
Entry.isSigned = FTy->paramHasAttr(i, FunctionType::SExtAttribute);
+ Entry.isInReg = FTy->paramHasAttr(i, FunctionType::InRegAttribute);
+ Entry.isSRet = FTy->paramHasAttr(i, FunctionType::StructRetAttribute);
Args.push_back(Entry);
}
return RegsForValue();
}
+/// getConstraintGenerality - Return an integer indicating how general CT is.
+/// Higher values are more general: Other/Unknown < Register <
+/// RegisterClass < Memory. Used to rank alternative inline-asm constraint
+/// letters.
+static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
+ switch (CT) {
+ default: assert(0 && "Unknown constraint type!");
+ case TargetLowering::C_Other:
+ case TargetLowering::C_Unknown:
+ return 0;
+ case TargetLowering::C_Register:
+ return 1;
+ case TargetLowering::C_RegisterClass:
+ return 2;
+ case TargetLowering::C_Memory:
+ return 3;
+ }
+}
+
+/// GetMostGeneralConstraint - Given the alternative constraint codes in C
+/// (nonempty), return the one whose type ranks highest under
+/// getConstraintGenerality.
+static std::string GetMostGeneralConstraint(std::vector<std::string> &C,
+ const TargetLowering &TLI) {
+ assert(!C.empty() && "Must have at least one constraint");
+ if (C.size() == 1) return C[0];
+
+ std::string *Current = &C[0];
+ // If we have multiple constraints, try to pick the most general one ahead
+ // of time. This isn't a wonderful solution, but handles common cases.
+ // NOTE(review): only the first character of each alternative is consulted
+ // (C[j][0]), so multi-letter constraint codes are ranked by their first
+ // letter alone — confirm this is acceptable for the targets involved.
+ TargetLowering::ConstraintType Flavor = TLI.getConstraintType(Current[0][0]);
+ for (unsigned j = 1, e = C.size(); j != e; ++j) {
+ TargetLowering::ConstraintType ThisFlavor = TLI.getConstraintType(C[j][0]);
+ if (getConstraintGenerality(ThisFlavor) >
+ getConstraintGenerality(Flavor)) {
+ // This constraint letter is more general than the previous one,
+ // use it.
+ Flavor = ThisFlavor;
+ Current = &C[j];
+ }
+ }
+ return *Current;
+}
+
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
std::set<unsigned> OutputRegs, InputRegs;
unsigned OpNum = 1;
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
- assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
- std::string &ConstraintCode = Constraints[i].Codes[0];
+ std::string ConstraintCode =
+ GetMostGeneralConstraint(Constraints[i].Codes, TLI);
MVT::ValueType OpVT;
OpNum = 1;
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
- assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
- std::string &ConstraintCode = Constraints[i].Codes[0];
+ std::string ConstraintCode =
+ GetMostGeneralConstraint(Constraints[i].Codes, TLI);
switch (Constraints[i].Type) {
case InlineAsm::isOutput: {
Entry.Node = Src;
Entry.Ty = TLI.getTargetData()->getIntPtrType();
Entry.isSigned = false;
+ Entry.isInReg = false;
+ Entry.isSRet = false;
Args.push_back(Entry);
std::pair<SDOperand,SDOperand> Result =
Entry.Node = getValue(I.getOperand(0));
Entry.Ty = TLI.getTargetData()->getIntPtrType();
Entry.isSigned = false;
+ Entry.isInReg = false;
+ Entry.isSRet = false;
Args.push_back(Entry);
MVT::ValueType IntPtr = TLI.getPointerTy();
std::pair<SDOperand,SDOperand> Result =
/// integrated into SDISel.
std::vector<SDOperand>
TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
+ const FunctionType *FTy = F.getFunctionType();
// Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
std::vector<SDOperand> Ops;
Ops.push_back(DAG.getRoot());
// Add one result value for each formal argument.
std::vector<MVT::ValueType> RetVals;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
+ unsigned j = 1;
+ for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
+ I != E; ++I, ++j) {
MVT::ValueType VT = getValueType(I->getType());
+ bool isInReg = FTy->paramHasAttr(j, FunctionType::InRegAttribute);
+ bool isSRet = FTy->paramHasAttr(j, FunctionType::StructRetAttribute);
+ unsigned Flags = (isInReg << 1) | (isSRet << 2);
switch (getTypeAction(VT)) {
default: assert(0 && "Unknown type action!");
case Legal:
RetVals.push_back(VT);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Promote:
RetVals.push_back(getTypeToTransformTo(VT));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Expand:
if (VT != MVT::Vector) {
// integers it turns into.
MVT::ValueType NVT = getTypeToExpandTo(VT);
unsigned NumVals = getNumElements(VT);
- for (unsigned i = 0; i != NumVals; ++i)
+ for (unsigned i = 0; i != NumVals; ++i) {
RetVals.push_back(NVT);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
+ }
} else {
// Otherwise, this is a vector type. We only support legal vectors
// right now.
MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
if (TVT != MVT::Other && isTypeLegal(TVT)) {
RetVals.push_back(TVT);
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
} else {
assert(0 && "Don't support illegal by-val vector arguments yet!");
}
// Set up the return result vector.
Ops.clear();
- const FunctionType *FTy = F.getFunctionType();
unsigned i = 0;
unsigned Idx = 1;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
/// ExpandScalarCallArgs - Recursively expand call argument node by
/// bit_converting it or extract a pair of elements from the larger node.
static void ExpandScalarCallArgs(MVT::ValueType VT, SDOperand Arg,
- bool isSigned,
+ unsigned Flags,
SmallVector<SDOperand, 32> &Ops,
SelectionDAG &DAG,
TargetLowering &TLI) {
if (TLI.getTypeAction(VT) != TargetLowering::Expand) {
Ops.push_back(Arg);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
return;
}
unsigned NumVals = MVT::getSizeInBits(VT) / MVT::getSizeInBits(EVT);
if (NumVals == 1) {
Arg = DAG.getNode(ISD::BIT_CONVERT, EVT, Arg);
- ExpandScalarCallArgs(EVT, Arg, isSigned, Ops, DAG, TLI);
+ ExpandScalarCallArgs(EVT, Arg, Flags, Ops, DAG, TLI);
} else if (NumVals == 2) {
SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, EVT, Arg,
DAG.getConstant(0, TLI.getPointerTy()));
DAG.getConstant(1, TLI.getPointerTy()));
if (!TLI.isLittleEndian())
std::swap(Lo, Hi);
- ExpandScalarCallArgs(EVT, Lo, isSigned, Ops, DAG, TLI);
- ExpandScalarCallArgs(EVT, Hi, isSigned, Ops, DAG, TLI);
+ ExpandScalarCallArgs(EVT, Lo, Flags, Ops, DAG, TLI);
+ ExpandScalarCallArgs(EVT, Hi, Flags, Ops, DAG, TLI);
} else {
// Value scalarized into many values. Unimp for now.
assert(0 && "Cannot expand i64 -> i16 yet!");
MVT::ValueType VT = getValueType(Args[i].Ty);
SDOperand Op = Args[i].Node;
bool isSigned = Args[i].isSigned;
+ bool isInReg = Args[i].isInReg;
+ bool isSRet = Args[i].isSRet;
+ unsigned Flags = (isSRet << 2) | (isInReg << 1) | isSigned;
switch (getTypeAction(VT)) {
default: assert(0 && "Unknown type action!");
case Legal:
Ops.push_back(Op);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Promote:
if (MVT::isInteger(VT)) {
Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
}
Ops.push_back(Op);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
break;
case Expand:
if (VT != MVT::Vector) {
// If this is a large integer, it needs to be broken down into small
// integers. Figure out what the source elt type is and how many small
// integers it is.
- ExpandScalarCallArgs(VT, Op, isSigned, Ops, DAG, *this);
+ ExpandScalarCallArgs(VT, Op, Flags, Ops, DAG, *this);
} else {
// Otherwise, this is a vector type. We only support legal vectors
// right now.
// Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
Ops.push_back(Op);
- Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
+ Ops.push_back(DAG.getConstant(Flags, MVT::i32));
} else {
assert(0 && "Don't support illegal by-val vector call args yet!");
abort();
return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
}
-
-
-// It is always conservatively correct for llvm.returnaddress and
-// llvm.frameaddress to return 0.
-//
-// FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be
-// expanded to 0 if the target wants.
-std::pair<SDOperand, SDOperand>
-TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
- unsigned Depth, SelectionDAG &DAG) {
- return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
-}
-
+/// LowerOperation - Default implementation of the custom-lowering hook:
+/// no lowering is provided here, so it asserts and aborts if ever reached.
SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
assert(0 && "LowerOperation not implemented for this target!");
abort();
return SDOperand();
}
-void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
- unsigned Depth = (unsigned)cast<ConstantInt>(I.getOperand(1))->getZExtValue();
- std::pair<SDOperand,SDOperand> Result =
- TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
- setValue(&I, Result.first);
- DAG.setRoot(Result.second);
-}
-
/// getMemsetValue - Vectorized representation of the memset value
/// operand.
static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
FunctionLoweringInfo &FuncInfo) {
std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
{
- SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
CurDAG = &DAG;
// First step, lower LLVM code to some DAG. This DAG may use operations and
// whether the PHI is a successor of the range check MBB or the jump table MBB
if (JT.Reg) {
assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
- SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
CurDAG = &SDAG;
SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
MachineBasicBlock *RangeBB = BB;
// If we generated any switch lowering information, build and codegen any
// additional DAGs necessary.
for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
- SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
+ SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
CurDAG = &SDAG;
SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);