X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FSelectionDAG%2FFunctionLoweringInfo.cpp;h=1c85248eb846602b01f68a2df7f9c1597cf6c038;hb=6035518e3bd06cef0515af5a319fbe2cea7df6d4;hp=cc9a3d572254c2f55ba868e04eb3ed72a2b0983b;hpb=d84e806dd4b03fa711ae2665ac1acef646f16c5c;p=oota-llvm.git

diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index cc9a3d57225..1c85248eb84 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -12,113 +12,35 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "function-lowering-info"
-#include "FunctionLoweringInfo.h"
-#include "llvm/CallingConv.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include <algorithm>
 using namespace llvm;
 
-/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
-/// of insertvalue or extractvalue indices that identify a member, return
-/// the linearized index of the start of the member.
-///
-unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
-                                  const unsigned *Indices,
-                                  const unsigned *IndicesEnd,
-                                  unsigned CurIndex) {
-  // Base case: We're done.
-  if (Indices && Indices == IndicesEnd)
-    return CurIndex;
-
-  // Given a struct type, recursively traverse the elements.
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
-    for (StructType::element_iterator EB = STy->element_begin(),
-                                      EI = EB,
-                                      EE = STy->element_end();
-        EI != EE; ++EI) {
-      if (Indices && *Indices == unsigned(EI - EB))
-        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
-      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
-    }
-    return CurIndex;
-  }
-  // Given an array type, recursively traverse the elements.
-  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    const Type *EltTy = ATy->getElementType();
-    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
-      if (Indices && *Indices == i)
-        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
-      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
-    }
-    return CurIndex;
-  }
-  // We haven't found the type we're looking for, so keep searching.
-  return CurIndex + 1;
-}
-
-/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
-/// EVTs that represent all the individual underlying
-/// non-aggregate types that comprise it.
-///
-/// If Offsets is non-null, it points to a vector to be filled in
-/// with the in-memory offsets of each of the individual values.
-///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
-                           SmallVectorImpl<EVT> &ValueVTs,
-                           SmallVectorImpl<uint64_t> *Offsets,
-                           uint64_t StartingOffset) {
-  // Given a struct type, recursively traverse the elements.
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
-    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
-    for (StructType::element_iterator EB = STy->element_begin(),
-                                      EI = EB,
-                                      EE = STy->element_end();
-         EI != EE; ++EI)
-      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
-                      StartingOffset + SL->getElementOffset(EI - EB));
-    return;
-  }
-  // Given an array type, recursively traverse the elements.
-  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    const Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
-    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
-      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
-                      StartingOffset + i * EltSize);
-    return;
-  }
-  // Interpret void as zero return values.
-  if (Ty->isVoidTy())
-    return;
-  // Base case: we can get an EVT for this LLVM IR type.
-  ValueVTs.push_back(TLI.getValueType(Ty));
-  if (Offsets)
-    Offsets->push_back(StartingOffset);
-}
+#define DEBUG_TYPE "function-lowering-info"
 
 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
 /// PHI nodes or outside of the basic block that defines it, or used by a
@@ -127,75 +49,134 @@ static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
   if (I->use_empty()) return false;
   if (isa<PHINode>(I)) return true;
   const BasicBlock *BB = I->getParent();
-  for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
-       UI != E; ++UI)
-    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
+  for (const User *U : I->users())
+    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
       return true;
-  return false;
-}
-
-/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
-/// entry block, return true. This includes arguments used by switches, since
-/// the switch may expand into multiple basic blocks.
-static bool isOnlyUsedInEntryBlock(const Argument *A, bool EnableFastISel) {
-  // With FastISel active, we may be splitting blocks, so force creation
-  // of virtual registers for all non-dead arguments.
-  // Don't force virtual registers for byval arguments though, because
-  // fast-isel can't handle those in all cases.
-  if (EnableFastISel && !A->hasByValAttr())
-    return A->use_empty();
-
-  const BasicBlock *Entry = A->getParent()->begin();
-  for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
-       UI != E; ++UI)
-    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
-      return false;  // Use not in entry block.
-  return true;
-}
-
-FunctionLoweringInfo::FunctionLoweringInfo(const TargetLowering &tli)
-  : TLI(tli) {
+  return false;
 }
 
 void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
-                               bool EnableFastISel) {
+                               SelectionDAG *DAG) {
+  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
+
   Fn = &fn;
   MF = &mf;
   RegInfo = &MF->getRegInfo();
 
-  // Create a vreg for each argument register that is not dead and is used
-  // outside of the entry block for the function.
-  for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
-       AI != E; ++AI)
-    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
-      InitializeRegForValue(AI);
+  // Check whether the function can return without sret-demotion.
+  SmallVector<ISD::OutputArg, 4> Outs;
+  GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI);
+  CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
+                                       Fn->isVarArg(),
+                                       Outs, Fn->getContext());
 
   // Initialize the mapping of values to registers.  This is only set up for
   // instruction values that are used outside of the block that defines
   // them.
   Function::const_iterator BB = Fn->begin(), EB = Fn->end();
   for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
-    if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
+    if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
+      // Don't fold inalloca allocas or other dynamic allocas into the initial
+      // stack frame allocation, even if they are in the entry block.
+      if (!AI->isStaticAlloca())
+        continue;
+
       if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
-        const Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
+        Type *Ty = AI->getAllocatedType();
+        uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
         unsigned Align =
-          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
+          std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());
 
         TySize *= CUI->getZExtValue();   // Get total allocated size.
         if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
+
         StaticAllocaMap[AI] =
-          MF->getFrameInfo()->CreateStackObject(TySize, Align, false);
+          MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
       }
+    }
 
   for (; BB != EB; ++BB)
-    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
+    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
+         I != E; ++I) {
+      // Look for dynamic allocas.
+      if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
+        if (!AI->isStaticAlloca()) {
+          unsigned Align = std::max(
+              (unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
+                  AI->getAllocatedType()),
+              AI->getAlignment());
+          unsigned StackAlign =
+              TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
+          if (Align <= StackAlign)
+            Align = 0;
+          // Inform the Frame Information that we have variable-sized objects.
+          MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
+        }
+      }
+
+      // Look for inline asm that clobbers the SP register.
+      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
+        ImmutableCallSite CS(I);
+        if (isa<InlineAsm>(CS.getCalledValue())) {
+          unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
+          std::vector<TargetLowering::AsmOperandInfo> Ops =
+            TLI->ParseConstraints(CS);
+          for (size_t I = 0, E = Ops.size(); I != E; ++I) {
+            TargetLowering::AsmOperandInfo &Op = Ops[I];
+            if (Op.Type == InlineAsm::isClobber) {
+              // Clobbers don't have SDValue operands, hence SDValue().
+              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
+              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
+                TLI->getRegForInlineAsmConstraint(Op.ConstraintCode,
+                                                  Op.ConstraintVT);
+              if (PhysReg.first == SP)
+                MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(true);
+            }
+          }
+        }
+      }
+
+      // Mark values used outside their block as exported, by allocating
+      // a virtual register for them.
       if (isUsedOutsideOfDefiningBlock(I))
         if (!isa<AllocaInst>(I) ||
             !StaticAllocaMap.count(cast<AllocaInst>(I)))
           InitializeRegForValue(I);
 
+      // Collect llvm.dbg.declare information. This is done now instead of
+      // during the initial isel pass through the IR so that it is done
+      // in a predictable order.
+      if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
+        MachineModuleInfo &MMI = MF->getMMI();
+        DIVariable DIVar(DI->getVariable());
+        assert((!DIVar || DIVar.isVariable()) &&
+          "Variable in DbgDeclareInst should be either null or a DIVariable.");
+        if (MMI.hasDebugInfo() &&
+            DIVar &&
+            !DI->getDebugLoc().isUnknown()) {
+          // Don't handle byval struct arguments or VLAs, for example.
+          // Non-byval arguments are handled here (they refer to the stack
+          // temporary alloca at this point).
+          const Value *Address = DI->getAddress();
+          if (Address) {
+            if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
+              Address = BCI->getOperand(0);
+            if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
+              DenseMap<const AllocaInst *, int>::iterator SI =
+                StaticAllocaMap.find(AI);
+              if (SI != StaticAllocaMap.end()) { // Check for VLAs.
+                int FI = SI->second;
+                MMI.setVariableDbgInfo(DI->getVariable(),
+                                       FI, DI->getDebugLoc());
+              }
+            }
+          }
+        }
+      }
+    }
+
   // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
   // also creates the initial PHI MachineInstrs, though none of the input
   // operands are populated.
@@ -216,16 +197,20 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
       if (PN->use_empty()) continue;
 
+      // Skip empty types
+      if (PN->getType()->isEmptyTy())
+        continue;
+
       DebugLoc DL = PN->getDebugLoc();
       unsigned PHIReg = ValueMap[PN];
       assert(PHIReg && "PHI node does not have an assigned virtual register!");
 
       SmallVector<EVT, 4> ValueVTs;
-      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
+      ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
         EVT VT = ValueVTs[vti];
-        unsigned NumRegisters = TLI.getNumRegisters(Fn->getContext(), VT);
-        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
+        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
         for (unsigned i = 0; i != NumRegisters; ++i)
           BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
         PHIReg += NumRegisters;
@@ -254,53 +239,206 @@ void FunctionLoweringInfo::clear() {
   CatchInfoFound.clear();
 #endif
   LiveOutRegInfo.clear();
+  VisitedBBs.clear();
+  ArgDbgValues.clear();
+  ByValArgFrameIndexMap.clear();
+  RegFixups.clear();
 }
 
-unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
-  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
+/// CreateReg - Allocate a single virtual register for the given type.
+unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
+  return RegInfo->createVirtualRegister(
+    TM.getSubtargetImpl()->getTargetLowering()->getRegClassFor(VT));
 }
 
-/// CreateRegForValue - Allocate the appropriate number of virtual registers of
+/// CreateRegs - Allocate the appropriate number of virtual registers of
 /// the correctly promoted or expanded types.  Assign these registers
 /// consecutive vreg numbers and return the first assigned number.
 ///
 /// In the case that the given value has struct or array type, this function
 /// will assign registers for each member or element.
 ///
-unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
+unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
+  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
+
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, V->getType(), ValueVTs);
+  ComputeValueVTs(*TLI, Ty, ValueVTs);
 
   unsigned FirstReg = 0;
   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
     EVT ValueVT = ValueVTs[Value];
-    EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
+    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);
 
-    unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
+    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
     for (unsigned i = 0; i != NumRegs; ++i) {
-      unsigned R = MakeReg(RegisterVT);
+      unsigned R = CreateReg(RegisterVT);
       if (!FirstReg) FirstReg = R;
     }
   }
   return FirstReg;
 }
 
-/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
-GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
-  V = V->stripPointerCasts();
-  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
-
-  if (GV && GV->getName() == ".llvm.eh.catch.all.value") {
-    assert(GV->hasInitializer() &&
-           "The EH catch-all value must have an initializer");
-    Value *Init = GV->getInitializer();
-    GV = dyn_cast<GlobalVariable>(Init);
-    if (!GV) V = cast<ConstantPointerNull>(Init);
+/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
+/// the register's LiveOutInfo is for a smaller bit width, it is extended to
+/// the larger bit width by zero extension. The bit width must be no smaller
+/// than the LiveOutInfo's existing bit width.
+const FunctionLoweringInfo::LiveOutInfo *
+FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
+  if (!LiveOutRegInfo.inBounds(Reg))
+    return nullptr;
+
+  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
+  if (!LOI->IsValid)
+    return nullptr;
+
+  if (BitWidth > LOI->KnownZero.getBitWidth()) {
+    LOI->NumSignBits = 1;
+    LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
+    LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
   }
 
-  assert((GV || isa<ConstantPointerNull>(V)) &&
-         "TypeInfo must be a global variable or NULL");
-  return GV;
+  return LOI;
+}
+
+/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
+/// register based on the LiveOutInfo of its operands.
+void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
+  Type *Ty = PN->getType();
+  if (!Ty->isIntegerTy() || Ty->isVectorTy())
+    return;
+
+  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
+
+  SmallVector<EVT, 1> ValueVTs;
+  ComputeValueVTs(*TLI, Ty, ValueVTs);
+  assert(ValueVTs.size() == 1 &&
+         "PHIs with non-vector integer types should have a single VT.");
+  EVT IntVT = ValueVTs[0];
+
+  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
+    return;
+  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
+  unsigned BitWidth = IntVT.getSizeInBits();
+
+  unsigned DestReg = ValueMap[PN];
+  if (!TargetRegisterInfo::isVirtualRegister(DestReg))
+    return;
+  LiveOutRegInfo.grow(DestReg);
+  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];
+
+  Value *V = PN->getIncomingValue(0);
+  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
+    DestLOI.NumSignBits = 1;
+    APInt Zero(BitWidth, 0);
+    DestLOI.KnownZero = Zero;
+    DestLOI.KnownOne = Zero;
+    return;
+  }
+
+  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
+    DestLOI.NumSignBits = Val.getNumSignBits();
+    DestLOI.KnownZero = ~Val;
+    DestLOI.KnownOne = Val;
+  } else {
+    assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
                                 "CopyToReg node was created.");
+    unsigned SrcReg = ValueMap[V];
+    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+      DestLOI.IsValid = false;
+      return;
+    }
+    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
+    if (!SrcLOI) {
+      DestLOI.IsValid = false;
+      return;
+    }
+    DestLOI = *SrcLOI;
+  }
+
+  assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
+         DestLOI.KnownOne.getBitWidth() == BitWidth &&
+         "Masks should have the same bit width as the type.");
+
+  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
+    Value *V = PN->getIncomingValue(i);
+    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
+      DestLOI.NumSignBits = 1;
+      APInt Zero(BitWidth, 0);
+      DestLOI.KnownZero = Zero;
+      DestLOI.KnownOne = Zero;
+      return;
+    }
+
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
+      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
+      DestLOI.KnownZero &= ~Val;
+      DestLOI.KnownOne &= Val;
+      continue;
+    }
+
+    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                 "its CopyToReg node was created.");
+    unsigned SrcReg = ValueMap[V];
+    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+      DestLOI.IsValid = false;
+      return;
+    }
+    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
+    if (!SrcLOI) {
+      DestLOI.IsValid = false;
+      return;
+    }
+    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
+    DestLOI.KnownZero &= SrcLOI->KnownZero;
+    DestLOI.KnownOne &= SrcLOI->KnownOne;
+  }
+}
+
+/// setArgumentFrameIndex - Record frame index for the byval
+/// argument. This overrides previous frame index entry for this argument,
+/// if any.
+void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
+                                                 int FI) {
+  ByValArgFrameIndexMap[A] = FI;
+}
+
+/// getArgumentFrameIndex - Get frame index for the byval argument.
+/// If the argument does not have any assigned frame index then 0 is
+/// returned.
+int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
+  DenseMap<const Argument *, int>::iterator I =
+    ByValArgFrameIndexMap.find(A);
+  if (I != ByValArgFrameIndexMap.end())
+    return I->second;
+  DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
+  return 0;
+}
+
+/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
+/// being passed to this variadic function, and set the MachineModuleInfo's
+/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
+/// reference to _fltused on Windows, which will link in MSVCRT's
+/// floating-point support.
+void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
+                                      MachineModuleInfo *MMI)
+{
+  FunctionType *FT = cast<FunctionType>(
+    I.getCalledValue()->getType()->getContainedType(0));
+  if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
+    for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+      Type* T = I.getArgOperand(i)->getType();
+      for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
+           i != e; ++i) {
+        if (i->isFloatingPointTy()) {
+          MMI->setUsesVAFloatArgument(true);
+          return;
+        }
+      }
+    }
+  }
 }
 
 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
@@ -308,7 +446,7 @@ GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
 void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
                         MachineBasicBlock *MBB) {
   // Inform the MachineModuleInfo of the personality for this landing pad.
-  const ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
+  const ConstantExpr *CE = cast<ConstantExpr>(I.getArgOperand(1));
   assert(CE->getOpcode() == Instruction::BitCast &&
          isa<Function>(CE->getOperand(0)) &&
          "Personality should be a function");
@@ -317,18 +455,18 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
   // Gather all the type infos for this landing pad and pass them along to
   // MachineModuleInfo.
   std::vector<const GlobalVariable *> TyInfo;
-  unsigned N = I.getNumOperands();
+  unsigned N = I.getNumArgOperands();
 
-  for (unsigned i = N - 1; i > 2; --i) {
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
+  for (unsigned i = N - 1; i > 1; --i) {
+    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(i))) {
       unsigned FilterLength = CI->getZExtValue();
       unsigned FirstCatch = i + FilterLength + !FilterLength;
-      assert (FirstCatch <= N && "Invalid filter length");
+      assert(FirstCatch <= N && "Invalid filter length");
 
       if (FirstCatch < N) {
         TyInfo.reserve(N - FirstCatch);
         for (unsigned j = FirstCatch; j < N; ++j)
-          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+          TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
         MMI->addCatchTypeInfo(MBB, TyInfo);
         TyInfo.clear();
       }
@@ -340,7 +478,7 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
         // Filter.
         TyInfo.reserve(FilterLength - 1);
         for (unsigned j = i + 1; j < FirstCatch; ++j)
-          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+          TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
         MMI->addFilterTypeInfo(MBB, TyInfo);
         TyInfo.clear();
       }
@@ -349,185 +487,41 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
     }
   }
 
-  if (N > 3) {
-    TyInfo.reserve(N - 3);
-    for (unsigned j = 3; j < N; ++j)
-      TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+  if (N > 2) {
+    TyInfo.reserve(N - 2);
+    for (unsigned j = 2; j < N; ++j)
+      TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
     MMI->addCatchTypeInfo(MBB, TyInfo);
   }
 }
 
-void llvm::CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
-                         MachineModuleInfo *MMI, FunctionLoweringInfo &FLI) {
-  for (BasicBlock::const_iterator I = SrcBB->begin(), E = --SrcBB->end();
-       I != E; ++I)
-    if (const EHSelectorInst *EHSel = dyn_cast<EHSelectorInst>(I)) {
-      // Apply the catch info to DestBB.
-      AddCatchInfo(*EHSel, MMI, FLI.MBBMap[DestBB]);
-#ifndef NDEBUG
-      if (!FLI.MBBMap[SrcBB]->isLandingPad())
-        FLI.CatchInfoFound.insert(EHSel);
-#endif
-    }
-}
-
-/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
-/// processed uses a memory 'm' constraint.
-bool
-llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
-                                const TargetLowering &TLI) {
-  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
-    InlineAsm::ConstraintInfo &CI = CInfos[i];
-    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
-      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
-      if (CType == TargetLowering::C_Memory)
-        return true;
-    }
-
-    // Indirect operand accesses access memory.
-    if (CI.isIndirect)
-      return true;
-  }
-
-  return false;
-}
-
-/// getFCmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR floating-point condition code.  This includes
-/// consideration of global floating-point math flags.
-///
-ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
-  ISD::CondCode FPC, FOC;
-  switch (Pred) {
-  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
-  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
-  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
-  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
-  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
-  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
-  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
-  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO; break;
-  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
-  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
-  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
-  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
-  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
-  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
-  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
-  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
-  default:
-    llvm_unreachable("Invalid FCmp predicate opcode!");
-    FOC = FPC = ISD::SETFALSE;
-    break;
-  }
-  if (FiniteOnlyFPMath())
-    return FOC;
-  else
-    return FPC;
-}
-
-/// getICmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR integer condition code.
-///
-ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
-  switch (Pred) {
-  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
-  case ICmpInst::ICMP_NE:  return ISD::SETNE;
-  case ICmpInst::ICMP_SLE: return ISD::SETLE;
-  case ICmpInst::ICMP_ULE: return ISD::SETULE;
-  case ICmpInst::ICMP_SGE: return ISD::SETGE;
-  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
-  case ICmpInst::ICMP_SLT: return ISD::SETLT;
-  case ICmpInst::ICMP_ULT: return ISD::SETULT;
-  case ICmpInst::ICMP_SGT: return ISD::SETGT;
-  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
-  default:
-    llvm_unreachable("Invalid ICmp predicate opcode!");
-    return ISD::SETNE;
-  }
-}
-
-/// Test if the given instruction is in a position to be optimized
-/// with a tail-call. This roughly means that it's in a block with
-/// a return and there's nothing that needs to be scheduled
-/// between it and the return.
-///
-/// This function only tests target-independent requirements.
-bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
-                                const TargetLowering &TLI) {
-  const Instruction *I = CS.getInstruction();
-  const BasicBlock *ExitBB = I->getParent();
-  const TerminatorInst *Term = ExitBB->getTerminator();
-  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
-  const Function *F = ExitBB->getParent();
-
-  // The block must end in a return statement or unreachable.
-  //
-  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
-  // an unreachable, for now. The way tailcall optimization is currently
-  // implemented means it will add an epilogue followed by a jump. That is
-  // not profitable. Also, if the callee is a special function (e.g.
-  // longjmp on x86), it can end up causing miscompilation that has not
-  // been fully understood.
-  if (!Ret &&
-      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
-
-  // If I will have a chain, make sure no other instruction that will have a
-  // chain interposes between I and the return.
-  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
-      !I->isSafeToSpeculativelyExecute())
-    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
-         --BBI) {
-      if (&*BBI == I)
-        break;
-      // Debug info intrinsics do not get in the way of tail call optimization.
-      if (isa<DbgInfoIntrinsic>(BBI))
-        continue;
-      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
-          !BBI->isSafeToSpeculativelyExecute())
-        return false;
+/// AddLandingPadInfo - Extract the exception handling information from the
+/// landingpad instruction and add them to the specified machine module info.
+void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
+                             MachineBasicBlock *MBB) {
+  MMI.addPersonality(MBB,
+                     cast<Function>(I.getPersonalityFn()->stripPointerCasts()));
+
+  if (I.isCleanup())
+    MMI.addCleanup(MBB);
+
+  // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
+  // but we need to do it this way because of how the DWARF EH emitter
+  // processes the clauses.
+  for (unsigned i = I.getNumClauses(); i != 0; --i) {
+    Value *Val = I.getClause(i - 1);
+    if (I.isCatch(i - 1)) {
+      MMI.addCatchTypeInfo(MBB,
+                           dyn_cast<GlobalVariable>(Val->stripPointerCasts()));
+    } else {
+      // Add filters in a list.
+      Constant *CVal = cast<Constant>(Val);
+      SmallVector<const GlobalVariable *, 4> FilterList;
+      for (User::op_iterator
+             II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
+        FilterList.push_back(cast<GlobalVariable>((*II)->stripPointerCasts()));
+
+      MMI.addFilterTypeInfo(MBB, FilterList);
     }
-
-  // If the block ends with a void return or unreachable, it doesn't matter
-  // what the call's return type is.
-  if (!Ret || Ret->getNumOperands() == 0) return true;
-
-  // If the return value is undef, it doesn't matter what the call's
-  // return type is.
-  if (isa<UndefValue>(Ret->getOperand(0))) return true;
-
-  // Conservatively require the attributes of the call to match those of
-  // the return. Ignore noalias because it doesn't affect the call sequence.
-  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
-  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
-    return false;
-
-  // It's not safe to eliminate the sign / zero extension of the return value.
-  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
-    return false;
-
-  // Otherwise, make sure the unmodified return value of I is the return value.
-  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
-       U = dyn_cast<Instruction>(U->getOperand(0))) {
-    if (!U)
-      return false;
-    if (!U->hasOneUse())
-      return false;
-    if (U == I)
-      break;
-    // Check for a truly no-op truncate.
-    if (isa<TruncInst>(U) &&
-        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
-      continue;
-    // Check for a truly no-op bitcast.
-    if (isa<BitCastInst>(U) &&
-        (U->getOperand(0)->getType() == U->getType() ||
-         (U->getOperand(0)->getType()->isPointerTy() &&
-          U->getType()->isPointerTy())))
-      continue;
-    // Otherwise it's not a true no-op.
-    return false;
   }
-
-  return true;
 }
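The `ComputePHILiveOutRegInfo` hunk added above seeds known-bits and sign-bit information from a PHI's first incoming constant and then intersects it with each further incoming value. The following standalone sketch mirrors only that merge rule, using plain 64-bit masks instead of `APInt`; the `LiveOut` struct and helper names are illustrative stand-ins, not LLVM API.

```cpp
// phi_known_bits_sketch.cpp - merge rule used by ComputePHILiveOutRegInfo.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for FunctionLoweringInfo::LiveOutInfo.
struct LiveOut {
  uint64_t KnownZero;   // bits known to be zero across all incoming values
  uint64_t KnownOne;    // bits known to be one across all incoming values
  unsigned NumSignBits; // number of identical leading (sign) bits
};

// Count leading bits equal to the top bit, like APInt::getNumSignBits.
static unsigned numSignBits(uint64_t V) {
  uint64_t Top = V >> 63;
  unsigned N = 0;
  for (int Bit = 63; Bit >= 0 && ((V >> Bit) & 1) == Top; --Bit)
    ++N;
  return N;
}

// Seed the info from the first constant incoming value.
static LiveOut seed(uint64_t V) {
  return {~V, V, numSignBits(V)};
}

// Fold another constant incoming value into the running info, as the loop
// over PN->getIncomingValue(i) does in the patch above.
static void merge(LiveOut &LO, uint64_t V) {
  LO.NumSignBits = std::min(LO.NumSignBits, numSignBits(V));
  LO.KnownZero &= ~V;
  LO.KnownOne &= V;
}

int main() {
  LiveOut LO = seed(0x10);
  merge(LO, 0x30);
  merge(LO, 0x12);
  std::cout << std::hex << LO.KnownZero << ' ' << LO.KnownOne << ' '
            << std::dec << LO.NumSignBits << '\n';
}
```

In the real code the non-constant incoming values are handled by copying or intersecting the `LiveOutInfo` of the source virtual register instead, and the whole result is invalidated when that information is unavailable.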
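The removed `ComputeLinearIndex` helper (moved out of this file by this change) walks an aggregate type and returns the flat position of the member named by an insertvalue/extractvalue index path, counting one slot per scalar leaf. A minimal, LLVM-free sketch of the same recursion, using a hypothetical `Agg` node in place of `StructType`/`ArrayType`:

```cpp
// linear_index_sketch.cpp - the recursion behind ComputeLinearIndex.
#include <iostream>
#include <vector>

// Hypothetical aggregate: a node is either a scalar leaf or a list of elements.
struct Agg {
  std::vector<Agg> Elems;                    // empty => scalar leaf
  bool isScalar() const { return Elems.empty(); }
};

// Each scalar leaf occupies one slot; elements before the selected index
// contribute all of their leaves to the running count.
static unsigned linearIndex(const Agg &Ty, const unsigned *Indices,
                            const unsigned *IndicesEnd, unsigned CurIndex = 0) {
  if (Indices && Indices == IndicesEnd)      // consumed the whole path
    return CurIndex;
  if (!Ty.isScalar()) {
    for (unsigned i = 0, e = Ty.Elems.size(); i != e; ++i) {
      if (Indices && *Indices == i)
        return linearIndex(Ty.Elems[i], Indices + 1, IndicesEnd, CurIndex);
      CurIndex = linearIndex(Ty.Elems[i], nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  return CurIndex + 1;                       // scalar leaf
}

int main() {
  // Model { i32, { float, double }, [2 x i64] } and ask for the path {2, 1}.
  Agg S;                                     // one scalar leaf
  Agg Outer{{S, Agg{{S, S}}, Agg{{S, S}}}};
  unsigned Path[] = {2, 1};
  std::cout << linearIndex(Outer, Path, Path + 2) << '\n';  // prints 4
}
```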