#include "llvm/Support/CallSite.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
/// TODO: Perhaps calls like memcpy, strcpy, etc?
bool llvm::callIsSmall(const Function *F) {
if (!F) return false;
-
+
if (F->hasLocalLinkage()) return false;
-
+
if (!F->hasName()) return false;
-
+
StringRef Name = F->getName();
-
+
// These will all likely lower to a single selection DAG node.
if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
Name == "cos" || Name == "cosf" || Name == "cosl" ||
Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" )
return true;
-
+
// These are all likely to be optimized into something smaller.
if (Name == "pow" || Name == "powf" || Name == "powl" ||
Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
Name == "round" || Name == "ffs" || Name == "ffsl" ||
Name == "abs" || Name == "labs" || Name == "llabs")
return true;
-
+
return false;
}
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
-void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
+void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
+ const TargetData *TD) {
++NumBlocks;
unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
     II != E; ++II) {
// Special handling for calls.
if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
- if (isa<DbgInfoIntrinsic>(II))
- continue; // Debug intrinsics don't count as size.
+ if (const IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(II)) {
+ switch (IntrinsicI->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::objectsize:
+ case Intrinsic::ptr_annotation:
+ case Intrinsic::var_annotation:
+ // These intrinsics don't count as size.
+ continue;
+ }
+ }
ImmutableCallSite CS(cast<Instruction>(II));
- // If this function contains a call to setjmp or _setjmp, never inline
- // it. This is a hack because we depend on the user marking their local
- // variables as volatile if they are live across a setjmp call, and they
- // probably won't do this in callers.
if (const Function *F = CS.getCalledFunction()) {
- // If a function is both internal and has a single use, then it is
- // extremely likely to get inlined in the future (it was probably
+ // If a function is both internal and has a single use, then it is
+ // extremely likely to get inlined in the future (it was probably
// exposed by an interleaved devirtualization pass).
- if (F->hasInternalLinkage() && F->hasOneUse())
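+ // A call site explicitly marked noinline, however, is never a candidate.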
+ if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
++NumInlineCandidates;
-
- if (F->isDeclaration() &&
- (F->getName() == "setjmp" || F->getName() == "_setjmp"))
- callsSetJmp = true;
-
+
// If this call is to the function itself, then the function is recursive.
// Inlining it into other functions is a bad idea, because this is
// basically just a form of loop peeling, and our metrics aren't useful
++NumCalls;
}
}
-
+
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
if (!AI->isStaticAlloca())
this->usesDynamicAlloca = true;
}
if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
- ++NumVectorInsts;
-
+ ++NumVectorInsts;
+
if (const CastInst *CI = dyn_cast<CastInst>(II)) {
// Noop casts, including ptr <-> int, don't count.
- if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
+ if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
isa<PtrToIntInst>(CI))
continue;
+ // trunc to a native type is free (assuming the target has compare and
+ // shift-right of the same width).
+ if (isa<TruncInst>(CI) && TD &&
+ TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
+ continue;
// Result of a cmp instruction is often extended (to be used by other
// cmp instructions, logical or return instructions). These are usually
// nop on most sane targets.
++NumInsts;
}
-
+
if (isa<ReturnInst>(BB->getTerminator()))
++NumRets;
-
+
// We never want to inline functions that contain an indirectbr. This is
// incorrect because all the blockaddresses (in static global initializers,
// for example) would still refer to the original function, and any such
// indirect jump would leap from the inlined copy of the function into the
// original function, which is extremely undefined behavior.
+ // FIXME: This logic isn't really right; we can safely inline functions
+ // with indirectbr's as long as no other function or global references the
+ // blockaddress of a block within the current function. And as a QOI issue,
+ // if someone is using a blockaddress without an indirectbr, and that
+ // reference somehow ends up in another function or global, we probably
+ // don't want to inline this function.
if (isa<IndirectBrInst>(BB->getTerminator()))
containsIndirectBr = true;
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
-// CountCodeReductionForConstant - Figure out an approximation for how many
-// instructions will be constant folded if the specified value is constant.
-//
-unsigned CodeMetrics::CountCodeReductionForConstant(Value *V) {
+unsigned InlineCostAnalyzer::FunctionInfo::countCodeReductionForConstant(
+ const CodeMetrics &Metrics, Value *V) {
unsigned Reduction = 0;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
User *U = *UI;
const unsigned NumSucc = TI.getNumSuccessors();
unsigned Instrs = 0;
for (unsigned I = 0; I != NumSucc; ++I)
- Instrs += NumBBInsts[TI.getSuccessor(I)];
+ Instrs += Metrics.NumBBInsts.lookup(TI.getSuccessor(I));
// We don't know which blocks will be eliminated, so use the average size.
Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
} else {
// And any other instructions that use it which become constants
// themselves.
- Reduction += CountCodeReductionForConstant(&Inst);
+ Reduction += countCodeReductionForConstant(Metrics, &Inst);
}
}
}
return Reduction;
}
-// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
-// the function will be if it is inlined into a context where an argument
-// becomes an alloca.
-//
-unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
- if (!V->getType()->isPointerTy()) return 0; // Not a pointer
+static unsigned countCodeReductionForAllocaICmp(const CodeMetrics &Metrics,
+ ICmpInst *ICI) {
unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *I = cast<Instruction>(*UI);
- if (isa<LoadInst>(I) || isa<StoreInst>(I))
- Reduction += InlineConstants::InstrCost;
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
- // If the GEP has variable indices, we won't be able to do much with it.
- if (GEP->hasAllConstantIndices())
- Reduction += CountCodeReductionForAlloca(GEP);
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
- // Track pointer through bitcasts.
- Reduction += CountCodeReductionForAlloca(BCI);
- } else {
- // If there is some other strange instruction, we're not going to be able
- // to do much if we inline this.
- return 0;
+
+ // Bail if this is comparing against a non-constant; there is nothing we can
+ // do there.
+ if (!isa<Constant>(ICI->getOperand(1)))
+ return Reduction;
+
+ // An icmp pred (alloca, C) becomes true if the predicate is true when
+ // equal and false otherwise.
+ bool Result = ICI->isTrueWhenEqual();
+
+ SmallVector<Instruction *, 4> Worklist;
+ Worklist.push_back(ICI);
+ do {
+ Instruction *U = Worklist.pop_back_val();
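+ // Every instruction on the worklist folds to a known constant once the
+ // icmp result is known, so count it as eliminated.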
+ Reduction += InlineConstants::InstrCost;
+ for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
+ UI != UE; ++UI) {
+ Instruction *I = dyn_cast<Instruction>(*UI);
+ if (!I || I->mayHaveSideEffects()) continue;
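+ // A side-effect-free user with a single operand computes a constant
+ // too, so it can be queued for elimination as well.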
+ if (I->getNumOperands() == 1)
+ Worklist.push_back(I);
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ // If BO produces the same value as U, then the other operand is
+ // irrelevant and we can put it into the Worklist to continue
+ // deleting dead instructions. If BO produces the same value as the
+ // other operand, we can delete BO but that's it.
+ if (Result) {
+ if (BO->getOpcode() == Instruction::Or)
+ Worklist.push_back(I);
+ if (BO->getOpcode() == Instruction::And)
+ Reduction += InlineConstants::InstrCost;
+ } else {
+ if (BO->getOpcode() == Instruction::Or ||
+ BO->getOpcode() == Instruction::Xor)
+ Reduction += InlineConstants::InstrCost;
+ if (BO->getOpcode() == Instruction::And)
+ Worklist.push_back(I);
+ }
+ }
+ if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
+ BasicBlock *BB = BI->getSuccessor(Result ? 0 : 1);
+ if (BB->getSinglePredecessor())
+ Reduction
+ += InlineConstants::InstrCost * Metrics.NumBBInsts.lookup(BB);
+ }
}
- }
+ } while (!Worklist.empty());
return Reduction;
}
+/// \brief Compute the reduction possible for a given instruction if we are able
+/// to SROA an alloca.
+///
+/// The reduction for this instruction is added to the SROAReduction output
+/// parameter. Returns false if this instruction is expected to defeat SROA in
+/// general.
+static bool countCodeReductionForSROAInst(Instruction *I,
+ SmallVectorImpl<Value *> &Worklist,
+ unsigned &SROAReduction) {
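+ // Volatile and atomic accesses pin the alloca in memory, so only simple
+ // loads and stores below are counted as SROA-able.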
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (!LI->isSimple())
+ return false;
+ SROAReduction += InlineConstants::InstrCost;
+ return true;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ if (!SI->isSimple())
+ return false;
+ SROAReduction += InlineConstants::InstrCost;
+ return true;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+ // If the GEP has variable indices, we won't be able to do much with it.
+ if (!GEP->hasAllConstantIndices())
+ return false;
+ // An all-zero GEP folds away entirely, while a non-zero GEP will likely
+ // become a mask operation after SROA, so only the former is counted as a
+ // reduction.
+ if (GEP->hasAllZeroIndices())
+ SROAReduction += InlineConstants::InstrCost;
+ Worklist.push_back(GEP);
+ return true;
+ }
+
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
+ // Track pointer through bitcasts.
+ Worklist.push_back(BCI);
+ SROAReduction += InlineConstants::InstrCost;
+ return true;
+ }
+
+ // We just look for non-constant operands to ICmp instructions as those will
+ // defeat SROA. The actual reduction for these happens even without SROA.
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
+ return isa<Constant>(ICI->getOperand(1));
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
+ // SROA can handle a select of allocas iff all uses of the select are
+ // loads, and dereferenceable. We assume it's dereferenceable since
+ // we're told the input is an alloca.
+ for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || !LI->isSimple())
+ return false;
+ }
+ // We don't know whether we'll be deleting the rest of the chain of
+ // instructions from the SelectInst on, because we don't know whether
+ // the other side of the select is also an alloca or not.
+ return true;
+ }
+
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ // SROA can usually chew through these intrinsics.
+ SROAReduction += InlineConstants::InstrCost;
+ return true;
+ }
+ }
+
+ // If there is some other strange instruction, we're not going to be
+ // able to do much if we inline this.
+ return false;
+}
+
+unsigned InlineCostAnalyzer::FunctionInfo::countCodeReductionForAlloca(
+ const CodeMetrics &Metrics, Value *V) {
+ if (!V->getType()->isPointerTy()) return 0; // Not a pointer
+ unsigned Reduction = 0;
+ unsigned SROAReduction = 0;
+ bool CanSROAAlloca = true;
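+ // SROA savings only materialize if no use in the chain defeats SROA, so
+ // they are accumulated separately and added at the end.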
+
+ SmallVector<Value *, 4> Worklist;
+ Worklist.push_back(V);
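+ // Walk all users of the pointer, following it through the GEPs and
+ // bitcasts that countCodeReductionForSROAInst re-queues.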
+ do {
+ Value *V = Worklist.pop_back_val();
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI){
+ Instruction *I = cast<Instruction>(*UI);
+
+ if (ICmpInst *ICI = dyn_cast<ICmpInst>(I))
+ Reduction += countCodeReductionForAllocaICmp(Metrics, ICI);
+
+ if (CanSROAAlloca)
+ CanSROAAlloca = countCodeReductionForSROAInst(I, Worklist,
+ SROAReduction);
+ }
+ } while (!Worklist.empty());
+
+ return Reduction + (CanSROAAlloca ? SROAReduction : 0);
+}
+
/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
-void CodeMetrics::analyzeFunction(Function *F) {
+void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
+ // If this function contains a call that "returns twice" (e.g., setjmp or
+ // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
+ // This is a hack because we depend on the user marking their local variables
+ // as volatile if they are live across a setjmp call, and they probably
+ // won't do this in callers.
+ exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
+ !F->hasFnAttr(Attribute::ReturnsTwice);
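+ // (A function explicitly marked returns_twice already advertises this
+ // behavior at its call sites, so it isn't penalized here.)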
+
// Look at the size of the callee.
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- analyzeBasicBlock(&*BB);
+ analyzeBasicBlock(&*BB, TD);
}
/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
-void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
- Metrics.analyzeFunction(F);
+void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F,
+ const TargetData *TD) {
+ Metrics.analyzeFunction(F, TD);
// A function with exactly one return has it removed during the inlining
// process (see InlineFunction), so don't count it.
// code can be eliminated if one of the arguments is a constant.
ArgumentWeights.reserve(F->arg_size());
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
- ArgumentWeights.push_back(ArgInfo(Metrics.CountCodeReductionForConstant(I),
- Metrics.CountCodeReductionForAlloca(I)));
+ ArgumentWeights.push_back(ArgInfo(countCodeReductionForConstant(Metrics, I),
+ countCodeReductionForAlloca(Metrics, I)));
}
/// NeverInline - Returns true if the function should never be inlined into
/// any caller.
bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
- return (Metrics.callsSetJmp || Metrics.isRecursive ||
+ return (Metrics.exposesReturnsTwice || Metrics.isRecursive ||
Metrics.containsIndirectBr);
}
// getSpecializationBonus - The heuristic used to determine the per-call
{
if (Callee->mayBeOverridden())
return 0;
-
+
int Bonus = 0;
// If this function uses the coldcc calling convention, prefer not to
// specialize it.
if (Callee->getCallingConv() == CallingConv::Cold)
Bonus -= InlineConstants::ColdccPenalty;
-
+
// Get information about the callee.
FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
+
// If we haven't calculated this information yet, do so now.
if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee);
+ CalleeFI->analyzeFunction(Callee, TD);
unsigned ArgNo = 0;
unsigned i = 0;
Bonus += CountBonusForConstant(I);
}
- // Calls usually take a long time, so they make the specialization gain
+ // Calls usually take a long time, so they make the specialization gain
// smaller.
Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
// inlining because we decide we don't want to give a bonus for
// devirtualizing.
int InlineCostAnalyzer::ConstantFunctionBonus(CallSite CS, Constant *C) {
-
+
// This could just be NULL.
if (!C) return 0;
-
+
Function *F = dyn_cast<Function>(C);
if (!F) return 0;
-
+
int Bonus = InlineConstants::IndirectCallBonus + getInlineSize(CS, F);
return (Bonus > 0) ? 0 : Bonus;
}
Bonus += CountBonusForConstant(&Inst);
}
}
-
+
return Bonus;
}
int InlineCostAnalyzer::getInlineSize(CallSite CS, Function *Callee) {
// Get information about the callee.
FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
+
// If we haven't calculated this information yet, do so now.
if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee);
-
+ CalleeFI->analyzeFunction(Callee, TD);
+
// InlineCost - This value measures how good an inline candidate this call
// site is. A lower inline cost makes it more likely for the call to
// be inlined. This value may go negative.
// weights calculated for the callee to determine how much will be folded
// away with this information.
else if (isa<Constant>(I))
- InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
+ InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
}
-
+
// Each argument passed in has a cost at both the caller and the callee
// sides. Measurements show that each argument costs about the same as an
// instruction.
InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
// Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
-
+ InlineCost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
+
return InlineCost;
}
int InlineCostAnalyzer::getInlineBonuses(CallSite CS, Function *Callee) {
// Get information about the callee.
FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
+
// If we haven't calculated this information yet, do so now.
if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee);
-
+ CalleeFI->analyzeFunction(Callee, TD);
+
bool isDirectCall = CS.getCalledFunction() == Callee;
Instruction *TheCall = CS.getInstruction();
int Bonus = 0;
-
+
// If there is only one call of the function, and it has internal linkage,
// make it almost guaranteed to be inlined.
//
if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
Bonus += InlineConstants::LastCallToStaticBonus;
-
+
// If the instruction after the call, or if the normal destination of the
// invoke is an unreachable instruction, the function is noreturn. As such,
// there is little point in inlining this.
Bonus += InlineConstants::NoreturnPenalty;
} else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
Bonus += InlineConstants::NoreturnPenalty;
-
+
// If this function uses the coldcc calling convention, prefer not to inline
// it.
if (Callee->getCallingConv() == CallingConv::Cold)
Bonus += InlineConstants::ColdccPenalty;
-
+
// Add to the inline quality for properties that make the call valuable to
// inline. This includes factors that indicate that the result of inlining
// the function will be optimizable. Currently this just looks at arguments
// Compute any constant bonus due to inlining we want to give here.
if (isa<Constant>(I))
Bonus += CountBonusForConstant(FI, cast<Constant>(I));
-
+
return Bonus;
}
// Get information about the callee.
FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
+
// If we haven't calculated this information yet, do so now.
if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee);
+ CalleeFI->analyzeFunction(Callee, TD);
// If we should never inline this, return a huge cost.
if (CalleeFI->NeverInline())
// requires handling setjmp somewhere else, however.
if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
return InlineCost::getAlways();
-
+
if (CalleeFI->Metrics.usesDynamicAlloca) {
- // Get infomation about the caller.
+ // Get information about the caller.
FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
// If we haven't calculated this information yet, do so now.
if (CallerFI.Metrics.NumBlocks == 0) {
- CallerFI.analyzeFunction(Caller);
-
+ CallerFI.analyzeFunction(Caller, TD);
+
// Recompute the CalleeFI pointer, getting Caller could have invalidated
// it.
CalleeFI = &CachedFunctionInfo[Callee];
// something else.
if (Callee->mayBeOverridden())
return llvm::InlineCost::getNever();
-
+
// Get information about the callee.
FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
-
+
// If we haven't calculated this information yet, do so now.
if (CalleeFI->Metrics.NumBlocks == 0)
- CalleeFI->analyzeFunction(Callee);
+ CalleeFI->analyzeFunction(Callee, TD);
int Cost = 0;
-
- // Look at the orginal size of the callee. Each instruction counts as 5.
+
+ // Look at the original size of the callee. Each instruction counts as 5.
Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
// Offset that with the amount of code that can be constant-folded
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
Function *Callee = CS.getCalledFunction();
-
+
// Get information about the callee.
FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
-
+
// If we haven't calculated this information yet, do so now.
if (CalleeFI.Metrics.NumBlocks == 0)
- CalleeFI.analyzeFunction(Callee);
+ CalleeFI.analyzeFunction(Callee, TD);
float Factor = 1.0f;
// Single BB functions are often written to be inlined.
CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;
// For small functions we prefer to recalculate the cost for better accuracy.
- if (CallerMetrics.NumBlocks < 10 || CallerMetrics.NumInsts < 1000) {
+ if (CallerMetrics.NumBlocks < 10 && CallerMetrics.NumInsts < 1000) {
resetCachedCostInfo(Caller);
return;
}
--CallerMetrics.NumCalls;
if (Callee == 0) return;
-
+
CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;
// If we don't have metrics for the callee, don't recalculate them just to
resetCachedCostInfo(Caller);
return;
}
-
+
// Since CalleeMetrics were already calculated, we know that the CallerMetrics
// reference isn't invalidated: both were in the DenseMap.
CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;
// FIXME: If any of these three are true for the callee, the callee was
// not inlined into the caller, so I think they're redundant here.
- CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
+ CallerMetrics.exposesReturnsTwice |= CalleeMetrics.exposesReturnsTwice;
CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;
CallerMetrics.NumInsts -= Callee->arg_size();
else
CallerMetrics.NumInsts = 0;
-
+
// We are not updating the argument weights. We have already determined that
// Caller is a fairly large function, so we accept the loss of precision.
}