//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/CallingConv.h"
#include "llvm/DataLayout.h"
#include "llvm/GlobalAlias.h"
#include "llvm/InstVisitor.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const TD;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
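  // For example, an argument passed as '&buffer[4]' maps to the pair
  // (buffer, 4), which later lets us fold comparisons and subtractions of
  // pointers derived from the same base.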
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
    : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
      IsCallerRecursive(false), IsRecursiveCall(false),
      ExposesReturnsTwice(false), HasDynamicAlloca(false),
      ContainsNoDuplicateCall(false),
      AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
      FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
      NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
      NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
      NumInstructionsSimplified(0), SROACostSavings(0),
      SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // end anonymous namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;
  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
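///
/// For example, a struct index of 1 into '{ i32, i32 }' adds that field's
/// offset from the struct layout (4 on typical targets), while an array-style
/// index adds index * sizeof(element).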
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
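    // Without DataLayout we cannot query the target's alloc size, so we fall
    // back to the type's primitive size in bits as a crude approximation
    // (this is 0 for aggregate types, making the estimate best-effort only).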
    AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
                           Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // as a result of inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.
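  // For example, once dead edges are pruned a phi may be left with a single
  // live incoming value; it is then effectively a copy and could inherit that
  // value's simplified constant or base-and-offset mapping.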

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, as we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
  if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               Ops, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases; fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
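  // For example, if both operands map to the same base in ConstantOffsetPtrs,
  // say base+8 and base+4, the subtract folds to the constant 4.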
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases; fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.getFnAttributes().hasAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
    ContainsNoDuplicateCall = true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
    switch (II->getIntrinsicID()) {
    default:
      return Base::visitCallSite(CS);

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    }
  }

  if (Function *F = CS.getCalledFunction()) {
    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (!callIsSmall(CS)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
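  // For example, a call through a function pointer argument that our
  // simplifications resolved to a concrete callee is analyzed here as if it
  // were a direct call.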
  CallAnalyzer CA(TD, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (isInstructionFree(&I, TD))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}

/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

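    // Grant the larger bonus when more than half of the instructions seen so
    // far are vector operations, and the smaller one when more than a tenth
    // are.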
    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
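///
/// For example, given a chain of inbounds GEPs and bitcasts rooted at a
/// caller alloca, V is rewritten to that base pointer and the returned
/// constant holds the sum of the GEP offsets along the chain.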
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (TD && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
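      // For example, a 64-byte (512-bit) byval struct on a target with 64-bit
      // pointers is modeled as 8 stores, doubled below to account for the
      // matching loads.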
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = TD->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this we use a small-size optimized SetVector, prioritizing
  // small iterations because we exit early once we cross our threshold.
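  // The SetVector gives us FIFO visitation order plus cheap deduplication of
  // already-enqueued blocks.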
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size; this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddresses (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function, which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
#undef DEBUG_PRINT_STAT
}
#endif

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->getFnAttributes().hasAttribute(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->getFnAttributes().hasAttribute(Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TD, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalyzer::isInlineViable(Function &F) {
  bool ReturnsTwice = F.getFnAttributes().hasAttribute(Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}